gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#!/usr/bin/env python
"""Pathspecs are methods of specifying the path on the client.
The GRR client has a number of drivers to virtualize access to different objects
to create a Virtual File System (VFS) abstraction. These are called 'VFS
Handlers' and they provide typical file-like operations (e.g. read, seek, tell
and stat). It is possible to recursively apply different drivers in the correct
order to arrive at a certain file like object. In order to specify how drivers
should be applied we use 'Path Specifications' or pathspec.
Each VFS handler is constructed from a previous handler and a pathspec. The
pathspec is just a collection of arguments which make sense to the specific VFS
handler. The type of the handler is carried by the pathtype parameter.
On the server the PathSpec is represented as a PathSpec object, and stored
as an attribute of the AFF4 object. This module defines this abstraction.
"""
import itertools
import posixpath
import re
from grr.lib import artifact_utils
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import standard as rdf_standard
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
# Matches client-attribute interpolation markers, e.g. %%users.homedir%%.
# The group captures the attribute name between the %% delimiters.
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
# Grouping pattern: e.g. {test.exe,foo.doc,bar.txt}
# Requires at least one comma inside the braces so a lone {x} is not a group.
GROUPING_PATTERN = re.compile("{([^}]+,[^}]+)}")
class PathSpec(rdf_structs.RDFProtoStruct):
  """A path specification.

  The pathspec protobuf is a recursive protobuf which contains components. This
  class makes it easier to manipulate these structures by providing useful
  helpers.
  """
  protobuf = jobs_pb2.PathSpec

  def CopyConstructor(self, other):
    """Initialize this pathspec as a deep copy of another pathspec."""
    # pylint: disable=protected-access
    self.SetRawData(other._CopyRawData())
    # pylint: enable=protected-access
    self.age = other.age

  def __len__(self):
    """Return the total number of path components."""
    # Walk the nested chain; i ends at the last index seen (-1 when empty).
    i = -1
    for i, _ in enumerate(self):
      pass
    return i + 1

  def __getitem__(self, item):
    """Return the item'th component by walking the nested chain.

    Raises:
      IndexError: If item is beyond the end of the chain.
    """
    for i, element in enumerate(self):
      if i == item:
        return element
    raise IndexError("Pathspec index (%s) out of range" % item)

  def __iter__(self):
    """Only iterate over all components from the current pointer."""
    element = self
    # A component without a pathtype terminates the chain.
    while element.HasField("pathtype"):
      yield element
      if element.HasField("nested_path"):
        element = element.nested_path
      else:
        break

  def Insert(self, index, rdfpathspec=None, **kwarg):
    """Insert a single component at index."""
    if rdfpathspec is None:
      rdfpathspec = self.__class__(**kwarg)
    if index == 0:
      # Copy ourselves to a temp copy.
      nested_proto = self.__class__()
      nested_proto.SetRawData(self.GetRawData())
      # Replace ourselves with the new object.
      self.SetRawData(rdfpathspec.GetRawData())
      # Append the temp copy to the end.
      self.last.nested_path = nested_proto
    else:
      # Splice the new pathspec between index-1 and its old successor.
      previous = self[index - 1]
      rdfpathspec.last.nested_path = previous.nested_path
      previous.nested_path = rdfpathspec

  def Append(self, component=None, **kwarg):
    """Append a new pathspec component to this pathspec."""
    if component is None:
      component = self.__class__(**kwarg)
    if self.HasField("pathtype"):
      self.last.nested_path = component
    else:
      # We are empty: become the new component instead of nesting it.
      for k, v in kwarg.items():
        setattr(self, k, v)
      self.SetRawData(component.GetRawData())
    return self

  def CollapsePath(self):
    """Join all component paths into a single path string."""
    return utils.JoinPath(*[x.path for x in self])

  def Pop(self, index=0):
    """Removes and returns the pathspec at the specified index."""
    if index < 0:
      index += len(self)
    if index == 0:
      # Popping the head: the copy is the result and we become our own tail.
      result = self.__class__()
      result.SetRawData(self.GetRawData())
      self.SetRawData(self.nested_path.GetRawData())
    else:
      # Get the raw protobufs for the previous member.
      previous = self[index - 1]
      result = previous.nested_path
      # Manipulate the previous members protobuf to patch the next component in.
      previous.nested_path = result.nested_path
    # Detach the popped component from the rest of the chain.
    result.nested_path = None
    return result

  @property
  def first(self):
    # The first component is always this object itself.
    return self

  @property
  def last(self):
    """Return the deepest nested component (self when empty or UNSET)."""
    if self.HasField("pathtype") and self.pathtype != self.PathType.UNSET:
      return list(self)[-1]
    return self

  def Dirname(self):
    """Get a new copied object with only the directory path."""
    result = self.Copy()
    # Pop trailing components whose dirname collapses to "/" so the
    # directory lands on the correct (possibly outer) component.
    while 1:
      last_directory = posixpath.dirname(result.last.path)
      if last_directory != "/" or len(result) <= 1:
        result.last.path = last_directory
        # Make sure to clear the inode information.
        result.last.inode = None
        break
      result.Pop(-1)
    return result

  def Basename(self):
    """Return the basename of the deepest component with a non-empty one."""
    for component in reversed(self):
      basename = posixpath.basename(component.path)
      if basename:
        return basename
    return ""

  def Validate(self):
    """Raise ValueError unless a usable pathtype is set."""
    if not self.HasField("pathtype") or self.pathtype == self.PathType.UNSET:
      raise ValueError("No path type set in PathSpec.")

  # Maps PathSpec.PathType enum values to AFF4 namespace prefixes.
  AFF4_PREFIXES = {
      0: "/fs/os",  # PathSpec.PathType.OS
      1: "/fs/tsk",  # PathSpec.PathType.TSK
      2: "/registry",  # PathSpec.PathType.REGISTRY
      3: "/devices/memory",  # PathSpec.PathType.MEMORY
      4: "/temp",  # PathSpec.PathType.TMPFILE
  }

  def AFF4Path(self, client_urn):
    """Returns the AFF4 URN this pathspec will be stored under.

    Args:
      client_urn: A ClientURN.

    Returns:
      A urn that corresponds to this pathspec.

    Raises:
      ValueError: If pathspec is not of the correct type.
    """
    # If the first level is OS and the second level is TSK its probably a mount
    # point resolution. We map it into the tsk branch. For example if we get:
    # path: \\\\.\\Volume{1234}\\
    # pathtype: OS
    # mount_point: /c:/
    # nested_path {
    #   path: /windows/
    #   pathtype: TSK
    # }
    # We map this to aff4://client_id/fs/tsk/\\\\.\\Volume{1234}\\/windows/
    if not self.HasField("pathtype"):
      raise ValueError("Can't determine AFF4 path without a valid pathtype.")
    first_component = self[0]
    dev = first_component.path
    if first_component.HasField("offset"):
      # We divide here just to get prettier numbers in the GUI
      # NOTE(review): "/ 512" looks like Python 2 integer division — under
      # Python 3 this yields a float; confirm intended semantics.
      dev += ":" + str(first_component.offset / 512)
    if (len(self) > 1 and first_component.pathtype == PathSpec.PathType.OS and
        self[1].pathtype == PathSpec.PathType.TSK):
      result = [self.AFF4_PREFIXES[PathSpec.PathType.TSK], dev]
      # Skip the top level pathspec.
      start = 1
    else:
      # For now just map the top level prefix based on the first pathtype
      result = [self.AFF4_PREFIXES[first_component.pathtype]]
      start = 0
    # self[start] iterates from that component to the end of the chain.
    for p in self[start]:
      component = p.path
      # The following encode different pathspec properties into the AFF4 path in
      # such a way that unique files on the client are mapped to unique URNs in
      # the AFF4 space. Note that this transformation does not need to be
      # reversible since we always use the PathSpec when accessing files on the
      # client.
      if p.HasField("offset"):
        component += ":" + str(p.offset / 512)
      # Support ADS names.
      if p.HasField("stream_name"):
        component += ":" + p.stream_name
      result.append(component)
    return client_urn.Add("/".join(result))
class GlobExpression(rdfvalue.RDFString):
  """A glob expression for a client path.

  A glob expression represents a set of regular expressions which match files on
  the client. The Glob expression supports the following expansions:

  1) Client attribute expansions are surrounded with %% characters. They will be
     expanded from the client AFF4 object.
  2) Groupings are collections of alternates. e.g. {foo.exe,bar.sys}
  3) Wild cards like * and ?
  """
  context_help_url = "user_manual.html#_path_globbing"

  RECURSION_REGEX = re.compile(r"\*\*(\d*)")

  def Validate(self):
    """GlobExpression is valid."""
    recursions = self.RECURSION_REGEX.findall(self._value)
    if len(recursions) > 1:
      raise ValueError("Only one ** is permitted per path: %s." % self._value)

  def Interpolate(self, client=None):
    """Yield expansions of knowledge-base attributes and glob groupings."""
    knowledge_base = client.Get(client.Schema.KNOWLEDGE_BASE)
    interpolated = artifact_utils.InterpolateKbAttributes(
        self._value, knowledge_base)
    for raw_pattern in interpolated:
      # Normalize the component path (this allows us to resolve ../
      # sequences).
      normalized = utils.NormalizePath(raw_pattern.replace("\\", "/"))
      for expanded in self.InterpolateGrouping(normalized):
        yield expanded

  def InterpolateGrouping(self, pattern):
    """Interpolate inline globbing groups."""
    pieces = []
    cursor = 0
    for match in GROUPING_PATTERN.finditer(pattern):
      # Literal text before the group, then the set of alternatives.
      pieces.append([pattern[cursor:match.start()]])
      pieces.append(set(match.group(1).split(",")))
      cursor = match.end()
    pieces.append([pattern[cursor:]])
    # The cartesian product over the pieces yields every expanded string.
    for combination in itertools.product(*pieces):
      yield u"".join(combination)

  def _ReplaceRegExGrouping(self, grouping):
    # Turn {a,b,c} into a regex alternation with each choice escaped.
    choices = grouping.group(1).split(",")
    return "(" + "|".join(re.escape(choice) for choice in choices) + ")"

  def _ReplaceRegExPart(self, part):
    # Translate one split token of the glob into its regex equivalent.
    if part == "**/":
      return "(?:.*\\/)?"
    if part == "*":
      return "[^\\/]*"
    if part == "?":
      return "[^\\/]"
    if GROUPING_PATTERN.match(part):
      return GROUPING_PATTERN.sub(self._ReplaceRegExGrouping, part)
    return re.escape(part)

  REGEX_SPLIT_PATTERN = re.compile("(" + "|".join(
      ["{[^}]+,[^}]+}", "\\?", "\\*\\*\\/?", "\\*"]) + ")")

  def AsRegEx(self):
    """Return the current glob as a simple regex.

    Note: No interpolation is performed.

    Returns:
      A RegularExpression() object.
    """
    tokens = self.__class__.REGEX_SPLIT_PATTERN.split(self._value)
    translated = "".join(self._ReplaceRegExPart(token) for token in tokens)
    # Case-insensitive, anchored at both ends.
    return rdf_standard.RegularExpression("(?i)\\A%s\\Z" % translated)
|
|
# -*- coding: utf-8 -*-
"""
pidSim2++.py
A simulation of a vision control to steering PID loop accounting for communication and
processing latency and variation; demonstrates the impact of variation
to successful control.
THIS VERSION models the control as a 1st order input (velocity)
and then integrates once to get position. In other words, the control
variable (CV) has indirect control over the process variable (PV); for example
this is the case when a motor controller is in the loop and effectively makes
this loop a cascaded PID
This allows students to experiment with how different elements in the scaling
of a control loop affect performance, thus focusing efforts on successful
design.
The model consists of a PID processing software with an asynchronous alignment
with a camera frame which is also asynchronous to image processing software.
Communication latency and jitter are planned as well as image processing impacts.
A plot at the end shows a sample over some specified duration.
The initial conditions of the file represent a case that won't work well until
it is corrected by improvements in the constants and image processing rates
Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose others deem worthy as long as RocketRedNeck is given credit where
credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
****************************************************************************************************
"""
import matplotlib.pyplot as plot
import numpy as np
# ---------------------------------------------------------------------------
# Simulation time base
# ---------------------------------------------------------------------------
tmax_sec = 5.0   # Total simulated duration
dt_sec = 0.001   # Simulation time step
ts_sec = np.arange(0.0, tmax_sec, 0.001)
nmax = len(ts_sec)  # Number of simulation steps (== round(tmax_sec/dt_sec))
ns = range(0, nmax)

# PID gains and plant constants
kp = 4.0  # Proportional gain
ki = 0.0  # Integral gain
kd = 1.5  # Derivative gain
kg = 1.0  # Plant (Process) gain
tau_sec = 0.5  # This is the motor plus inertia time constant to reach velocity

sp = np.zeros(nmax)  # Setpoint; will initialize after first image processed
spStart = False
spPeriod = 1.0 / 2.0

# PID state histories
err = np.zeros(nmax)
intErr = np.zeros(nmax)
derrdt = np.zeros(nmax)
lastErr = 0.0  # (unused in current loop; kept for compatibility)

G = np.zeros(nmax)  # Process output to be measured
exp = np.exp(-dt_sec / tau_sec)  # Discrete decay factor of the first-order plant

# Define arrays to hold the kinematic values
# In this case we will use simple names to represent either linear or rotation
v = np.zeros(nmax)  # linear or angular velocity
p = np.zeros(nmax)  # linear or angular position

# Model of the pid task via a java util.timer
# We add a random normal variation for task wakeup since the util.timer
# can only assure that the task wakes up no earlier than scheduled.
# Empirical measurement of the task latency is required for accurate
# modeling, but for now we can just assume about a 10% average
pidPeriod_sec = 0.05
pidPeriod_index = round(pidPeriod_sec / dt_sec)
pidTimer_index = 0
pidStart_index = 0  # "time" that PID computation started
pidDuration_sec = 0.001  # Time to complete PID calculation (models software latency)
pidDuration_index = round(pidDuration_sec / dt_sec)
pidEnd_index = pidStart_index + pidDuration_index  # "time" that PID computation ended
pidMinJitter_sec = 0.000  # Minimum Random task jitter
pidMinJitter_index = round(pidMinJitter_sec / dt_sec)
pidMaxJitter_sec = 0.005  # Maximum Random task jitter
pidMaxJitter_index = round(pidMaxJitter_sec / dt_sec)
pidMeanJitter_index = round((pidMaxJitter_index + pidMinJitter_index) / 2)
# Min/max treated as a 3-sigma spread.
pidStdDevJitter_index = round((pidMaxJitter_index - pidMinJitter_index) / 3)
cvPid = np.zeros(nmax)  # Initial value of cv coming from PID calculation

# The first communication link is assumed to be a CAN bus
# The bus overhead is assumed to be a total fixed time
# not exceeding about 1 ms for up to four (4) messages going to four (4)
# separate motors (including any increases for bit stuffing); in other words
# we assume something like 100 bits per message all mastered from the same
# location on a 1 Mbps bus.
# We can inject an estimate of communication jitter as a whole using a
# simple normal distribution
comm0Start_index = 0  # "time" that first communication bus starts
comm0Delay_sec = 0.001  # Time to complete communication (MUST BE LESS THAN PID PERIOD)
comm0Delay_index = round(comm0Delay_sec / dt_sec)
comm0End_index = comm0Start_index + comm0Delay_index
comm0MinJitter_sec = 0.000
comm0MinJitter_index = round(comm0MinJitter_sec / dt_sec)
comm0MaxJitter_sec = 0.005
comm0MaxJitter_index = round(comm0MaxJitter_sec / dt_sec)
comm0MeanJitter_index = round((comm0MaxJitter_index + comm0MinJitter_index) / 2)
comm0StdDevJitter_index = round((comm0MaxJitter_index - comm0MinJitter_index) / 3)
cvComm0 = np.zeros(nmax)  # cv value delayed for first communication bus

# Camera framing model
camOffset_sec = 0.0  # Offset to represent asynchronous camera start
camOffset_index = round(camOffset_sec / dt_sec)
camStart_index = camOffset_index  # "time" that camera runs
camRate_Hz = 80  # Camera frame rate
camPeriod_sec = 1.0 / camRate_Hz
camPeriod_index = round(camPeriod_sec / dt_sec)
camEnd_index = camStart_index + camPeriod_index
camImage_index = round((camStart_index + camEnd_index) / 2)  # Time associated with center of image
pvCam = np.zeros(nmax)  # process variable delayed for camera framing

# The second communication bus is polled by the imaging software
# The time that the imaging software starts is asynchronous to the
# other system components, and it will not execute again until the
# image processing completes (which itself has some variation)
comm1Start_index = 0  # "time" that second communication bus starts
comm1Delay_sec = 0.020  # Time to complete communication
comm1Delay_index = round(comm1Delay_sec / dt_sec)
comm1End_index = comm1Start_index + comm1Delay_index
comm1MinJitter_sec = 0.000
comm1MinJitter_index = round(comm1MinJitter_sec / dt_sec)
comm1MaxJitter_sec = 0.002
comm1MaxJitter_index = round(comm1MaxJitter_sec / dt_sec)
comm1MeanJitter_index = round((comm1MaxJitter_index + comm1MinJitter_index) / 2)
comm1StdDevJitter_index = round((comm1MaxJitter_index - comm1MinJitter_index) / 3)
pvComm1 = np.zeros(nmax)  # pv value delayed for second communication bus
# NaN-filled so only assigned samples appear when plotted with markers.
# (np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0.)
pvComm1StartTags = np.nan * np.zeros(nmax)

# Image processing consists of a bounded, but variable process
# The content of the image and the operating environment will cause the
# associated software to vary; we will use empirical estimates for a current
# approach and will assume the variation has a normal distribution with a
# 3-sigma distribution between the upper and lower limits
pvImageStart_index = 0
pvImageMaxRate_Hz = 80.0
pvImageMinRate_Hz = 80.0
pvImageRateSigma = 3
pvImageMaxDuration_sec = 1.0 / pvImageMinRate_Hz
pvImageMinDuration_sec = 1.0 / pvImageMaxRate_Hz
pvImageMaxDuration_index = round(pvImageMaxDuration_sec / dt_sec)
pvImageMinDuration_index = round(pvImageMinDuration_sec / dt_sec)
pvImageMeanDuration_index = round((pvImageMinDuration_index + pvImageMaxDuration_index) / 2)
pvImageStdDevDuration_index = round((pvImageMaxDuration_index - pvImageMinDuration_index) / pvImageRateSigma)
pvImageEnd_index = pvImageStart_index + 2 * pvImageMaxDuration_index
pvImage = np.zeros(nmax)
dPvImage = 0  # average rate of change of pvImage to use in simple estimator

# Final communication link between image processing and the PID
comm2Start_index = 2 * pvImageMaxDuration_index  # "time" that third communication bus starts (always after image processing)
comm2Delay_sec = 0.020  # Time to complete communication
comm2Delay_index = round(comm2Delay_sec / dt_sec)
# BUG FIX: the original initialized this with comm1Delay_index (copy-paste);
# it happened to be harmless only because both delays are currently 0.020 s.
comm2End_index = comm2Start_index + comm2Delay_index
comm2Jitter_sec = 0.0  # Later we will add a "random" jitter that delays communication
comm2Jitter_index = round(comm2Jitter_sec / dt_sec)
pvComm2 = np.zeros(nmax)  # pv value delayed for third communication bus
pvFinal = np.zeros(nmax)
# Main simulation loop: advances PID task, CAN bus, plant, camera,
# image processing, and network-table communication one dt per iteration.
for n in ns:
    # Only run the PID calculation on a period boundary
    # i.e., this branch represents the task scheduled on a boundary
    # When jitter is enabled we will occasionally add a delay
    # representing a late task start (independent of measurement jitter)
    # We assume here that the task is delayed and not immediately preempted
    # and thus able to make full use of its time slice
    if (pidStdDevJitter_index == 0):
        pidJitter_index = 0
    else:
        pidJitter_index = round(np.random.normal(pidMeanJitter_index, pidStdDevJitter_index))
        # Task can never wake earlier than scheduled; clamp negative jitter.
        if (pidJitter_index < 0):
            pidJitter_index = 0
    if (n == pidTimer_index):
        # Schedule the (possibly jittered) actual start of this PID cycle.
        lastPidStart_index = pidStart_index
        pidStart_index = pidTimer_index + pidJitter_index
        pidTimer_index += pidPeriod_index
    if (n == pidStart_index):
        deltaT = dt_sec * (pidStart_index - lastPidStart_index)  # compute realized period this cycle
        #print("@ " + str(n) + " pid start = (" + str(pidPeriod_index) + ", " + str(pidJitter_index) + ") + deltaT = " + str(deltaT))
        pidEnd_index = n + pidDuration_index
        # Once we get going, we can compute the error as the
        # difference of the setpoint and the latest output
        # of the process variable (delivered after all sensor and
        # communication delays)
        if (n > 0):
            err[n] = sp[n] - pvFinal[n-1]
            # Assume we currently have no way of directly measuring derr
            # so we use the err measurement to estimate the error rate
            # In this sense, the error rate is an average over the
            # previous interval of time since we last looked, thus the
            # error rate is in the past
            #
            # NOTE: Here we make an ASSUMPTION that the period is accurate
            # even though we are jittering actual task start. This will cause
            # rate of error to be, itself, in error; using this error rate with
            # the additional uncertainty makes use of derivative gain problematic
            # because of the apparent noise induced by the incorrect timing assumption
            derrdt[n] = (err[n] - err[n-1]) / pidPeriod_sec
            # Integrate the error (i.e., add it up)
            intErr[n] = intErr[n-1] + err[n]
            # Compute the control variable by summing the PID parts
            # When the pidEnd_index is reached, the output will be
            # forwarded to the communication sequence
            cvPid[n] = (kp * err[n]) + (ki * intErr[n]) + (kd * derrdt[n])
    elif (n > 0):  # Previous output is held until the next task wakeup time
        err[n] = err[n-1]
        derrdt[n] = derrdt[n-1]
        intErr[n] = intErr[n-1]
        cvPid[n] = cvPid[n-1]

    # Initiate communication delay
    if (n == pidEnd_index):
        #print("@ " + str(n) + " pid end = " + str(cvPid[n]))
        comm0Start_index = n
        if (comm0StdDevJitter_index == 0):
            comm0Jitter_index = 0
        else:
            comm0Jitter_index = round(np.random.normal(comm0MeanJitter_index, comm0StdDevJitter_index))
        comm0End_index = comm0Start_index + comm0Delay_index + comm0Jitter_index

    # When communication delay has been met, move the information along
    if (n == comm0End_index):
        cvComm0[comm0End_index] = cvPid[comm0Start_index]
        #print("@ " + str(n) + " comm0 end = " + str(cvComm0[comm0End_index]))
    elif (n > 0):  # Otherwise, just hold the previous command
        cvComm0[n] = cvComm0[n-1]

    # Currently just model the motor, gears, and kinematics as a simple
    # time constant without limits
    # The kinematics (physics) runs "continuously" so we update it
    # every time step
    # First, model a simple time constant representing controlled process
    # NOTE(review): at n == 0 this reads G[-1] (last element), which is 0.0
    # at start, so the wrap-around is benign here.
    G[n] = (kg * cvComm0[n] * (1.0 - exp)) + (G[n-1] * exp)

    # Torque applied to the robot mass induced motion
    # We don't yet care about the specific (how much mass nor whether the
    # motion is linear or rotational); in this case all we want to demonstrate
    # is the process effects of integrating from force to a position representing
    # the process variable being compared to the set point
    #
    # The form is F = m a or M = I alpha, but we will just use simple names
    # here
    v[n] = G[n]

    # Integrate to a position
    # Here will use a simple trapezoidal rule; we can upgrade this to Runge-Kutta
    # or other methods later but if our time step is small enough compared to
    # the rate of change, then trapezoidal is fine
    if (n > 0):
        p[n] = p[n-1] + v[n-1]*dt_sec + (v[n] - v[n-1])*dt_sec/2

    # Next is the sensor delay, communication, processing, and communication
    # on the return path
    # The process output will be sensed by a camera and delivered at the
    # camera frame rate; the frame interval is asynchronous to all other
    # processing periods.
    # We currently assume insignificant jitter in the camera rate
    # We also are neglecting any blur occuring due to motion
    #
    # However, we will pick a point midway in the frame to represent
    # the time of the relevant image data; depending on the simulation
    # time step and modeled frame rate for the camera can cause a jitter
    # of up to a time step
    if ((n % camPeriod_index) == camOffset_index):
        #print("@ " + str(n) + " camera start")
        camStart_index = n
        camEnd_index = camStart_index + camPeriod_index
        camImage_index = round((camStart_index + camEnd_index)/2)  # Midpoint in time

    # This is a point in time associated with the center pixel of
    # the image. For now we will just assume that the item we will measure in the
    # image is at the same point in time as the image center.
    # Reality is that the difference is small and only important for
    # very rapid target motion
    # While the center point of the image time is important for averaging
    # state on the image data, the frame will not be deliverable until the
    # entire frame is ready for the next communication boundary (when the frame
    # can be fetched)
    if (n == (camEnd_index-1)):
        pvCam[camStart_index:camEnd_index] = p[camImage_index]
        #print("@ " + str(n) + " camera = " + str(G[camImage_index]))

    # Image processing is assumed to operate as fast as it can
    # but will have asynchronous start and duration will vary based on
    # image content with a well defined lower and upper limit.
    #
    # The modeling is a small communication delay followed by a variable
    # image processing delay; we will model a small normal distribution in
    # time but will not model imaging errors
    if (n == comm1Start_index):
        #print("@ " + str(n) + " COMM1 start")
        if (comm1StdDevJitter_index == 0):
            comm1Jitter_index = 0
        else:
            comm1Jitter_index = round(np.random.normal(comm1MeanJitter_index, comm1StdDevJitter_index))
        comm1End_index = comm1Start_index + comm1Delay_index + comm1Jitter_index

    # Whichever image frame is available will now be forwarded
    # We back up one camera period from when communication starts because the
    # image information won't be available while a frame is being sampled
    # The information is placed in the outgoing comm1 buffer at the end of
    # communication, effectively delaying the image information and keeping
    # the boundaries asynchronous to the resolution of the time step.
    if (n == comm1End_index):
        if (comm1Start_index >= camPeriod_index):
            pvComm1StartTags[comm1Start_index] = pvCam[comm1Start_index - camPeriod_index]
            pvComm1[comm1End_index] = pvComm1StartTags[comm1Start_index]
        else:
            pvComm1StartTags[comm1Start_index] = pvCam[comm1Start_index]
            pvComm1[comm1End_index] = pvComm1StartTags[comm1Start_index]
        #print("@ " + str(n) + " COMM1 end = " + str(pvComm1[comm1End_index]))
        # Now that communication has completed, the image processing
        # can start; here we represent a variable processing latency
        # as a normal distribution between a min and max time assumed
        # to be 3-sigma limit
        # This is not a precise model of the statistical variation
        # of actual image processing, but rather just enough variation
        # to observe the impact to a control loop (if any)
        pvImageStart_index = comm1End_index
        if (pvImageStdDevDuration_index == 0):
            pvImageJitter_index = pvImageMeanDuration_index
        else:
            pvImageJitter_index = round(np.random.normal(pvImageMeanDuration_index, pvImageStdDevDuration_index))
        pvImageEnd_index = pvImageStart_index + pvImageJitter_index
    elif (n > 0):
        pvComm1[n] = pvComm1[n-1]

    # When image processing is complete, we can begin to send the result
    # to the final communication link and then restart the second comm link
    # to read the camera again
    if (n == pvImageEnd_index):
        pvImage[pvImageEnd_index] = pvComm1[comm1End_index]
        # Simple slope estimator used to extrapolate between image updates.
        dPvImage = (pvImage[pvImageEnd_index] - pvImage[comm1Start_index - 1]) / (pvImageEnd_index - comm1Start_index)
        #print("@ " + str(n) + " IMAGE PROCESSING end = " + str(pvImage[pvImageEnd_index]))
        comm2Start_index = pvImageEnd_index
    elif (n > 0):
        pvImage[n] = pvImage[n-1] + dPvImage

    if (n == comm2Start_index):
        comm2End_index = comm2Start_index + comm2Delay_index
        #print("@ " + str(n) + " COMM2 start --> end = " + str(comm2End_index))
    if (n == comm2End_index):
        pvComm2[comm2End_index] = pvImage[comm2Start_index]
        #print("@ " + str(n) + " COMM2 end = " + str(pvComm2[comm2End_index]))
        comm1Start_index = comm2End_index + 1  # Restart image processing immediately
        # Enforce causality
        spStart = True;
    elif (n > 0):
        # NOTE(review): this holds pvImage[n-1] rather than pvComm2[n-1];
        # presumably intentional (tracking the extrapolated image) — confirm.
        pvComm2[n] = pvImage[n-1]

    # Setpoint is a square wave derived from the sign of a sine; it only
    # starts once the first full sensor round-trip has completed.
    if (spStart == True):
        if ((n+1) < nmax):
            sp[n+1] = np.sin(ts_sec[n+1] * spPeriod)
            # NOTE(review): divides by |sin| to get +/-1; would divide by zero
            # if the sine were exactly 0 at a sampled time — confirm safe.
            sp[n+1] = sp[n+1]/np.abs(sp[n+1])

    pvFinal[n] = pvComm2[n]
# Plot all intermediate signals over the simulated duration.
plot.figure(1)
plot.cla()
plot.grid()
plot.plot(ts_sec,sp,label='sp')
plot.plot(ts_sec,err,label='err')
#plot.plot(ts_sec,cvPid,label='cvPid')
#plot.plot(ts_sec,cvComm0,'o',label='cvComm0')
plot.plot(ts_sec,v,label='v')
plot.plot(ts_sec,p,label='p')
# NOTE(review): trailing comma makes this statement a 1-tuple; harmless but
# presumably unintended.
plot.plot(ts_sec,pvCam,label='CameraFrame'),
plot.plot(ts_sec,pvComm1StartTags,'o',label='CamCommStart')
plot.plot(ts_sec,pvComm1,label='ImageProcessing')
plot.plot(ts_sec,pvImage,label='NetworkTableStart')
plot.plot(ts_sec,pvComm2,label='NetworkTableEnd')
#plot.plot(ts_sec,pvFinal,label='pvFinal')
#plot.legend(loc='best')
# Legend laid out in a band above the axes.
plot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
            ncol=2, mode="expand", borderaxespad=0.)
#plot.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
|
|
"""
A collection of NMR processing functions for filtering, smoothing, and
correcting spectral baselines.
"""
import numpy as np

import scipy
import scipy.ndimage
import scipy.signal
# Linear (First order) baseline correction
def base(data, nl, nw=0):
    """
    Linear (first-order) baseline correction based on a node list.

    Parameters:

    * data  Array of spectral data.
    * nl    List of baseline nodes.
    * nw    Node half-width in points.

    """
    if data.ndim == 1:
        return data - calc_bl_linear(data, nl, nw)
    # 2D array: correct each trace in place.
    for idx in range(data.shape[0]):
        data[idx] = data[idx] - calc_bl_linear(data[idx], nl, nw)
    return data
def calc_bl_linear(x, nl, nw=0):
    """
    Calculate a baseline using linear approximation between nodes.

    Parameters:

    * x   1D data
    * nl  List of baseline nodes
    * nw  Node half-width in points

    Returns an array the same shape as x containing the piecewise-linear
    baseline; points outside [nl[0], nl[-1]] are left at zero.
    """
    bl = np.zeros_like(x)
    for i in range(len(nl) - 1):
        # Segment endpoints (renamed from min/max to avoid shadowing builtins).
        lo = nl[i]
        hi = nl[i + 1]
        # Node values averaged over a +/- nw window around each node.
        # NOTE: for lo < nw the slice wraps via negative indexing, as in the
        # original implementation.
        s1 = x[lo - nw:lo + nw + 1].mean()
        s2 = x[hi - nw:hi + nw + 1].mean()
        # Straight line between the two node values.
        bl[lo:hi + 1] = np.linspace(s1, s2, hi - lo + 1)
    return bl
# Constant baseline correction
def cbf(data, last=10, apply=slice(None)):
    """
    Constant Baseline correction.

    Parameters:

    * data   Array of spectral data.
    * last   Percent of -1 axis used to calculate correction.
    * apply  Slice describing 0th-axis region(s) to apply correction to.
             Ignored in 1D data.

    """
    # Number of trailing points used for the correction.
    # BUG FIX: the original left n as a float, which is an invalid slice
    # index on Python 3 / modern NumPy.
    n = int(data.shape[-1] * last / 100. + 1.)
    corr = data[..., -n:].sum(axis=-1) / n
    # apply correction
    if data.ndim == 2:
        # Column vector of per-trace corrections, restricted to `apply` rows.
        data[apply] = data[apply] - np.array([corr]).transpose()[apply]
        return data
    else:
        return data - corr
def cbf_explicit(data, calc=slice(None), apply=slice(None)):
    """
    Constant Baseline correction using an explicit region.

    Parameters:

    * data   Array of spectral data.
    * calc   Slice describing region to use for calculating correction.
    * apply  Slice describing 0th-axis region(s) to apply correction to.
             Ignored in 1D data.

    """
    # Size of the calculation region along the last axis.
    region = range(data.shape[-1])[calc]
    npts = len(region)
    corr = data[..., calc].sum(axis=-1) / npts
    if data.ndim == 2:
        # Subtract each trace's correction from the selected rows only.
        data[apply] = data[apply] - np.array([corr]).transpose()[apply]
        return data
    return data - corr
# Median baseline correction
def med(data, mw=24, sf=16, sigma=5.0):
    """
    Median baseline correction.

    Algorithm described in:
    Friedrichs, M.S. JBNMR 1995 5 147-153.

    Parameters:

    * data   Array of spectral data.
    * mw     Median Window size in pts.
    * sf     Smooth window size in pts.
    * sigma  Standard-deviation of Gaussian in convolution

    """
    if data.ndim == 1:
        data = data - calc_bl_med(data, mw, sf, sigma)
    else:
        # 2D array: correct each trace in place.
        for i, vec in enumerate(data):
            data[i] = vec - calc_bl_med(vec, mw, sf, sigma)
    # BUG FIX: the original returned the undefined name `out` (NameError).
    return data
def calc_bl_med(x, mw, sf, sigma):
    """
    Calculate a baseline using median baseline correction.

    Algorithm described in:
    Friedrichs, M.S. JBNMR 1995 5 147-153

    Parameters:

    * x      1D data
    * mw     Median Window size in pts.
    * sf     Smooth window size in pts.
    * sigma  Standard-deviation of Gaussian in convolution.

    """
    # create extrema array (non extrema values are masked out)
    mask = x == scipy.ndimage.median_filter(x, size=3)
    mask[0] = False  # first pt always extrema
    mask[-1] = False  # last pt always extrema
    e = np.ma.masked_array(x, mask)
    # fill in the median vector
    # using the median_filter might give slightly different results than
    # described algorithm but is MUCH faster
    m = scipy.ndimage.median_filter(e, mw + 1, mode="mirror")
    # convolve with a gaussian
    # (scipy.signal.windows.gaussian: the scipy.signal.gaussian alias was
    # removed in SciPy 1.13)
    g = scipy.signal.windows.gaussian(sf, sigma)
    g = g / g.sum()
    return scipy.signal.convolve(m, g, mode='same')
# Solvent Filter
def sol_general(data, filter, w=16, mode='same'):
    """
    Solvent filter with a generic filter.

    Algorithm described in:
    Marion et al. JMR 1989 84 425-430

    Parameters:

    * data    Array of spectral data.
    * filter  filter to convolve with data
    * mode    mode for output ('valid','same', or 'full')

    """
    # Normalization factor: the kernel's total weight.
    norm = filter.sum()
    kernel = filter
    if data.ndim == 2:
        kernel = np.atleast_2d(kernel)
    # Subtract the smoothed (low-frequency solvent) component.
    smoothed = scipy.signal.convolve(data, kernel, mode=mode)
    return data - smoothed / norm
def sol_boxcar(data, w=16, mode='same'):
    """
    Solvent filter with boxcar filter.

    Parameters:

    * data  Array of spectral data.
    * w     Width of convolution window.
    * mode  Mode for output ('valid', 'same', or 'full').
    """
    # FIX: scipy.signal.boxcar was removed in SciPy 1.13; a boxcar window
    # is simply an all-ones vector of length w (identical values).
    kernel = np.ones(w)
    return sol_general(data, kernel, w=w, mode=mode)
def sol_sine(data, w=16, mode='same'):
    """
    Solvent filter with sine-bell filter.

    Parameters:

    * data  Array of spectral data.
    * w     Width of convolution window.
    * mode  Mode for output ('valid', 'same', or 'full').
    """
    # Half-period cosine window (sine-bell), peaking at the center point.
    window = np.cos(np.pi * np.linspace(-0.5, 0.5, w))
    return sol_general(data, window, w=w, mode=mode)
def sol_sine2(data, w=16, mode='same'):
    """
    Solvent filter with squared sine-bell filter.

    Parameters:

    * data  Array of spectral data.
    * w     Width of convolution window.
    * mode  Mode for output ('valid', 'same', or 'full').
    """
    # Squared half-period cosine window (sine-bell squared).
    window = np.cos(np.pi * np.linspace(-0.5, 0.5, w))
    return sol_general(data, np.square(window), w=w, mode=mode)
def sol_gaussian(data, w=16, mode='same'):
    """
    Solvent filter with Gaussian filter.

    Parameters:

    * data  Array of spectral data.
    * w     Width of convolution window (std is w/2).
    * mode  Mode for output ('valid', 'same', or 'full').
    """
    # FIX: scipy.signal.gaussian was removed in SciPy 1.13; the identical
    # window function lives in scipy.signal.windows.
    kernel = scipy.signal.windows.gaussian(w, w / 2.)
    return sol_general(data, kernel, w=w, mode=mode)
# Polynomial Solvent Subtraction
def poly_td(data):
    """
    Polynomial time domain solvent subtraction (NOT IMPLEMENTED).

    From NMRPipe paper(appendix):
    when used with the argument -time, fits all data points to a polynomial,
    which is then subtracted from the original data. It is intended to fit
    and subtract low-freqency solvent signal in the FID, a procedure that
    often causes less distortions than time-domain convolution methods.
    By default, a fourth-order polynomials is used. For speed successive
    averages of regions are usually fit, rather than fitting all of the data.

    Alg:
    1. Calculate average of blocks
    2. Fit these averages to polynomial (block parameters)
    3. Back out "real" polynomial parameters from these block parameters
    4. Subtract off the polynomial from data
    """
    # XXX: placeholder - the algorithm described above is not implemented;
    # calling this function is currently a no-op returning None.
    pass
def poly_fd(data):
    """
    Polynomial frequency domain baseline correction (NOT IMPLEMENTED).

    From NMRPipe paper (appendix):
    applies a polynomial baseline correction of the order specified by
    argument -ord via an automated base-line detection method when used
    with argument -auto. The defauly is a forth-order polynomial. The
    automated base-line mode works as follows: a copy of a given vector is
    divided into a series of adjacent sections, typically eight points wide.
    The average value of each section is subtracted from all points in that
    section, to generate a 'centered' vector. The intensities of the entire
    centered vector are sorted, and the standard deviation of the noise is
    estimated under the assumption that a given fraction (typically about
    30%) of the smallest intensities belong to the base-line, and that the
    noise is normally distributed. This noise estimate is multiplied by a
    constant, typically about 1.5, to yield a classification threshold.
    Then, each section in the centered vector is classified as base line only
    if its standard deviation does not exceed the threshold. These
    classifications are used to correct the original vector.

    Alg:
    1. Divide into 'blocks'
    2. Center each block and create centered vector
    3. Calculate intensity (abs) of each centered vector
    4. Sort intensities, lower 30% belong to baseline
    5. Fit base line intensities to Normal distribution, gives estimation
       of standard deviation (std) of noise
    6. Classification threshold set to 1.5*std
    7. Qualify each block in centered vector as baseline only
       (its std < thres) or not (std > thres)
    8. Fit baseline only points to polynomial and substract off
    """
    # XXX: placeholder - the algorithm described above is not implemented;
    # calling this function is currently a no-op returning None.
    pass
|
|
import os
from tools_testcase import ToolTestCase, skipped
from aql import Tempdir, Tempfile, Project, ProjectConfig
# ==============================================================================
class TestToolMsvc(ToolTestCase):
    """Tests of the MSVC C++ compiler, resource compiler, archiver and linker.

    Each test generates a small C++ project in a temporary directory, builds
    it and verifies how many nodes are (re)built after touching sources or
    headers.  Every test is skipped when the MSVC toolchain is not installed.

    BUG FIX: four tests called ``skipped(...)`` without returning afterwards
    (inconsistent with the first two tests); if ``skipped()`` does not raise,
    execution continued with a ``None`` tool and crashed.  Each skip is now
    followed by ``return``.
    """

    def test_msvc_compiler(self):
        """Non-batch compilation: full build, no-op rebuild, header touch."""
        with Tempdir() as tmp_dir:
            build_dir = os.path.join(tmp_dir, 'output')
            src_dir = os.path.join(tmp_dir, 'src')
            os.makedirs(src_dir)

            num_src_files = 5
            src_files, hdr_files = self.generate_cpp_files(
                src_dir, 'foo', num_src_files)
            res_file = self.generate_res_file(src_dir, 'foo')

            cfg = ProjectConfig(
                args=["build_dir=%s" % build_dir, "batch_build=0"])
            prj = Project(cfg)

            tools_path = os.path.join(os.path.dirname(__file__), '../tools')
            cpp = prj.tools.try_tool('msvc++', tools_path=tools_path)
            if cpp is None:
                skipped("MSVC tool has not been found.")
                return

            # Initial build: every source plus the resource file.
            cpp.Compile(src_files, batch_build=False)
            cpp.CompileResource(res_file)
            self.build_prj(prj, num_src_files + 1)

            # Nothing changed: nothing should be rebuilt.
            cpp.Compile(src_files)
            cpp.CompileResource(res_file)
            self.build_prj(prj, 0)

            # Touching a header rebuilds only the source that includes it.
            self.touch_cpp_file(hdr_files[0])
            cpp.Compile(src_files)
            self.build_prj(prj, 1)

    # -----------------------------------------------------------

    def test_msvc_compiler_batch(self):
        """Batch compilation groups sources; mode switches must not rebuild."""
        with Tempdir() as tmp_dir:
            build_dir = os.path.join(tmp_dir, 'output')
            src_dir = os.path.join(tmp_dir, 'src')
            os.makedirs(src_dir)

            num_groups = 4
            group_size = 8
            num_src_files = num_groups * group_size
            src_files, hdr_files = self.generate_cpp_files(
                src_dir, 'foo', num_src_files)

            cfg = ProjectConfig(args=["build_dir=%s" % build_dir])
            prj = Project(cfg)

            tools_path = os.path.join(os.path.dirname(__file__), '../tools')
            cpp = prj.tools.try_tool('msvc++', tools_path=tools_path)
            if cpp is None:
                skipped("MSVC tool has not been found.")
                return

            # Batch build: one build node per group of sources.
            cpp.Compile(src_files, batch_build=True)
            self.build_prj(prj, num_groups, jobs=num_groups)

            # Same sources, non-batch: everything is already up to date.
            cpp.Compile(src_files, batch_build=False)
            self.build_prj(prj, 0)

            # One touched header: only the including source rebuilds.
            self.touch_cpp_file(hdr_files[0])
            cpp.Compile(src_files, batch_build=False)
            self.build_prj(prj, 1)

            # Touch a whole group's worth of headers: the affected sources
            # are rebuilt split across num_groups batch nodes.
            self.touch_cpp_files(hdr_files[:group_size])
            cpp.Compile(src_files, batch_build=True, batch_groups=num_groups)
            self.build_prj(prj, num_groups)

    # -----------------------------------------------------------

    def test_msvc_compiler_batch_error(self):
        """A compile error inside a batch fails the node; fixing it recovers."""
        with Tempdir() as tmp_dir:
            build_dir = os.path.join(tmp_dir, 'output')
            src_dir = os.path.join(tmp_dir, 'src')

            cfg = ProjectConfig(args=["build_dir=%s" % build_dir])
            prj = Project(cfg)

            tools_path = os.path.join(os.path.dirname(__file__), '../tools')
            cpp = prj.tools.try_tool('msvc++', tools_path=tools_path)
            if cpp is None:
                skipped("MSVC tool has not been found.")
                return  # BUG FIX: was missing, fell through with cpp = None

            os.makedirs(src_dir)

            num_src_files = 5
            src_files, hdr_files = self.generate_cpp_files(
                src_dir, 'foo', num_src_files)

            # Keep a pristine copy so the injected error can be reverted.
            src_file_orig = Tempfile(root_dir=tmp_dir)
            src_file_orig.close()
            self.copy_file(src_files[0], src_file_orig)

            self.add_error_to_cpp_file(src_files[0])
            cpp.Compile(src_files, batch_build=True, batch_groups=1)
            self.build_prj(prj, 0, num_failed_nodes=1)

            # Restore the valid source: the failed node now builds.
            self.copy_file(src_file_orig, src_files[0])
            cpp.Compile(src_files)
            self.build_prj(prj, 1)

    # -----------------------------------------------------------

    def test_msvc_archiver(self):
        """Static library build with batch compilation and rebuild checks."""
        with Tempdir() as tmp_dir:
            build_dir = os.path.join(tmp_dir, 'output')
            src_dir = os.path.join(tmp_dir, 'src')
            os.makedirs(src_dir)

            num_groups = 4
            group_size = 8
            num_src_files = num_groups * group_size
            src_files, hdr_files = self.generate_cpp_files(
                src_dir, 'foo', num_src_files)
            res_file = self.generate_res_file(src_dir, 'foo')

            cfg = ProjectConfig(args=["build_dir=%s" % build_dir])
            prj = Project(cfg)

            tools_path = os.path.join(os.path.dirname(__file__), '../tools')
            cpp = prj.tools.try_tool('msvc++', tools_path=tools_path)
            if cpp is None:
                skipped("MSVC tool has not been found.")
                return  # BUG FIX: was missing, fell through with cpp = None

            # Initial build: batch groups plus resource and archive nodes.
            cpp.LinkLibrary(src_files, res_file, target='foo',
                            batch_build=True, batch_groups=num_groups)
            self.build_prj(prj, num_groups + 2)

            cpp.LinkLibrary(src_files, res_file, target='foo')
            self.build_prj(prj, 0)

            self.touch_cpp_file(hdr_files[0])
            cpp.LinkLibrary(src_files, res_file, target='foo')
            self.build_prj(prj, 1)

            cpp.LinkLibrary(
                src_files, res_file, target='foo', batch_build=True)
            self.build_prj(prj, 0)

            self.touch_cpp_files(hdr_files)
            cpp.LinkLibrary(src_files, res_file, target='foo',
                            batch_build=True, batch_groups=num_groups)
            self.build_prj(prj, num_groups)

    # -----------------------------------------------------------

    def test_msvc_linker(self):
        """Shared library and program link, including duplicate declarations."""
        with Tempdir() as tmp_dir:
            build_dir = os.path.join(tmp_dir, 'output')
            src_dir = os.path.join(tmp_dir, 'src')
            os.makedirs(src_dir)

            num_groups = 4
            group_size = 2
            num_src_files = num_groups * group_size
            src_files, hdr_files = self.generate_cpp_files(
                src_dir, 'foo', num_src_files)
            res_file = self.generate_res_file(src_dir, 'foo')
            main_src_file = self.generate_main_cpp_file(src_dir, 'main')

            cfg = ProjectConfig(
                args=["build_dir=%s" % build_dir, "batch_build=0"])
            prj = Project(cfg)

            tools_path = os.path.join(os.path.dirname(__file__), '../tools')
            cpp = prj.tools.try_tool('msvc++', tools_path=tools_path)
            if cpp is None:
                skipped("MSVC tool has not been found.")
                return  # BUG FIX: was missing, fell through with cpp = None

            # NOTE(review): the shared library is declared twice on purpose;
            # presumably duplicate builders must be merged into one node.
            cpp.LinkSharedLibrary(src_files, res_file, target='foo')
            cpp.LinkSharedLibrary(src_files, res_file, target='foo')
            cpp.LinkProgram(src_files, main_src_file, res_file, target='foo')
            self.build_prj(prj, num_src_files + 4)

            cpp.LinkSharedLibrary(src_files, res_file, target='foo')
            cpp.LinkProgram(src_files, main_src_file, res_file, target='foo')
            self.build_prj(prj, 0)

            self.touch_cpp_file(hdr_files[0])
            cpp.LinkSharedLibrary(src_files, res_file, target='foo')
            cpp.LinkProgram(src_files, main_src_file, res_file, target='foo')
            self.build_prj(prj, 1)

            self.touch_cpp_files(hdr_files)
            cpp.LinkSharedLibrary(src_files, res_file, target='foo',
                                  batch_build=True, batch_groups=num_groups)
            cpp.LinkProgram(src_files, main_src_file, res_file, target='foo',
                            batch_build=True, batch_groups=num_groups)
            self.build_prj(prj, num_groups, jobs=1)

    # -----------------------------------------------------------

    def test_msvc_res_compiler(self):
        """Standalone MS resource compiler used next to the C++ compiler."""
        with Tempdir() as tmp_dir:
            build_dir = os.path.join(tmp_dir, 'build')
            src_dir = os.path.join(tmp_dir, 'src')
            os.makedirs(src_dir)

            num_src_files = 2
            src_files, hdr_files = self.generate_cpp_files(
                src_dir, 'foo', num_src_files)
            res_file = self.generate_res_file(src_dir, 'foo')

            cfg = ProjectConfig(args=["build_dir=%s" % build_dir])
            prj = Project(cfg)

            tools_path = os.path.join(os.path.dirname(__file__), '../tools')
            cpp = prj.tools.try_tool('msvc++', tools_path=tools_path)
            if cpp is None:
                skipped("MSVC tool has not been found.")
                return  # BUG FIX: was missing, fell through with cpp = None

            rc = prj.tools.try_tool('msrc', tools_path=tools_path)
            if rc is None:
                skipped("MS RC tool has not been found.")
                return  # BUG FIX: was missing, fell through with rc = None

            cpp.Compile(src_files, batch_build=False)
            rc.Compile(res_file)
            self.build_prj(prj, num_src_files + 1)

            cpp.Compile(src_files, batch_build=False)
            rc.Compile(res_file)
            self.build_prj(prj, 0)

            # Touch the resource script: only the resource node rebuilds.
            cpp.Compile(src_files, batch_build=False)
            rc.Compile(res_file)
            self.touch_cpp_file(res_file)
            self.build_prj(prj, 1)

            cpp.Compile(src_files, batch_build=False)
            rc.Compile(res_file)
            self.build_prj(prj, 0)
|
|
from test.test_support import run_unittest, verbose
import unittest
import locale
import sys
import codecs
enUS_locale = None
def get_enUS_locale():
    """Find a usable en_US locale and store its name in ``enUS_locale``.

    Tries a platform-specific list of candidate locale names; raises
    unittest.SkipTest when none can be activated.  The process locale is
    restored before returning.
    """
    global enUS_locale
    if sys.platform == 'darwin':
        import os
        tlocs = ("en_US.UTF-8", "en_US.ISO8859-1", "en_US")
        if int(os.uname()[2].split('.')[0]) < 10:
            # The locale test work fine on OSX 10.6, I (ronaldoussoren)
            # haven't had time yet to verify if tests work on OSX 10.5
            # (10.4 is known to be bad)
            raise unittest.SkipTest("Locale support on MacOSX is minimal")
    elif sys.platform.startswith("win"):
        # BUG FIX: this was a plain `if`, so on darwin the `else` branch
        # below always overwrote the darwin candidate list.
        tlocs = ("En", "English")
    else:
        tlocs = ("en_US.UTF-8", "en_US.US-ASCII", "en_US")
    oldlocale = locale.setlocale(locale.LC_NUMERIC)
    for tloc in tlocs:
        try:
            locale.setlocale(locale.LC_NUMERIC, tloc)
        except locale.Error:
            continue
        break
    else:
        raise unittest.SkipTest(
            "Test locale not supported (tried %s)" % (', '.join(tlocs)))
    enUS_locale = tloc
    locale.setlocale(locale.LC_NUMERIC, oldlocale)
class BaseLocalizedTest(unittest.TestCase):
    #
    # Base class for tests using a real locale
    #
    # Subclasses must define `locale_type` (e.g. locale.LC_NUMERIC); the
    # en_US locale name is the one discovered by get_enUS_locale().
    def setUp(self):
        # Remember the active locale so tearDown can restore it, then
        # switch the requested category to the discovered en_US locale.
        self.oldlocale = locale.setlocale(self.locale_type)
        locale.setlocale(self.locale_type, enUS_locale)
        if verbose:
            print "testing with \"%s\"..." % enUS_locale,
    def tearDown(self):
        # Restore whatever locale was active before the test ran.
        locale.setlocale(self.locale_type, self.oldlocale)
class BaseCookedTest(unittest.TestCase):
    """Base class for tests that run against cooked localeconv() values.

    Subclasses provide a `cooked_values` dict which is installed as
    locale's localeconv() override for the duration of each test.
    """
    def setUp(self):
        # Install the canned values; locale.localeconv() returns them.
        locale._override_localeconv = self.cooked_values

    def tearDown(self):
        # Drop the override so localeconv() behaves normally again.
        locale._override_localeconv = {}
class CCookedTest(BaseCookedTest):
    # A cooked "C" locale
    # 127 (CHAR_MAX) in the numeric fields means "value not available",
    # matching what localeconv() reports in the "C" locale.
    cooked_values = {
        'currency_symbol': '',
        'decimal_point': '.',
        'frac_digits': 127,
        'grouping': [],
        'int_curr_symbol': '',
        'int_frac_digits': 127,
        'mon_decimal_point': '',
        'mon_grouping': [],
        'mon_thousands_sep': '',
        'n_cs_precedes': 127,
        'n_sep_by_space': 127,
        'n_sign_posn': 127,
        'negative_sign': '',
        'p_cs_precedes': 127,
        'p_sep_by_space': 127,
        'p_sign_posn': 127,
        'positive_sign': '',
        'thousands_sep': ''
    }
class EnUSCookedTest(BaseCookedTest):
    # A cooked "en_US" locale
    # '$'/'USD ' currency, '.' decimal point, ',' thousands separator,
    # grouping in threes.
    cooked_values = {
        'currency_symbol': '$',
        'decimal_point': '.',
        'frac_digits': 2,
        'grouping': [3, 3, 0],
        'int_curr_symbol': 'USD ',
        'int_frac_digits': 2,
        'mon_decimal_point': '.',
        'mon_grouping': [3, 3, 0],
        'mon_thousands_sep': ',',
        'n_cs_precedes': 1,
        'n_sep_by_space': 0,
        'n_sign_posn': 1,
        'negative_sign': '-',
        'p_cs_precedes': 1,
        'p_sep_by_space': 0,
        'p_sign_posn': 1,
        'positive_sign': '',
        'thousands_sep': ','
    }
class FrFRCookedTest(BaseCookedTest):
    # A cooked "fr_FR" locale with a comma as decimal separator, a space
    # as thousands separator, and a non-ASCII currency symbol.
    # NOTE: 'currency_symbol' is the euro sign encoded as UTF-8 bytes.
    cooked_values = {
        'currency_symbol': '\xe2\x82\xac',
        'decimal_point': ',',
        'frac_digits': 2,
        'grouping': [3, 3, 0],
        'int_curr_symbol': 'EUR ',
        'int_frac_digits': 2,
        'mon_decimal_point': ',',
        'mon_grouping': [3, 3, 0],
        'mon_thousands_sep': ' ',
        'n_cs_precedes': 0,
        'n_sep_by_space': 1,
        'n_sign_posn': 1,
        'negative_sign': '-',
        'p_cs_precedes': 0,
        'p_sep_by_space': 1,
        'p_sign_posn': 1,
        'positive_sign': '',
        'thousands_sep': ' '
    }
class BaseFormattingTest(object):
    """Mixin with helpers for checking locale-aware formatting output.

    Mixin only: relies on unittest.TestCase (via a sibling base class)
    providing assertEqual.
    """
    def _test_formatfunc(self, format, value, out, func, **format_opts):
        # Run the given formatting callable and compare its result.
        result = func(format, value, **format_opts)
        self.assertEqual(result, out)

    def _test_format(self, format, value, out, **format_opts):
        self._test_formatfunc(format, value, out,
                              func=locale.format, **format_opts)

    def _test_format_string(self, format, value, out, **format_opts):
        self._test_formatfunc(format, value, out,
                              func=locale.format_string, **format_opts)

    def _test_currency(self, value, out, **format_opts):
        got = locale.currency(value, **format_opts)
        self.assertEqual(got, out)
class EnUSNumberFormatting(BaseFormattingTest):
    # XXX there is a grouping + padding bug when the thousands separator
    # is empty but the grouping array contains values (e.g. Solaris 10)
    # Mixin: requires BaseCookedTest/BaseLocalizedTest to make en_US
    # localeconv() values active before setUp() runs.
    def setUp(self):
        # Cache the active thousands separator (',' for en_US).
        self.sep = locale.localeconv()['thousands_sep']
    def test_grouping(self):
        self._test_format("%f", 1024, grouping=1, out='1%s024.000000' % self.sep)
        self._test_format("%f", 102, grouping=1, out='102.000000')
        self._test_format("%f", -42, grouping=1, out='-42.000000')
        self._test_format("%+f", -42, grouping=1, out='-42.000000')
    def test_grouping_and_padding(self):
        self._test_format("%20.f", -42, grouping=1, out='-42'.rjust(20))
        if self.sep:
            self._test_format("%+10.f", -4200, grouping=1,
                out=('-4%s200' % self.sep).rjust(10))
            self._test_format("%-10.f", -4200, grouping=1,
                out=('-4%s200' % self.sep).ljust(10))
    def test_integer_grouping(self):
        self._test_format("%d", 4200, grouping=True, out='4%s200' % self.sep)
        self._test_format("%+d", 4200, grouping=True, out='+4%s200' % self.sep)
        self._test_format("%+d", -4200, grouping=True, out='-4%s200' % self.sep)
    def test_integer_grouping_and_padding(self):
        self._test_format("%10d", 4200, grouping=True,
            out=('4%s200' % self.sep).rjust(10))
        self._test_format("%-10d", -4200, grouping=True,
            out=('-4%s200' % self.sep).ljust(10))
    def test_simple(self):
        self._test_format("%f", 1024, grouping=0, out='1024.000000')
        self._test_format("%f", 102, grouping=0, out='102.000000')
        self._test_format("%f", -42, grouping=0, out='-42.000000')
        self._test_format("%+f", -42, grouping=0, out='-42.000000')
    def test_padding(self):
        self._test_format("%20.f", -42, grouping=0, out='-42'.rjust(20))
        self._test_format("%+10.f", -4200, grouping=0, out='-4200'.rjust(10))
        self._test_format("%-10.f", 4200, grouping=0, out='4200'.ljust(10))
    def test_complex_formatting(self):
        # Spaces in formatting string
        self._test_format_string("One million is %i", 1000000, grouping=1,
            out='One million is 1%s000%s000' % (self.sep, self.sep))
        # NOTE(review): this is an exact duplicate of the assertion above;
        # upstream CPython uses a double space here - confirm intent.
        self._test_format_string("One million is %i", 1000000, grouping=1,
            out='One million is 1%s000%s000' % (self.sep, self.sep))
        # Dots in formatting string
        self._test_format_string(".%f.", 1000.0, out='.1000.000000.')
        # Padding
        if self.sep:
            self._test_format_string("--> %10.2f", 4200, grouping=1,
                out='--> ' + ('4%s200.00' % self.sep).rjust(10))
        # Asterisk formats
        self._test_format_string("%10.*f", (2, 1000), grouping=0,
            out='1000.00'.rjust(10))
        if self.sep:
            self._test_format_string("%*.*f", (10, 2, 1000), grouping=1,
                out=('1%s000.00' % self.sep).rjust(10))
        # Test more-in-one
        if self.sep:
            self._test_format_string("int %i float %.2f str %s",
                (1000, 1000.0, 'str'), grouping=1,
                out='int 1%s000 float 1%s000.00 str str' %
                (self.sep, self.sep))
class TestFormatPatternArg(unittest.TestCase):
    # Test handling of pattern argument of format
    def test_onlyOnePattern(self):
        # Issue 2522: accept exactly one % pattern, and no extra chars.
        bad_patterns = (
            "%f\n", "%f\r", "%f\r\n",  # trailing newline characters
            " %f",                     # leading characters
            "%fg",                     # trailing characters
            "%^g",                     # invalid conversion
            "%f%%",                    # more than one pattern
        )
        for pattern in bad_patterns:
            self.assertRaises(ValueError, locale.format, pattern, 'foo')
class TestLocaleFormatString(unittest.TestCase):
    """General tests on locale.format_string"""

    def test_percent_escape(self):
        # A literal %% must pass through formatting untouched.
        expected = '%f%%' % 1.0
        self.assertEqual(locale.format_string('%f%%', 1.0), expected)
        expected = '%d %f%%d' % (1, 1.0)
        self.assertEqual(locale.format_string('%d %f%%d', (1, 1.0)), expected)
        expected = '%(foo)s %%d' % {'foo': 'bar'}
        self.assertEqual(locale.format_string('%(foo)s %%d', {'foo': 'bar'}),
                         expected)

    def test_mapping(self):
        # Mapping-style conversions work with a dict argument.
        expected = '%(foo)s bing.' % {'foo': 'bar'}
        self.assertEqual(locale.format_string('%(foo)s bing.', {'foo': 'bar'}),
                         expected)
        expected = '%(foo)s' % {'foo': 'bar'}
        self.assertEqual(locale.format_string('%(foo)s', {'foo': 'bar'}),
                         expected)
class TestNumberFormatting(BaseLocalizedTest, EnUSNumberFormatting):
    # Test number formatting with a real English locale.
    locale_type = locale.LC_NUMERIC
    def setUp(self):
        # Switch to the real en_US locale first, then cache its separator.
        BaseLocalizedTest.setUp(self)
        EnUSNumberFormatting.setUp(self)
class TestEnUSNumberFormatting(EnUSCookedTest, EnUSNumberFormatting):
    # Test number formatting with a cooked "en_US" locale.
    def setUp(self):
        # Install the cooked localeconv() values, then cache the separator.
        EnUSCookedTest.setUp(self)
        EnUSNumberFormatting.setUp(self)
    def test_currency(self):
        # locale.currency() with/without grouping and international symbol.
        self._test_currency(50000, "$50000.00")
        self._test_currency(50000, "$50,000.00", grouping=True)
        self._test_currency(50000, "USD 50,000.00",
            grouping=True, international=True)
class TestCNumberFormatting(CCookedTest, BaseFormattingTest):
    # Test number formatting with a cooked "C" locale.
    # The "C" locale has no thousands separator, so grouping is a no-op.
    def test_grouping(self):
        self._test_format("%.2f", 12345.67, grouping=True, out='12345.67')
    def test_grouping_and_padding(self):
        self._test_format("%9.2f", 12345.67, grouping=True, out=' 12345.67')
class TestFrFRNumberFormatting(FrFRCookedTest, BaseFormattingTest):
    # Test number formatting with a cooked "fr_FR" locale.
    # fr_FR uses ',' as the decimal point and ' ' as thousands separator.
    def test_decimal_point(self):
        self._test_format("%.2f", 12345.67, out='12345,67')
    def test_grouping(self):
        self._test_format("%.2f", 345.67, grouping=True, out='345,67')
        self._test_format("%.2f", 12345.67, grouping=True, out='12 345,67')
    def test_grouping_and_padding(self):
        self._test_format("%6.2f", 345.67, grouping=True, out='345,67')
        self._test_format("%7.2f", 345.67, grouping=True, out=' 345,67')
        self._test_format("%8.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%9.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%10.2f", 12345.67, grouping=True, out=' 12 345,67')
        self._test_format("%-6.2f", 345.67, grouping=True, out='345,67')
        self._test_format("%-7.2f", 345.67, grouping=True, out='345,67 ')
        self._test_format("%-8.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%-9.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%-10.2f", 12345.67, grouping=True, out='12 345,67 ')
    def test_integer_grouping(self):
        self._test_format("%d", 200, grouping=True, out='200')
        self._test_format("%d", 4200, grouping=True, out='4 200')
    def test_integer_grouping_and_padding(self):
        self._test_format("%4d", 4200, grouping=True, out='4 200')
        self._test_format("%5d", 4200, grouping=True, out='4 200')
        self._test_format("%10d", 4200, grouping=True, out='4 200'.rjust(10))
        self._test_format("%-4d", 4200, grouping=True, out='4 200')
        self._test_format("%-5d", 4200, grouping=True, out='4 200')
        self._test_format("%-10d", 4200, grouping=True, out='4 200'.ljust(10))
    def test_currency(self):
        # The euro sign is a Python 2 byte string (UTF-8 encoded).
        euro = u'\u20ac'.encode('utf-8')
        self._test_currency(50000, "50000,00 " + euro)
        self._test_currency(50000, "50 000,00 " + euro, grouping=True)
        # XXX is the trailing space a bug?
        self._test_currency(50000, "50 000,00 EUR ",
            grouping=True, international=True)
class TestStringMethods(BaseLocalizedTest):
    locale_type = locale.LC_CTYPE
    # These byte-string (Python 2 str) classification checks are only
    # defined on platforms whose C library handles them consistently.
    if sys.platform != 'sunos5' and not sys.platform.startswith("win"):
        # Test BSD Rune locale's bug for isctype functions.
        def test_isspace(self):
            self.assertEqual('\x20'.isspace(), True)
            self.assertEqual('\xa0'.isspace(), False)
            self.assertEqual('\xa1'.isspace(), False)
        def test_isalpha(self):
            self.assertEqual('\xc0'.isalpha(), False)
        def test_isalnum(self):
            self.assertEqual('\xc0'.isalnum(), False)
        def test_isupper(self):
            self.assertEqual('\xc0'.isupper(), False)
        def test_islower(self):
            self.assertEqual('\xc0'.islower(), False)
        def test_lower(self):
            self.assertEqual('\xcc\x85'.lower(), '\xcc\x85')
        def test_upper(self):
            self.assertEqual('\xed\x95\xa0'.upper(), '\xed\x95\xa0')
        def test_strip(self):
            self.assertEqual('\xed\x95\xa0'.strip(), '\xed\x95\xa0')
        def test_split(self):
            self.assertEqual('\xec\xa0\xbc'.split(), ['\xec\xa0\xbc'])
class TestMiscellaneous(unittest.TestCase):
    def test_getpreferredencoding(self):
        # Invoke getpreferredencoding to make sure it does not cause exceptions.
        enc = locale.getpreferredencoding()
        if enc:
            # If encoding non-empty, make sure it is valid
            codecs.lookup(enc)
    # strcoll is only present when the platform provides it.
    if hasattr(locale, "strcoll"):
        def test_strcoll_3303(self):
            # test crasher from bug #3303
            self.assertRaises(TypeError, locale.strcoll, u"a", None)
    def test_setlocale_category(self):
        # Querying (no second argument) each category must not fail.
        locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_TIME)
        locale.setlocale(locale.LC_CTYPE)
        locale.setlocale(locale.LC_COLLATE)
        locale.setlocale(locale.LC_MONETARY)
        locale.setlocale(locale.LC_NUMERIC)
        # crasher from bug #7419
        self.assertRaises(locale.Error, locale.setlocale, 12345)
def test_main():
    # Tests that do not require a working en_US system locale.
    tests = [
        TestMiscellaneous,
        TestFormatPatternArg,
        TestLocaleFormatString,
        TestEnUSNumberFormatting,
        TestCNumberFormatting,
        TestFrFRNumberFormatting,
    ]
    # SkipTest can't be raised inside unittests, handle it manually instead
    try:
        get_enUS_locale()
    except unittest.SkipTest as e:
        if verbose:
            print "Some tests will be disabled: %s" % e
    else:
        # A real en_US locale is available: also run the localized tests.
        tests += [TestNumberFormatting, TestStringMethods]
    run_unittest(*tests)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    test_main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
  """Helper interface. Helper instances are used by SamplingDecoder.

  A Helper supplies the decoder's first inputs and, at every step, turns
  RNN outputs into sample ids and the next step's inputs.
  """

  @abc.abstractproperty
  def batch_size(self):
    """Returns a scalar int32 tensor."""
    raise NotImplementedError("batch_size has not been implemented")

  @abc.abstractmethod
  def initialize(self, name=None):
    """Returns `(initial_finished, initial_inputs)`."""
    pass

  @abc.abstractmethod
  def sample(self, time, outputs, state, name=None):
    """Returns `sample_ids`."""
    pass

  @abc.abstractmethod
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """Returns `(finished, next_inputs, next_state)`."""
    pass
class CustomHelper(Helper):
  """Base abstract class that allows the user to customize sampling."""

  def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
    """Initializer.

    Args:
      initialize_fn: callable that returns `(finished, next_inputs)`
        for the first iteration.
      sample_fn: callable that takes `(time, outputs, state)`
        and emits tensor `sample_ids`.
      next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
        and emits `(finished, next_inputs, next_state)`.
    """
    self._initialize_fn = initialize_fn
    self._sample_fn = sample_fn
    self._next_inputs_fn = next_inputs_fn
    # Lazily inferred from the `finished` vector on first initialize().
    self._batch_size = None

  @property
  def batch_size(self):
    if self._batch_size is None:
      raise ValueError("batch_size accessed before initialize was called")
    return self._batch_size

  def initialize(self, name=None):
    scope = "%sInitialize" % type(self).__name__
    with ops.name_scope(name, scope):
      finished, next_inputs = self._initialize_fn()
      if self._batch_size is None:
        self._batch_size = array_ops.size(finished)
      return (finished, next_inputs)

  def sample(self, time, outputs, state, name=None):
    scope = "%sSample" % type(self).__name__
    with ops.name_scope(name, scope, (time, outputs, state)):
      return self._sample_fn(time=time, outputs=outputs, state=state)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    scope = "%sNextInputs" % type(self).__name__
    with ops.name_scope(name, scope, (time, outputs, state)):
      return self._next_inputs_fn(
          time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
  """A helper for use during training. Only reads inputs.

  Returned sample_ids are the argmax of the RNN output logits.
  """

  def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool. Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        # Inputs are stored time-major internally for per-step reads.
        inputs = nest.map_structure(_transpose_batch_time, inputs)
      def _unstack_ta(inp):
        # One TensorArray entry per time step.
        return tensor_array_ops.TensorArray(
            dtype=inp.dtype, size=array_ops.shape(inp)[0],
            element_shape=inp.get_shape()[1:]).unstack(inp)
      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())
      # Fed once every sequence in the batch has finished.
      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
      self._batch_size = array_ops.size(sequence_length)

  @property
  def batch_size(self):
    return self._batch_size

  def initialize(self, name=None):
    with ops.name_scope(name, "TrainingHelperInitialize"):
      # A sequence of length 0 is finished before the first step.
      finished = math_ops.equal(0, self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      # If every sequence is empty feed zeros, otherwise read time step 0.
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
      return (finished, next_inputs)

  def sample(self, time, outputs, name=None, **unused_kwargs):
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      # Greedy: sample ids are the argmax over the logits dimension.
      sample_ids = math_ops.cast(
          math_ops.argmax(outputs, axis=-1), dtypes.int32)
      return sample_ids

  def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
    """next_inputs_fn for TrainingHelper."""
    with ops.name_scope(name, "TrainingHelperNextInputs",
                        [time, outputs, state]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      def read_from_ta(inp):
        return inp.read(next_time)
      # Feed zeros once all sequences are done; otherwise the next step's
      # teacher inputs.
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(read_from_ta, self._input_tas))
      return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
  """A training helper that adds scheduled sampling.

  Returns -1s for sample_ids where no sampling took place; valid sample id
  values elsewhere.
  """

  def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        categorically from the output ids instead of reading directly from the
        inputs.
      time_major: Python bool. Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        # Treat `embedding` as the params matrix for embedding_lookup.
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)

  def initialize(self, name=None):
    return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)

  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Return -1s where we did not sample, and sample_ids elsewhere
      select_sample_noise = random_ops.random_uniform(
          [self.batch_size], seed=self._scheduling_seed)
      # Per-batch-entry coin flip deciding whether to sample at this step.
      select_sample = (self._sampling_probability > select_sample_noise)
      sample_id_sampler = categorical.Categorical(logits=outputs)
      return array_ops.where(
          select_sample,
          sample_id_sampler.sample(seed=self._seed),
          array_ops.tile([-1], [self.batch_size]))

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    # NOTE(review): scope name reuses "...Sample" instead of "...NextInputs";
    # presumably unintentional, but renaming would change graph op names -
    # confirm before touching.
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      def maybe_sample():
        """Perform scheduled sampling."""
        # Batch indices of sampled (-1 < id) vs. pass-through positions.
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        # Sampled positions use the embedded sample ids; the rest keep the
        # teacher-forced inputs from the base helper.
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        # Scatter both partitions back into one batch-shaped tensor.
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))
      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
  """A helper for use during inference.

  Uses the argmax of the output (treated as logits) and passes the
  result through an embedding layer to get the next input.
  """
  def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not
        a scalar.
    """
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      # Treat `embedding` as the params matrix for a standard lookup.
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))
    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    # Batch size is inferred from the number of start tokens.
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    # First decoder input: embedded start tokens.
    self._start_inputs = self._embedding_fn(self._start_tokens)
  @property
  def batch_size(self):
    """Scalar `int32` tensor: size of the decoded batch."""
    return self._batch_size
  def initialize(self, name=None):
    """Returns `(initial_finished, initial_inputs)`: nothing finished yet."""
    finished = array_ops.tile([False], [self._batch_size])
    return (finished, self._start_inputs)
  def sample(self, time, outputs, state, name=None):
    """sample for GreedyEmbeddingHelper."""
    del time, state  # unused by sample_fn
    # Outputs are logits, use argmax to get the most probable id
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      outputs)
    sample_ids = math_ops.cast(
        math_ops.argmax(outputs, axis=-1), dtypes.int32)
    return sample_ids
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """next_inputs_fn for GreedyEmbeddingHelper."""
    del time, outputs  # unused by next_inputs_fn
    # An entry is finished once it emits the end token.
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished,
        # If we're finished, the next_inputs value doesn't matter
        lambda: self._start_inputs,
        lambda: self._embedding_fn(sample_ids))
    return (finished, next_inputs, state)
|
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs.
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build',
'build_from_document',
'fix_method_name',
'key2param',
]
# Standard library imports
import copy
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
import keyword
import logging
import mimetypes
import os
import re
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Third-party imports
import httplib2
import mimeparse
import uritemplate
# Local imports
from googleapiclient.errors import HttpError
from googleapiclient.errors import InvalidJsonError
from googleapiclient.errors import MediaUploadSizeError
from googleapiclient.errors import UnacceptableMimeTypeError
from googleapiclient.errors import UnknownApiNameOrVersion
from googleapiclient.errors import UnknownFileType
from googleapiclient.http import HttpRequest
from googleapiclient.http import MediaFileUpload
from googleapiclient.http import MediaUpload
from googleapiclient.model import JsonModel
from googleapiclient.model import MediaModel
from googleapiclient.model import RawModel
from googleapiclient.schema import Schemas
from oauth2client.anyjson import simplejson
from oauth2client.util import _add_query_parameter
from oauth2client.util import positional
# The client library requires a version of httplib2 that supports RETRIES.
httplib2.RETRIES = 1
# Module-level logger for this library.
logger = logging.getLogger(__name__)
# Matches a URI Template expression, e.g. '{userId}' in a method path.
URITEMPLATE = re.compile('{[^}]*}')
# Matches a variable name inside a URI Template expression.
VARNAME = re.compile('[a-zA-Z0-9_-]+')
# URI Template for any API's discovery document; expanded with {api} and
# {apiVersion}.
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')
# Fallback docstring for generated methods whose discovery entry has no
# 'description'.
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# HTTP methods that may carry a request payload (get a 'body' parameter).
HTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])
# Bit shifts used to convert '2MB'/'7GB'-style maxSize strings into bytes.
_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
# Parameter description used for the library's reserved 'body' parameter.
BODY_PARAMETER_DEFAULT_VALUE = {
    'description': 'The request body.',
    'type': 'object',
    'required': True,
    }
# Parameter description used for the 'media_body' upload parameter.
MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
    'description': ('The filename of the media request body, or an instance '
                    'of a MediaUpload object.'),
    'type': 'string',
    'required': False,
    }
# Parameters accepted by the stack, but not visible via discovery.
# TODO(dhermes): Remove 'userip' in 'v2'.
STACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])
STACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}
# Library-specific reserved words beyond Python keywords.
RESERVED_WORDS = frozenset(['body'])
def fix_method_name(name):
  """Fix method names to avoid reserved word conflicts.

  Args:
    name: string, method name.

  Returns:
    The name, with a trailing underscore appended when it collides with a
    Python keyword or a library reserved word; otherwise the name unchanged.
  """
  needs_suffix = keyword.iskeyword(name) or name in RESERVED_WORDS
  return name + '_' if needs_suffix else name
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results".

  Args:
    key: string, the method key name.

  Returns:
    A safe method name based on the key name.
  """
  # Parameter names must start with a letter; prefix an 'x' otherwise.
  prefix = '' if key[0].isalpha() else 'x'
  # Every non-alphanumeric character becomes an underscore.
  return prefix + ''.join(c if c.isalnum() else '_' for c in key)
@positional(2)
def build(serviceName,
          version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with an API. The serviceName and
  version are the names from the Discovery service.

  Args:
    serviceName: string, name of the service.
    version: string, the version of the service.
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    discoveryServiceUrl: string, a URI Template that points to the location of
      the discovery service. It should have two parameters {api} and
      {apiVersion} that when filled in produce an absolute URI to the discovery
      document for that service.
    developerKey: string, key obtained from
      https://code.google.com/apis/console.
    model: googleapiclient.Model, converts to and from the wire format.
    requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP
      request.

  Returns:
    A Resource object with methods for interacting with the service.

  Raises:
    UnknownApiNameOrVersion: if the discovery service returns a 404.
    HttpError: for any other 4xx/5xx response from the discovery service.
    InvalidJsonError: if the discovery document cannot be parsed as JSON.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }
  if http is None:
    http = httplib2.Http()
  # Expand the URI Template into the concrete discovery-document URL.
  requested_url = uritemplate.expand(discoveryServiceUrl, params)
  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  if 'REMOTE_ADDR' in os.environ:
    requested_url = _add_query_parameter(requested_url, 'userIp',
                                         os.environ['REMOTE_ADDR'])
  logger.info('URL being requested: %s' % requested_url)
  resp, content = http.request(requested_url)
  # A 404 means the name/version pair is unknown to the discovery service.
  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, uri=requested_url)
  # Parse only to validate the document; the raw string is handed on below.
  try:
    service = simplejson.loads(content)
  except ValueError, e:
    logger.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()
  # All construction happens in build_from_document from here on.
  return build_from_document(content, base=discoveryServiceUrl, http=http,
      developerKey=developerKey, model=model, requestBuilder=requestBuilder)
@positional(1)
def build_from_document(
    service,
    base=None,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object from a discovery
  document that is it given, as opposed to retrieving one over HTTP.

  Args:
    service: string or object, the JSON discovery document describing the API.
      The value passed in may either be the JSON string or the deserialized
      JSON.
    base: string, base URI for all HTTP requests, usually the discovery URI.
      This parameter is no longer used as rootUrl and servicePath are included
      within the discovery document. (deprecated)
    future: string, discovery document with future capabilities (deprecated).
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and de-serializes requests and
      responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with the service.
  """
  # future is no longer used.
  future = {}
  if isinstance(service, basestring):
    service = simplejson.loads(service)
  # The `base` argument is ignored: the real base URL is assembled from the
  # rootUrl and servicePath inside the discovery document itself.
  base = urlparse.urljoin(service['rootUrl'], service['servicePath'])
  schema = Schemas(service)
  if model is None:
    features = service.get('features', [])
    # 'dataWrapper' means request/response payloads are wrapped in a
    # {'data': ...} envelope.
    model = JsonModel('dataWrapper' in features)
  # The top-level discovery document doubles as both the resource description
  # and the root description.
  return Resource(http=http, baseUrl=base, model=model,
                  developerKey=developerKey, requestBuilder=requestBuilder,
                  resourceDesc=service, rootDesc=service, schema=schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
def _media_size_to_long(maxSize):
  """Convert a string media size, such as 10GB or 3TB into an integer.

  Args:
    maxSize: string, size as a string, such as 2MB or 7GB.

  Returns:
    The size as an integer value (long); 0 for the empty/one-char string.
  """
  # Anything shorter than two characters cannot carry a unit suffix.
  if len(maxSize) < 2:
    return 0L
  # The last two characters may be a unit suffix ('KB', 'MB', 'GB', 'TB').
  units = maxSize[-2:].upper()
  bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
  if bit_shift is not None:
    return long(maxSize[:-2]) << bit_shift
  else:
    # No recognized suffix: the whole string is a plain byte count.
    return long(maxSize)
def _media_path_url_from_info(root_desc, path_url):
"""Creates an absolute media path URL.
Constructed using the API root URI and service path from the discovery
document and the relative path for the API method.
Args:
root_desc: Dictionary; the entire original deserialized discovery document.
path_url: String; the relative URL for the API method. Relative to the API
root, which is specified in the discovery document.
Returns:
String; the absolute URI for media upload for the API method.
"""
return '%(root)supload/%(service_path)s%(path)s' % {
'root': root_desc['rootUrl'],
'service_path': root_desc['servicePath'],
'path': path_url,
}
def _fix_up_parameters(method_desc, root_desc, http_method):
  """Updates parameters of an API method with values specific to this library.

  Adds the API-wide global parameters to the individual method's parameters,
  plus parameters that never appear in the discovery document but are
  accepted by all discovery based APIs (STACK_QUERY_PARAMETERS).

  SIDE EFFECTS: This updates the parameters dictionary object in the method
  description.

  Args:
    method_desc: Dictionary with metadata describing an API method. Value comes
      from the dictionary of methods stored in the 'methods' key in the
      deserialized discovery document.
    root_desc: Dictionary; the entire original deserialized discovery document.
    http_method: String; the HTTP method used to call the API method described
      in method_desc.

  Returns:
    The updated Dictionary stored in the 'parameters' key of the method
    description dictionary.
  """
  parameters = method_desc.setdefault('parameters', {})
  # Fold in the API-wide parameters declared at the top of the discovery doc.
  for param_name, param_desc in root_desc.get('parameters', {}).iteritems():
    parameters[param_name] = param_desc
  # Fold in query parameters the stack accepts but discovery never lists.
  for param_name in STACK_QUERY_PARAMETERS:
    parameters[param_name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
  # Methods that can carry a payload get the library's reserved 'body'
  # parameter, seeded from the method's 'request' schema.
  if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:
    body_param = BODY_PARAMETER_DEFAULT_VALUE.copy()
    body_param.update(method_desc['request'])
    parameters['body'] = body_param
  return parameters
def _fix_up_media_upload(method_desc, root_desc, path_url, parameters):
  """Updates parameters of API by adding 'media_body' if supported by method.

  SIDE EFFECTS: If the method supports media upload and has a required body,
  sets body to be optional (required=False) instead. Also, if there is a
  'mediaUpload' in the method description, adds 'media_upload' key to
  parameters.

  Args:
    method_desc: Dictionary with metadata describing an API method. Value comes
      from the dictionary of methods stored in the 'methods' key in the
      deserialized discovery document.
    root_desc: Dictionary; the entire original deserialized discovery document.
    path_url: String; the relative URL for the API method. Relative to the API
      root, which is specified in the discovery document.
    parameters: A dictionary describing method parameters for method described
      in method_desc.

  Returns:
    Triple (accept, max_size, media_path_url) where:
      - accept is a list of strings representing what content types are
        accepted for media upload. Defaults to empty list if not in the
        discovery document.
      - max_size is a long representing the max size in bytes allowed for a
        media upload. Defaults to 0L if not in the discovery document.
      - media_path_url is a String; the absolute URI for media upload for the
        API method, or None if media upload is not supported.
  """
  media_upload = method_desc.get('mediaUpload', {})
  accept = media_upload.get('accept', [])
  max_size = _media_size_to_long(media_upload.get('maxSize', ''))
  # No mediaUpload section means the method does not support media upload.
  if not media_upload:
    return accept, max_size, None
  # Media upload is supported: expose 'media_body' and make 'body' optional,
  # since a pure media upload may omit the request body.
  parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
  if 'body' in parameters:
    parameters['body']['required'] = False
  return accept, max_size, _media_path_url_from_info(root_desc, path_url)
def _fix_up_method_description(method_desc, root_desc):
  """Updates a method description in a discovery document.

  SIDE EFFECTS: Changes the parameters dictionary in the method description with
  extra parameters which are used locally.

  Args:
    method_desc: Dictionary with metadata describing an API method. Value comes
      from the dictionary of methods stored in the 'methods' key in the
      deserialized discovery document.
    root_desc: Dictionary; the entire original deserialized discovery document.

  Returns:
    Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)
    where:
      - path_url is a String; the relative URL for the API method. Relative to
        the API root, which is specified in the discovery document.
      - http_method is a String; the HTTP method used to call the API method
        described in the method description.
      - method_id is a String; the name of the RPC method associated with the
        API method, and is in the method description in the 'id' key.
      - accept is a list of strings representing what content types are
        accepted for media upload. Defaults to empty list if not in the
        discovery document.
      - max_size is a long representing the max size in bytes allowed for a
        media upload. Defaults to 0L if not in the discovery document.
      - media_path_url is a String; the absolute URI for media upload for the
        API method. Constructed using the API root URI and service path from
        the discovery document and the relative path for the API method. If
        media upload is not supported, this is None.
  """
  path_url = method_desc['path']
  http_method = method_desc['httpMethod']
  method_id = method_desc['id']
  parameters = _fix_up_parameters(method_desc, root_desc, http_method)
  # Order is important. `_fix_up_media_upload` needs `method_desc` to have a
  # 'parameters' key and needs to know if there is a 'body' parameter because it
  # also sets a 'media_body' parameter.
  accept, max_size, media_path_url = _fix_up_media_upload(
      method_desc, root_desc, path_url, parameters)
  return path_url, http_method, method_id, accept, max_size, media_path_url
# TODO(dhermes): Convert this class to ResourceMethod and make it callable
class ResourceMethodParameters(object):
  """Represents the parameters associated with a method.

  Attributes:
    argmap: Map from method parameter name (string) to query parameter name
      (string).
    required_params: List of required parameters (represented by parameter
      name as string).
    repeated_params: List of repeated parameters (represented by parameter
      name as string).
    pattern_params: Map from method parameter name (string) to regular
      expression (as a string). If the pattern is set for a parameter, the
      value for that parameter must match the regular expression.
    query_params: List of parameters (represented by parameter name as string)
      that will be used in the query string.
    path_params: Set of parameters (represented by parameter name as string)
      that will be used in the base URL path.
    param_types: Map from method parameter name (string) to parameter type. Type
      can be any valid JSON schema type; valid values are 'any', 'array',
      'boolean', 'integer', 'number', 'object', or 'string'. Reference:
      http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
    enum_params: Map from method parameter name (string) to list of strings,
      where each list of strings is the list of acceptable enum values.
  """
  def __init__(self, method_desc):
    """Constructor for ResourceMethodParameters.

    Sets default values and defers to set_parameters to populate.

    Args:
      method_desc: Dictionary with metadata describing an API method. Value
        comes from the dictionary of methods stored in the 'methods' key in
        the deserialized discovery document.
    """
    self.argmap = {}
    self.required_params = []
    self.repeated_params = []
    self.pattern_params = {}
    self.query_params = []
    # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
    # parsing is gotten rid of.
    self.path_params = set()
    self.param_types = {}
    self.enum_params = {}
    self.set_parameters(method_desc)
  def set_parameters(self, method_desc):
    """Populates maps and lists based on method description.

    Iterates through each parameter for the method and parses the values from
    the parameter dictionary.

    Args:
      method_desc: Dictionary with metadata describing an API method. Value
        comes from the dictionary of methods stored in the 'methods' key in
        the deserialized discovery document.
    """
    for key_name, description in method_desc.get('parameters', {}).iteritems():
      param = key2param(key_name)
      self.argmap[param] = key_name
      self.param_types[param] = description.get('type', 'string')
      if description.get('pattern'):
        self.pattern_params[param] = description['pattern']
      if description.get('enum'):
        self.enum_params[param] = description['enum']
      if description.get('required'):
        self.required_params.append(param)
      if description.get('repeated'):
        self.repeated_params.append(param)
      location = description.get('location')
      if location == 'query':
        self.query_params.append(param)
      elif location == 'path':
        self.path_params.add(param)
    # TODO(dhermes): Determine if this is still necessary. Discovery based APIs
    # should have all path parameters already marked with
    # 'location: path'.
    for template in URITEMPLATE.finditer(method_desc['path']):
      for variable in VARNAME.finditer(template.group(0)):
        param = key2param(variable.group(0))
        self.path_params.add(param)
        # A templated path variable cannot also be a query parameter.
        if param in self.query_params:
          self.query_params.remove(param)
def createMethod(methodName, methodDesc, rootDesc, schema):
  """Creates a method for attaching to a Resource.

  Args:
    methodName: string, name of the method to use.
    methodDesc: object, fragment of deserialized discovery document that
      describes the method.
    rootDesc: object, the entire deserialized discovery document.
    schema: object, mapping of schema names to schema descriptions.

  Returns:
    Tuple of (fixed method name, unbound function implementing the method).
  """
  methodName = fix_method_name(methodName)
  (pathUrl, httpMethod, methodId, accept,
   maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc)
  parameters = ResourceMethodParameters(methodDesc)
  def method(self, **kwargs):
    # Don't bother with doc string, it will be over-written by createMethod.
    # --- Argument validation ---------------------------------------------
    for name in kwargs.iterkeys():
      if name not in parameters.argmap:
        raise TypeError('Got an unexpected keyword argument "%s"' % name)
    # Remove args that have a value of None.
    keys = kwargs.keys()
    for name in keys:
      if kwargs[name] is None:
        del kwargs[name]
    for name in parameters.required_params:
      if name not in kwargs:
        raise TypeError('Missing required parameter "%s"' % name)
    # Each pattern-constrained value (or each element, if repeated) must
    # match its regular expression from the discovery document.
    for name, regex in parameters.pattern_params.iteritems():
      if name in kwargs:
        if isinstance(kwargs[name], basestring):
          pvalues = [kwargs[name]]
        else:
          pvalues = kwargs[name]
        for pvalue in pvalues:
          if re.match(regex, pvalue) is None:
            raise TypeError(
                'Parameter "%s" value "%s" does not match the pattern "%s"' %
                (name, pvalue, regex))
    for name, enums in parameters.enum_params.iteritems():
      if name in kwargs:
        # We need to handle the case of a repeated enum
        # name differently, since we want to handle both
        # arg='value' and arg=['value1', 'value2']
        if (name in parameters.repeated_params and
            not isinstance(kwargs[name], basestring)):
          values = kwargs[name]
        else:
          values = [kwargs[name]]
        for value in values:
          if value not in enums:
            raise TypeError(
                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                (name, value, str(enums)))
    # --- Split args into query vs path parameters, casting per schema ----
    actual_query_params = {}
    actual_path_params = {}
    for key, value in kwargs.iteritems():
      to_type = parameters.param_types.get(key, 'string')
      # For repeated parameters we cast each member of the list.
      if key in parameters.repeated_params and type(value) == type([]):
        cast_value = [_cast(x, to_type) for x in value]
      else:
        cast_value = _cast(value, to_type)
      if key in parameters.query_params:
        actual_query_params[parameters.argmap[key]] = cast_value
      if key in parameters.path_params:
        actual_path_params[parameters.argmap[key]] = cast_value
    body_value = kwargs.get('body', None)
    media_filename = kwargs.get('media_body', None)
    if self._developerKey:
      actual_query_params['key'] = self._developerKey
    # _media methods return raw bytes; methods with no declared response use
    # the raw model as well.
    model = self._model
    if methodName.endswith('_media'):
      model = MediaModel()
    elif 'response' not in methodDesc:
      model = RawModel()
    # --- Build the request URL and (possibly) the upload payload ---------
    headers = {}
    headers, params, query, body = model.request(headers,
        actual_path_params, actual_query_params, body_value)
    expanded_url = uritemplate.expand(pathUrl, params)
    url = urlparse.urljoin(self._baseUrl, expanded_url + query)
    resumable = None
    multipart_boundary = ''
    if media_filename:
      # Ensure we end up with a valid MediaUpload object.
      if isinstance(media_filename, basestring):
        (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
        if media_mime_type is None:
          raise UnknownFileType(media_filename)
        if not mimeparse.best_match([media_mime_type], ','.join(accept)):
          raise UnacceptableMimeTypeError(media_mime_type)
        media_upload = MediaFileUpload(media_filename,
                                       mimetype=media_mime_type)
      elif isinstance(media_filename, MediaUpload):
        media_upload = media_filename
      else:
        raise TypeError('media_filename must be str or MediaUpload.')
      # Check the maxSize
      if maxSize > 0 and media_upload.size() > maxSize:
        raise MediaUploadSizeError("Media larger than: %s" % maxSize)
      # Use the media path uri for media uploads
      expanded_url = uritemplate.expand(mediaPathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)
      if media_upload.resumable():
        url = _add_query_parameter(url, 'uploadType', 'resumable')
      if media_upload.resumable():
        # This is all we need to do for resumable, if the body exists it gets
        # sent in the first request, otherwise an empty body is sent.
        resumable = media_upload
      else:
        # A non-resumable upload
        if body is None:
          # This is a simple media upload
          headers['content-type'] = media_upload.mimetype()
          body = media_upload.getbytes(0, media_upload.size())
          url = _add_query_parameter(url, 'uploadType', 'media')
        else:
          # This is a multipart/related upload.
          msgRoot = MIMEMultipart('related')
          # msgRoot should not write out it's own headers
          setattr(msgRoot, '_write_headers', lambda self: None)
          # attach the body as one part
          msg = MIMENonMultipart(*headers['content-type'].split('/'))
          msg.set_payload(body)
          msgRoot.attach(msg)
          # attach the media as the second part
          msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
          msg['Content-Transfer-Encoding'] = 'binary'
          payload = media_upload.getbytes(0, media_upload.size())
          msg.set_payload(payload)
          msgRoot.attach(msg)
          body = msgRoot.as_string()
          multipart_boundary = msgRoot.get_boundary()
          headers['content-type'] = ('multipart/related; '
                                     'boundary="%s"') % multipart_boundary
          url = _add_query_parameter(url, 'uploadType', 'multipart')
    logger.info('URL being requested: %s' % url)
    return self._requestBuilder(self._http,
                                model.response,
                                url,
                                method=httpMethod,
                                body=body,
                                headers=headers,
                                methodId=methodId,
                                resumable=resumable)
  # --- Assemble the generated method's docstring from the discovery doc ---
  docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
  if len(parameters.argmap) > 0:
    docs.append('Args:\n')
    # Skip undocumented params and params common to all methods.
    skip_parameters = rootDesc.get('parameters', {}).keys()
    skip_parameters.extend(STACK_QUERY_PARAMETERS)
    all_args = parameters.argmap.keys()
    args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]
    # Move body to the front of the line.
    if 'body' in all_args:
      args_ordered.append('body')
    for name in all_args:
      if name not in args_ordered:
        args_ordered.append(name)
    for arg in args_ordered:
      if arg in skip_parameters:
        continue
      repeated = ''
      if arg in parameters.repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in parameters.required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][parameters.argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      if '$ref' in paramdesc:
        docs.append(
            ('  %s: object, %s%s%s\n    The object takes the'
            ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
              schema.prettyPrintByName(paramdesc['$ref'])))
      else:
        paramtype = paramdesc.get('type', 'string')
        docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                            repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append('    Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append('      %s - %s\n' % (name, desc))
  if 'response' in methodDesc:
    if methodName.endswith('_media'):
      docs.append('\nReturns:\n  The media object as a string.\n\n    ')
    else:
      docs.append('\nReturns:\n  An object of the form:\n\n    ')
    docs.append(schema.prettyPrintSchema(methodDesc['response']))
  setattr(method, '__doc__', ''.join(docs))
  return (methodName, method)
def createNextMethod(methodName):
  """Creates any _next methods for attaching to a Resource.

  The _next methods allow for easy iteration through list() responses.

  Args:
    methodName: string, name of the method to use.
  """
  methodName = fix_method_name(methodName)
  def methodNext(self, previous_request, previous_response):
    """Retrieves the next page of results.

    Args:
      previous_request: The request for the previous page. (required)
      previous_response: The response from the request for the previous page. (required)

    Returns:
      A request object that you can call 'execute()' on to request the next
      page. Returns None if there are no more items in the collection.
    """
    # No token in the previous response means the collection is exhausted.
    if 'nextPageToken' not in previous_response:
      return None
    request = copy.copy(previous_request)
    next_token = previous_response['nextPageToken']
    # Rewrite the query string of the previous URI, replacing any old
    # pageToken with the new one.
    url_parts = list(urlparse.urlparse(request.uri))
    query_pairs = [pair for pair in parse_qsl(url_parts[4])
                   if pair[0] != 'pageToken']
    query_pairs.append(('pageToken', next_token))
    url_parts[4] = urllib.urlencode(query_pairs)
    request.uri = urlparse.urlunparse(url_parts)
    logger.info('URL being requested: %s' % request.uri)
    return request
  return (methodName, methodNext)
class Resource(object):
"""A class for interacting with a resource."""
  def __init__(self, http, baseUrl, model, requestBuilder, developerKey,
               resourceDesc, rootDesc, schema):
    """Build a Resource from the API description.

    Args:
      http: httplib2.Http, Object to make http requests with.
      baseUrl: string, base URL for the API. All requests are relative to this
        URI.
      model: googleapiclient.Model, converts to and from the wire format.
      requestBuilder: class or callable that instantiates an
        googleapiclient.HttpRequest object.
      developerKey: string, key obtained from
        https://code.google.com/apis/console
      resourceDesc: object, section of deserialized discovery document that
        describes a resource. Note that the top level discovery document
        is considered a resource.
      rootDesc: object, the entire deserialized discovery document.
      schema: object, mapping of schema names to schema descriptions.
    """
    # Names of attributes created via _set_dynamic_attr; these are stripped
    # before pickling and rebuilt by __setstate__.
    self._dynamic_attrs = []
    self._http = http
    self._baseUrl = baseUrl
    self._model = model
    self._developerKey = developerKey
    self._requestBuilder = requestBuilder
    self._resourceDesc = resourceDesc
    self._rootDesc = rootDesc
    self._schema = schema
    # Generate and attach the API methods and nested resources.
    self._set_service_methods()
def _set_dynamic_attr(self, attr_name, value):
"""Sets an instance attribute and tracks it in a list of dynamic attributes.
Args:
attr_name: string; The name of the attribute to be set
value: The value being set on the object and tracked in the dynamic cache.
"""
self._dynamic_attrs.append(attr_name)
self.__dict__[attr_name] = value
def __getstate__(self):
"""Trim the state down to something that can be pickled.
Uses the fact that the instance variable _dynamic_attrs holds attrs that
will be wiped and restored on pickle serialization.
"""
state_dict = copy.copy(self.__dict__)
for dynamic_attr in self._dynamic_attrs:
del state_dict[dynamic_attr]
del state_dict['_dynamic_attrs']
return state_dict
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled.
Uses the fact that the instance variable _dynamic_attrs holds attrs that
will be wiped and restored on pickle serialization.
"""
self.__dict__.update(state)
self._dynamic_attrs = []
self._set_service_methods()
def _set_service_methods(self):
self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
self._add_next_methods(self._resourceDesc, self._schema)
def _add_basic_methods(self, resourceDesc, rootDesc, schema):
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
fixedMethodName, method = createMethod(
methodName, methodDesc, rootDesc, schema)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
# Add in _media methods. The functionality of the attached method will
# change when it sees that the method name ends in _media.
if methodDesc.get('supportsMediaDownload', False):
fixedMethodName, method = createMethod(
methodName + '_media', methodDesc, rootDesc, schema)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
def _add_nested_resources(self, resourceDesc, rootDesc, schema):
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(methodName, methodDesc):
"""Create a method on the Resource to access a nested Resource.
Args:
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
"""
methodName = fix_method_name(methodName)
def methodResource(self):
return Resource(http=self._http, baseUrl=self._baseUrl,
model=self._model, developerKey=self._developerKey,
requestBuilder=self._requestBuilder,
resourceDesc=methodDesc, rootDesc=rootDesc,
schema=schema)
setattr(methodResource, '__doc__', 'A collection resource.')
setattr(methodResource, '__is_resource__', True)
return (methodName, methodResource)
for methodName, methodDesc in resourceDesc['resources'].iteritems():
fixedMethodName, method = createResourceMethod(methodName, methodDesc)
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
def _add_next_methods(self, resourceDesc, schema):
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema.get(responseSchema['$ref'])
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
fixedMethodName, method = createNextMethod(methodName + '_next')
self._set_dynamic_attr(fixedMethodName,
method.__get__(self, self.__class__))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import TransactionStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.api.urls import FEDERATION_PREFIX
from synapse.federation.transport.server import TransportLayerServer
from synapse.crypto import context_factory
from synapse import events
from twisted.internet import reactor, defer
from twisted.web.resource import Resource
from daemonize import Daemonize
import sys
import logging
import gc
# Module logger for this worker process.
logger = logging.getLogger("synapse.app.federation_reader")


class FederationReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    TransactionStore,
    BaseSlavedStore,
):
    # Composite, read-only datastore: combines exactly the slaved storage
    # mixins the federation reader needs, with BaseSlavedStore listed last.
    pass
class FederationReaderServer(HomeServer):
    """Worker homeserver that serves inbound federation traffic read-only."""

    def get_db_conn(self, run_new_connection=True):
        """Open a raw connection using the configured database engine.

        Any param beginning with cp_ is a parameter for adbapi (the
        connection pool), and should not be passed to the database engine.
        """
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        logger.info("Setting up.")
        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        """Build the resource tree and listen on every bind address."""
        port = listener_config["port"]
        bind_addresses = listener_config["bind_addresses"]
        site_tag = listener_config.get("tag", port)

        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "federation":
                    resources.update({
                        FEDERATION_PREFIX: TransportLayerServer(self),
                    })

        root_resource = create_resource_tree(resources, Resource())

        for address in bind_addresses:
            reactor.listenTCP(
                port,
                SynapseSite(
                    "synapse.access.http.%s" % (site_tag,),
                    site_tag,
                    listener_config,
                    root_resource,
                ),
                interface=address
            )

        logger.info("Synapse federation reader now listening on port %d", port)

    def start_listening(self, listeners):
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                bind_addresses = listener["bind_addresses"]

                for address in bind_addresses:
                    reactor.listenTCP(
                        listener["port"],
                        manhole(
                            username="matrix",
                            password="rabbithole",
                            globals={"hs": self},
                        ),
                        interface=address
                    )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        """Long-poll the master's replication endpoint and apply updates."""
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except Exception:
                # Fix: this was a bare `except:`, which also swallows
                # BaseException subclasses such as GeneratorExit (raised when
                # an inlineCallbacks generator is cancelled). Catch only
                # Exception so genuine errors trigger the retry path and
                # cancellation still propagates.
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
def start(config_options):
    """Entry point: parse config, build the worker, and run the reactor.

    Args:
        config_options: command-line argument list (typically sys.argv[1:]).
    """
    try:
        config = HomeServerConfig.load_config(
            "Synapse federation reader", config_options
        )
    except ConfigError as e:
        # NOTE(review): e.message only exists on Python 2 exceptions —
        # confirm before porting to Python 3.
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    # This entry point must only be used for the federation reader worker.
    assert config.worker_app == "synapse.app.federation_reader"

    setup_logging(config.worker_log_config, config.worker_log_file)

    events.USE_FROZEN_DICTS = config.use_frozen_dicts

    database_engine = create_engine(config.database_config)

    tls_server_context_factory = context_factory.ServerContextFactory(config)

    ss = FederationReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    # Order matters: set up the datastore, then handlers, then listeners.
    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)

    def run():
        # Runs the reactor in its own logcontext; resource limits and GC
        # thresholds are applied just before entering the loop.
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    def start():
        # Kick off caching, profiling and the replication loop once the
        # reactor is running.
        ss.get_state_handler().start_caching()
        ss.get_datastore().start_profiling()
        ss.replicate()

    reactor.callWhenRunning(start)

    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-federation-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
# Script entry point: run the startup sequence inside a named logcontext.
if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
|
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/common.py: Common code for Cryptodome.SelfTest.Hash
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-testing for PyCryptodome hash modules"""
import sys
import unittest
import binascii
import Cryptodome.Hash
from Cryptodome.Util.py3compat import b, tobytes
from Cryptodome.Util.strxor import strxor_c
class HashDigestSizeSelfTest(unittest.TestCase):
def __init__(self, hashmod, description, expected):
unittest.TestCase.__init__(self)
self.hashmod = hashmod
self.expected = expected
self.description = description
def shortDescription(self):
return self.description
def runTest(self):
self.failUnless(hasattr(self.hashmod, "digest_size"))
self.assertEquals(self.hashmod.digest_size, self.expected)
h = self.hashmod.new()
self.failUnless(hasattr(h, "digest_size"))
self.assertEquals(h.digest_size, self.expected)
class HashSelfTest(unittest.TestCase):
def __init__(self, hashmod, description, expected, input):
unittest.TestCase.__init__(self)
self.hashmod = hashmod
self.expected = expected
self.input = input
self.description = description
def shortDescription(self):
return self.description
def runTest(self):
h = self.hashmod.new()
h.update(self.input)
out1 = binascii.b2a_hex(h.digest())
out2 = h.hexdigest()
h = self.hashmod.new(self.input)
out3 = h.hexdigest()
out4 = binascii.b2a_hex(h.digest())
# PY3K: hexdigest() should return str(), and digest() bytes
self.assertEqual(self.expected, out1) # h = .new(); h.update(data); h.digest()
if sys.version_info[0] == 2:
self.assertEqual(self.expected, out2) # h = .new(); h.update(data); h.hexdigest()
self.assertEqual(self.expected, out3) # h = .new(data); h.hexdigest()
else:
self.assertEqual(self.expected.decode(), out2) # h = .new(); h.update(data); h.hexdigest()
self.assertEqual(self.expected.decode(), out3) # h = .new(data); h.hexdigest()
self.assertEqual(self.expected, out4) # h = .new(data); h.digest()
# Verify that the .new() method produces a fresh hash object, except
# for MD5 and SHA1, which are hashlib objects. (But test any .new()
# method that does exist.)
if self.hashmod.__name__ not in ('Cryptodome.Hash.MD5', 'Cryptodome.Hash.SHA1') or hasattr(h, 'new'):
h2 = h.new()
h2.update(self.input)
out5 = binascii.b2a_hex(h2.digest())
self.assertEqual(self.expected, out5)
class HashTestOID(unittest.TestCase):
def __init__(self, hashmod, oid):
unittest.TestCase.__init__(self)
self.hashmod = hashmod
self.oid = oid
def runTest(self):
h = self.hashmod.new()
self.assertEqual(h.oid, self.oid)
class HashDocStringTest(unittest.TestCase):
    """Check that the hash module carries a string docstring."""

    def __init__(self, hashmod):
        unittest.TestCase.__init__(self)
        self.hashmod = hashmod

    def runTest(self):
        # assertTrue replaces the deprecated assert_ alias (removed in
        # Python 3.12); the unused local `docstring` was dropped.
        self.assertTrue(hasattr(self.hashmod, '__doc__'))
        self.assertTrue(isinstance(self.hashmod.__doc__, str))
class GenericHashConstructorTest(unittest.TestCase):
    # Exercises the generic Cryptodome.Hash.new() constructor against the
    # module's own .new() factory, by name and by sample object.
    def __init__(self, hashmod):
        unittest.TestCase.__init__(self)
        self.hashmod = hashmod

    def runTest(self):
        obj1 = self.hashmod.new("foo")               # data in constructor
        obj2 = self.hashmod.new()                    # no-arg constructor
        obj3 = Cryptodome.Hash.new(obj1.name, "foo")  # lookup by algorithm name
        obj4 = Cryptodome.Hash.new(obj1.name)
        obj5 = Cryptodome.Hash.new(obj1, "foo")      # lookup by sample object
        obj6 = Cryptodome.Hash.new(obj1)
        # NOTE(review): the isinstance() arguments below look reversed — the
        # instance should come first and the class second; passing a hash
        # *object* as the classinfo argument raises TypeError at runtime.
        # Also, assert_ is a deprecated alias of assertTrue (removed in
        # Python 3.12). Confirm the intended check before fixing.
        self.assert_(isinstance(self.hashmod, obj1))
        self.assert_(isinstance(self.hashmod, obj2))
        self.assert_(isinstance(self.hashmod, obj3))
        self.assert_(isinstance(self.hashmod, obj4))
        self.assert_(isinstance(self.hashmod, obj5))
        self.assert_(isinstance(self.hashmod, obj6))
class MACSelfTest(unittest.TestCase):
    # Known-answer test for a MAC module: the keyed digest of `input` must
    # match `result` (hex), and verify()/hexverify() must accept the correct
    # MAC while rejecting a corrupted one.
    def __init__(self, module, description, result, input, key, params):
        unittest.TestCase.__init__(self)
        self.module = module            # MAC module under test
        self.result = result            # expected MAC (hex, may contain spaces)
        self.input = input              # message to authenticate (hex encoded)
        self.key = key                  # MAC key (hex encoded)
        self.params = params            # extra keyword args for module.new()
        self.description = description

    def shortDescription(self):
        return self.description

    def runTest(self):
        key = binascii.a2b_hex(b(self.key))
        data = binascii.a2b_hex(b(self.input))

        # Strip whitespace from the expected string (which should be in lowercase-hex)
        expected = b("".join(self.result.split()))

        h = self.module.new(key, **self.params)
        h.update(data)
        out1_bin = h.digest()
        out1 = binascii.b2a_hex(h.digest())
        out2 = h.hexdigest()

        # Verify that correct MAC does not raise any exception
        h.hexverify(out1)
        h.verify(out1_bin)

        # Verify that incorrect MAC does raise ValueError exception
        wrong_mac = strxor_c(out1_bin, 255)  # flip every bit of the MAC
        self.assertRaises(ValueError, h.verify, wrong_mac)
        self.assertRaises(ValueError, h.hexverify, "4556")

        # One-shot construction: key and data in the constructor.
        h = self.module.new(key, data, **self.params)

        out3 = h.hexdigest()
        out4 = binascii.b2a_hex(h.digest())

        # Test .copy()
        h2 = h.copy()
        h.update(b("blah blah blah"))  # Corrupt the original hash object
        out5 = binascii.b2a_hex(h2.digest())  # The copied hash object should return the correct result

        # PY3K: Check that hexdigest() returns str and digest() returns bytes
        if sys.version_info[0] > 2:
            self.assertTrue(isinstance(h.digest(), type(b(""))))
            self.assertTrue(isinstance(h.hexdigest(), type("")))

        # PY3K: Check that .hexverify() accepts bytes or str
        if sys.version_info[0] > 2:
            h.hexverify(h.hexdigest())
            h.hexverify(h.hexdigest().encode('ascii'))

        # PY3K: hexdigest() should return str, and digest() should return bytes
        self.assertEqual(expected, out1)
        if sys.version_info[0] == 2:
            self.assertEqual(expected, out2)
            self.assertEqual(expected, out3)
        else:
            self.assertEqual(expected.decode(), out2)
            self.assertEqual(expected.decode(), out3)
        self.assertEqual(expected, out4)
        self.assertEqual(expected, out5)
def make_hash_tests(module, module_name, test_data, digest_size, oid=None):
    """Build the list of TestCase objects for one hash module.

    Args:
        module: hash module under test (must offer new()).
        module_name: human-readable name used in the test descriptions.
        test_data: sequence of (expected_hex, input) or
            (expected_hex, input, description) rows.
        digest_size: expected digest size, in bytes.
        oid: optional ASN.1 object identifier the hash objects must expose.

    Returns:
        A list of unittest.TestCase instances.
    """
    tests = []
    # enumerate() replaces the unidiomatic range(len(...)) + indexing loop.
    for i, row in enumerate(test_data):
        (expected, input) = map(tobytes, row[0:2])
        if len(row) < 3:
            description = repr(input)
        else:
            description = row[2]
        name = "%s #%d: %s" % (module_name, i + 1, description)
        tests.append(HashSelfTest(module, name, expected, input))

    # len(test_data) equals the original post-loop `i+1`, but no longer
    # raises NameError when test_data is empty.
    name = "%s #%d: digest_size" % (module_name, len(test_data))
    tests.append(HashDigestSizeSelfTest(module, name, digest_size))

    if oid is not None:
        tests.append(HashTestOID(module, oid))

    tests.append(HashDocStringTest(module))

    if getattr(module, 'name', None) is not None:
        tests.append(GenericHashConstructorTest(module))

    return tests
def make_mac_tests(module, module_name, test_data):
    """Build one MACSelfTest per row of test_data.

    Each row is (key_hex, data_hex, expected_result, description, params).
    """
    tests = []
    # enumerate() replaces the unidiomatic range(len(...)) + indexing loop.
    for i, row in enumerate(test_data):
        (key, data, results, description, params) = row
        name = "%s #%d: %s" % (module_name, i + 1, description)
        tests.append(MACSelfTest(module, name, results, data, key, params))
    return tests
# vim:set ts=4 sw=4 sts=4 expandtab:
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo_log import log as logging
import six
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest import clients
from tempest.common import cred_provider
from tempest import config
from tempest import exceptions
from tempest.services.identity.v2.json import identity_client as v2_identity
# Module-level handles: tempest configuration object and module logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class CredsClient(object):
    """This class is a wrapper around the identity clients, to provide a
    single interface for managing credentials in both v2 and v3 cases.

    It's not bound to created credentials, only to a specific set of admin
    credentials used for generating credentials.
    """

    def __init__(self, identity_client):
        # The client implies version and credentials
        self.identity_client = identity_client
        self.credentials = self.identity_client.auth_provider.credentials

    def create_user(self, username, password, project, email):
        return self.identity_client.create_user(
            username, password, project['id'], email)

    @abc.abstractmethod
    def create_project(self, name, description):
        pass

    def assign_user_role(self, user, project, role_name):
        # Resolve the role name to an id; the first match wins.
        matching = [r for r in self._list_roles() if r['name'] == role_name]
        if not matching:
            raise lib_exc.NotFound('No "%s" role found' % role_name)
        role = matching[0]
        try:
            self.identity_client.assign_user_role(project['id'], user['id'],
                                                  role['id'])
        except lib_exc.Conflict:
            LOG.debug("Role %s already assigned on project %s for user %s" % (
                role['id'], project['id'], user['id']))

    @abc.abstractmethod
    def get_credentials(self, user, project, password):
        pass

    def delete_user(self, user_id):
        self.identity_client.delete_user(user_id)

    def _list_roles(self):
        return self.identity_client.list_roles()
class V2CredsClient(CredsClient):
    """Identity v2 flavour of CredsClient: projects are v2 tenants."""

    def create_project(self, name, description):
        return self.identity_client.create_tenant(
            name=name, description=description)

    def get_credentials(self, user, project, password):
        return cred_provider.get_credentials(
            identity_version='v2',
            username=user['name'], user_id=user['id'],
            tenant_name=project['name'], tenant_id=project['id'],
            password=password)

    def delete_project(self, project_id):
        self.identity_client.delete_tenant(project_id)
class V3CredsClient(CredsClient):
    """Identity v3 flavour of CredsClient: projects live in a fixed domain."""

    def __init__(self, identity_client, domain_name):
        super(V3CredsClient, self).__init__(identity_client)
        try:
            # Domain names must be unique, in any case a list is returned,
            # selecting the first (and only) element
            domains = self.identity_client.list_domains(
                params={'name': domain_name})
            self.creds_domain = domains[0]
        except lib_exc.NotFound:
            # TODO(andrea) we could probably create the domain on the fly
            raise exceptions.InvalidConfiguration(
                "Configured domain %s could not be found" % domain_name)

    def create_project(self, name, description):
        return self.identity_client.create_project(
            name=name, description=description,
            domain_id=self.creds_domain['id'])

    def get_credentials(self, user, project, password):
        return cred_provider.get_credentials(
            identity_version='v3',
            username=user['name'], user_id=user['id'],
            project_name=project['name'], project_id=project['id'],
            password=password,
            project_domain_name=self.creds_domain['name'])

    def delete_project(self, project_id):
        self.identity_client.delete_project(project_id)
def get_creds_client(identity_client, project_domain_name=None):
    """Return the CredsClient flavour matching the identity client version."""
    if not isinstance(identity_client, v2_identity.IdentityClientJSON):
        return V3CredsClient(identity_client, project_domain_name)
    return V2CredsClient(identity_client)
class IsolatedCreds(cred_provider.CredentialProvider):
    """Credential provider that creates throw-away users/projects on demand.

    Credential sets (and, when neutron is available, their network
    resources) are cached per credential type in self.isolated_creds and
    are all destroyed together in clear_isolated_creds().
    """

    def __init__(self, identity_version=None, name=None, password='pass',
                 network_resources=None):
        super(IsolatedCreds, self).__init__(identity_version, name, password,
                                            network_resources)
        self.network_resources = network_resources
        # Cache: str(credential type) -> TestResources.
        self.isolated_creds = {}
        self.ports = []
        self.password = password
        self.default_admin_creds = cred_provider.get_configured_credentials(
            'identity_admin', fill_in=True,
            identity_version=self.identity_version)
        self.identity_admin_client, self.network_admin_client = (
            self._get_admin_clients())
        # Domain where isolated credentials are provisioned (v3 only).
        # Falls back to the admin account's domain when none is configured.
        self.creds_domain_name = None
        if self.identity_version == 'v3':
            self.creds_domain_name = (
                CONF.auth.tenant_isolation_domain_name or
                self.default_admin_creds.project_domain_name)
        self.creds_client = get_creds_client(
            self.identity_admin_client, self.creds_domain_name)

    def _get_admin_clients(self):
        """
        Returns a tuple with instances of the following admin clients (in this
        order):
            identity
            network
        """
        os = clients.Manager(self.default_admin_creds)
        if self.identity_version == 'v2':
            return os.identity_client, os.network_client
        else:
            return os.identity_v3_client, os.network_client

    def _create_creds(self, suffix="", admin=False, roles=None):
        """Create random credentials under the following schema.

        If the name contains a '.' is the full class path of something, and
        we don't really care. If it isn't, it's probably a meaningful name,
        so use it.

        For logging purposes, -user and -tenant are long and redundant,
        don't use them. The user# will be sufficient to figure it out.
        """
        if '.' in self.name:
            root = ""
        else:
            root = self.name

        project_name = data_utils.rand_name(root) + suffix
        project_desc = project_name + "-desc"
        project = self.creds_client.create_project(
            name=project_name, description=project_desc)

        username = data_utils.rand_name(root) + suffix
        email = data_utils.rand_name(root) + suffix + "@example.com"
        user = self.creds_client.create_user(
            username, self.password, project, email)
        if admin:
            self.creds_client.assign_user_role(user, project,
                                               CONF.identity.admin_role)
        # Add roles specified in config file
        for conf_role in CONF.auth.tempest_roles:
            self.creds_client.assign_user_role(user, project, conf_role)
        # Add roles requested by caller
        if roles:
            for role in roles:
                self.creds_client.assign_user_role(user, project, role)
        creds = self.creds_client.get_credentials(user, project, self.password)
        return cred_provider.TestResources(creds)

    def _create_network_resources(self, tenant_id):
        """Create network/subnet/router for the tenant, as configured.

        On failure, whatever was already created is torn down before the
        exception is re-raised.
        """
        network = None
        subnet = None
        router = None
        # Make sure settings
        if self.network_resources:
            if self.network_resources['router']:
                if (not self.network_resources['subnet'] or
                    not self.network_resources['network']):
                    raise exceptions.InvalidConfiguration(
                        'A router requires a subnet and network')
            elif self.network_resources['subnet']:
                if not self.network_resources['network']:
                    raise exceptions.InvalidConfiguration(
                        'A subnet requires a network')
            elif self.network_resources['dhcp']:
                raise exceptions.InvalidConfiguration('DHCP requires a subnet')

        # NOTE(review): this assigns an attribute on the imported data_utils
        # *module*, not a local variable, so it is shared process-wide.
        # Confirm whether a plain local was intended.
        data_utils.rand_name_root = data_utils.rand_name(self.name)
        if not self.network_resources or self.network_resources['network']:
            network_name = data_utils.rand_name_root + "-network"
            network = self._create_network(network_name, tenant_id)
        try:
            if not self.network_resources or self.network_resources['subnet']:
                subnet_name = data_utils.rand_name_root + "-subnet"
                subnet = self._create_subnet(subnet_name, tenant_id,
                                             network['id'])
            if not self.network_resources or self.network_resources['router']:
                router_name = data_utils.rand_name_root + "-router"
                router = self._create_router(router_name, tenant_id)
                self._add_router_interface(router['id'], subnet['id'])
        except Exception:
            # Roll back in reverse creation order.
            if router:
                self._clear_isolated_router(router['id'], router['name'])
            if subnet:
                self._clear_isolated_subnet(subnet['id'], subnet['name'])
            if network:
                self._clear_isolated_network(network['id'], network['name'])
            raise
        return network, subnet, router

    def _create_network(self, name, tenant_id):
        resp_body = self.network_admin_client.create_network(
            name=name, tenant_id=tenant_id)
        return resp_body['network']

    def _create_subnet(self, subnet_name, tenant_id, network_id):
        """Carve the first free CIDR out of the configured tenant network."""
        base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        mask_bits = CONF.network.tenant_network_mask_bits
        for subnet_cidr in base_cidr.subnet(mask_bits):
            try:
                if self.network_resources:
                    resp_body = self.network_admin_client.\
                        create_subnet(
                            network_id=network_id, cidr=str(subnet_cidr),
                            name=subnet_name,
                            tenant_id=tenant_id,
                            enable_dhcp=self.network_resources['dhcp'],
                            ip_version=4)
                else:
                    resp_body = self.network_admin_client.\
                        create_subnet(network_id=network_id,
                                      cidr=str(subnet_cidr),
                                      name=subnet_name,
                                      tenant_id=tenant_id,
                                      ip_version=4)
                break
            except lib_exc.BadRequest as e:
                # CIDR already taken: try the next candidate.
                if 'overlaps with another subnet' not in str(e):
                    raise
        else:
            message = 'Available CIDR for subnet creation could not be found'
            raise Exception(message)
        return resp_body['subnet']

    def _create_router(self, router_name, tenant_id):
        external_net_id = dict(
            network_id=CONF.network.public_network_id)
        resp_body = self.network_admin_client.create_router(
            router_name,
            external_gateway_info=external_net_id,
            tenant_id=tenant_id)
        return resp_body['router']

    def _add_router_interface(self, router_id, subnet_id):
        self.network_admin_client.add_router_interface_with_subnet_id(
            router_id, subnet_id)

    def get_credentials(self, credential_type):
        """Return (possibly cached) credentials for the given type.

        credential_type is either 'primary'/'alt'/'admin' or a list of role
        names; new credential sets also get network resources when neutron
        is available.
        """
        if self.isolated_creds.get(str(credential_type)):
            credentials = self.isolated_creds[str(credential_type)]
        else:
            if credential_type in ['primary', 'alt', 'admin']:
                is_admin = (credential_type == 'admin')
                credentials = self._create_creds(admin=is_admin)
            else:
                credentials = self._create_creds(roles=credential_type)
            self.isolated_creds[str(credential_type)] = credentials
            # Maintained until tests are ported
            LOG.info("Acquired isolated creds:\n credentials: %s"
                     % credentials)
            if (CONF.service_available.neutron and
                    not CONF.baremetal.driver_enabled):
                network, subnet, router = self._create_network_resources(
                    credentials.tenant_id)
                credentials.set_resources(network=network, subnet=subnet,
                                          router=router)
                LOG.info("Created isolated network resources for : \n"
                         + " credentials: %s" % credentials)
        return credentials

    def get_primary_creds(self):
        return self.get_credentials('primary')

    def get_admin_creds(self):
        return self.get_credentials('admin')

    def get_alt_creds(self):
        return self.get_credentials('alt')

    def get_creds_by_roles(self, roles, force_new=False):
        roles = list(set(roles))
        # The roles list as a str will become the index as the dict key for
        # the created credentials set in the isolated_creds dict.
        exist_creds = self.isolated_creds.get(str(roles))
        # If force_new flag is True 2 cred sets with the same roles are needed
        # handle this by creating a separate index for old one to store it
        # separately for cleanup
        if exist_creds and force_new:
            new_index = str(roles) + '-' + str(len(self.isolated_creds))
            self.isolated_creds[new_index] = exist_creds
            del self.isolated_creds[str(roles)]
        return self.get_credentials(roles)

    def _clear_isolated_router(self, router_id, router_name):
        # Best-effort delete: a missing router only logs a warning.
        net_client = self.network_admin_client
        try:
            net_client.delete_router(router_id)
        except lib_exc.NotFound:
            LOG.warn('router with name: %s not found for delete' %
                     router_name)

    def _clear_isolated_subnet(self, subnet_id, subnet_name):
        # Best-effort delete: a missing subnet only logs a warning.
        net_client = self.network_admin_client
        try:
            net_client.delete_subnet(subnet_id)
        except lib_exc.NotFound:
            LOG.warn('subnet with name: %s not found for delete' %
                     subnet_name)

    def _clear_isolated_network(self, network_id, network_name):
        # Best-effort delete: a missing network only logs a warning.
        net_client = self.network_admin_client
        try:
            net_client.delete_network(network_id)
        except lib_exc.NotFound:
            LOG.warn('network with name: %s not found for delete' %
                     network_name)

    def _cleanup_default_secgroup(self, tenant):
        # Delete the tenant's "default" security group(s), which would
        # otherwise survive the tenant deletion.
        net_client = self.network_admin_client
        resp_body = net_client.list_security_groups(tenant_id=tenant,
                                                    name="default")
        secgroups_to_delete = resp_body['security_groups']
        for secgroup in secgroups_to_delete:
            try:
                net_client.delete_security_group(secgroup['id'])
            except lib_exc.NotFound:
                LOG.warn('Security group %s, id %s not found for clean-up' %
                         (secgroup['name'], secgroup['id']))

    def _clear_isolated_net_resources(self):
        """Tear down router interface, router, subnet and network per cred set."""
        net_client = self.network_admin_client
        for cred in self.isolated_creds:
            creds = self.isolated_creds.get(cred)
            if (not creds or not any([creds.router, creds.network,
                                      creds.subnet])):
                continue
            LOG.debug("Clearing network: %(network)s, "
                      "subnet: %(subnet)s, router: %(router)s",
                      {'network': creds.network, 'subnet': creds.subnet,
                       'router': creds.router})
            if (not self.network_resources or
                    (self.network_resources.get('router') and creds.subnet)):
                try:
                    net_client.remove_router_interface_with_subnet_id(
                        creds.router['id'], creds.subnet['id'])
                except lib_exc.NotFound:
                    LOG.warn('router with name: %s not found for delete' %
                             creds.router['name'])
                self._clear_isolated_router(creds.router['id'],
                                            creds.router['name'])
            if (not self.network_resources or
                    self.network_resources.get('subnet')):
                self._clear_isolated_subnet(creds.subnet['id'],
                                            creds.subnet['name'])
            if (not self.network_resources or
                    self.network_resources.get('network')):
                self._clear_isolated_network(creds.network['id'],
                                             creds.network['name'])

    def clear_isolated_creds(self):
        """Delete every cached credential set, its user, project and networks."""
        if not self.isolated_creds:
            return
        self._clear_isolated_net_resources()
        # NOTE: itervalues() means this module targets Python 2.
        for creds in self.isolated_creds.itervalues():
            try:
                self.creds_client.delete_user(creds.user_id)
            except lib_exc.NotFound:
                LOG.warn("user with name: %s not found for delete" %
                         creds.username)
            try:
                if CONF.service_available.neutron:
                    self._cleanup_default_secgroup(creds.tenant_id)
                self.creds_client.delete_project(creds.tenant_id)
            except lib_exc.NotFound:
                LOG.warn("tenant with name: %s not found for delete" %
                         creds.tenant_name)
        self.isolated_creds = {}

    def is_multi_user(self):
        return True

    def is_multi_tenant(self):
        return True

    def is_role_available(self, role):
        # Roles are created/assigned on demand, so any role is available.
        return True
|
|
# -*- coding: utf-8 -*-
import copy
from functools import wraps
import json
import sys
from cms.utils.compat import DJANGO_1_7
import django
from django.contrib.admin.helpers import AdminForm
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import get_deleted_objects
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, ValidationError
from django.db import router, transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from django.template.defaultfilters import escape
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import (PageForm, AdvancedSettingsForm, PagePermissionForm,
PublicationDatesForm)
from cms.admin.permissionadmin import (PERMISSION_ADMIN_INLINES, PagePermissionInlineAdmin, ViewRestrictionInlineAdmin)
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.admin.views import revert_plugins
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_PENDING
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, StaticPlaceholder
from cms.models.managers import PagePermissionsPermissionManager
from cms.plugin_pool import plugin_pool
from cms.toolbar_pool import toolbar_pool
from cms.utils import helpers, permissions, get_language_from_request, admin as admin_utils, copy_plugins
from cms.utils.i18n import get_language_list, get_language_tuple, get_language_object, force_language
from cms.utils.admin import jsonify_request
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import find_placeholder_relation, current_site
from cms.utils.permissions import has_global_page_permission, has_generic_permission
from cms.utils.urlutils import add_url_parameters, admin_reverse
require_POST = method_decorator(require_POST)
# Reversion support is optional: when django-reversion is installed the page
# admin inherits from VersionAdmin; otherwise the plain ModelAdmin is used and
# no-op stand-ins for the reversion API are defined below.
if is_installed('reversion'):
    from reversion.admin import VersionAdmin as ModelAdmin
    from reversion import create_revision
else: # pragma: no cover
    from django.contrib.admin import ModelAdmin

    # No-op replacement mimicking reversion's revision context manager.
    class ReversionContext(object):
        def __enter__(self):
            yield

        def __exit__(self, exc_type, exc_val, exc_tb):
            pass

        def __call__(self, func):
            """Allows this revision context to be used as a decorator."""

            @wraps(func)
            def do_revision_context(*args, **kwargs):
                self.__enter__()
                exception = False
                try:
                    try:
                        return func(*args, **kwargs)
                    except:
                        # Mirror context-manager semantics: mark the failure,
                        # let __exit__ decide whether to suppress it.
                        exception = True
                        if not self.__exit__(*sys.exc_info()):
                            raise
                finally:
                    if not exception:
                        self.__exit__(None, None, None)

            return do_revision_context

    def create_revision():
        # Stand-in for reversion.create_revision() when reversion is absent.
        return ReversionContext()
# Comments recorded on reversion revisions created by the page admin.
PUBLISH_COMMENT = "Publish"
INITIAL_COMMENT = "Initial version."
class PageAdmin(PlaceholderAdminMixin, ModelAdmin):
    """Admin for the CMS ``Page`` model.

    Combines placeholder editing (``PlaceholderAdminMixin``) with optional
    reversion support: ``ModelAdmin`` is reversion's ``VersionAdmin`` when
    django-reversion is installed, the plain Django ``ModelAdmin`` otherwise.
    """
    form = PageForm
    # Admin search box fields; '=id' means exact match on the primary key.
    search_fields = ('=id', 'title_set__slug', 'title_set__title', 'reverse_id')
    revision_form_template = "admin/cms/page/history/revision_header.html"
    recover_form_template = "admin/cms/page/history/recover_header.html"
    add_general_fields = ['title', 'slug', 'language', 'template']
    change_list_template = "admin/cms/page/tree/base.html"
    list_filter = ['in_navigation', 'template', 'changed_by', 'soft_root']
    # Title fields that may be edited from the frontend toolbar.
    title_frontend_editable_fields = ['title', 'menu_title', 'page_title']
    inlines = PERMISSION_ADMIN_INLINES
    def get_urls(self):
        """Get the admin urls

        Builds the custom page-admin endpoints (move/copy/publish/etc.),
        appends plugin-provided patterns, then the default ModelAdmin urls.
        """
        info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
        # Wrap each view in admin_view and name it '<app>_<model>_<viewname>'.
        pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
        url_patterns = [
            pat(r'^([0-9]+)/advanced-settings/$', self.advanced),
            pat(r'^([0-9]+)/dates/$', self.dates),
            pat(r'^([0-9]+)/permission-settings/$', self.permissions),
            pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
            pat(r'^([0-9]+)/move-page/$', self.move_page),
            pat(r'^([0-9]+)/copy-page/$', self.copy_page),
            pat(r'^([0-9]+)/copy-language/$', self.copy_language),
            pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog),  # copy dialog
            pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
            pat(r'^([0-9]+)/permissions/$', self.get_permissions),
            pat(r'^([0-9]+)/undo/$', self.undo),
            pat(r'^([0-9]+)/redo/$', self.redo),
            pat(r'^([0-9]+)/change_template/$', self.change_template),
            pat(r'^([0-9]+)/([a-z\-]+)/descendants/$', self.descendants),  # menu html for page descendants
            pat(r'^([0-9]+)/([a-z\-]+)/edit-field/$', self.edit_title_fields),
            pat(r'^([0-9]+)/([a-z\-]+)/publish/$', self.publish_page),
            pat(r'^([0-9]+)/([a-z\-]+)/unpublish/$', self.unpublish),
            pat(r'^([0-9]+)/([a-z\-]+)/revert/$', self.revert_page),
            pat(r'^([0-9]+)/([a-z\-]+)/preview/$', self.preview_page),
            pat(r'^add-page-type/$', self.add_page_type),
            pat(r'^published-pages/$', self.get_published_pagelist),
            url(r'^resolve/$', self.resolve, name="cms_page_resolve"),
        ]
        if plugin_pool.get_all_plugins():
            url_patterns += plugin_pool.get_patterns()
        url_patterns += super(PageAdmin, self).get_urls()
        return url_patterns
    def get_revision_instances(self, request, object):
        """Returns all the instances to be used in the object's revision.

        Collects the page itself, every plugin attached to its placeholders,
        each plugin's concrete instance, and all of the page's titles.
        """
        if isinstance(object, Title):
            object = object.page
        if isinstance(object, Page) and not object.publisher_is_draft:
            # NOTE(review): maps a non-draft page via publisher_public --
            # confirm this yields the intended counterpart page.
            object = object.publisher_public
        placeholder_relation = find_placeholder_relation(object)
        data = [object]
        filters = {'placeholder__%s' % placeholder_relation: object}
        for plugin in CMSPlugin.objects.filter(**filters):
            data.append(plugin)
            plugin_instance, admin = plugin.get_plugin_instance()
            if plugin_instance:
                data.append(plugin_instance)
        if isinstance(object, Page):
            titles = object.title_set.all()
            for title in titles:
                # Detach the public reference so the revision stores the
                # title's draft state.
                title.publisher_public = None
                data.append(title)
        return data
def save_model(self, request, obj, form, change):
"""
Move the page in the tree if necessary and save every placeholder
Content object.
"""
target = request.GET.get('target', None)
position = request.GET.get('position', None)
if 'recover' in request.path_info:
pk = obj.pk
if obj.parent_id:
try:
parent = Page.objects.get(pk=obj.parent_id)
except Page.DoesNotExist:
parent = None
else:
parent = None
obj.pk = None
obj.path = None
obj.numchild = 0
obj.depth = 0
if parent:
saved_obj = parent.add_child(instance=obj)
else:
saved_obj = obj.add_root(instance=obj)
tmp_pk = saved_obj.pk
saved_obj.pk = pk
Page.objects.get(pk=tmp_pk).delete()
saved_obj.save(no_signals=True)
else:
if 'history' in request.path_info:
old_obj = Page.objects.get(pk=obj.pk)
obj.depth = old_obj.depth
obj.parent_id = old_obj.parent_id
obj.path = old_obj.path
obj.numchild = old_obj.numchild
new = False
if not obj.pk:
new = True
obj.save()
if 'recover' in request.path_info or 'history' in request.path_info:
revert_plugins(request, obj.version.pk, obj)
if target is not None and position is not None:
try:
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
pass
else:
if position == 'last-child' or position == 'first-child':
obj.parent_id = target.pk
else:
obj.parent_id = target.parent_id
obj.save()
obj = obj.move(target, pos=position)
page_type_id = form.cleaned_data.get('page_type')
copy_target_id = request.GET.get('copy_target')
if copy_target_id or page_type_id:
if page_type_id:
copy_target_id = page_type_id
copy_target = Page.objects.get(pk=copy_target_id)
if not copy_target.has_view_permission(request):
raise PermissionDenied()
obj = Page.objects.get(pk=obj.pk) #mptt reload
copy_target._copy_attributes(obj, clean=True)
obj.save()
for lang in copy_target.languages.split(','):
copy_target._copy_contents(obj, lang)
if not 'permission' in request.path_info:
language = form.cleaned_data['language']
Title.objects.set_or_create(
request,
obj,
form,
language,
)
# is it home? publish it right away
if new and Page.objects.filter(site_id=obj.site_id).count() == 1:
obj.publish(language)
def get_fieldsets(self, request, obj=None):
form = self.get_form(request, obj, fields=None)
if getattr(form, 'fieldsets', None) is None:
fields = list(form.base_fields) + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
else:
return form.fieldsets
def get_inline_classes(self, request, obj=None, **kwargs):
if obj and 'permission' in request.path_info:
return PERMISSION_ADMIN_INLINES
return []
def get_form_class(self, request, obj=None, **kwargs):
if 'advanced' in request.path_info:
return AdvancedSettingsForm
elif 'permission' in request.path_info:
return PagePermissionForm
elif 'dates' in request.path_info:
return PublicationDatesForm
return self.form
    def get_form(self, request, obj=None, **kwargs):
        """
        Get PageForm for the Page model and modify its fields depending on
        the request.

        Seeds field initials from the current title object (when editing) or
        from GET parameters / session state (when adding).
        """
        language = get_language_from_request(request, obj)
        form_cls = self.get_form_class(request, obj)
        form = super(PageAdmin, self).get_form(request, obj, form=form_cls, **kwargs)
        # get_form method operates by overriding initial fields value which
        # may persist across invocation. Code below deepcopies fields definition
        # to avoid leaks
        for field in form.base_fields.keys():
            form.base_fields[field] = copy.deepcopy(form.base_fields[field])
        if 'language' in form.base_fields:
            form.base_fields['language'].initial = language
        if 'page_type' in form.base_fields:
            # Hide the page-type selector when copying, adding a page type,
            # editing an existing page, or when no page types exist yet.
            if 'copy_target' in request.GET or 'add_page_type' in request.GET or obj:
                del form.base_fields['page_type']
            elif not Title.objects.filter(page__parent__reverse_id=PAGE_TYPES_ID, language=language).exists():
                del form.base_fields['page_type']
        if 'add_page_type' in request.GET:
            del form.base_fields['menu_title']
            del form.base_fields['meta_description']
            del form.base_fields['page_title']
        self.inlines = self.get_inline_classes(request, obj, **kwargs)
        if obj:
            # Editing: seed initials from the title object, optionally from a
            # specific revision when in history/recover mode.
            if 'history' in request.path_info or 'recover' in request.path_info:
                version_id = request.path_info.split('/')[-2]
            else:
                version_id = None
            title_obj = obj.get_title_obj(language=language, fallback=False, version_id=version_id, force_reload=True)
            if 'site' in form.base_fields and form.base_fields['site'].initial is None:
                form.base_fields['site'].initial = obj.site
            for name in ('slug', 'title', 'meta_description', 'menu_title', 'page_title', 'redirect'):
                if name in form.base_fields:
                    form.base_fields[name].initial = getattr(title_obj, name)
            if 'overwrite_url' in form.base_fields:
                if title_obj.has_url_overwrite:
                    form.base_fields['overwrite_url'].initial = title_obj.path
                else:
                    form.base_fields['overwrite_url'].initial = ''
        else:
            # Adding: blank the title fields and derive the parent from the
            # move/copy target passed in GET.
            for name in ('slug', 'title'):
                form.base_fields[name].initial = u''
            if 'target' in request.GET or 'copy_target' in request.GET:
                target = request.GET.get('copy_target') or request.GET.get('target')
                if 'position' in request.GET:
                    position = request.GET['position']
                    if position == 'last-child' or position == 'first-child':
                        form.base_fields['parent'].initial = request.GET.get('target', None)
                    else:
                        # Sibling position: share the target's parent.
                        sibling = Page.objects.get(pk=target)
                        form.base_fields['parent'].initial = sibling.parent_id
                else:
                    form.base_fields['parent'].initial = request.GET.get('target', None)
            form.base_fields['site'].initial = request.session.get('cms_admin_site', None)
        return form
def advanced(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_advanced_settings_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'advanced_settings': True, 'title': _("Advanced Settings")})
def dates(self, request, object_id):
return self.change_view(request, object_id, extra_context={'publishing_dates': True, 'title': _("Publishing dates")})
def permissions(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_change_permissions_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'show_permissions': True, 'title': _("Change Permissions")})
def get_inline_instances(self, request, obj=None):
inlines = super(PageAdmin, self).get_inline_instances(request, obj)
if get_cms_setting('PERMISSION') and obj:
filtered_inlines = []
for inline in inlines:
if (isinstance(inline, PagePermissionInlineAdmin)
and not isinstance(inline, ViewRestrictionInlineAdmin)):
if "recover" in request.path or "history" in request.path:
# do not display permissions in recover mode
continue
if not obj.has_change_permissions_permission(request):
continue
filtered_inlines.append(inline)
inlines = filtered_inlines
return inlines
def get_unihandecode_context(self, language):
if language[:2] in get_cms_setting('UNIHANDECODE_DECODERS'):
uhd_lang = language[:2]
else:
uhd_lang = get_cms_setting('UNIHANDECODE_DEFAULT_DECODER')
uhd_host = get_cms_setting('UNIHANDECODE_HOST')
uhd_version = get_cms_setting('UNIHANDECODE_VERSION')
if uhd_lang and uhd_host and uhd_version:
uhd_urls = [
'%sunihandecode-%s.core.min.js' % (uhd_host, uhd_version),
'%sunihandecode-%s.%s.min.js' % (uhd_host, uhd_version, uhd_lang),
]
else:
uhd_urls = []
return {'unihandecode_lang': uhd_lang, 'unihandecode_urls': uhd_urls}
def add_view(self, request, form_url='', extra_context=None):
extra_context = extra_context or {}
language = get_language_from_request(request)
extra_context.update({
'language': language,
})
if not request.GET.get('add_page_type') is None:
extra_context.update({
'add_page_type': True,
'title': _("Add Page Type"),
})
elif 'copy_target' in request.GET:
extra_context.update({
'title': _("Add Page Copy"),
})
else:
extra_context = self.update_language_tab_context(request, context=extra_context)
extra_context.update(self.get_unihandecode_context(language))
return super(PageAdmin, self).add_view(request, form_url, extra_context=extra_context)
    def change_view(self, request, object_id, form_url='', extra_context=None):
        """
        The 'change' admin view for the Page model.

        Augments the context with permission flags and language tabs, and
        appends ?language=... to redirect responses that point back at the
        same path so the active tab is preserved.
        """
        if extra_context is None:
            extra_context = {'basic_info': True}
        try:
            obj = self.model.objects.get(pk=object_id)
        except self.model.DoesNotExist:
            # Don't raise Http404 just yet, because we haven't checked
            # permissions yet. We don't want an unauthenticated user to be able
            # to determine whether a given object exists.
            obj = None
        else:
            #activate(user_lang_set)
            context = {
                'page': obj,
                'CMS_PERMISSION': get_cms_setting('PERMISSION'),
                'ADMIN_MEDIA_URL': settings.STATIC_URL,
                'can_change': obj.has_change_permission(request),
                'can_change_permissions': obj.has_change_permissions_permission(request),
                'current_site_id': settings.SITE_ID,
            }
            context.update(extra_context or {})
            extra_context = self.update_language_tab_context(request, obj, context)
        tab_language = get_language_from_request(request)
        extra_context.update(self.get_unihandecode_context(tab_language))
        response = super(PageAdmin, self).change_view(
            request, object_id, form_url=form_url, extra_context=extra_context)
        # NOTE(review): response._headers is a private Django attribute
        # (removed in Django 3.0) -- confirm against the targeted versions.
        if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path_info:
            location = response._headers['location']
            response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
        return response
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
# add context variables
filled_languages = []
if obj:
filled_languages = [t[0] for t in obj.title_set.filter(title__isnull=False).values_list('language')]
allowed_languages = [lang[0] for lang in self._get_site_languages(obj)]
context.update({
'filled_languages': [lang for lang in filled_languages if lang in allowed_languages],
})
return super(PageAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def _get_site_languages(self, obj=None):
site_id = None
if obj:
site_id = obj.site_id
else:
site_id = Site.objects.get_current().pk
return get_language_tuple(site_id)
def update_language_tab_context(self, request, obj=None, context=None):
if not context:
context = {}
language = get_language_from_request(request, obj)
languages = self._get_site_languages(obj)
context.update({
'language': language,
'language_tabs': languages,
# Dates are not language dependent, thus we hide the language
# selection bar: the language is forced through the form class
'show_language_tabs': len(list(languages)) > 1 and not context.get('publishing_dates', False),
})
return context
    def response_change(self, request, obj):
        """Called always when page gets changed, call save on page, there may be
        some new stuff, which should be published after all other objects on page
        are collected.
        """
        # save the object again, so all the related changes to page model
        # can be published if required
        obj.save()
        return super(PageAdmin, self).response_change(request, obj)
def has_add_permission(self, request):
"""
Return true if the current user has permission to add a new page.
"""
if get_cms_setting('PERMISSION'):
return permissions.has_page_add_permission(request)
return super(PageAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
if get_cms_setting('PERMISSION'):
if obj:
return obj.has_change_permission(request)
else:
return permissions.has_page_change_permission(request)
return super(PageAdmin, self).has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance. If CMS_PERMISSION are in use also takes look to
object permissions.
"""
if get_cms_setting('PERMISSION') and obj is not None:
return obj.has_delete_permission(request)
return super(PageAdmin, self).has_delete_permission(request, obj)
def has_recover_permission(self, request):
"""
Returns True if the use has the right to recover pages
"""
if not is_installed('reversion'):
return False
user = request.user
if user.is_superuser:
return True
try:
if has_global_page_permission(request, can_recover_page=True):
return True
except:
pass
return False
def has_add_plugin_permission(self, request, placeholder, plugin_type):
if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
return False
page = placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
source_page = source_placeholder.page
if source_page and not source_page.has_change_permission(request):
return False
target_page = target_placeholder.page
if target_page and not target_page.has_change_permission(request):
return False
if target_page and not target_page.publisher_is_draft:
return False
for plugin in plugins:
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
return False
return True
def has_change_plugin_permission(self, request, plugin):
page = plugin.placeholder.page if plugin.placeholder else None
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
return True
def has_move_plugin_permission(self, request, plugin, target_placeholder):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
page = plugin.placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_delete_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
return False
page = plugin.placeholder.page
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def has_clear_placeholder_permission(self, request, placeholder):
page = placeholder.page if placeholder else None
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def post_add_plugin(self, request, placeholder, plugin):
if is_installed('reversion') and placeholder.page:
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(u"%(plugin_name)s plugin added to %(placeholder)s") % {
'plugin_name': plugin_name, 'placeholder': placeholder}
self.cleanup_history(placeholder.page)
helpers.make_revision_with_plugins(placeholder.page, request.user, message)
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
page = target_placeholder.page
if page and is_installed('reversion'):
message = _(u"Copied plugins to %(placeholder)s") % {'placeholder': target_placeholder}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_edit_plugin(self, request, plugin):
page = plugin.placeholder.page
if page:
# if reversion is installed, save version of the page plugins
if is_installed('reversion') and page:
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(
u"%(plugin_name)s plugin edited at position %(position)s in %(placeholder)s") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder.slot
}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
page = target_placeholder.page
if page and is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _(u"Plugins were moved"))
def post_delete_plugin(self, request, plugin):
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
page = plugin.placeholder.page
if page:
page.save()
comment = _("%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder,
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def post_clear_placeholder(self, request, placeholder):
page = placeholder.page
if page:
page.save()
comment = _('All plugins in the placeholder "%(name)s" were deleted.') % {
'name': force_text(placeholder)
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def get_placeholder_template(self, request, placeholder):
page = placeholder.page
if page:
return page.get_template()
    def changelist_view(self, request, extra_context=None):
        """The 'change list' admin view for this model.

        Renders the page tree, including per-site languages and the set of
        tree nodes the user previously expanded (from a cookie).
        """
        from django.contrib.admin.views.main import ERROR_FLAG
        opts = self.model._meta
        app_label = opts.app_label
        if not self.has_change_permission(request, None):
            return HttpResponseForbidden(force_text(_("You do not have permission to change pages.")))
        try:
            cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
                               self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page,
                               self.list_max_show_all, self.list_editable, self)
        except IncorrectLookupParameters:
            # Wacky lookup parameters were given, so redirect to the main
            # changelist page, without parameters, and pass an 'invalid=1'
            # parameter via the query string. If wacky parameters were given and
            # the 'invalid=1' parameter was already in the query string, something
            # is screwed up with the database, so display an error page.
            if ERROR_FLAG in request.GET.keys():
                return render(request, 'admin/invalid_setup.html', {'title': _('Database error')})
            return HttpResponseRedirect(request.path_info + '?' + ERROR_FLAG + '=1')
        cl.set_items(request)
        site_id = request.GET.get('site__exact', None)
        if site_id is None:
            site_id = current_site(request).pk
        site_id = int(site_id)
        # languages
        languages = get_language_list(site_id)
        # parse the cookie that saves which page trees have
        # been opened already and extracts the page ID
        djangocms_nodes_open = request.COOKIES.get('djangocms_nodes_open', '')
        raw_nodes = unquote(djangocms_nodes_open).split(',')
        try:
            open_menu_trees = [int(c.split('page_', 1)[1]) for c in raw_nodes]
        except IndexError:
            open_menu_trees = []
        # Language may be present in the GET dictionary but empty
        language = request.GET.get('language', get_language())
        if not language:
            language = get_language()
        context = {
            'title': cl.title,
            'is_popup': cl.is_popup,
            'cl': cl,
            'opts': opts,
            'has_add_permission': self.has_add_permission(request),
            'root_path': admin_reverse('index'),
            'app_label': app_label,
            'preview_language': language,
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'CMS_PERMISSION': get_cms_setting('PERMISSION'),
            'DEBUG': settings.DEBUG,
            'site_languages': languages,
            'open_menu_trees': open_menu_trees,
        }
        if is_installed('reversion'):
            context['has_recover_permission'] = self.has_recover_permission(request)
            context['has_change_permission'] = self.has_change_permission(request)
        context.update(extra_context or {})
        return render(request, self.change_list_template or [
            'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
            'admin/%s/change_list.html' % app_label,
            'admin/change_list.html'
        ], context)
    def recoverlist_view(self, request, extra_context=None):
        """List deleted pages available for recovery; guarded by recover permission."""
        if not self.has_recover_permission(request):
            raise PermissionDenied
        return super(PageAdmin, self).recoverlist_view(request, extra_context)
    def recover_view(self, request, version_id, extra_context=None):
        """Render the recover form for a deleted page; guarded by recover permission."""
        if not self.has_recover_permission(request):
            raise PermissionDenied
        extra_context = self.update_language_tab_context(request, None, extra_context)
        return super(PageAdmin, self).recover_view(request, version_id, extra_context)
def revision_view(self, request, object_id, version_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
response = super(PageAdmin, self).revision_view(request, object_id, version_id, extra_context)
return response
    def history_view(self, request, object_id, extra_context=None):
        """Show the revision history of a page; requires change permission."""
        if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
            raise PermissionDenied
        extra_context = self.update_language_tab_context(request, None, extra_context)
        return super(PageAdmin, self).history_view(request, object_id, extra_context)
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
# reset parent to null if parent is not found
if version.field_dict['parent']:
try:
Page.objects.get(pk=version.field_dict['parent'])
except:
if revert and obj.parent_id != int(version.field_dict['parent']):
version.field_dict['parent'] = obj.parent_id
if recover:
obj.parent = None
obj.parent_id = None
version.field_dict['parent'] = None
obj.version = version
return super(PageAdmin, self).render_revision_form(request, obj, version, context, revert, recover)
@require_POST
def undo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
page = get_object_or_404(Page, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
reverted, clean = page.undo()
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponse("ok")
@require_POST
def redo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
page = get_object_or_404(Page, pk=object_id)
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
try:
reverted, clean = page.redo()
if not clean:
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
except IndexError as e:
return HttpResponseBadRequest(e.message)
return HttpResponse("ok")
@require_POST
@create_revision()
def change_template(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change the template")))
to_template = request.POST.get("template", None)
if to_template not in dict(get_cms_setting('TEMPLATES')):
return HttpResponseBadRequest(force_text(_("Template not valid")))
page.template = to_template
page.save()
if is_installed('reversion'):
message = _("Template changed to %s") % dict(get_cms_setting('TEMPLATES'))[to_template]
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse(force_text(_("The template was successfully changed")))
    @require_POST
    @transaction.atomic
    def move_page(self, request, page_id, extra_context=None):
        """
        Move the page to the requested target, at the given position

        POST params: 'target' (page pk) and 'position'. Returns a JSON-ified
        rendered menu item on success.
        """
        target = request.POST.get('target', None)
        position = request.POST.get('position', None)
        if target is None or position is None:
            return HttpResponseRedirect('../../')
        try:
            page = self.model.objects.get(pk=page_id)
            target = self.model.objects.get(pk=target)
        except self.model.DoesNotExist:
            return jsonify_request(HttpResponseBadRequest("error"))
        # does the user have permission to move this page under the target?
        if not page.has_move_page_permission(request) or \
                not target.has_add_permission(request):
            return jsonify_request(
                HttpResponseForbidden(force_text(_("Error! You don't have permissions to move this page. Please reload the page"))))
        # move page
        page.move_page(target, position)
        if is_installed('reversion'):
            self.cleanup_history(page)
            helpers.make_revision_with_plugins(page, request.user, _("Page moved"))
        return jsonify_request(HttpResponse(admin_utils.render_admin_menu_item(request, page).content))
def get_permissions(self, request, page_id):
page = get_object_or_404(Page, id=page_id)
can_change_list = Page.permissions.get_change_id_list(request.user, page.site_id)
global_page_permissions = GlobalPagePermission.objects.filter(sites__in=[page.site_id])
page_permissions = PagePermission.objects.for_page(page)
all_permissions = list(global_page_permissions) + list(page_permissions)
# does he can change global permissions ?
has_global = permissions.has_global_change_permissions_permission(request)
permission_set = []
for permission in all_permissions:
if isinstance(permission, GlobalPagePermission):
if has_global:
permission_set.append([(True, True), permission])
else:
permission_set.append([(True, False), permission])
else:
if can_change_list == PagePermissionsPermissionManager.GRANT_ALL:
can_change = True
else:
can_change = permission.page_id in can_change_list
permission_set.append([(False, can_change), permission])
context = {
'page': page,
'permission_set': permission_set,
}
return render(request, 'admin/cms/page/permissions.html', context)
    @require_POST
    @transaction.atomic
    def copy_language(self, request, page_id):
        """Copy all plugins of a page from one language to another.

        POST params: 'source_language' and 'target_language'. Runs inside a
        single revision context and records it when reversion is installed.
        """
        with create_revision():
            source_language = request.POST.get('source_language')
            target_language = request.POST.get('target_language')
            page = Page.objects.get(pk=page_id)
            placeholders = page.get_placeholders()
            if not target_language or not target_language in get_language_list():
                return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
            for placeholder in placeholders:
                plugins = list(
                    placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
                if not self.has_copy_plugin_permission(request, placeholder, placeholder, plugins):
                    return HttpResponseForbidden(force_text(_('You do not have permission to copy these plugins.')))
                copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
            if page and is_installed('reversion'):
                message = _(u"Copied plugins from %(source_language)s to %(target_language)s") % {
                    'source_language': source_language, 'target_language': target_language}
                self.cleanup_history(page)
                helpers.make_revision_with_plugins(page, request.user, message)
            return HttpResponse("ok")
    @require_POST
    @transaction.atomic
    def copy_page(self, request, page_id, extra_context=None):
        """
        Copy the page and all its plugins and descendants to the requested target, at the given position

        POST params: 'target' (page pk), 'position' and 'site'. Responds with
        JSON "ok"/"error" for AJAX callers, otherwise redirects.
        """
        context = {}
        page = Page.objects.get(pk=page_id)
        target = request.POST.get('target', None)
        position = request.POST.get('position', None)
        site = request.POST.get('site', None)
        if target is not None and position is not None and site is not None:
            try:
                target = self.model.objects.get(pk=target)
                # does he have permissions to copy this page under target?
                assert target.has_add_permission(request)
                site = Site.objects.get(pk=site)
            except (ObjectDoesNotExist, AssertionError):
                return HttpResponse("error")
                #context.update({'error': _('Page could not been moved.')})
            else:
                try:
                    # NOTE(review): request.REQUEST was removed in Django 1.9;
                    # this only works on the older Django versions this file
                    # targets -- confirm before upgrading.
                    kwargs = {
                        'copy_permissions': request.REQUEST.get('copy_permissions', False),
                    }
                    page.copy_page(target, site, position, **kwargs)
                    return jsonify_request(HttpResponse("ok"))
                except ValidationError:
                    exc = sys.exc_info()[1]
                    return jsonify_request(HttpResponseBadRequest(exc.messages))
        context.update(extra_context or {})
        return HttpResponseRedirect('../../')
    @require_POST
    @transaction.atomic
    @create_revision()
    def publish_page(self, request, page_id, language):
        """Publish a draft page and/or static placeholders, then redirect.

        ``statics`` (GET, comma-separated pks) selects StaticPlaceholders to
        publish alongside or instead of the page.
        """
        try:
            page = Page.objects.get(id=page_id, publisher_is_draft=True)
        except Page.DoesNotExist:
            page = None
        # ensure user has permissions to publish this page
        all_published = True
        if page:
            if not page.has_publish_permission(request):
                return HttpResponseForbidden(force_text(_("You do not have permission to publish this page")))
            published = page.publish(language)
            if not published:
                all_published = False
        statics = request.GET.get('statics', '')
        if not statics and not page:
            # NOTE(review): Http404 is *returned* here, not raised — looks
            # like a bug (callers receive an exception object as a response);
            # confirm intended behavior before changing.
            return Http404("No page or stack found for publishing.")
        if statics:
            static_ids = statics .split(',')
            for pk in static_ids:
                static_placeholder = StaticPlaceholder.objects.get(pk=pk)
                published = static_placeholder.publish(request, language)
                if not published:
                    all_published = False
        if page:
            if all_published:
                if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                    messages.warning(request, _("Page not published! A parent page is not published yet."))
                else:
                    messages.info(request, _('The content was successfully published.'))
                # Record the publish action in the admin log.
                LogEntry.objects.log_action(
                    user_id=request.user.id,
                    content_type_id=ContentType.objects.get_for_model(Page).pk,
                    object_id=page_id,
                    object_repr=page.get_title(language),
                    action_flag=CHANGE,
                )
            else:
                if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                    messages.warning(request, _("Page not published! A parent page is not published yet."))
                else:
                    messages.warning(request, _("There was a problem publishing your content"))
        if is_installed('reversion') and page:
            self.cleanup_history(page, publish=True)
            helpers.make_revision_with_plugins(page, request.user, PUBLISH_COMMENT)
            # create a new publish reversion
        if 'node' in request.REQUEST:
            # if request comes from tree..
            return admin_utils.render_admin_menu_item(request, page)
        if 'redirect' in request.GET:
            return HttpResponseRedirect(request.GET['redirect'])
        referrer = request.META.get('HTTP_REFERER', '')
        path = admin_reverse("cms_page_changelist")
        if request.GET.get('redirect_language'):
            path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
        # Requests originating outside the admin are redirected back to the
        # site frontend (with the toolbar edit mode switched off).
        if admin_reverse('index') not in referrer:
            if all_published:
                if page:
                    if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                        path = page.get_absolute_url(language, fallback=True)
                    else:
                        public_page = Page.objects.get(publisher_public=page.pk)
                        path = '%s?%s' % (public_page.get_absolute_url(language, fallback=True), get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
                else:
                    path = '%s?%s' % (referrer, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
            else:
                path = '/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
        return HttpResponseRedirect(path)
def cleanup_history(self, page, publish=False):
if is_installed('reversion') and page:
# delete revisions that are not publish revisions
from reversion.models import Version
content_type = ContentType.objects.get_for_model(Page)
# reversion 1.8+ removes type field, revision filtering must be based on comments
versions_qs = Version.objects.filter(content_type=content_type, object_id_int=page.pk)
history_limit = get_cms_setting("MAX_PAGE_HISTORY_REVERSIONS")
deleted = []
for version in versions_qs.exclude(revision__comment__in=(INITIAL_COMMENT, PUBLISH_COMMENT)).order_by(
'-revision__pk')[history_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
# delete all publish revisions that are more then MAX_PAGE_PUBLISH_REVERSIONS
publish_limit = get_cms_setting("MAX_PAGE_PUBLISH_REVERSIONS")
if publish_limit and publish:
deleted = []
for version in versions_qs.filter(revision__comment__exact=PUBLISH_COMMENT).order_by(
'-revision__pk')[publish_limit - 1:]:
if not version.revision_id in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
@require_POST
@transaction.atomic
def unpublish(self, request, page_id, language):
"""
Publish or unpublish a language of a page
"""
site = Site.objects.get_current()
page = get_object_or_404(Page, pk=page_id)
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to unpublish this page")))
if not page.publisher_public_id:
return HttpResponseForbidden(force_text(_("This page was never published")))
try:
page.unpublish(language)
message = _('The %(language)s page "%(page)s" was successfully unpublished') % {
'language': get_language_object(language, site)['name'], 'page': page}
messages.info(request, message)
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(),
action_flag=CHANGE,
change_message=message,
)
except RuntimeError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
except ValidationError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
@require_POST
@transaction.atomic
def revert_page(self, request, page_id, language):
page = get_object_or_404(Page, id=page_id)
# ensure user has permissions to publish this page
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
page.revert(language)
messages.info(request, _('The page "%s" was successfully reverted.') % page)
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
referer = request.META.get('HTTP_REFERER', '')
path = '../../'
if admin_reverse('index') not in referer:
path = '%s?%s' % (referer.split('?')[0], get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
return HttpResponseRedirect(path)
    @create_revision()
    def delete_translation(self, request, object_id, extra_context=None):
        """Delete one language translation (its Title and plugins) of a page.

        GET renders a confirmation page; POST performs the deletion and
        records it in a reversion revision when reversion is installed.
        """
        if 'language' in request.GET:
            language = request.GET['language']
        else:
            language = get_language_from_request(request)
        opts = Page._meta
        titleopts = Title._meta
        app_label = titleopts.app_label
        pluginopts = CMSPlugin._meta
        try:
            obj = self.get_queryset(request).get(pk=unquote(object_id))
        except self.model.DoesNotExist:
            # Don't raise Http404 just yet, because we haven't checked
            # permissions yet. We don't want an unauthenticated user to be able
            # to determine whether a given object exists.
            obj = None
        if not self.has_delete_permission(request, obj):
            return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name),
                    'key': escape(object_id)
                })
        # Refuse to delete the only remaining translation of the page.
        if not len(list(obj.get_languages())) > 1:
            raise Http404(_('There only exists one translation for this page'))
        titleobj = get_object_or_404(Title, page__id=object_id, language=language)
        saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
        using = router.db_for_read(self.model)
        kwargs = {
            'admin_site': self.admin_site,
            'user': request.user,
            'using': using
        }
        # get_deleted_objects returns a differently shaped tuple on Django 1.7.
        if DJANGO_1_7:
            deleted_objects, perms_needed = get_deleted_objects(
                [titleobj],
                titleopts,
                **kwargs
            )[:2]
            to_delete_plugins, perms_needed_plugins = get_deleted_objects(
                saved_plugins,
                pluginopts,
                **kwargs
            )[:2]
        else:
            deleted_objects, __, perms_needed = get_deleted_objects(
                [titleobj],
                titleopts,
                **kwargs
            )[:3]
            to_delete_plugins, __, perms_needed_plugins = get_deleted_objects(
                saved_plugins,
                pluginopts,
                **kwargs
            )[:3]
        deleted_objects.append(to_delete_plugins)
        perms_needed = set(list(perms_needed) + list(perms_needed_plugins))
        if request.method == 'POST':
            if perms_needed:
                raise PermissionDenied
            message = _('Title and plugins with language %(language)s was deleted') % {
                'language': force_text(get_language_object(language)['name'])
            }
            self.log_change(request, titleobj, message)
            messages.info(request, message)
            titleobj.delete()
            for p in saved_plugins:
                p.delete()
            # Re-save the public counterpart so its state is refreshed.
            public = obj.publisher_public
            if public:
                public.save()
            if is_installed('reversion'):
                self.cleanup_history(obj)
                helpers.make_revision_with_plugins(obj, request.user, message)
            if not self.has_change_permission(request, None):
                return HttpResponseRedirect("../../../../")
            return HttpResponseRedirect("../../")
        # GET: render the standard admin delete-confirmation template chain.
        context = {
            "title": _("Are you sure?"),
            "object_name": force_text(titleopts.verbose_name),
            "object": titleobj,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "opts": opts,
            "root_path": admin_reverse('index'),
            "app_label": app_label,
        }
        context.update(extra_context or {})
        request.current_app = self.admin_site.name
        return render(request, self.delete_confirmation_template or [
            "admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
            "admin/%s/delete_confirmation.html" % app_label,
            "admin/delete_confirmation.html"
        ], context)
def preview_page(self, request, object_id, language):
"""Redirecting preview function based on draft_id
"""
page = get_object_or_404(Page, id=object_id)
attrs = "?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
attrs += "&language=" + language
with force_language(language):
url = page.get_absolute_url(language) + attrs
site = get_current_site(request)
if not site == page.site:
url = "http%s://%s%s" % ('s' if request.is_secure() else '',
page.site.domain, url)
return HttpResponseRedirect(url)
@require_POST
def change_innavigation(self, request, page_id):
"""
Switch the in_navigation of a page
"""
page = get_object_or_404(Page, pk=page_id)
if page.has_change_permission(request):
page.toggle_in_navigation()
language = request.GET.get('language') or get_language_from_request(request)
return admin_utils.render_admin_menu_item(request, page, language=language)
return HttpResponseForbidden(force_text(_("You do not have permission to change this page's in_navigation status")))
def descendants(self, request, page_id, language):
"""
Get html for descendants of given page
Used for lazy loading pages in cms.changelist.js
Permission checks is done in admin_utils.get_admin_menu_item_context
which is called by admin_utils.render_admin_menu_item.
"""
page = get_object_or_404(Page, pk=page_id)
return admin_utils.render_admin_menu_item(request, page,
template="admin/cms/page/tree/lazy_menu.html", language=language)
def add_page_type(self, request):
site = Site.objects.get_current()
language = request.GET.get('language') or get_language()
target = request.GET.get('copy_target')
type_root, created = Page.objects.get_or_create(reverse_id=PAGE_TYPES_ID, publisher_is_draft=True, site=site,
defaults={'in_navigation': False})
type_title, created = Title.objects.get_or_create(page=type_root, language=language, slug=PAGE_TYPES_ID,
defaults={'title': _('Page Types')})
url = add_url_parameters(admin_reverse('cms_page_add'), target=type_root.pk, position='first-child',
add_page_type=1, copy_target=target, language=language)
return HttpResponseRedirect(url)
def resolve(self, request):
if not request.user.is_staff:
return HttpResponse('/', content_type='text/plain')
obj = False
url = False
if request.session.get('cms_log_latest', False):
log = LogEntry.objects.get(pk=request.session['cms_log_latest'])
try:
obj = log.get_edited_object()
except (ObjectDoesNotExist, ValueError):
obj = None
del request.session['cms_log_latest']
if obj and obj.__class__ in toolbar_pool.get_watch_models() and hasattr(obj, 'get_absolute_url'):
# This is a test if the object url can be retrieved
# In case it can't, object it's not taken into account
try:
force_text(obj.get_absolute_url())
except:
obj = None
else:
obj = None
if not obj:
pk = request.REQUEST.get('pk')
full_model = request.REQUEST.get('model')
if pk and full_model:
app_label, model = full_model.split('.')
if pk and app_label:
ctype = ContentType.objects.get(app_label=app_label, model=model)
try:
obj = ctype.get_object_for_this_type(pk=pk)
except ctype.model_class().DoesNotExist:
obj = None
try:
force_text(obj.get_absolute_url())
except:
obj = None
if obj:
if not request.toolbar or not request.toolbar.edit_mode:
if isinstance(obj, Page):
if obj.get_public_object():
url = obj.get_public_object().get_absolute_url()
else:
url = '%s?%s' % (
obj.get_draft_object().get_absolute_url(),
get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
)
else:
url = obj.get_absolute_url()
else:
url = obj.get_absolute_url()
if url:
return HttpResponse(force_text(url), content_type='text/plain')
return HttpResponse('', content_type='text/plain')
def lookup_allowed(self, key, *args, **kwargs):
if key == 'site__exact':
return True
return super(PageAdmin, self).lookup_allowed(key, *args, **kwargs)
    def edit_title_fields(self, request, page_id, language):
        """Edit a whitelisted subset of Title fields from the frontend editor.

        ``edit_fields`` (GET, comma-separated) selects the fields; only those
        in ``title_frontend_editable_fields`` are accepted.
        """
        title = Title.objects.get(page_id=page_id, language=language)
        saved_successfully = False
        raw_fields = request.GET.get("edit_fields", 'title')
        # Filter requested fields against the admin's whitelist.
        edit_fields = [field for field in raw_fields.split(",") if field in self.title_frontend_editable_fields]
        cancel_clicked = request.POST.get("_cancel", False)
        opts = Title._meta
        if not edit_fields:
            # Defaults to title
            edit_fields = ('title',)
        if not has_generic_permission(title.page.pk, request.user, "change",
                                      title.page.site.pk):
            return HttpResponseForbidden(force_text(_("You do not have permission to edit this page")))
        class PageTitleForm(django.forms.ModelForm):
            """
            Dynamic form showing only the fields to be edited
            """
            class Meta:
                model = Title
                fields = edit_fields
        if not cancel_clicked and request.method == 'POST':
            form = PageTitleForm(instance=title, data=request.POST)
            if form.is_valid():
                form.save()
                saved_successfully = True
        else:
            form = PageTitleForm(instance=title)
        admin_form = AdminForm(form, fieldsets=[(None, {'fields': edit_fields})], prepopulated_fields={},
                               model_admin=self)
        media = self.media + admin_form.media
        context = {
            'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
            'title': 'Title',
            'plugin': title.page,
            'plugin_id': title.page.id,
            'adminform': admin_form,
            'add': False,
            'is_popup': True,
            'media': media,
            'opts': opts,
            'change': True,
            'save_as': False,
            'has_add_permission': False,
            'window_close_timeout': 10,
        }
        if cancel_clicked:
            # cancel button was clicked
            context.update({
                'cancel': True,
            })
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        # Successful save: render the confirm template (closes the popup).
        if not cancel_clicked and request.method == 'POST' and saved_successfully:
            return render(request, 'admin/cms/page/plugin/confirm_form.html', context)
        return render(request, 'admin/cms/page/plugin/change_form.html', context)
def get_published_pagelist(self, *args, **kwargs):
"""
This view is used by the PageSmartLinkWidget as the user type to feed the autocomplete drop-down.
"""
request = args[0]
if request.is_ajax():
query_term = request.GET.get('q','').strip('/')
language_code = request.GET.get('language_code', settings.LANGUAGE_CODE)
matching_published_pages = Page.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language_code)
| Q(title_set__path__icontains=query_term, title_set__language=language_code)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language_code)
| Q(title_set__page_title__icontains=query_term, title_set__language=language_code)
).distinct()
results = []
for page in matching_published_pages:
results.append(
{
'path': page.get_path(language=language_code),
'title': page.get_title(language=language_code),
'redirect_url': page.get_absolute_url(language=language_code)
}
)
return HttpResponse(json.dumps(results), content_type='application/json')
else:
return HttpResponseForbidden()
    def add_plugin(self, *args, **kwargs):
        # Wrap the inherited implementation so the change is recorded
        # as a reversion revision.
        with create_revision():
            return super(PageAdmin, self).add_plugin(*args, **kwargs)
    def copy_plugins(self, *args, **kwargs):
        # Wrap the inherited implementation so the change is recorded
        # as a reversion revision.
        with create_revision():
            return super(PageAdmin, self).copy_plugins(*args, **kwargs)
    def edit_plugin(self, *args, **kwargs):
        # Wrap the inherited implementation so the change is recorded
        # as a reversion revision.
        with create_revision():
            return super(PageAdmin, self).edit_plugin(*args, **kwargs)
    def move_plugin(self, *args, **kwargs):
        # Wrap the inherited implementation so the change is recorded
        # as a reversion revision.
        with create_revision():
            return super(PageAdmin, self).move_plugin(*args, **kwargs)
    def delete_plugin(self, *args, **kwargs):
        # Wrap the inherited implementation so the change is recorded
        # as a reversion revision.
        with create_revision():
            return super(PageAdmin, self).delete_plugin(*args, **kwargs)
    def clear_placeholder(self, *args, **kwargs):
        # Wrap the inherited implementation so the change is recorded
        # as a reversion revision.
        with create_revision():
            return super(PageAdmin, self).clear_placeholder(*args, **kwargs)
# Register the Page model with the default admin site.
admin.site.register(Page, PageAdmin)
|
|
from basic import Basic, Atom
from core import C
from sympify import sympify, _sympify, SympifyError
from singleton import S, Singleton
from expr import Expr, AtomicExpr
from cache import cacheit
from compatibility import any, all
from function import FunctionClass
from sympy.logic.boolalg import Boolean
import re
class Symbol(AtomicExpr, Boolean):
    """
    Assumptions::
       commutative = True
    You can override the default assumptions in the constructor::
       >>> from sympy import symbols
       >>> A,B = symbols('A,B', commutative = False)
       >>> bool(A*B != B*A)
       True
       >>> bool(A*B*2 == 2*A*B) == True # multiplication by scalars is commutative
       True
    """
    is_comparable = False
    # A Symbol's identity is its name plus commutativity; other assumptions
    # are stored via the Expr/Basic machinery.
    __slots__ = ['is_commutative', 'name']
    is_Symbol = True
    def __new__(cls, name, commutative=True, **assumptions):
        """Symbols are identified by name and commutativity::
        >>> from sympy import Symbol
        >>> Symbol("x") == Symbol("x")
        True
        >>> Symbol("x", real=True) == Symbol("x", real=False)
        True
        >>> Symbol("x", commutative=True) == Symbol("x", commutative=False)
        False
        """
        if 'dummy' in assumptions:
            import warnings
            warnings.warn(
                "\nThe syntax Symbol('x', dummy=True) is deprecated and will"
                "\nbe dropped in a future version of Sympy. Please use Dummy()"
                "\nor symbols(..., cls=Dummy) to create dummy symbols.",
                DeprecationWarning)
            if assumptions.pop('dummy'):
                return Dummy(name, commutative, **assumptions)
        return Symbol.__xnew_cached_(cls, name, commutative, **assumptions)
    def __new_stage2__(cls, name, commutative=True, **assumptions):
        # repr() replaces the old backtick-repr syntax, which was removed
        # in Python 3; the message is unchanged.
        assert isinstance(name, str), repr(type(name))
        obj = Expr.__new__(cls, **assumptions)
        obj.is_commutative = commutative
        obj.name = name
        return obj
    __xnew__ = staticmethod(__new_stage2__)  # never cached (e.g. dummy)
    __xnew_cached_ = staticmethod(cacheit(__new_stage2__))  # symbols are always cached
    def __getnewargs__(self):
        # Support pickling: arguments passed back to __new__.
        return (self.name, self.is_commutative)
    def _hashable_content(self):
        return (self.is_commutative, self.name)
    def as_dummy(self):
        # Same name/assumptions, but with unique (Dummy) identity.
        return Dummy(self.name, self.is_commutative, **self.assumptions0)
    def __call__(self, *args):
        # Calling a symbol creates an undefined function of the same name.
        from function import Function
        return Function(self.name, nargs=len(args))(*args, **self.assumptions0)
    def as_real_imag(self, deep=True):
        return (C.re(self), C.im(self))
    def _eval_expand_complex(self, deep=True, **hints):
        re, im = self.as_real_imag()
        return re + im*S.ImaginaryUnit
    def _sage_(self):
        # Conversion hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.var(self.name)
    @property
    def is_number(self):
        return False
    @property
    def free_symbols(self):
        return set([self])
class Dummy(Symbol):
    """Dummy symbols are each unique, identified by an internal count index ::
    >>> from sympy import Dummy
    >>> bool(Dummy("x") == Dummy("x")) == True
    False
    If a name is not supplied then a string value of the count index will be
    used. This is useful when a temporary variable is needed and the name
    of the variable used in the expression is not important. ::
    >>> Dummy._count = 0 # /!\ this should generally not be changed; it is being
    >>> Dummy() # used here to make sure that the doctest passes.
    _0
    """
    # Global counter: gives every Dummy a distinct dummy_index.
    _count = 0
    __slots__ = ['dummy_index']
    is_Dummy = True
    def __new__(cls, name=None, commutative=True, **assumptions):
        if name is None:
            name = str(Dummy._count)
        # Use the uncached constructor so each Dummy is a fresh object.
        obj = Symbol.__xnew__(cls, name, commutative=commutative, **assumptions)
        Dummy._count += 1
        obj.dummy_index = Dummy._count
        return obj
    def _hashable_content(self):
        # dummy_index makes same-named Dummies hash/compare as distinct.
        return Symbol._hashable_content(self) + (self.dummy_index,)
class Wild(Symbol):
    """
    Wild() matches any expression but another Wild().
    """
    __slots__ = ['exclude', 'properties']
    is_Wild = True
    def __new__(cls, name, exclude=None, properties=None, **assumptions):
        # Normalize to tuples so the cached __xnew__ receives hashable args.
        if type(exclude) is list:
            exclude = tuple(exclude)
        if type(properties) is list:
            properties = tuple(properties)
        return Wild.__xnew__(cls, name, exclude, properties, **assumptions)
    def __getnewargs__(self):
        # Support pickling.
        return (self.name, self.exclude, self.properties)
    @staticmethod
    @cacheit
    def __xnew__(cls, name, exclude, properties, **assumptions):
        obj = Symbol.__xnew__(cls, name, **assumptions)
        if exclude is None:
            obj.exclude = None
        else:
            obj.exclude = tuple([sympify(x) for x in exclude])
        if properties is None:
            obj.properties = None
        else:
            obj.properties = tuple(properties)
        return obj
    def _hashable_content(self):
        return (self.name, self.exclude, self.properties)
    # TODO add check against another Wild
    def matches(self, expr, repl_dict=None, evaluate=False):
        """Try to match ``expr``; return the updated replacement dict or None.

        A match fails when ``expr`` contains an excluded expression or fails
        one of the ``properties`` predicates.
        """
        # None sentinel instead of a mutable default argument; behavior is
        # unchanged (the dict was always copied before mutation).
        if repl_dict is None:
            repl_dict = {}
        if self in repl_dict:
            if repl_dict[self] == expr:
                return repl_dict
            else:
                return None
        if self.exclude:
            for x in self.exclude:
                if x in expr:
                    return None
        if self.properties:
            for f in self.properties:
                if not f(expr):
                    return None
        repl_dict = repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
    def __call__(self, *args, **assumptions):
        from sympy.core.function import WildFunction
        return WildFunction(self.name, nargs=len(args))(*args, **assumptions)
class Pure(Dummy):
    """A dummy symbol named 'pure' with a fixed dummy index.

    Unlike ordinary Dummy instances, every Pure() compares and hashes equal
    because dummy_index is pinned to 0.
    """
    def __new__(cls):
        obj = Symbol.__xnew__(cls, 'pure')
        obj.dummy_index = 0
        return obj
    def _hashable_content(self):
        return Symbol._hashable_content(self) + (self.dummy_index,)
# Numeric range syntax, e.g. "x:10" or "x5:10" -> (name, start, end).
_re_var_range = re.compile(r"^(.*?)(\d*):(\d+)$")
# Lexicographic range syntax, e.g. "a:d" -> (start_char, end_char).
_re_var_scope = re.compile(r"^(.):(.)$")
# Splits a name list on whitespace or commas.
_re_var_split = re.compile(r"\s|,")
def symbols(names, **args):
    """
    Transform strings into instances of :class:`Symbol` class.
    :func:`symbols` function returns a sequence of symbols with names taken
    from ``names`` argument, which can be a comma or whitespace delimited
    string, or a sequence of strings::
    >>> from sympy import symbols, Function
    >>> x, y, z = symbols('x,y,z')
    >>> a, b, c = symbols('a b c')
    The type of output is dependent on the properties of input arguments::
    >>> x = symbols('x')
    >>> (x,) = symbols('x,')
    >>> symbols(('a', 'b', 'c'))
    (a, b, c)
    >>> symbols(['a', 'b', 'c'])
    [a, b, c]
    >>> symbols(set(['a', 'b', 'c']))
    set([a, b, c])
    If an iterable container is needed set ``seq`` argument to ``True``::
    >>> symbols('x', seq=True)
    (x,)
    To cut on typing, range syntax is supported to create indexed symbols::
    >>> symbols('x:10')
    (x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
    >>> symbols('x5:10')
    (x5, x6, x7, x8, x9)
    >>> symbols('x5:10,y:5')
    (x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
    >>> symbols(('x5:10', 'y:5'))
    ((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
    To cut on typing even more, lexicographic range syntax is supported::
    >>> symbols('x:z')
    (x, y, z)
    >>> symbols('a:d,x:z')
    (a, b, c, d, x, y, z)
    >>> symbols(('a:d', 'x:z'))
    ((a, b, c, d), (x, y, z))
    All newly created symbols have assumptions set accordingly to ``args``::
    >>> a = symbols('a', integer=True)
    >>> a.is_integer
    True
    >>> x, y, z = symbols('x,y,z', real=True)
    >>> x.is_real and y.is_real and z.is_real
    True
    Despite its name, :func:`symbols` can create symbol--like objects of
    other type, for example instances of Function or Wild classes. To
    achieve this, set ``cls`` keyword argument to the desired type::
    >>> symbols('f,g,h', cls=Function)
    (f, g, h)
    >>> type(_[0])
    <class 'sympy.core.function.UndefinedFunction'>
    """
    result = []
    if isinstance(names, basestring):
        names = _re_var_split.split(names)
        cls = args.pop('cls', Symbol)
        seq = args.pop('seq', False)
        for name in names:
            if not name:
                continue
            # Plain name (no range syntax): create a single symbol.
            if ':' not in name:
                symbol = cls(name, **args)
                result.append(symbol)
                continue
            # Numeric range, e.g. 'x5:10' -> x5..x9.
            match = _re_var_range.match(name)
            if match is not None:
                name, start, end = match.groups()
                if not start:
                    start = 0
                else:
                    start = int(start)
                for i in xrange(start, int(end)):
                    symbol = cls("%s%i" % (name, i), **args)
                    result.append(symbol)
                # Ranges always yield a tuple, even of length one.
                seq = True
                continue
            # Lexicographic range, e.g. 'a:d' -> a, b, c, d (inclusive).
            match = _re_var_scope.match(name)
            if match is not None:
                start, end = match.groups()
                for name in xrange(ord(start), ord(end)+1):
                    symbol = cls(chr(name), **args)
                    result.append(symbol)
                seq = True
                continue
            raise ValueError("'%s' is not a valid symbol range specification" % name)
        # A single plain name unwraps to the bare symbol unless the input
        # ended with a separator (e.g. 'x,') or seq=True was requested.
        if not seq and len(result) <= 1:
            if not result:
                return None
            elif names[-1]:
                return result[0]
        return tuple(result)
    else:
        # Non-string input: recurse per element and preserve container type.
        for name in names:
            syms = symbols(name, **args)
            if syms is not None:
                result.append(syms)
        return type(names)(result)
def var(names, **args):
    """
    Create symbols and inject them into the global namespace.
    This calls :func:`symbols` with the same arguments and puts the results
    into the *global* namespace. It's recommended not to use :func:`var` in
    library code, where :func:`symbols` has to be used::
    >>> from sympy import var
    >>> var('x')
    x
    >>> x
    x
    >>> var('a,ab,abc')
    (a, ab, abc)
    >>> abc
    abc
    >>> var('x,y', real=True)
    (x, y)
    >>> x.is_real and y.is_real
    True
    See :func:`symbol` documentation for more details on what kinds of
    arguments can be passed to :func:`var`.
    """
    def traverse(symbols, frame):
        """Recursively inject symbols to the global namespace. """
        for symbol in symbols:
            if isinstance(symbol, Basic):
                frame.f_globals[symbol.name] = symbol
            elif isinstance(symbol, FunctionClass):
                frame.f_globals[symbol.__name__] = symbol
            else:
                # Nested container (tuple of tuples etc.): recurse.
                traverse(symbol, frame)
    from inspect import currentframe
    # The caller's frame: its globals are the injection target.
    frame = currentframe().f_back
    try:
        syms = symbols(names, **args)
        if syms is not None:
            if isinstance(syms, Basic):
                frame.f_globals[syms.name] = syms
            elif isinstance(syms, FunctionClass):
                frame.f_globals[syms.__name__] = syms
            else:
                traverse(syms, frame)
    finally:
        del frame # break cyclic dependencies as stated in inspect docs
    return syms
|
|
import pytest
from datetime import date, time
from chronophore import controller
from chronophore.models import Entry
# A user id that is guaranteed not to exist in the test fixtures.
UNREGISTERED_ID = '000000000'
def test_signed_in_users(db_session, test_users):
    """List all signed in users. Don't list users
    who forgot to sign out on previous days."""
    sam_id = test_users['sam'].user_id
    # Sam has a stale open entry from a previous day, flagged as forgotten.
    db_session.add(
        Entry(
            uuid='781d8a2a-104b-480c-baba-98c55f11e80b',
            date=date(2016, 2, 10),
            forgot_sign_out=True,
            time_in=time(10, 25, 7),
            time_out=None,
            user_id=sam_id,
            user_type='student',
        )
    )
    db_session.commit()
    # list(...) instead of a redundant identity comprehension.
    signed_in_users = list(
        controller.signed_in_users(db_session, today=date(2016, 2, 17))
    )
    assert [test_users['pippin'], test_users['merry']] == signed_in_users
    assert test_users['sam'] not in signed_in_users
def test_sign_starting(db_session, test_users):
    """Frodo, who is registered, signs in."""
    frodo_id = test_users['frodo'].user_id
    status = controller.sign(
        frodo_id,
        user_type='student',
        today=date(2016, 2, 17),
        session=db_session,
    )
    assert status.valid
    assert status.in_or_out == 'in'
    assert status.user_name == 'Frodo Baggins'
    assert status.user_type == 'student'
    # Exactly one open (not signed out) entry must now exist for Frodo.
    open_entry = (
        db_session
        .query(Entry)
        .filter(Entry.user_id == frodo_id)
        .filter(Entry.time_out.is_(None))
        .one()
    )
    assert open_entry
def test_sign_finishing(db_session, test_users):
    """Merry is done. He signs out."""
    merry_id = test_users['merry'].user_id
    result = controller.sign(merry_id, today=date(2016, 2, 17), session=db_session)
    assert result.valid
    assert result.in_or_out == 'out'
    assert result.user_name == 'Merry Brandybuck'
    assert result.user_type == 'tutor'
    # No open entry may remain for Merry after signing out.
    still_open = (
        db_session
        .query(Entry)
        .filter(Entry.user_id == merry_id)
        .filter(Entry.time_out.is_(None))
        .one_or_none()
    )
    assert still_open is None
def test_sign_not_registered(db_session, test_users):
    """Someone tries to sign in with an unregistered
    ID. They are told to register at the front desk.
    The entry is not added to the database.
    """
    with pytest.raises(controller.UnregisteredUser):
        controller.sign(UNREGISTERED_ID, session=db_session)
    # No entry may have been created for the unknown id.
    stray_entry = (
        db_session
        .query(Entry)
        .filter(Entry.user_id == UNREGISTERED_ID)
        .one_or_none()
    )
    assert stray_entry is None
def test_sign_duplicates(db_session, test_users):
    """Somehow, Sam has 2 signed in entries in the
    database. When he tries to sign in, a message is
    displayed and the duplicate entries are all signed
    out.
    """
    sam_id = test_users['sam'].user_id
    # Seed two simultaneously-open entries for the same day.
    db_session.add_all([
        Entry(
            uuid='781d8a2a-104b-480c-baba-98c55f11e80b',
            date=date(2016, 2, 17),
            time_in=time(10, 25, 7),
            time_out=None,
            user_id=sam_id,
            user_type='student',
        ),
        Entry(
            uuid='621d98db-92e0-46d1-9cd5-55013777a7d9',
            date=date(2016, 2, 17),
            time_in=time(13, 55, 00),
            time_out=None,
            user_id=sam_id,
            user_type='student',
        ),
    ])
    db_session.commit()
    status = controller.sign(
        sam_id, today=date(2016, 2, 17), session=db_session
    )
    # The sign action resolves to a sign-out of the duplicates.
    assert status.valid
    assert status.in_or_out == 'out'
    assert status.user_name == 'Sam Gamgee'
    assert status.user_type == 'student'
    # Both duplicates must now be closed.
    assert (
        db_session
        .query(Entry)
        .filter(Entry.user_id == sam_id)
        .filter(Entry.time_out.is_(None))
        .one_or_none()
    ) is None
def test_sign_in_previously_forgot_sign_out(db_session, test_users):
    """Gandalf forgot to sign out on a previous day.
    When he enters his user id today, he is signed in.
    """
    gandalf_id = test_users['gandalf'].user_id
    # Stale open entry from a previous day, already flagged as forgotten.
    db_session.add(
        Entry(
            uuid='781d8a2a-104b-480c-baba-98c55f11e80b',
            date=date(2016, 2, 10),
            forgot_sign_out=True,
            time_in=time(10, 25, 7),
            time_out=None,
            user_id=gandalf_id,
            user_type='tutor',
        )
    )
    db_session.commit()
    status = controller.sign(gandalf_id, session=db_session)
    assert status.valid
    assert status.in_or_out == 'in'
    assert status.user_name == 'Gandalf the Grey'
    assert status.user_type == 'tutor'
    # A fresh, unflagged open entry exists; the stale one is not reused.
    assert (
        db_session
        .query(Entry)
        .filter(Entry.forgot_sign_out.is_(False))
        .filter(Entry.user_id == test_users['gandalf'].user_id)
        .filter(Entry.time_out.is_(None))
        .one()
    )
def test_flag_forgotten_entries(db_session, test_users):
    """Frodo and sam forgot to sign out yesterday. Their
    entries are flagged automatically.
    """
    today = date(2016, 2, 17)
    yesterday = date(2016, 2, 16)
    # Two open, unflagged entries dated yesterday.
    db_session.add_all([
        Entry(
            uuid='f0030733-b216-430b-be34-79fa26cbf87d',
            date=yesterday,
            forgot_sign_out=False,
            time_in=time(14, 5, 2),
            time_out=None,
            user_id=test_users['frodo'].user_id,
            user_type='tutor',
        ),
        Entry(
            uuid='ffac853d-12ac-4a85-8b6f-7c9793479633',
            date=yesterday,
            forgot_sign_out=False,
            time_in=time(10, 45, 3),
            time_out=None,
            user_id=test_users['sam'].user_id,
            user_type='student',
        ),
    ])
    db_session.commit()
    controller.flag_forgotten_entries(db_session, today)
    # Every entry from yesterday is now flagged but remains open.
    flagged = db_session.query(Entry).filter(Entry.date == yesterday)
    for entry in flagged:
        assert entry.time_out is None
        assert entry.forgot_sign_out
def test_sign_in_student(test_users):
    """Sam, who is just a student, signs in."""
    new_entry = controller.sign_in(test_users['sam'])
    assert new_entry.user_type == 'student'
def test_sign_in_tutor(test_users):
    """Gandalf, who is just a tutor, signs in."""
    new_entry = controller.sign_in(test_users['gandalf'])
    assert new_entry.user_type == 'tutor'
def test_sign_in_ambiguous(test_users):
    """Frodo, who is both a student and a tutor,
    signs in. An AmbiguousUserType exception is raised
    to be handled by the gui.
    """
    frodo = test_users['frodo']
    with pytest.raises(controller.AmbiguousUserType):
        controller.sign_in(frodo)
def test_undo_sign_in(test_users, db_session):
    """Pippin signs in, but then presses 'cancel'.
    His entry is deleted.
    """
    # An open (not signed out) entry for Pippin.
    signed_in_entry = Entry(
        uuid='781d8a2a-104b-480c-baba-98c55f11e80b',
        date=date(2016, 2, 10),
        forgot_sign_out=True,
        time_in=time(10, 25, 7),
        time_out=None,
        user_id=test_users['pippin'].user_id,
        user_type='student',
    )
    db_session.add(signed_in_entry)
    controller.undo_sign_in(signed_in_entry, db_session)
    # The entry must be gone from the database entirely.
    deleted = (
        db_session
        .query(Entry)
        .filter(Entry.uuid == signed_in_entry.uuid)
        .one_or_none()
    )
    assert deleted is None
def test_undo_sign_out(test_users, db_session):
    """Cancelling right after signing out signs Pippin back in
    (his entry's time_out is cleared).
    """
    entry = Entry(
        uuid='781d8a2a-104b-480c-baba-98c55f11e80b',
        date=date(2016, 2, 10),
        forgot_sign_out=True,
        time_in=time(10, 25, 7),
        time_out=time(11, 30, 10),
        user_id=test_users['pippin'].user_id,
        user_type='student',
    )
    db_session.add(entry)
    controller.undo_sign_out(entry, db_session)
    refreshed = (
        db_session.query(Entry)
        .filter(Entry.uuid == entry.uuid)
        .one()
    )
    assert refreshed.time_out is None
def test_get_user_name(test_users):
    """Looking up Sam yields his full name by default."""
    assert controller.get_user_name(test_users['sam']) == 'Sam Gamgee'
def test_get_user_first_name(test_users):
    """With full_name=False only Sam's first name is returned."""
    first = controller.get_user_name(test_users['sam'], full_name=False)
    assert first == 'Sam'
def test_get_multi_word_user_name(test_users):
    """Full names longer than two words (Gandalf the Grey) come back intact."""
    assert controller.get_user_name(test_users['gandalf']) == 'Gandalf the Grey'
|
|
''' Work of Cameron Palk '''
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
# Cuisine labels; a label's position in this list is its numeric class id
# (num_to_str and str_to_num translate between the two representations).
classes = [
    'NONE',
    'SOUTHERN_US',
    'FILIPINO',
    'INDIAN',
    'ITALIAN',
    'MEXICAN',
    'CHINESE',
    'BRITISH',
    'THAI',
    'VIETNAMESE',
    'CAJUN_CREOLE',
    'BRAZILIAN',
    'FRENCH',
    'JAPANESE',
    'JAMAICAN',
    'IRISH',
    'KOREAN',
    'SPANISH',
    'MOROCCAN',
    'RUSSIAN',
    'GREEK',
]
def printAccuracy(c_name, acc):
    """Print one classifier's accuracy as an indented, aligned line."""
    line = "\t{:20} Accuracy: {}".format(c_name, toPercent(acc))
    print(line)
def toPercent(x, decimals=2):
    """Format fraction ``x`` as a percentage string, e.g. 0.5 -> '50.0%'."""
    percentage = round(x * 100, decimals)
    return "{}%".format(percentage)
def num_to_str(x):
    """Map a numeric class id back to its cuisine label."""
    return classes[x]
def str_to_num(x):
    """Map a cuisine label to its numeric class id (its index in classes)."""
    return classes.index(x)
def getColumnAccuracy(X, Y):
    """Count how often predictions in X agree with labels in Y.

    X and Y are equal-length sequences (lists or pandas Series) compared
    position by position.

    Returns:
        (accuracy, correct, incorrect) where accuracy = correct / len(Y).
        Empty input yields (0.0, 0, 0) instead of raising ZeroDivisionError.
    """
    total = len(Y)
    if total == 0:
        return (0.0, 0, 0)
    # zip over values avoids the original's label-based Series indexing,
    # which only worked for a default RangeIndex.
    correct = sum(1 for x, y in zip(X, Y) if x == y)
    return (correct / total, correct, total - correct)
def print_individual_classifier_accuracies(df):
    """Print each base classifier's standalone accuracy.

    Every column of ``df`` except the last holds one classifier's predicted
    labels; the final column holds the true labels.
    """
    print("\nClassifier Accuracies: ")
    # .ix was deprecated and removed in pandas 1.0; .iloc is the
    # positional equivalent.
    X = df.iloc[:, 0:-1]
    Y = df.iloc[:, -1]
    for column in X.columns:
        accuracy, _, _ = getColumnAccuracy(X[column], Y)
        printAccuracy(column, accuracy)
def main(argv):
    """Evaluate several meta-classifiers over base-classifier predictions.

    Usage: python3 script.py <input_csv> <output_csv>

    The input CSV holds one column of predicted cuisine labels per base
    classifier, with the true label in the final column.
    """
    try:
        input_csv = argv[1]
        output_model = argv[2]  # NOTE(review): parsed but currently unused
    except IndexError:
        print("Error, usage: \"python3 {} <input_csv> <output_csv>\"".format(argv[0]))
        return
    df = pd.read_csv(input_csv)
    convert_to_num = True
    print_individual_classifier_accuracies(df)
    # Split data into test and train (random 80/20 mask).
    msk = np.random.rand(len(df)) < 0.8
    Training_DataFrame = df[msk].copy()
    # .ix was deprecated and removed in pandas 1.0; .iloc is the
    # positional equivalent used throughout.
    if convert_to_num:
        X = Training_DataFrame.iloc[:, 0:-1].applymap(str_to_num)
        Y = Training_DataFrame.iloc[:, -1].map(str_to_num)
    else:
        X = Training_DataFrame.iloc[:, 0:-1]
        Y = Training_DataFrame.iloc[:, -1]
    Testing_DataFrame = df[~msk].copy()
    if convert_to_num:
        testing_X = Testing_DataFrame.iloc[:, 0:-1].applymap(str_to_num)
        testing_Y = Testing_DataFrame.iloc[:, -1].map(str_to_num)
    else:
        testing_X = Testing_DataFrame.iloc[:, 0:-1]
        testing_Y = Testing_DataFrame.iloc[:, -1]
    print("\nTraining on Classifier Predictions:")
    ''' LINEAR CLASSIFIERS '''
    print("Linear Classifiers\n")
    ''' Logistic Regression '''
    from sklearn.linear_model import LogisticRegression
    # Hyper parameter: optimisation tolerance. This matches sklearn's
    # default, but is now actually passed instead of being defined and
    # silently ignored as before.
    tol = 0.0001
    LR_classifier = LogisticRegression(tol=tol)
    LR_classifier.fit(X, Y)
    LR_score = LR_classifier.score(testing_X, testing_Y)
    printAccuracy("Logistic Regression", LR_score)
    ''' Perceptron '''
    from sklearn.linear_model import Perceptron
    P_classifier = Perceptron()
    P_classifier.fit(X, Y)
    P_score = P_classifier.score(testing_X, testing_Y)
    printAccuracy("Perceptron", P_score)
    ''' Gaussian Naive Bayes '''
    from sklearn.naive_bayes import GaussianNB
    MNB_classifier = GaussianNB()
    MNB_classifier.fit(X, Y)
    MNB_score = MNB_classifier.score(testing_X, testing_Y)
    printAccuracy("Gaussian Naive Bayes", MNB_score)
    ''' Linear Support Vector Machine ( SVM ) '''
    from sklearn.svm import LinearSVC
    LSVC_classifier = LinearSVC()
    LSVC_classifier.fit(X, Y)
    LSVC_score = LSVC_classifier.score(testing_X, testing_Y)
    printAccuracy("Linear SVM", LSVC_score)
    ''' NONLINEAR ALGOS '''
    print("\nNonlinear Classifiers\n")
    ''' Decision Tree '''
    from sklearn.tree import DecisionTreeClassifier
    DT_classifier = DecisionTreeClassifier()
    DT_classifier.fit(X, Y)
    DT_score = DT_classifier.score(testing_X, testing_Y)
    printAccuracy("Decision Tree", DT_score)
    ''' Random Forest '''
    from sklearn.ensemble import RandomForestClassifier
    n_estimators = 22
    RF_classifier = RandomForestClassifier(n_estimators=n_estimators)
    RF_classifier.fit(X, Y)
    RF_score = RF_classifier.score(testing_X, testing_Y)
    printAccuracy("Random Forest", RF_score)
    ''' KNN '''
    from sklearn.neighbors import KNeighborsClassifier
    # Bug fix: n_neighbors was defined but never passed, so KNN silently
    # ran with sklearn's default of 5 neighbours.
    n_neighbors = 20
    KNN_classifier = KNeighborsClassifier(n_neighbors=n_neighbors)
    KNN_classifier.fit(X, Y)
    KNN_score = KNN_classifier.score(testing_X, testing_Y)
    printAccuracy("KNN", KNN_score)
    ''' VOTING '''
    print("\nMajority Vote Classifier\n")
    # Majority vote across the base classifiers' predictions, row by row.
    V_correct = 0
    V_total = len(testing_X)
    for idx, row in testing_X.iterrows():
        prediction = Counter(row).most_common()[0][0]
        if testing_Y[idx] == prediction:
            V_correct += 1
    printAccuracy("Voting", V_correct / V_total)
    print("\n\nDone.")
if __name__ == '__main__':
    # Allow use both as a script and as an importable module.
    main(sys.argv)
|
|
"""
Files Pipeline
"""
import hashlib
import os
import os.path
import rfc822
import time
from six.moves.urllib.parse import urlparse
from collections import defaultdict
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from twisted.internet import defer, threads
from pyrake import log
from pyrake.contrib.pipeline.media import MediaPipeline
from pyrake.exceptions import NotConfigured, IgnoreRequest
from pyrake.http import Request
from pyrake.utils.misc import md5sum
class FileException(Exception):
    """General media error raised when downloading or processing a file fails."""
class FSFilesStore(object):
    """Store downloaded files on the local filesystem under ``basedir``."""

    def __init__(self, basedir):
        # Accept URIs such as file:///tmp/files as well as plain paths.
        if '://' in basedir:
            basedir = basedir.split('://', 1)[1]
        self.basedir = basedir
        self._mkdir(self.basedir)
        # Directories already created, keyed by domain (see _mkdir).
        self.created_directories = defaultdict(set)

    def persist_file(self, path, buf, info, meta=None, headers=None):
        """Write the in-memory buffer ``buf`` to ``basedir``/``path``.

        ``meta`` and ``headers`` are accepted for interface compatibility
        with other stores and ignored here.
        """
        absolute_path = self._get_filesystem_path(path)
        self._mkdir(os.path.dirname(absolute_path), info)
        with open(absolute_path, 'wb') as f:
            f.write(buf.getvalue())

    def stat_file(self, path, info):
        """Return last-modified time and md5 checksum of a stored file.

        Returns an empty dict when the file is missing or unreadable,
        which callers treat as "needs (re)download".
        """
        absolute_path = self._get_filesystem_path(path)
        try:
            last_modified = os.path.getmtime(absolute_path)
        except OSError:  # was a bare except (the FIXME); OSError is what getmtime raises
            return {}
        with open(absolute_path, 'rb') as f:
            checksum = md5sum(f)
        return {'last_modified': last_modified, 'checksum': checksum}

    def _get_filesystem_path(self, path):
        # Stored paths use '/' separators; rebuild natively for the host OS.
        path_comps = path.split('/')
        return os.path.join(self.basedir, *path_comps)

    def _mkdir(self, dirname, domain=None):
        # Cache created directories per domain to avoid repeated stat calls.
        seen = self.created_directories[domain] if domain else set()
        if dirname not in seen:
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            seen.add(dirname)
class S3FilesStore(object):
    """Store downloaded files in an Amazon S3 bucket via boto.

    NOTE(review): relies on the Python 2-only ``rfc822`` module and on
    blocking boto calls wrapped in deferToThread — confirm before porting.
    """
    # Default credentials; overwritten from settings by
    # FilesPipeline.from_settings().
    AWS_ACCESS_KEY_ID = None
    AWS_SECRET_ACCESS_KEY = None
    POLICY = 'public-read'
    HEADERS = {
        'Cache-Control': 'max-age=172800',
    }
    def __init__(self, uri):
        # "s3://bucket/some/prefix/" -> bucket="bucket", prefix="some/prefix/"
        assert uri.startswith('s3://')
        self.bucket, self.prefix = uri[5:].split('/', 1)
    def stat_file(self, path, info):
        """Return a deferred yielding checksum and last-modified stamp."""
        def _onsuccess(boto_key):
            # The ETag comes back wrapped in double quotes; presumably it is
            # the md5 of the object (true for non-multipart uploads) — verify.
            checksum = boto_key.etag.strip('"')
            last_modified = boto_key.last_modified
            modified_tuple = rfc822.parsedate_tz(last_modified)
            modified_stamp = int(rfc822.mktime_tz(modified_tuple))
            return {'checksum': checksum, 'last_modified': modified_stamp}
        return self._get_boto_key(path).addCallback(_onsuccess)
    def _get_boto_bucket(self):
        from boto.s3.connection import S3Connection
        # disable ssl (is_secure=False) because of this python bug:
        # http://bugs.python.org/issue5103
        c = S3Connection(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, is_secure=False)
        return c.get_bucket(self.bucket, validate=False)
    def _get_boto_key(self, path):
        # Runs the blocking boto lookup in a thread; returns a Deferred.
        b = self._get_boto_bucket()
        key_name = '%s%s' % (self.prefix, path)
        return threads.deferToThread(b.get_key, key_name)
    def persist_file(self, path, buf, info, meta=None, headers=None):
        """Upload file to S3 storage"""
        b = self._get_boto_bucket()
        key_name = '%s%s' % (self.prefix, path)
        k = b.new_key(key_name)
        if meta:
            for metakey, metavalue in six.iteritems(meta):
                k.set_metadata(metakey, str(metavalue))
        # Default headers (cache policy) can be extended per call.
        h = self.HEADERS.copy()
        if headers:
            h.update(headers)
        buf.seek(0)
        return threads.deferToThread(k.set_contents_from_string, buf.getvalue(),
                                     headers=h, policy=self.POLICY)
class FilesPipeline(MediaPipeline):
    """Abstract pipeline that implement the file downloading

    This pipeline tries to minimize network transfers and file processing,
    doing stat of the files and determining if file is new, uptodate or
    expired.

    `new` files are those that pipeline never processed and needs to be
    downloaded from supplier site the first time.

    `uptodate` files are the ones that the pipeline processed and are still
    valid files.

    `expired` files are those that pipeline already processed but the last
    modification was made long time ago, so a reprocessing is recommended to
    refresh it in case of change.
    """
    MEDIA_NAME = "file"
    # Days after which a stored file is considered expired and re-downloaded.
    EXPIRES = 90
    # Maps store-URI schemes to storage backend classes.
    STORE_SCHEMES = {
        '': FSFilesStore,
        'file': FSFilesStore,
        's3': S3FilesStore,
    }
    DEFAULT_FILES_URLS_FIELD = 'file_urls'
    DEFAULT_FILES_RESULT_FIELD = 'files'
    def __init__(self, store_uri, download_func=None):
        if not store_uri:
            raise NotConfigured
        self.store = self._get_store(store_uri)
        super(FilesPipeline, self).__init__(download_func=download_func)
    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline from settings, pushing AWS credentials and
        field names into the relevant class attributes first.
        """
        s3store = cls.STORE_SCHEMES['s3']
        s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
        s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
        cls.FILES_URLS_FIELD = settings.get('FILES_URLS_FIELD', cls.DEFAULT_FILES_URLS_FIELD)
        cls.FILES_RESULT_FIELD = settings.get('FILES_RESULT_FIELD', cls.DEFAULT_FILES_RESULT_FIELD)
        cls.EXPIRES = settings.getint('FILES_EXPIRES', 90)
        store_uri = settings['FILES_STORE']
        return cls(store_uri)
    def _get_store(self, uri):
        """Instantiate the storage backend matching the store URI's scheme."""
        if os.path.isabs(uri):  # to support win32 paths like: C:\\some\dir
            scheme = 'file'
        else:
            scheme = urlparse(uri).scheme
        store_cls = self.STORE_SCHEMES[scheme]
        return store_cls(uri)
    def media_to_download(self, request, info):
        """Return a deferred stat result for an up-to-date stored file,
        or None (directly or via callbacks) to force a fresh download.
        """
        def _onsuccess(result):
            if not result:
                return  # returning None force download
            last_modified = result.get('last_modified', None)
            if not last_modified:
                return  # returning None force download
            age_seconds = time.time() - last_modified
            age_days = age_seconds / 60 / 60 / 24
            if age_days > self.EXPIRES:
                return  # returning None force download
            referer = request.headers.get('Referer')
            log.msg(format='File (uptodate): Downloaded %(medianame)s from %(request)s referred in <%(referer)s>',
                    level=log.DEBUG, spider=info.spider,
                    medianame=self.MEDIA_NAME, request=request, referer=referer)
            self.inc_stats(info.spider, 'uptodate')
            checksum = result.get('checksum', None)
            return {'url': request.url, 'path': path, 'checksum': checksum}
        path = self.file_path(request, info=info)
        dfd = defer.maybeDeferred(self.store.stat_file, path, info)
        # Any stat failure is swallowed (forces a download); it is still
        # logged via the errback below.
        dfd.addCallbacks(_onsuccess, lambda _: None)
        dfd.addErrback(log.err, self.__class__.__name__ + '.store.stat_file')
        return dfd
    def media_failed(self, failure, request, info):
        # IgnoreRequest is deliberate and therefore not logged; every other
        # failure is logged and normalised to FileException.
        if not isinstance(failure.value, IgnoreRequest):
            referer = request.headers.get('Referer')
            log.msg(format='File (unknown-error): Error downloading '
                           '%(medianame)s from %(request)s referred in '
                           '<%(referer)s>: %(exception)s',
                    level=log.WARNING, spider=info.spider, exception=failure.value,
                    medianame=self.MEDIA_NAME, request=request, referer=referer)
        raise FileException
    def media_downloaded(self, response, request, info):
        """Validate the response, persist the file and return its result dict
        ({'url', 'path', 'checksum'}); raises FileException on any problem.
        """
        referer = request.headers.get('Referer')
        if response.status != 200:
            log.msg(format='File (code: %(status)s): Error downloading file from %(request)s referred in <%(referer)s>',
                    level=log.WARNING, spider=info.spider,
                    status=response.status, request=request, referer=referer)
            raise FileException('download-error')
        if not response.body:
            log.msg(format='File (empty-content): Empty file from %(request)s referred in <%(referer)s>: no-content',
                    level=log.WARNING, spider=info.spider,
                    request=request, referer=referer)
            raise FileException('empty-content')
        status = 'cached' if 'cached' in response.flags else 'downloaded'
        log.msg(format='File (%(status)s): Downloaded file from %(request)s referred in <%(referer)s>',
                level=log.DEBUG, spider=info.spider,
                status=status, request=request, referer=referer)
        self.inc_stats(info.spider, status)
        try:
            path = self.file_path(request, response=response, info=info)
            checksum = self.file_downloaded(response, request, info)
        except FileException as exc:
            whyfmt = 'File (error): Error processing file from %(request)s referred in <%(referer)s>: %(errormsg)s'
            log.msg(format=whyfmt, level=log.WARNING, spider=info.spider,
                    request=request, referer=referer, errormsg=str(exc))
            raise
        except Exception as exc:
            whyfmt = 'File (unknown-error): Error processing file from %(request)s referred in <%(referer)s>'
            log.err(None, whyfmt % {'request': request, 'referer': referer}, spider=info.spider)
            raise FileException(str(exc))
        return {'url': request.url, 'path': path, 'checksum': checksum}
    def inc_stats(self, spider, status):
        # Track total files plus a per-status breakdown.
        spider.crawler.stats.inc_value('file_count', spider=spider)
        spider.crawler.stats.inc_value('file_status_count/%s' % status, spider=spider)
    ### Overridable Interface
    def get_media_requests(self, item, info):
        """One download Request per URL in the item's file-urls field."""
        return [Request(x) for x in item.get(self.FILES_URLS_FIELD, [])]
    def file_downloaded(self, response, request, info):
        """Persist the response body via the store; return its md5 checksum."""
        path = self.file_path(request, response=response, info=info)
        buf = BytesIO(response.body)
        self.store.persist_file(path, buf, info)
        checksum = md5sum(buf)
        return checksum
    def item_completed(self, results, item, info):
        # Keep only the successful download results on the item.
        if self.FILES_RESULT_FIELD in item.fields:
            item[self.FILES_RESULT_FIELD] = [x for ok, x in results if ok]
        return item
    def file_path(self, request, response=None, info=None):
        """Storage path for a request: 'full/<sha1-of-url><ext>'."""
        ## start of deprecation warning block (can be removed in the future)
        def _warn():
            from pyrake.exceptions import pyrakeDeprecationWarning
            import warnings
            warnings.warn('FilesPipeline.file_key(url) method is deprecated, please use '
                          'file_path(request, response=None, info=None) instead',
                          category=pyrakeDeprecationWarning, stacklevel=1)
        # check if called from file_key with url as first argument
        if not isinstance(request, Request):
            _warn()
            url = request
        else:
            url = request.url
        # detect if file_key() method has been overridden
        if not hasattr(self.file_key, '_base'):
            _warn()
            return self.file_key(url)
        ## end of deprecation warning block
        media_guid = hashlib.sha1(url).hexdigest()  # change to request.url after deprecation
        media_ext = os.path.splitext(url)[1]  # change to request.url after deprecation
        return 'full/%s%s' % (media_guid, media_ext)
    # deprecated
    def file_key(self, url):
        return self.file_path(url)
    # Marker consumed by file_path() to detect user overrides of file_key.
    file_key._base = True
|
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.compat
import collections
import unittest
import TestUnit
import SCons.Errors
import SCons.Platform
import SCons.Environment
import SCons.Action
class Environment(collections.UserDict):
    """Minimal dict-backed stand-in for a SCons Environment."""

    def Detect(self, cmd):
        # Pretend every requested tool exists.
        return cmd

    def AppendENVPath(self, key, value):
        # Path manipulation is irrelevant for these tests.
        pass
class PlatformTestCase(unittest.TestCase):
    # (platform name, expected construction variables) — same values the
    # original asserted one platform at a time.
    expected = [
        ('cygwin', {'PROGSUFFIX': '.exe', 'LIBSUFFIX': '.a', 'SHELL': 'sh'}),
        ('os2', {'PROGSUFFIX': '.exe', 'LIBSUFFIX': '.lib'}),
        ('posix', {'PROGSUFFIX': '', 'LIBSUFFIX': '.a', 'SHELL': 'sh'}),
        ('irix', {'PROGSUFFIX': '', 'LIBSUFFIX': '.a', 'SHELL': 'sh'}),
        ('aix', {'PROGSUFFIX': '', 'LIBSUFFIX': '.a', 'SHELL': 'sh'}),
        ('sunos', {'PROGSUFFIX': '', 'LIBSUFFIX': '.a', 'SHELL': 'sh'}),
        ('hpux', {'PROGSUFFIX': '', 'LIBSUFFIX': '.a', 'SHELL': 'sh'}),
        ('win32', {'PROGSUFFIX': '.exe', 'LIBSUFFIX': '.lib'}),
    ]

    def test_Platform(self):
        """Test the Platform() function"""
        for name, variables in self.expected:
            p = SCons.Platform.Platform(name)
            assert str(p) == name, p
            env = Environment()
            p(env)
            for key, value in variables.items():
                assert env[key] == value, env
        # An unknown platform name must raise UserError. (The original
        # 'else: raise' was a bare raise with no active exception, which
        # raises RuntimeError instead of failing meaningfully.)
        try:
            SCons.Platform.Platform('_does_not_exist_')
        except SCons.Errors.UserError:
            pass
        else:
            self.fail("Platform('_does_not_exist_') did not raise UserError")
        # The default (host) platform must populate the environment.
        env = Environment()
        SCons.Platform.Platform()(env)
        assert env != {}, env
class TempFileMungeTestCase(unittest.TestCase):
    """Exercises SCons.Platform.TempFileMunge, which rewrites over-long
    command lines to use a temporary response file.
    """
    def test_MAXLINELENGTH(self):
        """ Test different values for MAXLINELENGTH with the same
        size command string to ensure that the temp file mechanism
        kicks in only at MAXLINELENGTH+1, or higher
        """
        # Init class with cmd, such that the fully expanded
        # string reads "a test command line".
        # Note, how we're using a command string here that is
        # actually longer than the substituted one. This is to ensure
        # that the TempFileMunge class internally really takes the
        # length of the expanded string into account.
        defined_cmd = "a $VERY $OVERSIMPLIFIED line"
        t = SCons.Platform.TempFileMunge(defined_cmd)
        env = SCons.Environment.SubstitutionEnvironment(tools=[])
        # Setting the line length high enough...
        env['MAXLINELENGTH'] = 1024
        env['VERY'] = 'test'
        env['OVERSIMPLIFIED'] = 'command'
        expanded_cmd = env.subst(defined_cmd)
        # Call the tempfile munger
        cmd = t(None,None,env,0)
        # Under the limit: the command must come back unchanged.
        assert cmd == defined_cmd, cmd
        # Let MAXLINELENGTH equal the string's length
        env['MAXLINELENGTH'] = len(expanded_cmd)
        cmd = t(None,None,env,0)
        assert cmd == defined_cmd, cmd
        # Finally, let the actual tempfile mechanism kick in
        # Disable printing of actions...
        old_actions = SCons.Action.print_actions
        SCons.Action.print_actions = 0
        env['MAXLINELENGTH'] = len(expanded_cmd)-1
        cmd = t(None,None,env,0)
        # ...and restoring its setting.
        SCons.Action.print_actions = old_actions
        # Over the limit: the munger must have produced a different command.
        assert cmd != defined_cmd, cmd
    def test_tempfilecreation_once(self):
        """The munged command is cached on the target node's attributes
        so the temp file is only created once per target.
        """
        # Init class with cmd, such that the fully expanded
        # string reads "a test command line".
        # Note, how we're using a command string here that is
        # actually longer than the substituted one. This is to ensure
        # that the TempFileMunge class internally really takes the
        # length of the expanded string into account.
        defined_cmd = "a $VERY $OVERSIMPLIFIED line"
        t = SCons.Platform.TempFileMunge(defined_cmd)
        env = SCons.Environment.SubstitutionEnvironment(tools=[])
        # Setting the line length high enough...
        env['VERY'] = 'test'
        env['OVERSIMPLIFIED'] = 'command'
        expanded_cmd = env.subst(defined_cmd)
        # Force the temp-file path by making the limit one char too small.
        env['MAXLINELENGTH'] = len(expanded_cmd)-1
        # Disable printing of actions...
        old_actions = SCons.Action.print_actions
        SCons.Action.print_actions = 0
        # Create an instance of object derived class to allow setattrb
        class Node(object) :
            class Attrs(object):
                pass
            def __init__(self):
                self.attributes = self.Attrs()
        target = [Node()]
        cmd = t(target, None, env, 0)
        # ...and restoring its setting.
        SCons.Action.print_actions = old_actions
        assert cmd != defined_cmd, cmd
        # The munged command list must have been cached on the target node.
        assert cmd == getattr(target[0].attributes, 'tempfile_cmdlist', None)
class PlatformEscapeTestCase(unittest.TestCase):
    def test_posix_escape(self):
        """Paths containing parentheses are escaped by quoting, not by
        rewriting the characters inside.
        """
        import SCons.Platform.posix
        unescaped = "/my (really) great code/main.cpp"
        escaped = SCons.Platform.posix.escape(unescaped)
        # escape() should wrap the string in quotes and leave the interior
        # untouched (parens need no shell escaping once they're quoted).
        assert escaped[1:-1] == unescaped
if __name__ == "__main__":
    # Collect every test_* method from each case class into one suite.
    suite = unittest.TestSuite()
    for case in (PlatformTestCase,
                 TempFileMungeTestCase,
                 PlatformEscapeTestCase):
        method_names = unittest.getTestCaseNames(case, 'test_')
        suite.addTests(map(case, method_names))
    TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
import time
from zope.interface import implementer
from twisted.internet import defer
from tensor.interfaces import ITensorSource
from tensor.objects import Source
from tensor.utils import fork
from tensor.aggregators import Counter64
@implementer(ITensorSource)
class LoadAverage(Source):
    """Reports system load average for the current host

    **Metrics:**
    :(service name): Load average
    """
    def _parse_loadaverage(self, data):
        # First whitespace-separated field of /proc/loadavg is the
        # 1-minute load average.
        la1 = data.split()[0]
        return self.createEvent('ok', 'Load average', float(la1))

    def get(self):
        # Context manager closes the handle promptly (it was previously
        # left open until garbage collection).
        with open('/proc/loadavg', 'rt') as loadavg:
            return self._parse_loadaverage(loadavg.read())

    @defer.inlineCallbacks
    def sshGet(self):
        """Remote variant: read /proc/loadavg via self.fork over SSH."""
        loadavg, err, code = yield self.fork('/bin/cat /proc/loadavg')
        if code == 0:
            defer.returnValue(self._parse_loadaverage(loadavg))
        else:
            raise Exception(err)
@implementer(ITensorSource)
class DiskIO(Source):
    """Reports disk IO statistics per device

    **Configuration arguments:**
    :param devices: List of devices to check (optional)
    :type devices: list.

    **Metrics:**
    :(service name).(device name).reads: Number of completed reads
    :(service name).(device name).read_bytes: Bytes read per second
    :(service name).(device name).read_latency: Disk read latency
    :(service name).(device name).writes: Number of completed writes
    :(service name).(device name).write_bytes: Bytes written per second
    :(service name).(device name).write_latency: Disk write latency
    """
    def __init__(self, *a, **kw):
        Source.__init__(self, *a, **kw)
        self.devices = self.config.get('devices')
        # Previous (reads, writes, read_ms, write_ms) per device name,
        # used to compute deltas between polls.
        self.tcache = {}
        # Last computed read/write latency per device, reused when no new
        # operations completed since the previous poll.
        self.trc = {}
        self.twc = {}

    def _parse_stats(self, stats):
        """Turn /proc/diskstats lines into a list of metric events."""
        events = []
        for s in stats:
            parts = s.strip().split()
            n = parts[2]
            # Skip pseudo devices we don't care about.
            if (n[:4] == 'loop') or (n[:3] == 'ram'):
                continue
            dname = "/dev/" + n
            if self.devices and (dname not in self.devices):
                continue
            nums = [int(i) for i in parts[3:]]
            reads, read_m, read_sec, read_t = nums[:4]
            writes, write_m, write_sec, write_t = nums[4:8]
            # Slice exactly three fields: kernels >= 4.18 append extra
            # discard/flush counters, which made the original open-ended
            # unpacking (nums[8:]) raise ValueError.
            cur_io, io_t, io_wt = nums[8:11]
            # Average latency per op since the last poll; falls back to the
            # last known value when no ops completed in the interval.
            if n in self.tcache:
                (last_r, last_w, last_rt, last_wt) = self.tcache[n]
                r_delta = float(reads - last_r)
                w_delta = float(writes - last_w)
                if r_delta > 0:
                    read_lat = (read_t - last_rt) / r_delta
                    self.trc[n] = read_lat
                else:
                    read_lat = self.trc.get(n, None)
                if w_delta > 0:
                    write_lat = (write_t - last_wt) / w_delta
                    self.twc[n] = write_lat
                else:
                    write_lat = self.twc.get(n, None)
            else:
                # First sample: use lifetime averages as the baseline.
                if reads > 0:
                    read_lat = read_t / float(reads)
                    self.trc[n] = read_lat
                else:
                    read_lat = None
                if writes > 0:
                    write_lat = write_t / float(writes)
                    self.twc[n] = write_lat
                else:
                    write_lat = None
            self.tcache[n] = (reads, writes, read_t, write_t)
            if read_lat:
                events.append(self.createEvent('ok',
                    'Read latency (ms)', read_lat,
                    prefix='%s.read_latency' % dname))
            if write_lat:
                events.append(self.createEvent('ok',
                    'Write latency (ms)', write_lat,
                    prefix='%s.write_latency' % dname))
            # Sector counts are in 512-byte units per the kernel iostats doc.
            events.extend([
                self.createEvent('ok', 'Reads', reads,
                    prefix='%s.reads' % dname, aggregation=Counter64),
                self.createEvent('ok', 'Read Bps', read_sec * 512,
                    prefix='%s.read_bytes' % dname, aggregation=Counter64),
                self.createEvent('ok', 'Writes', writes,
                    prefix='%s.writes' % dname, aggregation=Counter64),
                self.createEvent('ok', 'Write Bps', write_sec * 512,
                    prefix='%s.write_bytes' % dname, aggregation=Counter64),
            ])
        return events

    @defer.inlineCallbacks
    def sshGet(self):
        """Remote variant: read /proc/diskstats via self.fork over SSH."""
        diskstats, err, code = yield self.fork('/bin/cat /proc/diskstats')
        if code == 0:
            stats = diskstats.strip('\n').split('\n')
            defer.returnValue(self._parse_stats(stats))
        else:
            raise Exception(err)

    def _getstats(self):
        # Context manager closes the handle promptly (it was leaked before).
        with open('/proc/diskstats', 'rt') as f:
            stats = f.read()
        return stats.strip('\n').split('\n')

    def get(self):
        return self._parse_stats(self._getstats())
@implementer(ITensorSource)
class CPU(Source):
    """Reports system CPU utilisation as a percentage/100

    **Metrics:**
    :(service name): Percentage CPU utilisation
    :(service name).(type): Percentage CPU utilisation by type
    """
    # Field names of the aggregate "cpu" line in /proc/stat, in order.
    cols = ['user', 'nice', 'system', 'idle', 'iowait', 'irq',
            'softirq', 'steal', 'guest', 'guest_nice']
    def __init__(self, *a):
        Source.__init__(self, *a)
        # Previous counter sample; None until the first reading.
        self.cpu = None
    def _read_proc_stat(self):
        # The first line of /proc/stat aggregates all CPUs.
        with open('/proc/stat', 'rt') as procstat:
            return procstat.readline().strip('\n')
    def _calculate_metrics(self, stat):
        """Return [(name, fraction), ...] of CPU time since the previous
        sample, or None on the first call (no baseline) or if no time passed.
        The first entry uses name None and holds overall utilisation.
        """
        cpu = [int(i) for i in stat.split()[1:]]
        # We might not have all the virt-related numbers, so zero-pad.
        cpu = (cpu + [0, 0, 0])[:10]
        (user, nice, system, idle, iowait, irq,
         softirq, steal, guest, guest_nice) = cpu
        # "usage" excludes idle and iowait; "total" is all accounted time.
        usage = user + nice + system + irq + softirq + steal
        total = usage + iowait + idle
        if not self.cpu:
            # No initial values, so set them and return no events.
            self.cpu = cpu
            self.prev_total = total
            self.prev_usage = usage
            return None
        total_diff = total - self.prev_total
        if total_diff != 0:
            metrics = [(None, (usage - self.prev_usage) / float(total_diff))]
            for i, name in enumerate(self.cols):
                prev = self.cpu[i]
                cpu_m = (cpu[i] - prev) / float(total_diff)
                metrics.append((name, cpu_m))
            # Store this sample as the baseline for the next poll.
            self.cpu = cpu
            self.prev_total = total
            self.prev_usage = usage
            return metrics
        return None
    def _transpose_metrics(self, metrics):
        # Convert (name, fraction) pairs into events; passes None through.
        if metrics:
            events = [
                self.createEvent('ok', 'CPU %s %s%%' % (name, int(cpu_m * 100)), cpu_m, prefix=name)
                for name, cpu_m in metrics[1:]
            ]
            events.append(self.createEvent(
                'ok', 'CPU %s%%' % int(metrics[0][1] * 100), metrics[0][1]))
            return events
        return None
    @defer.inlineCallbacks
    def sshGet(self):
        """Remote variant: read the first /proc/stat line via fork over SSH."""
        procstat, err, code = yield self.fork('/usr/bin/head -n 1 /proc/stat')
        if code == 0:
            stats = self._calculate_metrics(procstat.strip('\n'))
            defer.returnValue(self._transpose_metrics(stats))
        else:
            raise Exception(err)
    def get(self):
        stat = self._read_proc_stat()
        stats = self._calculate_metrics(stat)
        return self._transpose_metrics(stats)
@implementer(ITensorSource)
class Memory(Source):
    """Reports system memory utilisation as a percentage/100

    **Metrics:**
    :(service name): Percentage memory utilisation
    """
    def _parse_stats(self, mem):
        """Compute used/total from an iterable of /proc/meminfo lines.

        Buffers and Cached count as free, since the kernel can reclaim them.
        """
        dat = {}
        for l in mem:
            k, v = l.replace(':', '').split()[:2]
            dat[k] = int(v)
        free = dat['MemFree'] + dat['Buffers'] + dat['Cached']
        total = dat['MemTotal']
        used = total - free
        return self.createEvent('ok', 'Memory %s/%s' % (used, total),
                                used/float(total))

    def get(self):
        # Context manager closes the handle promptly (it was previously
        # opened and never closed).
        with open('/proc/meminfo') as mem:
            return self._parse_stats(mem)

    @defer.inlineCallbacks
    def sshGet(self):
        """Remote variant: read /proc/meminfo via self.fork over SSH."""
        mem, err, code = yield self.fork('/bin/cat /proc/meminfo')
        if code == 0:
            defer.returnValue(self._parse_stats(mem.strip('\n').split('\n')))
        else:
            raise Exception(err)
@implementer(ITensorSource)
class DiskFree(Source):
    """Returns the free space for all mounted filesystems

    **Configuration arguments:**
    :param disks: List of devices to check (optional)
    :type disks: list.

    **Metrics:**
    :(service name).(device).used: Used space (%)
    :(service name).(device).bytes: Used space (kbytes)
    :(service name).(device).free: Free space (kbytes)
    """
    # A single get() implementation serves both local and SSH checks.
    ssh = True
    @defer.inlineCallbacks
    def get(self):
        disks = self.config.get('disks')
        # df flags: -l local filesystems, -P POSIX one-line-per-fs output,
        # -x tmpfs excludes tmpfs mounts.
        out, err, code = yield self.fork('/bin/df', args=('-lPx', 'tmpfs',))
        # Drop the header row and split each mount line into its 6 fields.
        out = [i.split() for i in out.strip('\n').split('\n')[1:]]
        events = []
        for disk, size, used, free, util, mount in out:
            if disks and (disk not in disks):
                continue
            if disk != "udev":
                util = int(util.strip('%'))
                used = int(used)
                free = int(free)
                events.extend([
                    self.createEvent('ok', 'Disk usage %s%%' % (util),
                        util, prefix="%s.used" % disk),
                    self.createEvent('ok', 'Disk usage %s kB' % (used),
                        used, prefix="%s.bytes" % disk),
                    self.createEvent('ok', 'Disk free %s kB' % (free),
                        free, prefix="%s.free" % disk)
                ])
        defer.returnValue(events)
@implementer(ITensorSource)
class Network(Source):
    """Returns all network interface statistics

    **Configuration arguments:**
    :param interfaces: List of interfaces to check (optional)
    :type interfaces: list.

    **Metrics:**
    :(service name).(device).tx_bytes: Bytes transmitted
    :(service name).(device).tx_packets: Packets transmitted
    :(service name).(device).tx_errors: Errors
    :(service name).(device).rx_bytes: Bytes received
    :(service name).(device).rx_packets: Packets received
    :(service name).(device).rx_errors: Errors
    """
    def _parse_stats(self, stats):
        """Turn /proc/net/dev interface lines into metric events."""
        ifaces = self.config.get('interfaces')
        ev = []
        for stat in stats:
            items = stat.split()
            iface = items[0].strip(':')
            if ifaces and (iface not in ifaces):
                continue
            # /proc/net/dev lists the eight *receive* counters first
            # (bytes, packets, errs, ...) and the transmit counters from
            # field 9 onward. The original code had RX and TX swapped.
            rx_bytes = int(items[1])
            rx_packets = int(items[2])
            rx_err = int(items[3])
            tx_bytes = int(items[9])
            tx_packets = int(items[10])
            tx_err = int(items[11])
            ev.extend([
                self.createEvent('ok',
                    'Network %s TX bytes/sec' % (iface),
                    tx_bytes, prefix='%s.tx_bytes' % iface,
                    aggregation=Counter64),
                self.createEvent('ok',
                    'Network %s TX packets/sec' % (iface),
                    tx_packets, prefix='%s.tx_packets' % iface,
                    aggregation=Counter64),
                self.createEvent('ok',
                    'Network %s TX errors/sec' % (iface),
                    tx_err, prefix='%s.tx_errors' % iface,
                    aggregation=Counter64),
                self.createEvent('ok',
                    'Network %s RX bytes/sec' % (iface),
                    rx_bytes, prefix='%s.rx_bytes' % iface,
                    aggregation=Counter64),
                self.createEvent('ok',
                    'Network %s RX packets/sec' % (iface),
                    rx_packets, prefix='%s.rx_packets' % iface,
                    aggregation=Counter64),
                self.createEvent('ok',
                    'Network %s RX errors/sec' % (iface),
                    rx_err, prefix='%s.rx_errors' % iface,
                    aggregation=Counter64),
            ])
        return ev

    def _readStats(self):
        # Context manager closes the handle promptly (it was leaked before).
        with open('/proc/net/dev', 'rt') as f:
            proc_dev = f.read()
        # Skip the two header lines.
        return proc_dev.strip('\n').split('\n')[2:]

    @defer.inlineCallbacks
    def sshGet(self):
        """Remote variant: read /proc/net/dev via self.fork over SSH."""
        net, err, code = yield self.fork('/bin/cat /proc/net/dev')
        if code == 0:
            defer.returnValue(self._parse_stats(net.strip('\n').split('\n')[2:]))
        else:
            raise Exception(err)

    def get(self):
        return self._parse_stats(self._readStats())
|
|
from django.contrib import admin
from django.db.models.fields import FieldDoesNotExist
from .models import (
Charge,
CurrentSubscription,
Customer,
Event,
EventProcessingException,
Invoice,
InvoiceItem,
Transfer
)
from .utils import get_user_model
def user_search_fields():
    """Return admin search paths for the user relation.

    Handles both Django 1.5+ custom user models (which define
    ``USERNAME_FIELD``) and the legacy pre-1.5 ``User`` model.
    """
    User = get_user_model()
    username_field = getattr(User, "USERNAME_FIELD", None)
    if username_field is None:
        # Pre-Django 1.5 User model: both fields are guaranteed to exist.
        return ["user__username", "user__email"]
    fields = ["user__{0}".format(username_field)]
    try:
        # Custom user models may omit an email field entirely;
        # get_field_by_name raises FieldDoesNotExist in that case.
        # NOTE(review): get_field_by_name was removed in Django 1.10 —
        # fine for the Django versions this module targets.
        # pylint: disable=W0212,E1103
        User._meta.get_field_by_name("email")
    except FieldDoesNotExist:
        pass
    else:
        fields.append("user__email")
    return fields
def customer_search_fields():
    """Prefix each user search field with the customer relation."""
    return ["customer__{0}".format(f) for f in user_search_fields()]
class CustomerHasCardListFilter(admin.SimpleListFilter):
    """Filter customers by whether a card fingerprint is on file."""
    title = "card presence"
    parameter_name = "has_card"

    def lookups(self, request, model_admin):
        return [
            ["yes", "Has Card"],
            ["no", "Does Not Have a Card"]
        ]

    def queryset(self, request, queryset):
        choice = self.value()
        if choice == "yes":
            return queryset.exclude(card_fingerprint="")
        if choice == "no":
            return queryset.filter(card_fingerprint="")
class InvoiceCustomerHasCardListFilter(admin.SimpleListFilter):
    """Filter invoices by whether their customer has a card on file."""
    title = "card presence"
    parameter_name = "has_card"

    def lookups(self, request, model_admin):
        return [
            ["yes", "Has Card"],
            ["no", "Does Not Have a Card"]
        ]

    def queryset(self, request, queryset):
        choice = self.value()
        if choice == "yes":
            return queryset.exclude(customer__card_fingerprint="")
        if choice == "no":
            return queryset.filter(customer__card_fingerprint="")
class CustomerSubscriptionStatusListFilter(admin.SimpleListFilter):
    """Filter customers by their current subscription status."""
    title = "subscription status"
    parameter_name = "sub_status"

    def lookups(self, request, model_admin):
        # One entry per distinct status present in the database, plus a
        # catch-all entry for customers without any subscription.
        distinct_statuses = CurrentSubscription.objects.all().values_list(
            "status",
            flat=True
        ).distinct()
        statuses = [
            [status, status.replace("_", " ").title()]
            for status in distinct_statuses
        ]
        statuses.append(["none", "No Subscription"])
        return statuses

    def queryset(self, request, queryset):
        if self.value() is None:
            return queryset.all()
        return queryset.filter(current_subscription__status=self.value())
# Charges: listed/searchable by Stripe ids and customer user fields; the
# customer/invoice relations use raw-id widgets to avoid huge dropdowns.
admin.site.register(
    Charge,
    list_display=[
        "stripe_id",
        "customer",
        "amount",
        "description",
        "paid",
        "disputed",
        "refunded",
        "fee",
        "receipt_sent",
        "created_at"
    ],
    search_fields=[
        "stripe_id",
        "customer__stripe_id",
        "card_last_4",
        "invoice__stripe_id"
    ] + customer_search_fields(),
    list_filter=[
        "paid",
        "disputed",
        "refunded",
        "card_kind",
        "created_at"
    ],
    raw_id_fields=[
        "customer",
        "invoice"
    ],
)
# Webhook processing failures: searchable by message/traceback/payload.
admin.site.register(
    EventProcessingException,
    list_display=[
        "message",
        "event",
        "created_at"
    ],
    search_fields=[
        "message",
        "traceback",
        "data"
    ],
    raw_id_fields=[
        "event"
    ],
)
# Stripe webhook events: filterable by kind/validity/processing state.
admin.site.register(
    Event,
    raw_id_fields=["customer"],
    list_display=[
        "stripe_id",
        "kind",
        "livemode",
        "valid",
        "processed",
        "created_at"
    ],
    list_filter=[
        "kind",
        "created_at",
        "valid",
        "processed"
    ],
    search_fields=[
        "stripe_id",
        "customer__stripe_id",
        "validated_message"
    ] + customer_search_fields(),
)
class CurrentSubscriptionInline(admin.TabularInline):
    """Inline editor for a customer's current subscription."""
    model = CurrentSubscription
def subscription_status(obj):
    """list_display helper: the customer's current subscription status.

    Returns an empty string for customers without a subscription instead
    of letting the reverse one-to-one access raise DoesNotExist — the
    admin explicitly supports such customers (see the "No Subscription"
    filter choice), and an exception here would break the changelist.
    """
    try:
        return obj.current_subscription.status
    except CurrentSubscription.DoesNotExist:
        return ""
subscription_status.short_description = "Subscription Status"
# Customers: searchable by Stripe id and the user's username/email fields.
admin.site.register(
    Customer,
    raw_id_fields=["user"],
    list_display=[
        "stripe_id",
        "user",
        "card_kind",
        "card_last_4",
        subscription_status
    ],
    list_filter=[
        "card_kind",
        CustomerHasCardListFilter,
        CustomerSubscriptionStatusListFilter
    ],
    search_fields=[
        "stripe_id",
    ] + user_search_fields(),
    inlines=[CurrentSubscriptionInline]
)
class InvoiceItemInline(admin.TabularInline):
    """Inline editor for an invoice's line items."""
    model = InvoiceItem
def customer_has_card(obj):
    """list_display helper: does the customer have a card on file?"""
    fingerprint = obj.customer.card_fingerprint
    return fingerprint != ""
customer_has_card.short_description = "Customer Has Card"
def customer_user(obj):
    """list_display helper: render the customer's user as "username <email>".

    Fixes two lookups in the original:
    * ``getattr(user, "USERNAME_FIELD")`` returned the field *name*
      (e.g. "username"), not the user's value — we now read the
      attribute named by ``User.USERNAME_FIELD``;
    * the email was read off ``obj`` (the Invoice) instead of the user.
    """
    User = get_user_model()
    username_field = getattr(User, "USERNAME_FIELD", None)
    if username_field is not None:
        # Django 1.5+ custom user model: read the configured field.
        username = getattr(obj.customer.user, username_field)
    else:
        # Pre-Django 1.5 User model.
        username = obj.customer.user.username
    # In Django 1.5+ a User is not guaranteed to have an email field.
    email = getattr(obj.customer.user, "email", "")
    return "{0} <{1}>".format(
        username,
        email
    )
customer_user.short_description = "Customer"
# Invoices: list shows the owning user and card presence via the helper
# callables above; line items are editable inline.
admin.site.register(
    Invoice,
    raw_id_fields=["customer"],
    list_display=[
        "stripe_id",
        "paid",
        "closed",
        customer_user,
        customer_has_card,
        "period_start",
        "period_end",
        "subtotal",
        "total"
    ],
    search_fields=[
        "stripe_id",
        "customer__stripe_id",
    ] + customer_search_fields(),
    list_filter=[
        InvoiceCustomerHasCardListFilter,
        "paid",
        "closed",
        "attempted",
        "attempts",
        "created_at",
        "date",
        "period_end",
        "total"
    ],
    inlines=[InvoiceItemInline]
)
# Transfers: searchable by Stripe id of the transfer or its source event.
admin.site.register(
    Transfer,
    raw_id_fields=["event"],
    list_display=[
        "stripe_id",
        "amount",
        "status",
        "date",
        "description"
    ],
    search_fields=[
        "stripe_id",
        "event__stripe_id"
    ]
)
|
|
import numpy as np
#todo: finish specifications
#todo: add input checking (with ability to turn off)
#todo: Cython implementation
def sequential_rounding(rho, Z, C_0, compute_loss_from_scores_real, get_L0_penalty, objval_cutoff = float('Inf')):
    """
    Round a continuous solution to integers one coordinate at a time.

    On each pass, every still-fractional coefficient is tentatively moved to
    its floor and to its ceiling; the single move with the best resulting
    objective value is committed.  Repeats until all coefficients are
    integral or the objective reaches ``objval_cutoff``.

    Note: ``rho`` is modified in place.

    Parameters
    ----------
    rho: P x 1 vector of continuous coefficients
    Z: N x P data matrix computed as X * Y
    C_0: P x 1 vector of L0 penalties. C_0[j] = L0 penalty for rho[j] for j = 0,..., P.
    compute_loss_from_scores_real: function handle to compute loss using N x 1 vector of scores, where scores = Z.dot(rho)
    get_L0_penalty: function handle to compute L0_penalty from rho
    objval_cutoff: objective value used for early stopping.
    the procedure will stop if the objective value achieved by an intermediate solution exceeds objval_cutoff

    Returns
    -------
    rho: P x 1 vector of integer coefficients (if early_stop_flag = False; otherwise partially rounded)
    best_objval: objective value achieved by rho
    early_stop_flag: True if procedure was stopped early (in which case rho is not integer feasible)
    """
    assert callable(compute_loss_from_scores_real)
    assert callable(get_L0_penalty)
    P = rho.shape[0]
    rho_floor = np.floor(rho)
    floor_is_zero = np.equal(rho_floor, 0)
    dist_from_start_to_floor = rho_floor - rho
    rho_ceil = np.ceil(rho)
    ceil_is_zero = np.equal(rho_ceil, 0)
    dist_from_start_to_ceil = rho_ceil - rho
    # only coordinates with a fractional part need rounding
    dimensions_to_round = np.flatnonzero(np.not_equal(rho_floor, rho_ceil)).tolist()
    scores = Z.dot(rho)
    best_objval = compute_loss_from_scores_real(scores) + get_L0_penalty(rho)
    # each loop iteration commits exactly one floor/ceil rounding move
    while len(dimensions_to_round) > 0 and best_objval < objval_cutoff:
        objvals_at_floor = np.repeat(np.nan, P)
        objvals_at_ceil = np.repeat(np.nan, P)
        current_penalty = get_L0_penalty(rho)
        for idx in dimensions_to_round:
            # scores go from center to ceil -> center + dist_from_start_to_ceil
            Z_dim = Z[:, idx]
            base_scores = scores + dist_from_start_to_ceil[idx] * Z_dim
            objvals_at_ceil[idx] = compute_loss_from_scores_real(base_scores)
            # move from ceil to floor => -1*Z_j
            base_scores -= Z_dim
            objvals_at_floor[idx] = compute_loss_from_scores_real(base_scores)
            # rounding a coefficient to exactly zero removes its L0 penalty
            if ceil_is_zero[idx]:
                objvals_at_ceil[idx] -= C_0[idx]
            elif floor_is_zero[idx]:
                objvals_at_floor[idx] -= C_0[idx]
        # adjust for penalty value
        objvals_at_ceil += current_penalty
        objvals_at_floor += current_penalty
        best_objval_at_ceil = np.nanmin(objvals_at_ceil)
        best_objval_at_floor = np.nanmin(objvals_at_floor)
        if best_objval_at_ceil <= best_objval_at_floor:
            best_objval = best_objval_at_ceil
            best_dim = np.nanargmin(objvals_at_ceil)
            rho[best_dim] += dist_from_start_to_ceil[best_dim]
            scores += dist_from_start_to_ceil[best_dim] * Z[:, best_dim]
        else:
            best_objval = best_objval_at_floor
            best_dim = np.nanargmin(objvals_at_floor)
            rho[best_dim] += dist_from_start_to_floor[best_dim]
            scores += dist_from_start_to_floor[best_dim] * Z[:, best_dim]
        dimensions_to_round.remove(best_dim)
        #assert(np.all(np.isclose(scores, Z.dot(rho))))
    early_stop_flag = best_objval > objval_cutoff
    return rho, best_objval, early_stop_flag
def discrete_descent(rho, Z, C_0, rho_ub, rho_lb, get_L0_penalty, compute_loss_from_scores, descent_dimensions = None, active_set_flag = True):
    """
    Given an initial feasible solution, rho, produces an improved solution that is 1-OPT
    (i.e. the objective value does not decrease by moving in any single dimension).
    At each iteration, the algorithm moves in the dimension that yields the greatest
    decrease in objective value; the best step size in each dimension is computed
    using a directional search strategy that saves computation.

    Parameters
    ----------
    rho: P x 1 vector of coefficients (cast to integer values on entry)
    Z: N x P data matrix computed as X * Y
    C_0: P x 1 vector of L0 penalties. C_0[j] = L0 penalty for rho[j] for j = 0,..., P.
    rho_ub: P x 1 vector of upper bounds on rho
    rho_lb: P x 1 vector of lower bounds on rho
    get_L0_penalty: function handle to compute L0_penalty from rho
    compute_loss_from_scores: function handle to compute loss using N x 1 vector of scores, where scores = Z.dot(rho)
    descent_dimensions: indices of the dimensions to search (default: all P)
    active_set_flag: if True, restrict the search to currently non-zero coefficients

    Returns
    -------
    rho: improved integer coefficient vector
    base_loss: loss at the returned rho
    base_objval: loss + L0 penalty at the returned rho
    """
    assert callable(compute_loss_from_scores)
    assert callable(get_L0_penalty)
    # initialize key variables
    MAX_ITERATIONS = 500
    MIN_IMPROVEMENT_PER_STEP = float(1e-8)
    P = len(rho)
    # convert solution to integer (kept as float dtype for score arithmetic)
    rho = np.require(np.require(rho, dtype = np.int_), dtype = np.float_)
    # convert descent dimensions to integer values
    if descent_dimensions is None:
        descent_dimensions = np.arange(P)
    else:
        descent_dimensions = np.require(descent_dimensions, dtype = np.int_)
    if active_set_flag:
        descent_dimensions = np.intersect1d(np.flatnonzero(rho), descent_dimensions)
    descent_dimensions = descent_dimensions.tolist()
    base_scores = Z.dot(rho)
    base_loss = compute_loss_from_scores(base_scores)
    base_objval = base_loss + get_L0_penalty(rho)
    n_iterations = 0
    # feasible integer candidate values for each searched dimension
    coefficient_values = {k: np.arange(int(rho_lb[k]), int(rho_ub[k]) + 1) for k in descent_dimensions}
    search_dimensions = descent_dimensions
    while n_iterations < MAX_ITERATIONS and len(search_dimensions) > 0:
        # compute the best objective value / step size in each dimension
        best_objval_by_dim = np.repeat(np.nan, P)
        best_coef_by_dim = np.repeat(np.nan, P)
        for k in search_dimensions:
            dim_objvals = _compute_objvals_at_dim(base_rho = rho,
                                                  base_scores = base_scores,
                                                  base_loss = base_loss,
                                                  dim_idx = k,
                                                  dim_coefs = coefficient_values[k],
                                                  Z = Z,
                                                  C_0 = C_0,
                                                  compute_loss_from_scores = compute_loss_from_scores)
            # mark points that will improve the current objective value by at least MIN_IMPROVEMENT_PER_STEP
            best_dim_idx = np.nanargmin(dim_objvals)
            best_objval_by_dim[k] = dim_objvals[best_dim_idx]
            best_coef_by_dim[k] = coefficient_values[k][best_dim_idx]
        # recompute base objective value/loss/scores
        best_idx = np.nanargmin(best_objval_by_dim)
        next_objval = best_objval_by_dim[best_idx]
        threshold_objval = base_objval - MIN_IMPROVEMENT_PER_STEP
        # stop once no single-dimension move improves the objective enough
        if next_objval >= threshold_objval:
            break
        best_step = best_coef_by_dim[best_idx] - rho[best_idx]
        rho[best_idx] += best_step
        base_objval = next_objval
        base_loss = base_objval - get_L0_penalty(rho)
        base_scores = base_scores + (best_step * Z[:, best_idx])
        # remove the current best direction from the set of directions to explore
        search_dimensions = list(descent_dimensions)
        search_dimensions.remove(best_idx)
        n_iterations += 1
    return rho, base_loss, base_objval
def _compute_objvals_at_dim(Z, C_0, base_rho, base_scores, base_loss, dim_coefs, dim_idx, compute_loss_from_scores):
    """
    Evaluate loss(rho) + L0 penalty for every candidate value of
    rho[dim_idx] in dim_coefs, reusing scores between neighbouring
    candidates (directional search: walk up from the current value while
    the loss keeps decreasing, otherwise walk down).

    Parameters
    ----------
    Z: N x P data matrix computed as X * Y
    C_0: P x 1 vector of per-coefficient L0 penalties
    base_rho: current coefficient vector (not modified here)
    base_scores: Z.dot(base_rho), precomputed by the caller
    base_loss: loss at base_scores, precomputed by the caller
    dim_coefs: candidate values for rho[dim_idx]; assumed sorted ascending
        and to contain base_rho[dim_idx] (callers build it with np.arange)
    dim_idx: index of the dimension being searched
    compute_loss_from_scores: function handle computing loss from a score vector

    Returns
    -------
    objval_at_coef_values: array parallel to dim_coefs; NaN at candidate
        values skipped because the loss had already started increasing
    """
    # copy stuff because ctypes
    scores = np.copy(base_scores)
    # initialize parameters
    P = base_rho.shape[0]
    base_coef_value = base_rho[dim_idx]
    base_index = np.flatnonzero(dim_coefs == base_coef_value)
    loss_at_coef_value = np.repeat(np.nan, len(dim_coefs))
    loss_at_coef_value[base_index] = float(base_loss)
    Z_dim = Z[:, dim_idx]
    # start by moving forward
    forward_indices = np.flatnonzero(base_coef_value <= dim_coefs)
    forward_step_sizes = np.diff(dim_coefs[forward_indices] - base_coef_value)
    n_forward_steps = len(forward_step_sizes)
    stop_after_first_forward_step = False
    best_loss = base_loss
    total_distance_from_base = 0
    for i in range(n_forward_steps):
        scores += forward_step_sizes[i] * Z_dim
        total_distance_from_base += forward_step_sizes[i]
        current_loss = compute_loss_from_scores(scores)
        # stop as soon as the loss stops improving in this direction
        if current_loss >= best_loss:
            stop_after_first_forward_step = i == 0
            break
        loss_at_coef_value[forward_indices[i + 1]] = current_loss
        best_loss = current_loss
    # if the first step forward didn't lead to a decrease in loss, then move backwards
    move_backward = stop_after_first_forward_step or n_forward_steps == 0
    if move_backward:
        # compute backward steps
        backward_indices = np.flipud(np.where(dim_coefs <= base_coef_value)[0])
        backward_step_sizes = np.diff(dim_coefs[backward_indices] - base_coef_value)
        n_backward_steps = len(backward_step_sizes)
        # correct size of first backward step if you took 1 step forward
        if n_backward_steps > 0 and n_forward_steps > 0:
            backward_step_sizes[0] = backward_step_sizes[0] - forward_step_sizes[0]
        best_loss = base_loss
        for i in range(n_backward_steps):
            scores += backward_step_sizes[i] * Z_dim
            total_distance_from_base += backward_step_sizes[i]
            current_loss = compute_loss_from_scores(scores)
            if current_loss >= best_loss:
                break
            loss_at_coef_value[backward_indices[i + 1]] = current_loss
            best_loss = current_loss
    # at this point scores == base_scores + step_distance*Z_dim
    # assert(all(np.isclose(scores, base_scores + total_distance_from_base * Z_dim)))
    # compute objective values by adding penalty values to all other indices
    other_dim_idx = np.flatnonzero(dim_idx != np.arange(P))
    other_dim_penalty = np.sum(C_0[other_dim_idx] * (base_rho[other_dim_idx] != 0))
    objval_at_coef_values = loss_at_coef_value + other_dim_penalty
    if C_0[dim_idx] > 0.0:
        # increase objective value at every non-zero coefficient value by C_0j
        nonzero_coef_idx = np.flatnonzero(dim_coefs)
        objval_at_coef_values[nonzero_coef_idx] = objval_at_coef_values[nonzero_coef_idx] + C_0[dim_idx]
        # compute value at coef[j] == 0 if needed
        zero_coef_idx = np.flatnonzero(dim_coefs == 0)
        if np.isnan(objval_at_coef_values[zero_coef_idx]):
            # steps_from_here_to_zero: step_from_here_to_base + step_from_base_to_zero
            # steps_from_here_to_zero: -step_from_base_to_here + -step_from_zero_to_base
            steps_to_zero = -(base_coef_value + total_distance_from_base)
            scores += steps_to_zero * Z_dim
            objval_at_coef_values[zero_coef_idx] = compute_loss_from_scores(scores) + other_dim_penalty
            # assert(all(np.isclose(scores, base_scores - base_coef_value * Z_dim)))
    # return objective value at feasible coefficients
    return objval_at_coef_values
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import re
import argparse
import json
import functools
# Trailing decimal index of a bus pin name, e.g. the "12" in "ADDR12".
NUMBER_RE = re.compile(r'\d+$')
def check_sequential(speed_model):
    """Detect a sequential timing keyword in a speed model name.

    Returns ``[keyword, sdf_name]`` for the first keyword (in sorted
    order) found among the '_'-separated parts of the model name, or
    None for purely combinational models, which contain none of them.
    """
    keyword_to_sdf = {
        'setup': 'setup',
        'remov': 'removal',
        'hold': 'hold',
        'recov': 'recovery',
        'removal': 'removal',
        'recovery': 'recovery'
    }
    parts = set(speed_model.split('_'))
    for keyword in sorted(keyword_to_sdf):
        if keyword in parts:
            # return found keyword and its mapping in SDF
            return [keyword, keyword_to_sdf[keyword]]
    return None
# FF's can be configured to work as FF or latch
def check_ff_latch(speed_model):
    """Classify a model as flip-flop ('ff'), latch ('lat') or None."""
    parts = speed_model.split('_')
    for mode in ('ff', 'lat'):
        if mode in parts:
            return mode
    return None
# some bels have duplicate names e.g bufmrce_bufmrce
# this function cleans them
def clean_bname(bname):
    """Collapse a duplicated leading name part ('a_a_b' -> 'a_b')."""
    head, sep, tail = bname.partition('_')
    if sep and tail.split('_')[0] == head:
        return tail
    return bname
def find_aliased_pin(pin, model, pin_aliases):
    """
    Search the timing model for an aliased pin name.

    ``pin_aliases`` maps upper-case pin names to a dict with two fields:
    * names: list of possible aliases
    * is_property_related: True when the alias is really the pin name
      combined with a BEL property (e.g. FF "Q" reported as Q[LH],
      where the suffix encodes the set/reset polarity); in that case
      the original pin name is returned instead of the alias.

    Parameters
    ----------
    pin: str
        Pin name to look for
    model: str
        Timing model
    pin_aliases: dict
        A dict of list of aliases for given bel/site

    Returns
    -------
    bool, str
        True and the found pin name when a match exists,
        (False, None) otherwise.

    >>> find_aliased_pin("a", "a_b_some_test_string", None)
    (False, None)
    >>> find_aliased_pin("d", "din_dout_setup", {"D": {"names" : ["din"], "is_property_related" : False}})
    (True, 'din')
    >>> find_aliased_pin("d", "din_dout_setup", {"D": {"names" : ["din"], "is_property_related" : True}})
    (True, 'd')
    >>> find_aliased_pin("d", "din_dout_setup", {"D": {"names" : ["notdin"], "is_property_related" : True}})
    (False, None)
    """
    if pin_aliases is None or pin.upper() not in pin_aliases:
        return False, None
    alias_info = pin_aliases[pin.upper()]
    for alias in alias_info['names']:
        candidate = alias.lower()
        # Single-word aliases must match a whole '_'-separated token;
        # multi-word aliases are searched for as a substring.
        if '_' in alias:
            found = candidate in model
        else:
            found = candidate in model.split('_')
        if found:
            if alias_info['is_property_related']:
                return True, pin.lower()
            return True, candidate
    return False, None
def instance_in_model(instance, model):
    """True when `instance` occurs in the timing model name.

    One-word instances must match a whole '_'-separated token;
    multi-word instances are searched for as a substring.
    """
    if '_' in instance:
        return instance in model
    return instance in model.split('_')
def create_pin_in_model(pin_aliases):
    """
    Build a memoised ``pin_in_model(pin, model, direction=None)`` checker.

    The returned closure tests whether a pin (or one of its aliases from
    ``pin_aliases``, or the pin name with an optional direction suffix
    appended) belongs to a timing model name, and returns ``(found, name)``
    where ``name`` is the matched pin name or None.

    Parameters
    ----------
    pin_aliases: dict
        A dict of list of aliases for given bel/site (see find_aliased_pin)

    Closure parameters
    ------------------
    pin: str
        Pin name to look for
    model: str
        Timing model name (a trailing ':location' suffix is stripped)
    direction: str
        Optional pin direction suffix [IN|OUT]

    >>> create_pin_in_model(None)("d", "ff_init_din_q", "in")
    (True, 'din')
    >>> create_pin_in_model(None)("q", "ff_init_clk_q", None)
    (True, 'q')
    >>> create_pin_in_model({"Q": {"names" : ["QL", "QH"], "is_property_related" : True}})("q", "ff_init_clk_ql", None)
    (True, 'q')
    >>> create_pin_in_model(None)("logic_out", "my_cell_i_logic_out", None)
    (True, 'logic_out')
    >>> create_pin_in_model({"LOGIC_OUT": {"names" : ["LOGIC_O", "O"], "is_property_related" : False}})("logic_out", "my_cell_i_logic_o", None)
    (True, 'logic_o')
    >>> create_pin_in_model({"LOGIC_OUT": {"names" : ["LOGIC_O", "O"], "is_property_related" : False}})("logic_out", "my_cell_i_o", None)
    (True, 'o')
    """
    @functools.lru_cache(maxsize=10000)
    def pin_in_model(pin, model, direction=None):
        """Check whether `pin` (possibly aliased/suffixed) is in `model`."""
        # strip site location
        model = model.split(':')[0]
        extended_pin_name = pin
        aliased_pin, aliased_pin_name = find_aliased_pin(
            pin.upper(), model, pin_aliases)
        # some timings reports pins with their directions
        # this happens for e.g. CLB reg_init D pin, which
        # timing is reported as DIN
        if direction is not None:
            extended_pin_name = pin + direction
        if instance_in_model(pin, model):
            return True, pin
        elif instance_in_model(extended_pin_name, model):
            return True, extended_pin_name
        elif aliased_pin:
            return True, aliased_pin_name
        else:
            return False, None
    return pin_in_model
def remove_pin_from_model(pin, model):
    """
    Strip `pin` from a timing model name, if present.

    One-word pins are removed as a whole '_'-separated token (first
    occurrence only); multi-word pins are deleted as a substring, with
    any leftover empty tokens collapsed.

    Arguments
    ---------
    pin: str
        Pin name
    model: str
        Timing model name

    Returns
    -------
    str
        Updated timing model name

    >>> remove_pin_from_model("q", "ff_init_d_q")
    'ff_init_d'
    >>> remove_pin_from_model("q", "ff_init_d_ql")
    'ff_init_d_ql'
    >>> remove_pin_from_model("logic_out", "ff_init_d_logic_out")
    'ff_init_d'
    >>> remove_pin_from_model("logic_out", "ff_init_d_second_out")
    'ff_init_d_second_out'
    """
    if '_' in pin:
        # multi-word pin: delete the substring, then drop empty tokens
        tokens = model.replace(pin, '').split('_')
        return "_".join(t for t in tokens if t)
    tokens = model.split('_')
    if pin not in tokens:
        return model
    tokens.remove(pin)
    return "_".join(tokens)
def merged_dict(itr):
    """Build a nested dict from an iterable of (keys, value) pairs.

    Each ``keys`` tuple addresses successive dictionary levels, e.g.
    ``(('a', 'b', 'c'), 1)`` sets ``output['a']['b']['c'] = 1``.
    An AssertionError is raised when two pairs give conflicting values
    for the same key path.

    >>> merged_dict(((('a', 'b', 'c'), 1), (('a', 'b', 'd'), 2)))
    {'a': {'b': {'c': 1, 'd': 2}}}
    """
    root = {}
    for keys, value in itr:
        node = root
        *parents, leaf = keys
        for key in parents:
            node = node.setdefault(key, {})
        if leaf in node:
            # duplicate path: the stored value must agree
            assert node[leaf] == value, (keys, value, node[leaf])
        else:
            node[leaf] = value
    return root
def extract_properties(tile, site, bel, properties, model):
    """Return {property: value} for every BEL property mentioned in the
    timing model name, or None when no property data exists for the BEL."""
    try:
        bel_properties = properties[tile][site][bel]
    except KeyError:
        return None
    model_properties = dict()
    for prop in bel_properties:
        if prop in model_properties:
            continue
        if not instance_in_model(prop.lower(), model):
            continue
        # if the property name is present, one of its values must be too;
        # the value always follows the property in the model name
        for value in bel_properties[prop]:
            value = value.replace(',', '')
            prop_val_str = "_".join([prop, value])
            if instance_in_model(prop_val_str.lower(), model):
                model_properties[prop] = value
                break
    return model_properties
def parse_raw_timing(fin):
    """Yield (slice, site, bel, speed_model, timing) records from a raw
    timing dump.

    Each input line is a flat token list:
    ``slice site_count { site bel_count { bel delay_count
    { speed_model 5 * "type:value" } } }``.
    ``timing`` is a list of five ``[type, value]`` pairs.

    The file is now opened with a context manager so the handle is
    closed when the generator is exhausted (the original leaked it).
    """
    with open(fin, "r") as f:
        for line in f:
            raw_data = line.split()
            slice_name = raw_data[0]
            sites_count = int(raw_data[1])
            loc = 2
            for site in range(0, sites_count):
                site_name = raw_data[loc]
                bels_count = int(raw_data[loc + 1])
                # read all BELs data within
                loc += 2
                for bel in range(0, bels_count):
                    bel = raw_data[loc]
                    delay_count = int(raw_data[loc + 1])
                    # get all the delays
                    loc += 2
                    for delay in range(0, delay_count):
                        speed_model = raw_data[loc]
                        # each timing entry reports 5 delays
                        timing = [
                            raw_data[d + 1 + loc].split(':')
                            for d in range(0, 5)
                        ]
                        yield slice_name, site_name, bel, speed_model, timing
                        # 5 delay values + name
                        loc += 6
def read_raw_timings(fin, properties, pins, site_pins, pin_alias_map):
    """Decode a raw timing dump into a nested dict:
    tile -> bel location -> delay type -> speed model -> attributes.

    For every speed model the input/output/clock pins are recovered by
    progressively stripping known tokens (BEL type, properties, pin
    names, direction suffixes, bus indices) from the model name, using
    the BEL pin, site pin and property databases.  Models whose pins
    cannot be identified are dropped.
    """
    def inner():
        """Yield (keys, value) pairs consumed by merged_dict()."""
        raw = list(parse_raw_timing(fin))
        # one memoised pin_in_model closure per BEL type
        pin_in_models = {}
        for slice, site_name, bel, speed_model, timing in raw:
            btype = bel.lower()
            delay_btype = clean_bname(btype)
            delay_btype_orig = delay_btype
            # all the bel names seem to start with "bel_d_"
            # let's get rid of it
            if speed_model.startswith('bel_d_'):
                speed_model = speed_model[6:]
            # keep original speed model string to use as unique dict entry
            speed_model_orig = speed_model
            # if more than one BEL type exists in the slice
            # location is added at the end of the name
            tmp = speed_model.split(':')
            speed_model = tmp[0]
            bel_location = site_name
            if len(tmp) > 2:
                bel_location += "/" + "/".join(tmp[2:])
            bel_location = bel_location.upper()
            sequential = check_sequential(speed_model)
            if sequential is not None:
                tmp = speed_model.split('_')
                tmp.remove(sequential[0])
                speed_model = '_'.join(tmp)
            bel_input = None
            bel_output = None
            bel_clock = None
            # strip btype from speed model so we can search for pins
            speed_model_clean = speed_model
            if speed_model.startswith(delay_btype):
                speed_model_clean = speed_model[len(delay_btype):]
            # remove properties from the model
            speed_model_properties = extract_properties(
                slice, site_name, delay_btype_orig, properties,
                speed_model_clean)
            if speed_model_properties is not None:
                for prop in speed_model_properties:
                    # properties values in the model always follow properties name
                    prop_string = "_".join(
                        [prop, speed_model_properties[prop]])
                    speed_model_clean = remove_pin_from_model(
                        prop_string.lower(), speed_model_clean)
            # Get pin alias map
            if delay_btype not in pin_in_models:
                pin_aliases = pin_alias_map.get(delay_btype, None)
                pin_in_models[delay_btype] = create_pin_in_model(pin_aliases)
            pin_in_model = pin_in_models[delay_btype]
            # locate pins
            for pin in pins[slice][site_name][delay_btype_orig]:
                orig_pin = pin
                pim, pin = pin_in_model(pin.lower(), speed_model_clean, 'in')
                if pim:
                    if pins[slice][site_name][delay_btype_orig][orig_pin][
                            'is_clock'] and not pins[slice][site_name][
                                delay_btype_orig][orig_pin]['is_part_of_bus']:
                        bel_clock = pin
                        bel_clock_orig_pin = orig_pin
                    elif pins[slice][site_name][delay_btype_orig][orig_pin][
                            'direction'] == 'IN':
                        bel_input = pin
                    elif pins[slice][site_name][delay_btype_orig][orig_pin][
                            'direction'] == 'OUT':
                        bel_output = pin
                    speed_model_clean = remove_pin_from_model(
                        pin.lower(), speed_model_clean)
            # Some speed models describe delays from/to site pins instead of BEL pins
            if bel_clock is None:
                for pin in site_pins[slice][site_name.lower()]:
                    orig_pin = pin
                    pim, pin = pin_in_model(pin.lower(), speed_model_clean)
                    if pim:
                        if site_pins[slice][site_name.lower(
                        )][orig_pin]['is_clock'] and not site_pins[slice][
                                site_name.lower()][orig_pin]['is_part_of_bus']:
                            bel_clock = pin
                            bel_clock_orig_pin = orig_pin
                            speed_model_clean = remove_pin_from_model(
                                pin.lower(), speed_model_clean)
            if bel_input is None:
                # search site inputs
                for pin in site_pins[slice][site_name.lower()]:
                    orig_pin = pin
                    pim, pin = pin_in_model(
                        pin.lower(), speed_model_clean, 'in')
                    if pim:
                        if site_pins[slice][site_name.lower(
                        )][orig_pin]['direction'] == 'IN':
                            bel_input = pin
                            speed_model_clean = remove_pin_from_model(
                                pin.lower(), speed_model_clean)
            if bel_output is None:
                for pin in site_pins[slice][site_name.lower()]:
                    orig_pin = pin
                    pim, pin = pin_in_model(pin.lower(), speed_model_clean)
                    if pim:
                        if site_pins[slice][site_name.lower(
                        )][orig_pin]['direction'] == 'OUT':
                            bel_output = pin
                            speed_model_clean = remove_pin_from_model(
                                pin.lower(), speed_model_clean)
            # if we couldn't find input, check if the clock is the
            # only input. This applies only to combinational paths
            if (sequential is None) and (bel_input is None) and (bel_clock is
                                                                 not None):
                if bel_clock_orig_pin in site_pins[slice][site_name.lower()] and \
                        site_pins[slice][site_name.lower(
                        )][bel_clock_orig_pin]['direction'] == 'IN':
                    bel_input = bel_clock
            # if we still don't have the input check if the input
            # is wider than 1 bit and timing defined for the whole
            # port
            if (bel_input is None) or (bel_output is None):
                for pin in pins[slice][site_name][delay_btype_orig]:
                    number = NUMBER_RE.search(pin)
                    if number is not None:
                        # strip the trailing bus index, e.g. "ADDR12" -> "ADDR"
                        orig_pin = pin[:-(len(str(number.group())))]
                        orig_pin_full = pin
                        pim, pin = pin_in_model(
                            orig_pin.lower(), speed_model_clean)
                        if not pim:
                            # some inputs pins are named with unsignificant zeros
                            # remove ti and try again
                            orig_pin = orig_pin + str(int(number.group()))
                            pim, pin = pin_in_model(
                                orig_pin.lower(), speed_model_clean)
                        if pim:
                            if pins[slice][site_name][delay_btype_orig][orig_pin_full]['direction'] == 'IN' \
                                    and bel_input is None:
                                bel_input = pin
                            if pins[slice][site_name][delay_btype_orig][orig_pin_full]['direction'] == 'OUT' \
                                    and bel_output is None:
                                bel_output = pin
                            speed_model_clean = remove_pin_from_model(
                                orig_pin.lower(), speed_model_clean)
            # check if the input is not a BEL property
            if bel_input is None:
                # if there is anything not yet decoded
                if len(speed_model_clean.split("_")) > 1:
                    if len(speed_model_properties.keys()) == 1:
                        bel_input = list(speed_model_properties.keys())[0]
            # if we still don't have input, give up
            if bel_input is None:
                continue
            # restore speed model name
            speed_model = delay_btype + speed_model_clean
            if sequential is not None:
                if bel_clock is None:
                    continue
                if bel_output is None and bel_clock is None or \
                        bel_output is None and bel_clock == bel_input:
                    continue
            else:
                if bel_input is None or bel_output is None:
                    continue
            delay_btype = speed_model
            # add properties to the delay_btype
            if speed_model_properties is not None:
                for prop in sorted(speed_model_properties):
                    prop_string = "_".join(
                        [prop, speed_model_properties[prop]])
                    delay_btype += "_" + prop_string
            yield (slice, bel_location, delay_btype, speed_model_orig,
                   'type'), btype.upper()
            yield (
                slice, bel_location, delay_btype, speed_model_orig,
                'input'), bel_input.upper()
            if bel_output is not None:
                yield (
                    slice, bel_location, delay_btype, speed_model_orig,
                    'output'), bel_output.upper()
            if bel_clock is not None:
                yield (
                    slice, bel_location, delay_btype, speed_model_orig,
                    'clock'), bel_clock.upper()
            yield (
                slice, bel_location, delay_btype, speed_model_orig,
                'location'), bel_location.upper()
            #XXX: debug
            yield (
                slice, bel_location, delay_btype, speed_model_orig,
                'model'), speed_model_orig
            if sequential is not None:
                assert bel_clock is not None, (
                    slice, bel_location, delay_btype, speed_model_orig)
                yield (
                    slice, bel_location, delay_btype, speed_model_orig,
                    'sequential'), sequential[1]
            for t, v in timing:
                yield (
                    slice, bel_location, delay_btype, speed_model_orig, t), v
    return merged_dict(inner())
def read_bel_properties(properties_file, properties_map):
    """Parse the BEL properties dump into a nested dict:
    tile -> site -> bel -> property -> [values].

    Property names are stripped of their "CONFIG."/".VALUES" wrapper and
    optionally renamed through ``properties_map``; the explicitly listed
    RAM-related properties are skipped.  Line format: tile site_count
    { site bel_count { bel prop_count { prop value_count values... } } }.
    """
    def inner():
        # walk the flat token list with an explicit cursor (prop_loc)
        with open(properties_file, 'r') as f:
            for line in f:
                raw_props = line.split()
                tile = raw_props[0]
                sites_count = int(raw_props[1])
                prop_loc = 2
                if sites_count == 0:
                    yield (tile, ), {}
                for site in range(0, sites_count):
                    site_name = raw_props[prop_loc]
                    bels_count = int(raw_props[prop_loc + 1])
                    prop_loc += 2
                    for bel in range(0, bels_count):
                        bel_name = raw_props[prop_loc]
                        bel_name = clean_bname(bel_name)
                        bel_name = bel_name.lower()
                        bel_properties_count = int(raw_props[prop_loc + 1])
                        props = 0
                        prop_loc += 2
                        for prop in range(0, bel_properties_count):
                            prop_name = raw_props[prop_loc]
                            # the name always starts with "CONFIG." and ends with ".VALUES"
                            # let's get rid of that
                            if prop_name.startswith(
                                    'CONFIG.') and prop_name.endswith(
                                        '.VALUES'):
                                prop_name = prop_name[7:-7]
                            prop_values_count = int(raw_props[prop_loc + 1])
                            if prop_name not in [
                                    'RAM_MODE',
                                    'WRITE_WIDTH_A',
                                    'WRITE_WIDTH_B',
                                    'READ_WIDTH_A',
                                    'READ_WIDTH_B',
                            ]:
                                if bel_name in properties_map:
                                    if prop_name in properties_map[bel_name]:
                                        prop_name = properties_map[bel_name][
                                            prop_name]
                                yield (tile, site_name, bel_name, prop_name), \
                                    raw_props[prop_loc + 2:prop_loc + 2 +
                                              prop_values_count]
                                props += 1
                            prop_loc += 2 + prop_values_count
                        # BELs whose properties were all skipped still get
                        # an (empty) entry
                        if props == 0:
                            yield (tile, site_name, bel_name), {}
    return merged_dict(inner())
def read_bel_pins(pins_file):
    """Parse the BEL pins dump into a nested dict:
    tile -> site -> bel -> pin -> {direction, is_clock, is_part_of_bus}.

    Line format: tile site_count { site bel_count { bel pin_count
    { pin direction is_clock is_part_of_bus } } }.
    """
    def inner():
        # walk the flat token list with an explicit cursor (pin_loc)
        with open(pins_file, 'r') as f:
            for line in f:
                raw_pins = line.split()
                tile = raw_pins[0]
                sites_count = int(raw_pins[1])
                pin_loc = 2
                if sites_count == 0:
                    yield (tile, ), {}
                for site in range(0, sites_count):
                    site_name = raw_pins[pin_loc]
                    bels_count = int(raw_pins[pin_loc + 1])
                    pin_loc += 2
                    for bel in range(0, bels_count):
                        bel_name = raw_pins[pin_loc]
                        bel_name = clean_bname(bel_name)
                        bel_name = bel_name.lower()
                        bel_pins_count = int(raw_pins[pin_loc + 1])
                        pin_loc += 2
                        for pin in range(0, bel_pins_count):
                            pin_name = raw_pins[pin_loc]
                            pin_direction = raw_pins[pin_loc + 1]
                            pin_is_clock = raw_pins[pin_loc + 2]
                            pin_is_part_of_bus = raw_pins[pin_loc + 3]
                            yield (
                                tile, site_name, bel_name, pin_name,
                                'direction'), pin_direction
                            yield (
                                tile, site_name, bel_name, pin_name,
                                'is_clock'), int(pin_is_clock) == 1
                            yield (
                                tile, site_name, bel_name, pin_name,
                                'is_part_of_bus'
                            ), int(pin_is_part_of_bus) == 1
                            pin_loc += 4
    return merged_dict(inner())
def read_site_pins(pins_file):
    """Parse the site pins dump into a nested dict:
    tile -> site -> pin -> {direction, is_clock, is_part_of_bus}.

    Line format: tile site_count { site pin_count
    { pin direction is_part_of_bus } }.  Site clock pins are always
    named 'CLK', so is_clock is derived from the pin name.
    """
    def inner():
        with open(pins_file, 'r') as f:
            for line in f:
                fields = line.split()
                tile = fields[0]
                site_count = int(fields[1])
                if site_count == 0:
                    yield (tile, ), {}
                cursor = 2
                for _ in range(site_count):
                    site_name = fields[cursor].lower()
                    site_pins_count = int(fields[cursor + 1])
                    cursor += 2
                    for _ in range(site_pins_count):
                        pin_name = fields[cursor]
                        pin_direction = fields[cursor + 1]
                        pin_is_part_of_bus = fields[cursor + 2]
                        cursor += 3
                        yield (
                            (tile, site_name, pin_name, 'direction'),
                            pin_direction)
                        # site clock pins are always named 'CLK'
                        yield (
                            (tile, site_name, pin_name, 'is_clock'),
                            pin_name.lower() == 'clk')
                        yield (
                            (tile, site_name, pin_name, 'is_part_of_bus'),
                            int(pin_is_part_of_bus))
    return merged_dict(inner())
def main():
    """Command-line entry point.

    Combines raw timing, BEL property, BEL pin and site pin dumps into a
    single JSON timing description written to ``--json``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--timings', type=str, help='Raw timing input file')
    parser.add_argument('--json', type=str, help='json output file')
    parser.add_argument(
        '--properties', type=str, help='Bel properties input file')
    parser.add_argument('--belpins', type=str, help='Bel pins input file')
    parser.add_argument('--sitepins', type=str, help='Site pins input file')
    # BUGFIX: the original used ``type=bool``, which turns ANY non-empty
    # string (including "False") into True; a store_true flag is the
    # correct way to express a boolean switch.
    parser.add_argument(
        '--debug', action='store_true', help='Enable debug json dumps')
    parser.add_argument(
        '--propertiesmap', type=str, help='Properties names mappings')
    parser.add_argument(
        '--pinaliasmap', type=str, help='Pin name alias mappings')
    args = parser.parse_args()

    def dump_debug(name, data):
        # optional dump of each intermediate dictionary for debugging
        if args.debug:
            with open(name, 'w') as fp:
                json.dump(data, fp, indent=4, sort_keys=True)

    with open(args.propertiesmap, 'r') as fp:
        properties_map = json.load(fp)
    with open(args.pinaliasmap, 'r') as fp:
        pin_alias_map = json.load(fp)
    properties = read_bel_properties(args.properties, properties_map)
    dump_debug('debug_prop.json', properties)
    pins = read_bel_pins(args.belpins)
    dump_debug('debug_pins.json', pins)
    site_pins = read_site_pins(args.sitepins)
    dump_debug('debug_site_pins.json', site_pins)
    timings = read_raw_timings(
        args.timings, properties, pins, site_pins, pin_alias_map)
    with open(args.json, 'w') as fp:
        json.dump(timings, fp, indent=4, sort_keys=True)


if __name__ == '__main__':
    main()
|
|
from JumpScale import j
import telegram
import datetime
from telegrambot.Repo import AYS_REPO_DIR
# from JumpScale.baselib.atyourservice.robot.ActionRequest import *
class RunMgmt(object):
    """Telegram ``/run`` command handlers for AYS runs.

    A *run* is a batch of scheduled actions in the user's currently
    selected AYS repository.  This class implements creating,
    simulating, executing, inspecting and listing runs, including the
    interactive keyboard prompts shown when ``/run`` is called without
    arguments.
    """

    def __init__(self, bot):
        self.bot = bot
        self.rootpath = bot.rootpath
        # per-user callbacks: the next free-form message from a user is
        # routed to the callback registered under their username
        self.callbacks = bot.question_callbacks

    #
    # # helpers
    # def _blueprintsPath(self, repo):
    #     return '%s/%s/%s/blueprints' % (self.rootpath, repo)

    def _currentRepoName(self, username):
        """Name of the repository *username* is currently working on."""
        return self.bot.repo_mgmt._currentRepoName(username)

    def _currentRepo(self, username):
        """AYS repository object for the user's current repository."""
        return j.atyourservice.repoGet(j.sal.fs.joinPaths(AYS_REPO_DIR, self._currentRepoName(username)))

    def _createname(self, run):
        """Human-readable run name derived from its last modification time."""
        return datetime.datetime.fromtimestamp(run.dictFiltered["lastModDate"]).strftime('%c')

    def _runlist(self, username, executed="all"):
        """Return run display names filtered by state.

        *executed* is one of "all", "executed" (state == "ok") or "new".
        """
        repo = self._currentRepo(username)
        runs = []
        for run in repo.runsList():
            if run.dictFiltered['state'] == "ok" and executed == "executed":
                runs.append(self._createname(run))
            elif run.dictFiltered['state'] == "new" and executed == "new":
                runs.append(self._createname(run))
            elif executed == "all":
                runs.append(self._createname(run))
        return runs

    def _getrunid(self, name, repo):
        """Map a display name (see _createname) back to a run key.

        Returns None when no stored run matches *name*.
        """
        runs = j.data.serializer.json.loads(self.bot._rediscl.hget('telegrambot.runs', repo.name))
        for run_id in runs:
            run = repo.runGet(run_id)
            if self._createname(run).strip() == name.strip():
                return run.key

    # def _currentBlueprintsPath(self, username):
    #     return self._blueprintsPath(self._currentRepoName(username))

    def create(self, bot, update):
        """Create a new run in the current repo and record its key in redis."""
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        repo = self._currentRepo(username)
        runs = []
        try:
            run = repo.runCreate()
            run.save()
            runs_str = self.bot._rediscl.hget("telegrambot.runs", repo.name)
            if runs_str:
                runs.extend(j.data.serializer.json.loads(runs_str))
            runs.append(run.key)
            self.bot._rediscl.hset("telegrambot.runs", repo.name, j.data.serializer.json.dumps(runs))
            msg = "run for repo %s has completed creation. to execute, use;\n `/run execute`" % repo.name
        except Exception as e:
            msg = 'Error during run creation'
            # BUGFIX: e.message is Python 2 only; str(e) works everywhere
            self.bot.logger.error(str(e))
        finally:
            self.bot.sendMessage(
                chat_id=chat_id,
                text=msg)

    def simulate(self, bot, update, run_id):
        """Send the user a textual preview of the given run."""
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        repo = self._currentRepo(username)
        run = repo.runGet(run_id).objectGet()
        self.bot.sendMessage(
            chat_id=chat_id,
            text=run.__str__())

    def execute(self, bot, update, run_id):
        """Execute a run through the AYS service and report the outcome."""
        # BUGFIX: removed leftover debugger breakpoint
        # (``import ipdb; ipdb.set_trace()``) that froze the bot here.
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        repo = self._currentRepo(username)
        run = repo.runGet(run_id).objectGet()
        # the AYS client reuses j's redis connection settings, either
        # tcp (host/port) or a unix socket path
        aysconn = j.core.db.connection_pool.connection_kwargs
        if aysconn.get('host', None):
            host, port, socket = aysconn['host'], aysconn['port'], None
        else:
            host, port, socket = None, None, aysconn['path']
        ayscl = j.clients.atyourservice.get(host, port, socket)
        try:
            ayscl.execute_run(run)
            msg = "run for repo %s has started execution. to view state use ```/run inspect %s```" % (repo.name, run_id)
        except Exception as e:
            msg = 'Error during run execution,\n Error: %s' % str(e)
            # BUGFIX: e.message is Python 2 only; str(e) works everywhere
            self.bot.logger.error(str(e))
        finally:
            self.bot.sendMessage(
                chat_id=chat_id,
                text=msg)

    def list(self, bot, update, executed='all'):
        """Send the user the list of runs, optionally filtered by state."""
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        bot.sendChatAction(chat_id=chat_id, action=telegram.ChatAction.TYPING)
        runs = self._runlist(username, executed)
        if not runs:
            self.bot.sendMessage(chat_id=update.message.chat_id,
                                 text="Sorry, this repository has not had any runs yet. :(;\n to create one use `/run create`",
                                 parse_mode=telegram.ParseMode.MARKDOWN)
            return
        runs_list = []
        for run in runs:
            # escape underscores so Markdown does not italicize run names
            # (BUGFIX: "\_" was an invalid escape sequence; "\\_" produces
            # the same backslash-underscore text without the warning)
            runs_list.append('- %s' % run.replace("_", "\\_"))
        msg = '\n'.join(runs_list)
        self.bot.sendMessage(chat_id=update.message.chat_id, text=msg, parse_mode=telegram.ParseMode.MARKDOWN)

    def inspect(self, bot, update, run_id):
        """Send an HTML-formatted dump of a run's metadata and steps."""
        username = update.message.from_user.username
        repo = self._currentRepo(username)
        run = repo.runGet(run_id)
        data = ''
        # data = j.data.serializer.yaml.dumps(run.dictFiltered)

        def create_steps():
            # render the run's steps (and their jobs) as indented HTML
            steps = ''
            for step in run.dictFiltered['steps']:
                steps += ' <b>- %s</b>\n' % step['number']
                for name, val in step.items():
                    if name == "jobs":
                        steps += ' - <b>%s</b> :\n' % name
                        for job in val:
                            steps += ' - <b>%s</b> :\n' % job['actionName']
                            for k, v in job.items():
                                steps += ' %s = %s\n' % (k, v)
                    else:
                        steps += ' - <b>%s</b> = %s \n' % (name, val)
            return steps

        for key, value in run.dictFiltered.items():
            if key == 'repo':
                value = repo.name
            elif key == 'steps':
                data += '<b>- %s</b>\n' % key
                data += create_steps()
                continue
            elif key == 'epoch':
                key = "Creation time"
                value = datetime.datetime.fromtimestamp(value).strftime('%c')
            elif key == 'lastModDate':
                value = self._createname(run)
            data += '<b>- %s</b>\n' % key
            data += ' - %s\n' % value
        self.bot.sendMessage(
            chat_id=update.message.chat_id,
            text=data,
            parse_mode="HTML",
            reply_markup=telegram.ReplyKeyboardHide())

    def choose_action(self, bot, update):
        """Show a keyboard of run actions; the reply goes to dispatch_choice."""
        self.callbacks[update.message.from_user.username] = self.dispatch_choice
        choices = ['create', 'simulate', 'execute', 'inspect', 'list']
        reply_markup = telegram.ReplyKeyboardMarkup([choices], resize_keyboard=True, one_time_keyboard=True)
        return self.bot.sendMessage(chat_id=update.message.chat_id,
                                    text="What do you want to do ?", reply_markup=reply_markup)

    def dispatch_choice(self, bot, update):
        """Route the keyboard reply from choose_action to the right handler."""
        message = update.message
        if message.text == 'list':
            # BUGFIX: was ``self.list(bot, update, repo)`` -- list() takes an
            # optional state filter string, not a repo object
            self.list(bot, update)
        elif message.text == 'create':
            # BUGFIX: was ``self.create_prompt(...)`` which does not exist
            self.create(bot, update)
        elif message.text == 'simulate':
            self.simulate_prompt(bot, update)
        elif message.text in ['execute', 'exec']:
            self.execute_prompt(bot, update)
        elif message.text in ['inspect', 'see']:
            self.inspect_prompt(bot, update)

    def execute_prompt(self, bot, update):
        """Ask the user which new (not yet executed) run to execute."""
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        repo = self._currentRepo(username)
        run_list = self._runlist(username, "new")

        def cb(bot, update):
            run_id = self._getrunid(update.message.text, repo)
            self.execute(bot, update, run_id)

        self.callbacks[username] = cb
        if not run_list:
            self.bot.sendMessage(chat_id=update.message.chat_id,
                                 text="Sorry, this repository has not had any runs yet. :(;\n to create one use `/run create`",
                                 parse_mode=telegram.ParseMode.MARKDOWN)
            return
        # BUGFIX: the keyword was misspelled 'resize_keyboar' and silently
        # ignored, so the keyboard never resized
        reply_markup = telegram.ReplyKeyboardMarkup([run_list], resize_keyboard=True, one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id=chat_id,
            text="Click on the run you want to execute",
            reply_markup=reply_markup)

    def inspect_prompt(self, bot, update):
        """Ask the user which run to inspect."""
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        repo = self._currentRepo(username)
        run_list = self._runlist(username)

        def cb(bot, update):
            run_id = self._getrunid(update.message.text, repo)
            self.inspect(bot, update, run_id)

        self.callbacks[username] = cb
        if not run_list:
            self.bot.sendMessage(chat_id=update.message.chat_id,
                                 text="Sorry, this repository has not had any runs yet. :(;\n to create one use `/run create`",
                                 parse_mode=telegram.ParseMode.MARKDOWN)
            return
        reply_markup = telegram.ReplyKeyboardMarkup([run_list], resize_keyboard=True, one_time_keyboard=True)
        # BUGFIX: prompt said "execute" (copy-paste from execute_prompt)
        self.bot.sendMessage(
            chat_id=chat_id,
            text="Click on the run you want to inspect",
            reply_markup=reply_markup)

    def simulate_prompt(self, bot, update):
        """Ask the user which run to simulate."""
        username = update.message.from_user.username
        chat_id = update.message.chat_id
        repo = self._currentRepo(username)
        run_list = self._runlist(username)

        def cb(bot, update):
            run_id = self._getrunid(update.message.text, repo)
            self.simulate(bot, update, run_id)

        self.callbacks[username] = cb
        if not run_list:
            self.bot.sendMessage(chat_id=update.message.chat_id,
                                 text="Sorry, this repository has not had any runs yet. :(;\n to create one use `/run create`",
                                 parse_mode=telegram.ParseMode.MARKDOWN)
            return
        reply_markup = telegram.ReplyKeyboardMarkup([run_list], resize_keyboard=True, one_time_keyboard=True)
        self.bot.sendMessage(
            chat_id=chat_id,
            text="Click on the run you want to simulate",
            reply_markup=reply_markup)

    # Handler for robot
    def handler(self, bot, update, args):
        """Entry point for the /run command; dispatches on *args*."""
        username = update.message.from_user.username
        self.bot.logger.debug('service management for: %s' % username)
        if not self.bot.repo_mgmt._userCheck(bot, update):
            return
        if not self._currentRepo(username):
            message = "Sorry, you are not working on a repo currently, use `/repo` to select a repository"
            return self.bot.sendMessage(chat_id=update.message.chat_id, text=message, parse_mode="Markdown")
        # no arguments
        if len(args) == 0:
            self.choose_action(bot, update)
            return
        if args[0] == "list":
            if len(args) == 1:
                return self.list(bot, update)
            elif len(args) >= 2:
                return self.list(bot, update, args[1])
        if args[0] in ['create', 'add']:
            return self.create(bot, update)
        if args[0] in ["execute", "exec"]:
            if len(args) == 1:
                return self.execute_prompt(bot, update)
            elif len(args) >= 2:
                return self.execute(bot, update, args[1])
        if args[0] in ["simulate", "sim"]:
            if len(args) == 1:
                return self.simulate_prompt(bot, update)
            elif len(args) >= 2:
                return self.simulate(bot, update, args[1])
        if args[0] in ['inspect', 'see', 'show']:
            if len(args) == 1:
                return self.inspect_prompt(bot, update)
            elif len(args) >= 2:
                return self.inspect(bot, update, args[1])
|
|
# Author: Pearu Peterson
# Created: June 2010
__all__ = ['bytes2str', 'isindisk']
import os
import optparse
VERBOSE = False
def isindisk(path):
    """Return True if *path* is stored on a local disk.

    Decided by the major device number of the path's filesystem:
    3 (IDE/HD) or 8 (SCSI).
    """
    major = os.major(os.stat(path).st_dev)
    return major in (3,  # HD
                     8)  # SCSI
def bytes2str(bytes):
    """Return a human-readable string for a byte count.

    Decomposes *bytes* into binary units (Pi, Ti, Gi, Mi, Ki) and joins
    the non-zero parts with '+', e.g. ``bytes2str(1536) == '1Ki+512
    bytes'``.  Returns '0 bytes' for zero.

    Note: the parameter keeps its historical name ``bytes`` (shadowing
    the builtin) for call compatibility.
    """
    l = []
    # largest unit first; each step removes the accounted-for amount
    for power, prefix in ((5, 'Pi'), (4, 'Ti'), (3, 'Gi'), (2, 'Mi'), (1, 'Ki')):
        unit = 1024 ** power
        count = bytes // unit
        if count:
            l.append('%s%s' % (count, prefix))
            bytes = bytes - unit * count
    if bytes:
        l.append('%s' % (bytes))
    if not l:
        return '0 bytes'
    return '+'.join(l) + ' bytes'
class Options(optparse.Values):
    """Option storage built on optparse.Values.

    Behaves like a bag of named values with a ``get`` that records the
    default of any option queried for the first time.  Constructing an
    Options from another Options instance shares the underlying data, so
    updates to one are visible through the other.
    """

    def __init__(self, *args, **kws):
        """Construct an Options instance.

        Supported forms::

            Options(key1=value1, key2=value2, ...)
            Options(<optparse.Values instance>, key1=value1, ...)
            Options(<Options instance>, key1=value1, ...)   # shares data
            Options(None, key1=value1, ...)
        """
        if not args:
            optparse.Values.__init__(self, kws)
            return
        if len(args) != 1:
            raise NotImplementedError(repr(args))
        source = args[0]
        if isinstance(source, Options):
            # share the underlying dict so updates propagate both ways
            self.__dict__ = source.__dict__
            self.__dict__.update(**kws)
        elif isinstance(source, optparse.Values):
            optparse.Values.__init__(self, source.__dict__)
            self.__dict__.update(**kws)
        elif source is None:
            optparse.Values.__init__(self, kws)
        else:
            raise NotImplementedError(repr(source))

    def get(self, **kws):
        """Return the value of a single option.

        Call as ``options.get(key=default)``.  A missing option is added
        with the default; a stored None is replaced by the default.
        """
        assert len(kws) == 1, repr(kws)
        (key, default), = kws.items()
        if key not in self.__dict__:
            if VERBOSE:
                print('Options.get: adding new option: %s=%r' % (key, default))
            self.__dict__[key] = default
        value = self.__dict__[key]
        if value is None:
            value = self.__dict__[key] = default
        return value
def splitcommandline(line):
    """Split a command line into arguments, honouring quoted strings.

    Quoted pieces (as returned by splitquote) are kept intact with their
    surrounding quotes stripped; unquoted pieces are split on whitespace.
    """
    pieces, _stopchar = splitquote(line)
    args = []
    for piece in pieces:
        if piece[0] == piece[-1] and piece[0] in '\'"':
            # quoted argument: drop the surrounding quote characters
            args.append(piece[1:-1])
        else:
            args.extend(piece.split())
    return args
def splitquote(line, stopchar=None, lower=False, quotechars = '"\''):
    """
    Fast LineSplitter.

    Split *line* into a list of alternating unquoted and quoted pieces.
    Quoted pieces keep their surrounding quote characters; a backslash
    escape count decides whether a quote character opens or closes a
    string.  Returns ``(items, stopchar)`` where *stopchar* is the quote
    character of a string still open at the end of the line (so callers
    can continue it on the next line), or None.  When *lower* is true,
    unquoted pieces are lower-cased.

    Copied from The F2Py Project.
    """
    items = []
    i = 0
    while 1:
        try:
            char = line[i]; i += 1
        except IndexError:
            break
        l = []
        l_append = l.append
        # parity of the run of preceding backslashes (escape state)
        nofslashes = 0
        if stopchar is None:
            # search for string start
            while 1:
                if char in quotechars and not nofslashes % 2:
                    stopchar = char
                    # step back so the quote is re-read as part of the
                    # quoted item on the next outer iteration
                    i -= 1
                    break
                if char=='\\':
                    nofslashes += 1
                else:
                    nofslashes = 0
                l_append(char)
                try:
                    char = line[i]; i += 1
                except IndexError:
                    break
            if not l: continue
            item = ''.join(l)
            if lower: item = item.lower()
            items.append(item)
            continue
        if char==stopchar:
            # string starts with quotechar
            l_append(char)
            try:
                char = line[i]; i += 1
            except IndexError:
                if l:
                    item = str(''.join(l))
                    items.append(item)
                break
        # else continued string
        while 1:
            # an unescaped stopchar terminates the quoted piece
            if char==stopchar and not nofslashes % 2:
                l_append(char)
                stopchar = None
                break
            if char=='\\':
                nofslashes += 1
            else:
                nofslashes = 0
            l_append(char)
            try:
                char = line[i]; i += 1
            except IndexError:
                # line ended inside a string: stopchar stays set for the caller
                break
        if l:
            item = str(''.join(l))
            items.append(item)
    return items, stopchar
|
|
import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.mail import send_mail, EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
class LanguageStoreNotAvailable(Exception):
    """Raised when a per-user notification language cannot be looked up
    (see get_notification_language)."""
    pass
class NoticeType(models.Model):
    """A kind of notice an application can emit, identified by *label*."""

    label = models.CharField(_('label'), max_length=40)
    display = models.CharField(_('display'), max_length=50)
    description = models.CharField(_('description'), max_length=100)

    # by default only on for media with sensitivity less than or equal to this number
    default = models.IntegerField(_('default'))

    def __unicode__(self):
        return self.label

    class Meta:
        verbose_name = _("notice type")
        verbose_name_plural = _("notice types")
# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2 # email
}
class NoticeSetting(models.Model):
    """
    Indicates, for a given user, whether to send notifications
    of a given type to a given medium.
    """
    user = models.ForeignKey(User, verbose_name=_('user'))
    notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
    # one of NOTICE_MEDIA (currently only "1" == email)
    medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
    send = models.BooleanField(_('send'))

    class Meta:
        verbose_name = _("notice setting")
        verbose_name_plural = _("notice settings")
        # at most one setting per (user, type, medium) combination
        unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
    """Fetch the user's NoticeSetting for (notice_type, medium).

    A missing setting is created on the fly; whether it defaults to
    sending is decided by comparing the medium's spam sensitivity
    (NOTICE_MEDIA_DEFAULTS) with the notice type's default threshold.
    """
    lookup = {"user": user, "notice_type": notice_type, "medium": medium}
    try:
        return NoticeSetting.objects.get(**lookup)
    except NoticeSetting.DoesNotExist:
        send_by_default = NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default
        setting = NoticeSetting(send=send_by_default, **lookup)
        setting.save()
        return setting
def should_send(user, notice_type, medium):
    """True when this user's setting for (notice_type, medium) is enabled."""
    setting = get_notification_setting(user, notice_type, medium)
    return setting.send
class NoticeManager(models.Manager):
    """Manager exposing per-user notice queries."""

    def notices_for(self, user, archived=False, unseen=None, on_site=None):
        """
        returns Notice objects for the given user.
        If archived=False, it only include notices not archived.
        If archived=True, it returns all notices for that user.
        If unseen=None, it includes all notices.
        If unseen=True, return only unseen notices.
        If unseen=False, return only seen notices.
        """
        if archived:
            # include archived and unarchived notices alike
            qs = self.filter(user=user)
        else:
            # archived is False here, so this restricts to unarchived notices
            qs = self.filter(user=user, archived=archived)
        if unseen is not None:
            qs = qs.filter(unseen=unseen)
        if on_site is not None:
            qs = qs.filter(on_site=on_site)
        return qs

    def unseen_count_for(self, user, **kwargs):
        """
        returns the number of unseen notices for the given user but does not
        mark them seen
        """
        return self.notices_for(user, unseen=True, **kwargs).count()
class Notice(models.Model):
    """A single notification delivered to one user."""

    user = models.ForeignKey(User, verbose_name=_('user'))
    message = models.TextField(_('message'))
    notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
    added = models.DateTimeField(_('added'), default=datetime.datetime.now)
    unseen = models.BooleanField(_('unseen'), default=True)
    archived = models.BooleanField(_('archived'), default=False)
    on_site = models.BooleanField(_('on site'))

    objects = NoticeManager()

    def __unicode__(self):
        return self.message

    def archive(self):
        # soft-delete: archived notices are excluded by
        # NoticeManager.notices_for unless archived=True is requested
        self.archived = True
        self.save()

    def is_unseen(self):
        """
        returns value of self.unseen but also changes it to false.
        Use this in a template to mark an unseen notice differently the first
        time it is shown.
        """
        unseen = self.unseen
        if unseen:
            self.unseen = False
            self.save()
        return unseen

    class Meta:
        ordering = ["-added"]
        verbose_name = _("notice")
        verbose_name_plural = _("notices")

    def get_absolute_url(self):
        return ("notification_notice", [str(self.pk)])
    get_absolute_url = models.permalink(get_absolute_url)
class NoticeQueueBatch(models.Model):
    """
    A queued notice.
    Denormalized data for a notice.
    """
    # base64-encoded pickle of a list of
    # (user_pk, label, extra_context, on_site) tuples -- see queue()
    pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
    """
    Creates a new NoticeType.
    This is intended to be used by other apps as a post_syncdb management step.

    If a NoticeType with *label* already exists it is updated in place
    when display, description or default differ; otherwise a new row is
    created.  With verbosity > 1 the action taken is printed.
    """
    try:
        notice_type = NoticeType.objects.get(label=label)
        updated = False
        if display != notice_type.display:
            notice_type.display = display
            updated = True
        if description != notice_type.description:
            notice_type.description = description
            updated = True
        if default != notice_type.default:
            notice_type.default = default
            updated = True
        if updated:
            notice_type.save()
            if verbosity > 1:
                print "Updated %s NoticeType" % label
    except NoticeType.DoesNotExist:
        NoticeType(label=label, display=display, description=description, default=default).save()
        if verbosity > 1:
            print "Created %s NoticeType" % label
def get_notification_language(user):
    """
    Returns site-specific notification language for this user. Raises
    LanguageStoreNotAvailable if this site does not use translated
    notifications.

    The language store is the model named by the
    NOTIFICATION_LANGUAGE_MODULE setting ("app_label.model_name"); the
    row matching this user must expose a ``language`` attribute.
    """
    if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
        try:
            app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
            model = models.get_model(app_label, model_name)
            language_model = model._default_manager.get(user__id__exact=user.id)
            if hasattr(language_model, 'language'):
                return language_model.language
        # BUGFIX: the except clause previously named ``model.DoesNotExist``;
        # if split()/get_model() failed first, ``model`` was unbound and the
        # clause itself raised NameError.  ObjectDoesNotExist is the base
        # class of every model's DoesNotExist, so it is safe to name here.
        except (ImportError, ImproperlyConfigured, ObjectDoesNotExist):
            raise LanguageStoreNotAvailable
    raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
    """Render one template per format identifier.

    Returns a dict mapping each entry of *formats* to its fully rendered
    template, trying 'notification/<label>/<format>' first and falling
    back to 'notification/<format>'.
    """
    rendered = {}
    for fmt in formats:
        # autoescaping is only wanted for HTML formats, not .txt ones
        context.autoescape = not fmt.endswith(".txt")
        rendered[fmt] = render_to_string((
            'notification/%s/%s' % (label, fmt),
            'notification/%s' % fmt), context_instance=context)
    return rendered
def send_now(users, label, extra_context=None, on_site=True):
    """
    Creates a new notice.
    This is intended to be how other apps create new notices.

    notification.send(user, 'friends_invite_sent', {
        'spam': 'eggs',
        'foo': 'bar',
    )

    You can pass in on_site=False to prevent the notice emitted from being
    displayed on the site.

    For every user this renders the notice templates in the user's own
    language, stores a Notice row, and emails the user if their setting
    for the email medium allows it.
    """
    if extra_context is None:
        extra_context = {}
    notice_type = NoticeType.objects.get(label=label)
    protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
    current_site = Site.objects.get_current()
    notices_url = u"%s://%s%s" % (
        protocol,
        unicode(current_site),
        reverse("notification_notices"),
    )
    # remembered so the active translation can be restored at the end
    current_language = get_language()
    formats = (
        'short.txt',
        'full.txt',
        'notice.html',
        'full.html',
    ) # TODO make formats configurable
    for user in users:
        recipients = []
        # get user language for user from language store defined in
        # NOTIFICATION_LANGUAGE_MODULE setting
        try:
            language = get_notification_language(user)
        except LanguageStoreNotAvailable:
            language = None
        if language is not None:
            # activate the user's language
            activate(language)
        else:
            activate(settings.LANGUAGE_CODE)
        # update context with user specific translations
        context = Context({
            "user": user,
            "notice": ugettext(notice_type.display),
            "notices_url": notices_url,
            "current_site": current_site,
        })
        context.update(extra_context)
        # get prerendered format messages
        messages = get_formatted_messages(formats, label, context)
        # Strip newlines from subject
        subject = ''.join(render_to_string('notification/email_subject.txt', {
            'message': messages['short.txt'],
        }, context).splitlines())
        body = render_to_string('notification/email_body.txt', {
            'message': messages['full.txt'],
        }, context)
        # NOTE(review): the HTML body is built from the 'full.txt' message;
        # 'full.html' is rendered but never used -- confirm this is intended
        html_body = render_to_string('notification/email_body.html', {
            'message': messages['full.txt'],
        }, context)
        notice = Notice.objects.create(user=user, message=messages['notice.html'],
            notice_type=notice_type, on_site=on_site)
        if should_send(user, notice_type, "1") and user.email: # Email
            recipients.append(user.email)
        # NOTE(review): the email object is built and send() called even when
        # recipients is empty
        msg = EmailMultiAlternatives(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)
        msg.attach_alternative(html_body, 'text/html')
        msg.send()
    # reset environment to original language
    activate(current_language)
def send(*args, **kwargs):
    """
    A basic interface around both queue and send_now. This honors a global
    flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
    be queued or not. A per call ``queue`` or ``now`` keyword argument can be
    used to always override the default global behavior.
    """
    use_queue = kwargs.pop("queue", False)
    use_now = kwargs.pop("now", False)
    assert not (use_queue and use_now), "'queue' and 'now' cannot both be True."
    if use_now:
        return send_now(*args, **kwargs)
    if use_queue or QUEUE_ALL:
        return queue(*args, **kwargs)
    return send_now(*args, **kwargs)
def queue(users, label, extra_context=None, on_site=True):
    """
    Queue the notification in NoticeQueueBatch. This allows for large amounts
    of user notifications to be deferred to a separate process running outside
    the webserver.

    *users* may be a QuerySet of User objects or any iterable of users;
    only primary keys are stored in the batch.
    """
    if extra_context is None:
        extra_context = {}
    if isinstance(users, QuerySet):
        # avoid instantiating model objects; only the pks are needed
        users = [row["pk"] for row in users.values("pk")]
    else:
        users = [user.pk for user in users]
    notices = []
    for user in users:
        notices.append((user, label, extra_context, on_site))
    # str.encode("base64") is Python 2 only; the batch is stored as
    # base64-encoded pickle text
    NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
class ObservedItemManager(models.Manager):
    """Manager with lookups for observation registrations."""

    def all_for(self, observed, signal):
        """
        Returns all ObservedItems for an observed object,
        to be sent when a signal is emited.
        """
        content_type = ContentType.objects.get_for_model(observed)
        observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
        return observed_items

    def get_for(self, observed, observer, signal):
        # single registration of *observer* on *observed*; raises
        # DoesNotExist / MultipleObjectsReturned like Manager.get
        content_type = ContentType.objects.get_for_model(observed)
        observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
        return observed_item
class ObservedItem(models.Model):
    """Registers *user* as an observer of a single model instance."""

    user = models.ForeignKey(User, verbose_name=_('user'))

    # generic link to the observed object
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    observed_object = generic.GenericForeignKey('content_type', 'object_id')

    notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
    added = models.DateTimeField(_('added'), default=datetime.datetime.now)

    # the signal that will be listened to send the notice
    signal = models.TextField(verbose_name=_('signal'))

    objects = ObservedItemManager()

    class Meta:
        ordering = ['-added']
        verbose_name = _('observed item')
        verbose_name_plural = _('observed items')

    def send_notice(self):
        # notify the observer about a change on the observed object
        send([self.user], self.notice_type.label,
            {'observed': self.observed_object})
def observe(observed, observer, notice_type_label, signal='post_save'):
    """
    Create a new ObservedItem.
    To be used by applications to register a user as an observer for some object.
    """
    item = ObservedItem(
        user=observer,
        observed_object=observed,
        notice_type=NoticeType.objects.get(label=notice_type_label),
        signal=signal,
    )
    item.save()
    return item
def stop_observing(observed, observer, signal='post_save'):
    """
    Remove an observed item.
    """
    ObservedItem.objects.get_for(observed, observer, signal).delete()
def send_observation_notices_for(observed, signal='post_save'):
    """
    Send a notice for each registered user about an observed object.
    """
    items = ObservedItem.objects.all_for(observed, signal)
    for item in items:
        item.send_notice()
    return items
def is_observing(observed, observer, signal='post_save'):
    """True when *observer* is registered on *observed* for *signal*.

    Anonymous users are never observers.  Multiple registrations still
    count as observing.
    """
    if isinstance(observer, AnonymousUser):
        return False
    try:
        ObservedItem.objects.get_for(observed, observer, signal)
    except ObservedItem.DoesNotExist:
        return False
    except ObservedItem.MultipleObjectsReturned:
        pass
    return True
def handle_observations(sender, instance, *args, **kw):
    """Signal receiver: notify every observer registered on *instance*."""
    send_observation_notices_for(instance)
|
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib import constants
from neutron_lib.db import api as db_api
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import extradhcpopt_db as edo_db
from neutron.tests.unit.db import test_db_base_plugin_v2
DB_PLUGIN_KLASS = (
'neutron.tests.unit.extensions.test_extra_dhcp_opt.ExtraDhcpOptTestPlugin')
class ExtraDhcpOptTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                             edo_db.ExtraDhcpOptMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with extra dhcp options.
    """
    supported_extension_aliases = [edo_ext.ALIAS]

    def create_port(self, context, port):
        # pop the extra options off the request, create the port, then
        # persist the options for the new port -- all in one transaction
        with db_api.CONTEXT_WRITER.using(context):
            edos = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            new_port = super(ExtraDhcpOptTestPlugin, self).create_port(
                context, port)
            self._process_port_create_extra_dhcp_opts(context, new_port, edos)
            return new_port

    def update_port(self, context, id, port):
        # update the port, then reconcile its extra DHCP options from the
        # request body, inside the same transaction
        with db_api.CONTEXT_WRITER.using(context):
            rtn_port = super(ExtraDhcpOptTestPlugin, self).update_port(
                context, id, port)
            self._update_extra_dhcp_opts_on_port(context, id, port, rtn_port)
            return rtn_port
class ExtraDhcpOptDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    """Base test case wiring up the extra-DHCP-option test plugin."""

    def setUp(self, plugin=DB_PLUGIN_KLASS):
        super(ExtraDhcpOptDBTestCase, self).setUp(plugin=plugin)
class TestExtraDhcpOpt(ExtraDhcpOptDBTestCase):
def _check_opts(self, expected, returned):
self.assertEqual(len(expected), len(returned))
for opt in returned:
name = opt['opt_name']
for exp in expected:
if (name == exp['opt_name'] and
opt['ip_version'] == exp.get(
'ip_version', constants.IP_VERSION_4)):
val = exp['opt_value']
break
self.assertEqual(val, opt['opt_value'])
    def test_create_port_with_extradhcpopts(self):
        """Port create echoes back the extra DHCP options it was given."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_none_extradhcpopts(self):
        """Options whose value is None are dropped on create."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': None},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        expected = [{'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
        with self.port(**params) as port:
            self._check_opts(expected,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_empty_router_extradhcpopts(self):
        """An empty-string 'router' option is kept, not dropped."""
        opt_list = [{'opt_name': 'router',
                     'opt_value': ''},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_extradhcpopts_ipv4_opt_version(self):
        """Options carrying an explicit IPv4 ip_version round-trip."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': constants.IP_VERSION_4},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456',
                     'ip_version': constants.IP_VERSION_4},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123',
                     'ip_version': constants.IP_VERSION_4}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_extradhcpopts_ipv6_opt_version(self):
        """Options carrying an explicit IPv6 ip_version round-trip."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': constants.IP_VERSION_6},
                    {'opt_name': 'tftp-server',
                     'opt_value': '2001:192:168::1',
                     'ip_version': constants.IP_VERSION_6}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}
        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])
def _test_update_port_with_extradhcpopts(self, opt_list, upd_opts,
expected_opts):
params = {edo_ext.EXTRADHCPOPTS: opt_list,
'arg_list': (edo_ext.EXTRADHCPOPTS,)}
with self.port(**params) as port:
update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
port = self.deserialize('json', res)
self._check_opts(expected_opts,
port['port'][edo_ext.EXTRADHCPOPTS])
def test_update_port_with_extradhcpopts_with_same(self):
opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
expected_opts = opt_list[:]
for i in expected_opts:
if i['opt_name'] == upd_opts[0]['opt_name']:
i['opt_value'] = upd_opts[0]['opt_value']
break
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_with_additional_extradhcpopt(self):
opt_list = [{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
expected_opts = copy.deepcopy(opt_list)
expected_opts.append(upd_opts[0])
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_with_extradhcpopts(self):
opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
expected_opts = copy.deepcopy(opt_list)
for i in expected_opts:
if i['opt_name'] == upd_opts[0]['opt_name']:
i['opt_value'] = upd_opts[0]['opt_value']
break
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_with_extradhcpopt_delete(self):
opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}]
expected_opts = []
expected_opts = [opt for opt in opt_list
if opt['opt_name'] != 'bootfile-name']
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_without_extradhcpopt_delete(self):
opt_list = []
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}]
expected_opts = []
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_adding_extradhcpopts(self):
opt_list = []
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
expected_opts = copy.deepcopy(upd_opts)
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_with_blank_string_extradhcpopt(self):
opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}]
params = {edo_ext.EXTRADHCPOPTS: opt_list,
'arg_list': (edo_ext.EXTRADHCPOPTS,)}
with self.port(**params) as port:
update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_update_port_with_blank_name_extradhcpopt(self):
opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'}]
upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}]
params = {edo_ext.EXTRADHCPOPTS: opt_list,
'arg_list': (edo_ext.EXTRADHCPOPTS,)}
with self.port(**params) as port:
update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
def test_update_port_with_blank_router_extradhcpopt(self):
opt_list = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': constants.IP_VERSION_4},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123',
'ip_version': constants.IP_VERSION_4},
{'opt_name': 'router',
'opt_value': '123.123.123.1',
'ip_version': constants.IP_VERSION_4}]
upd_opts = [{'opt_name': 'router',
'opt_value': '',
'ip_version': constants.IP_VERSION_4}]
expected_opts = copy.deepcopy(opt_list)
for i in expected_opts:
if i['opt_name'] == upd_opts[0]['opt_name']:
i['opt_value'] = upd_opts[0]['opt_value']
break
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_with_extradhcpopts_ipv6_change_value(self):
opt_list = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': constants.IP_VERSION_6},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': constants.IP_VERSION_6}]
upd_opts = [{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::2',
'ip_version': constants.IP_VERSION_6}]
expected_opts = copy.deepcopy(opt_list)
for i in expected_opts:
if i['opt_name'] == upd_opts[0]['opt_name']:
i['opt_value'] = upd_opts[0]['opt_value']
break
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
def test_update_port_with_extradhcpopts_add_another_ver_opt(self):
opt_list = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': constants.IP_VERSION_6},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': constants.IP_VERSION_6}]
upd_opts = [{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123',
'ip_version': constants.IP_VERSION_4}]
expected_opts = copy.deepcopy(opt_list)
expected_opts.extend(upd_opts)
self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
expected_opts)
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from pants.engine.fs import PathGlobs
from pants.engine.objects import Collection
from pants.option.custom_types import GlobExpansionConjunction
from pants.option.global_options import GlobMatchErrorBehavior
from pants.util.collections import assert_single_element
from pants.util.dirutil import fast_relpath_optional, recursive_dirname
from pants.util.filtering import create_filters, wrap_filters
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
if TYPE_CHECKING:
from pants.engine.mapper import AddressFamily, AddressMapper
class Spec(ABC):
    """A specification of what Pants should operate on."""

    @abstractmethod
    def to_spec_string(self) -> str:
        """Return this spec rendered in its normalized command-line string form."""
class AddressSpec(Spec, metaclass=ABCMeta):
    """An address selector as given on the command line.

    Covers `Single` target addresses plus the `Sibling` (:) and `Descendant` (::)
    selector forms. Note: 'spec' is generally not a user visible term; prefer
    'address' for a spec resolved to an address, or 'address selector' for an
    unresolved spec string.
    """

    class AddressFamilyResolutionError(Exception):
        pass

    @abstractmethod
    def matching_address_families(
        self, address_families_dict: Dict[str, "AddressFamily"],
    ) -> List["AddressFamily"]:
        """Return the values of the given (namespace path) -> AddressFamily dict
        that this address spec selects.

        :raises: :class:`AddressSpec.AddressFamilyResolutionError` if no address families matched this spec.
        """

    @classmethod
    def address_families_for_dir(
        cls, address_families_dict: Dict[str, "AddressFamily"], spec_dir_path: str
    ) -> List["AddressFamily"]:
        """Shared `matching_address_families()` implementation for address specs
        that match at most one directory."""
        family = address_families_dict.get(spec_dir_path)
        if family is None:
            raise cls.AddressFamilyResolutionError(
                'Path "{}" does not contain any BUILD files.'.format(spec_dir_path)
            )
        return [family]

    class AddressResolutionError(Exception):
        pass

    @abstractmethod
    def address_target_pairs_from_address_families(self, address_families: List["AddressFamily"]):
        """Return the (address, target) pairs from the given AddressFamily list
        that match this address spec.

        :raises: :class:`SingleAddress._SingleAddressResolutionError` for resolution errors with a
          :class:`SingleAddress` instance.
        :raises: :class:`AddressSpec.AddressResolutionError` if no targets could be found otherwise, if
          the address spec type requires a non-empty set of targets.
        :return: list of (Address, Target) pairs.
        """

    @classmethod
    def all_address_target_pairs(cls, address_families):
        """Shared `address_target_pairs_from_address_families()` implementation
        that performs no filtering."""
        return [pair for af in address_families for pair in af.addressables.items()]

    @abstractmethod
    def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
        """Generate glob patterns matching exactly all the BUILD files this address spec covers."""

    @classmethod
    def globs_in_single_dir(cls, spec_dir_path: str, address_mapper: "AddressMapper") -> List[str]:
        """Shared `make_glob_patterns()` implementation for a single base directory."""
        return [os.path.join(spec_dir_path, pattern) for pattern in address_mapper.build_patterns]
@dataclass(frozen=True)
class SingleAddress(AddressSpec):
    """An AddressSpec for one target, addressed by directory and name."""

    directory: str
    name: str

    def __post_init__(self) -> None:
        # Defensive: the dataclass does not forbid None, so validate eagerly.
        if self.directory is None:
            raise ValueError(f"A SingleAddress must have a directory. Got: {self}")
        if self.name is None:
            raise ValueError(f"A SingleAddress must have a name. Got: {self}")

    def to_spec_string(self) -> str:
        return f"{self.directory}:{self.name}"

    def matching_address_families(
        self, address_families_dict: Dict[str, "AddressFamily"]
    ) -> List["AddressFamily"]:
        return self.address_families_for_dir(address_families_dict, self.directory)

    class _SingleAddressResolutionError(Exception):
        def __init__(self, single_address_family: "AddressFamily", name: str) -> None:
            super().__init__()
            self.single_address_family = single_address_family
            self.name = name

    def address_target_pairs_from_address_families(
        self, address_families: Sequence["AddressFamily"]
    ):
        """Return the single (address, target) pair matching this spec's name.

        :raises: :class:`SingleAddress._SingleAddressResolutionError` if no target
          in the single AddressFamily carries this spec's name.
        :return: list of (Address, Target) pairs with exactly one element.
        """
        family = assert_single_element(address_families)
        matches = [
            (address, target)
            for address, target in family.addressables.items()
            if address.target_name == self.name
        ]
        if not matches:
            raise self._SingleAddressResolutionError(family, self.name)
        # Target names are unique within an AddressFamily, so exactly one match.
        assert len(matches) == 1
        return matches

    def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
        return self.globs_in_single_dir(self.directory, address_mapper)
@dataclass(frozen=True)
class SiblingAddresses(AddressSpec):
    """An AddressSpec matching every address directly inside the given directory."""

    directory: str

    def to_spec_string(self) -> str:
        return f"{self.directory}:"

    def matching_address_families(
        self, address_families_dict: Dict[str, "AddressFamily"],
    ) -> List["AddressFamily"]:
        return self.address_families_for_dir(address_families_dict, self.directory)

    def address_target_pairs_from_address_families(
        self, address_families: Sequence["AddressFamily"]
    ):
        return self.all_address_target_pairs(address_families)

    def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
        return self.globs_in_single_dir(self.directory, address_mapper)
@dataclass(frozen=True)
class DescendantAddresses(AddressSpec):
    """An AddressSpec matching every address at or recursively under a directory."""

    directory: str

    def to_spec_string(self) -> str:
        return f"{self.directory}::"

    def matching_address_families(
        self, address_families_dict: Dict[str, "AddressFamily"],
    ) -> List["AddressFamily"]:
        matched = []
        for namespace, family in address_families_dict.items():
            # A non-None relpath means `namespace` lives under `directory`.
            if fast_relpath_optional(namespace, self.directory) is not None:
                matched.append(family)
        return matched

    def address_target_pairs_from_address_families(
        self, address_families: Sequence["AddressFamily"]
    ):
        pairs = self.all_address_target_pairs(address_families)
        if not pairs:
            raise self.AddressResolutionError(
                "AddressSpec {} does not match any targets.".format(self)
            )
        return pairs

    def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
        return [
            os.path.join(self.directory, "**", pattern)
            for pattern in address_mapper.build_patterns
        ]
@dataclass(frozen=True)
class AscendantAddresses(AddressSpec):
    """An AddressSpec matching all addresses located recursively _above_ the
    given directory."""

    directory: str

    def to_spec_string(self) -> str:
        return f"{self.directory}^"

    def matching_address_families(
        self, address_families_dict: Dict[str, "AddressFamily"],
    ) -> List["AddressFamily"]:
        ancestors = []
        for namespace, family in address_families_dict.items():
            # A non-None relpath means `directory` lives under `namespace`.
            if fast_relpath_optional(self.directory, namespace) is not None:
                ancestors.append(family)
        return ancestors

    def address_target_pairs_from_address_families(self, address_families):
        return self.all_address_target_pairs(address_families)

    def make_glob_patterns(self, address_mapper: "AddressMapper") -> List[str]:
        # Pattern-major ordering: every ancestor directory for each pattern.
        return [
            os.path.join(ancestor_dir, pattern)
            for pattern in address_mapper.build_patterns
            for ancestor_dir in recursive_dirname(self.directory)
        ]
# Ranking used by `more_specific()`: a lower value means a more specific
# address spec. `type(None)` ranks last so any real spec beats a missing one.
_specificity = {
    SingleAddress: 0,
    SiblingAddresses: 1,
    AscendantAddresses: 2,
    DescendantAddresses: 3,
    type(None): 99,
}
def more_specific(
    address_spec1: Optional[AddressSpec], address_spec2: Optional[AddressSpec]
) -> AddressSpec:
    """Return whichever of the two specs is more specific.

    Useful when a target matches multiple specs and we want to associate it with
    the "most specific" one, which will make the most intuitive sense to the user.
    """
    if address_spec1 is None and address_spec2 is None:
        raise ValueError("internal error: both specs provided to more_specific() were None")
    # `None` ranks least specific in `_specificity`, so a real spec always wins.
    if _specificity[type(address_spec1)] < _specificity[type(address_spec2)]:
        chosen = address_spec1
    else:
        chosen = address_spec2
    return cast(AddressSpec, chosen)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecsMatcher:
    """Filters applied to the output of an AddressSpecs match.

    Kept separate from `AddressSpecs` so that `tags` and `exclude_patterns`
    compare structurally, and so their compiled forms can be cached via
    `@memoized_property` (whose cache key hashes the instance, and would be
    very large if used on `AddressSpecs` directly).
    """

    tags: Tuple[str, ...]
    exclude_patterns: Tuple[str, ...]

    def __init__(
        self,
        tags: Optional[Iterable[str]] = None,
        exclude_patterns: Optional[Iterable[str]] = None,
    ) -> None:
        self.tags = tuple(tags or [])
        self.exclude_patterns = tuple(exclude_patterns or [])

    @memoized_property
    def _exclude_compiled_regexps(self):
        # Deduplicate patterns before compiling; compiled once per instance.
        return [re.compile(pattern) for pattern in set(self.exclude_patterns or [])]

    def _excluded_by_pattern(self, address):
        spec_string = address.spec
        return any(regex.search(spec_string) is not None
                   for regex in self._exclude_compiled_regexps)

    @memoized_property
    def _target_tag_matches(self):
        def filter_for_tag(tag):
            def predicate(tgt):
                return tag in [str(t_tag) for t_tag in tgt.kwargs().get("tags", [])]
            return predicate
        return wrap_filters(create_filters(self.tags, filter_for_tag))

    def matches_target_address_pair(self, address, target):
        """Return True if the Address/HydratedTarget pair passes both filters.

        :param Address address: An Address to match
        :param HydratedTarget target: The Target for the address.
        """
        return self._target_tag_matches(target) and not self._excluded_by_pattern(address)
@frozen_after_init
@dataclass(unsafe_hash=True)
class AddressSpecs:
    """A collection of `AddressSpec` subclass instances, plus an
    `AddressSpecsMatcher` used to filter the matched targets."""

    dependencies: Tuple[AddressSpec, ...]
    matcher: AddressSpecsMatcher

    def __init__(
        self,
        dependencies: Iterable[AddressSpec],
        tags: Optional[Iterable[str]] = None,
        exclude_patterns: Optional[Iterable[str]] = None,
    ) -> None:
        self.dependencies = tuple(dependencies)
        # Fold the raw filter arguments into a hashable matcher object.
        self.matcher = AddressSpecsMatcher(tags=tags, exclude_patterns=exclude_patterns)

    def __iter__(self) -> Iterator[AddressSpec]:
        return iter(self.dependencies)
class FilesystemSpec(Spec, metaclass=ABCMeta):
    """Marker base class for specs that select files rather than addresses."""
class FilesystemResolvedSpec(FilesystemSpec, metaclass=ABCMeta):
    @property
    @abstractmethod
    def resolved_files(self) -> Tuple[str, ...]:
        """The literal files this spec refers to once all globs and excludes
        have been resolved."""
@dataclass(frozen=True)
class FilesystemLiteralSpec(FilesystemResolvedSpec):
    """A single literal file name, e.g. `foo.py`."""

    file: str

    @property
    def resolved_files(self) -> Tuple[str, ...]:
        # A literal spec trivially resolves to itself.
        return (self.file,)

    def to_spec_string(self) -> str:
        return self.file
@dataclass(frozen=True)
class FilesystemGlobSpec(FilesystemSpec):
    """A glob-bearing spec, e.g. `*.py` or `**/*.java`."""

    glob: str

    def to_spec_string(self) -> str:
        return self.glob
@dataclass(frozen=True)
class FilesystemResolvedGlobSpec(FilesystemGlobSpec, FilesystemResolvedSpec):
    """A glob spec whose globs have been resolved to concrete files.

    For example, `*.py` may resolve to `('f1.py', 'f2.py', '__init__.py')`.
    """

    files: Tuple[str, ...]

    @property
    def resolved_files(self) -> Tuple[str, ...]:
        return self.files
@dataclass(frozen=True)
class FilesystemMergedSpec(FilesystemResolvedSpec):
    """Several FilesystemSpecs that all belong to the same target, merged."""

    globs: Tuple[str, ...]
    files: Tuple[str, ...]

    @classmethod
    def create(
        cls, original_specs: Iterable[Union[FilesystemLiteralSpec, FilesystemResolvedGlobSpec]]
    ) -> "FilesystemMergedSpec":
        # Materialize once so a generator argument is only consumed once.
        specs = list(original_specs)
        glob_strings = [spec.to_spec_string() for spec in specs]
        file_names: List[str] = []
        for spec in specs:
            file_names.extend(spec.resolved_files)
        return cls(globs=tuple(sorted(glob_strings)), files=tuple(sorted(file_names)))

    @property
    def resolved_files(self) -> Tuple[str, ...]:
        return self.files

    def to_spec_string(self) -> str:
        return ", ".join(self.globs)
@dataclass(frozen=True)
class FilesystemIgnoreSpec(FilesystemSpec):
    """A spec excluding certain files or globs from the match."""

    glob: str

    def __post_init__(self) -> None:
        # `to_spec_string()` adds the `!` prefix itself; a pre-prefixed glob
        # would render as `!!...`.
        if self.glob.startswith("!"):
            raise ValueError(f"The `glob` for {self} should not start with `!`.")

    def to_spec_string(self) -> str:
        return f"!{self.glob}"
class FilesystemSpecs(Collection[FilesystemSpec]):
    @memoized_property
    def includes(self) -> Tuple[Union[FilesystemLiteralSpec, FilesystemGlobSpec], ...]:
        include_types = (FilesystemGlobSpec, FilesystemLiteralSpec)
        return tuple(spec for spec in self.dependencies
                     if isinstance(spec, include_types))

    @memoized_property
    def ignores(self) -> Tuple[FilesystemIgnoreSpec, ...]:
        return tuple(spec for spec in self.dependencies
                     if isinstance(spec, FilesystemIgnoreSpec))

    @staticmethod
    def _generate_path_globs(specs: Iterable[FilesystemSpec]) -> PathGlobs:
        return PathGlobs(
            globs=(spec.to_spec_string() for spec in specs),
            # Erroring on unmatched globs matches the behavior of unmatched
            # address specs and keeps scripts from silently doing the wrong thing.
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            # Validate that _every_ glob is valid, not just some.
            conjunction=GlobExpansionConjunction.all_match,
            description_of_origin="file arguments",
        )

    def path_globs_for_spec(
        self, spec: Union[FilesystemLiteralSpec, FilesystemGlobSpec]
    ) -> PathGlobs:
        """Generate PathGlobs for the given spec, always including this
        instance's FilesystemIgnoreSpecs."""
        return self._generate_path_globs(specs=(spec, *self.ignores))

    def to_path_globs(self) -> PathGlobs:
        """Generate one PathGlobs covering every include and ignore of the instance."""
        return self._generate_path_globs(specs=(*self.includes, *self.ignores))
class AmbiguousSpecs(Exception):
    """Raised when both address specs and filesystem specs are provided together."""
@dataclass(frozen=True)
class Specs:
    address_specs: AddressSpecs
    filesystem_specs: FilesystemSpecs

    def __post_init__(self) -> None:
        # Giving both spec kinds at once is ambiguous; reject it up front.
        if self.address_specs.dependencies and self.filesystem_specs.dependencies:
            address_list = ', '.join(spec.to_spec_string() for spec in self.address_specs)
            filesystem_list = ', '.join(spec.to_spec_string() for spec in self.filesystem_specs)
            raise AmbiguousSpecs(
                "Both address specs and filesystem specs given. Please use only one type of spec.\n\n"
                f"Address specs: {address_list}\n"
                f"Filesystem specs: {filesystem_list}"
            )

    @property
    def provided_specs(self) -> Union[AddressSpecs, FilesystemSpecs]:
        """Return whichever spec type the user provided.

        The constructor guarantees at most one of the two is non-empty.
        """
        if self.filesystem_specs.dependencies:
            return self.filesystem_specs
        return self.address_specs
OriginSpec = Union[AddressSpec, FilesystemResolvedSpec]
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Generator that produces an externs file for the Closure Compiler.
Note: This is a work in progress, and generated externs may require tweaking.
See https://developers.google.com/closure/compiler/docs/api-tutorial3#externs
"""
from code import Code
from js_util import JsUtil
from model import *
from schema_util import *
import os
import sys
import re
# Banner inserted into every generated externs file; the ``%s`` placeholder is
# filled with the namespace name by _GetHeader().
NOTE = """// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.%s.FooType'.
// Please run the closure compiler before committing changes.
// See https://chromium.googlesource.com/chromium/src/+/main/docs/closure_compilation.md
"""
class JsExternsGenerator(object):
  """Public entry point that produces Closure externs for one namespace."""

  def Generate(self, namespace):
    # Delegate all real work to a fresh per-namespace _Generator.
    return _Generator(namespace).Generate()
class _Generator(object):
  """Builds the externs Code for a single API namespace."""

  def __init__(self, namespace):
    self._namespace = namespace
    # Holds the type name while emitting a @constructor type's members, so
    # _GetNamespace() prefixes them with 'ClassName.prototype'.
    self._class_name = None
    self._js_util = JsUtil()

  def Generate(self):
    """Generates a Code object with the schema for the entire namespace.
    """
    c = Code()
    # /abs/path/src/tools/json_schema_compiler/
    script_dir = os.path.dirname(os.path.abspath(__file__))
    # /abs/path/src/
    src_root = os.path.normpath(os.path.join(script_dir, '..', '..'))
    # tools/json_schema_compiler/
    src_to_script = os.path.relpath(script_dir, src_root)
    # tools/json_schema_compiler/compiler.py
    compiler_path = os.path.join(src_to_script, 'compiler.py')
    (c.Append(self._GetHeader(compiler_path, self._namespace.name))
      .Append())
    self._AppendNamespaceObject(c)
    # Emit, in order: type definitions, top-level properties, functions, events.
    for js_type in self._namespace.types.values():
      self._AppendType(c, js_type)
    for prop in self._namespace.properties.values():
      self._AppendProperty(c, prop)
    for function in self._namespace.functions.values():
      self._AppendFunction(c, function)
    for event in self._namespace.events.values():
      self._AppendEvent(c, event)
    c.TrimTrailingNewlines()
    return c

  def _GetHeader(self, tool, namespace):
    """Returns the file header text: license, generation info, the NOTE
    banner and a @fileoverview line.
    """
    return (self._js_util.GetLicense() + '\n' +
            self._js_util.GetInfo(tool) + (NOTE % namespace) + '\n' +
            ('/** @fileoverview Externs generated from namespace: %s */' %
             namespace))

  def _AppendType(self, c, js_type):
    """Given a Type object, generates the Code for this type's definition.
    """
    if js_type.property_type is PropertyType.ENUM:
      self._AppendEnumJsDoc(c, js_type)
    else:
      self._AppendTypeJsDoc(c, js_type)
    c.Append()

  def _AppendEnumJsDoc(self, c, js_type):
    """ Given an Enum Type object, generates the Code for the enum's definition.
    """
    c.Sblock(line='/**', line_prefix=' * ')
    c.Append('@enum {string}')
    self._js_util.AppendSeeLink(c, self._namespace.name, 'type',
                                js_type.simple_name)
    c.Eblock(' */')
    c.Append('%s.%s = {' % (self._GetNamespace(), js_type.name))

    def get_property_name(e):
      # Enum properties are normified to be in ALL_CAPS_STYLE.
      # Assume enum '1ring-rulesThemAll'.
      # Transform to '1ring-rules_Them_All'.
      e = re.sub(r'([a-z])([A-Z])', r'\1_\2', e)
      # Transform to '1ring_rules_Them_All'.
      e = re.sub(r'\W', '_', e)
      # Transform to '_1ring_rules_Them_All'.
      e = re.sub(r'^(\d)', r'_\1', e)
      # Transform to '_1RING_RULES_THEM_ALL'.
      return e.upper()

    c.Append('\n'.join(
        [" %s: '%s'," % (get_property_name(v.name), v.name)
         for v in js_type.enum_values]))
    c.Append('};')

  def _IsTypeConstructor(self, js_type):
    """Returns true if the given type should be a @constructor. If this returns
    false, the type is a typedef.
    """
    # Any type that declares function-typed properties (methods) must be
    # emitted as a @constructor; plain data shapes become @typedef instead.
    return any(prop.type_.property_type is PropertyType.FUNCTION
               for prop in js_type.properties.values())

  def _AppendTypeJsDoc(self, c, js_type, optional=False):
    """Appends the documentation for a type as a Code.
    """
    c.Sblock(line='/**', line_prefix=' * ')
    if js_type.description:
      for line in js_type.description.splitlines():
        c.Append(line)
    if js_type.jsexterns:
      for line in js_type.jsexterns.splitlines():
        c.Append(line)
    is_constructor = self._IsTypeConstructor(js_type)
    if js_type.property_type is not PropertyType.OBJECT:
      self._js_util.AppendTypeJsDoc(c, self._namespace.name, js_type, optional)
    elif is_constructor:
      c.Comment('@constructor', comment_prefix = '', wrap_indent=4)
      c.Comment('@private', comment_prefix = '', wrap_indent=4)
    elif js_type.jsexterns is None:
      # Object type without methods and without hand-written externs: typedef.
      self._AppendTypedef(c, js_type.properties)
    self._js_util.AppendSeeLink(c, self._namespace.name, 'type',
                                js_type.simple_name)
    c.Eblock(' */')
    var = '%s.%s' % (self._GetNamespace(), js_type.simple_name)
    if is_constructor: var += ' = function() {}'
    var += ';'
    c.Append(var)
    if is_constructor:
      c.Append()
      # Emit members under 'ClassName.prototype' (see _GetNamespace()).
      self._class_name = js_type.name
      for prop in js_type.properties.values():
        if prop.type_.property_type is PropertyType.FUNCTION:
          self._AppendFunction(c, prop.type_.function)
        else:
          self._AppendTypeJsDoc(c, prop.type_, prop.optional)
          c.Append()
      self._class_name = None

  def _AppendTypedef(self, c, properties):
    """Given an OrderedDict of properties, Appends code containing a @typedef.
    """
    c.Append('@typedef {')
    if properties:
      self._js_util.AppendObjectDefinition(
          c, self._namespace.name, properties, new_line=False)
    else:
      c.Append('Object', new_line=False)
    c.Append('}', new_line=False)

  def _AppendProperty(self, c, prop):
    """Appends the code representing a top-level property, including its
    documentation. For example:
      /** @type {string} */
      chrome.runtime.id;
    """
    self._AppendTypeJsDoc(c, prop.type_, prop.optional)
    c.Append()

  def _AppendFunction(self, c, function):
    """Appends the code representing a function, including its documentation.
    For example:
      /**
       * @param {string} title The new title.
       */
      chrome.window.setTitle = function(title) {};
    """
    self._js_util.AppendFunctionJsDoc(c, self._namespace.name, function)
    params = self._GetFunctionParams(function)
    c.Append('%s.%s = function(%s) {};' % (self._GetNamespace(),
                                           function.name, params))
    c.Append()

  def _AppendEvent(self, c, event):
    """Appends the code representing an event.
    For example:
      /** @type {!ChromeEvent} */
      chrome.bookmarks.onChildrenReordered;
    """
    c.Sblock(line='/**', line_prefix=' * ')
    if (event.description):
      c.Comment(event.description, comment_prefix='')
    c.Append('@type {!ChromeEvent}')
    self._js_util.AppendSeeLink(c, self._namespace.name, 'event', event.name)
    c.Eblock(' */')
    c.Append('%s.%s;' % (self._GetNamespace(), event.name))
    c.Append()

  def _AppendNamespaceObject(self, c):
    """Appends the code creating namespace object.
    For example:
      /** @const */
      chrome.bookmarks = {};
    """
    c.Append('/** @const */')
    c.Append('chrome.%s = {};' % self._namespace.name)
    c.Append()

  def _GetFunctionParams(self, function):
    """Returns the function params string for function.
    """
    params = function.params[:]
    param_names = [param.name for param in params]
    # TODO(https://crbug.com/1142991): Update this to represent promises better,
    # rather than just appended as a callback.
    if function.returns_async:
      param_names.append(function.returns_async.name)
    return ', '.join(param_names)

  def _GetNamespace(self):
    """Returns the namespace to be prepended to a top-level typedef.
    For example, it might return "chrome.namespace".
    Also optionally includes the class name if this is in the context
    of outputting the members of a class.
    For example, "chrome.namespace.ClassName.prototype"
    """
    if self._class_name:
      return 'chrome.%s.%s.prototype' % (self._namespace.name, self._class_name)
    return 'chrome.%s' % self._namespace.name
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/graphs.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import string
from king_phisher import color
from king_phisher import ipaddress
from king_phisher import its
from king_phisher import ua_parser
from king_phisher import utilities
from king_phisher.client import client_rpc
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.constants import ColorHexCode
from king_phisher.constants import OSFamily
from boltons import iterutils
from gi.repository import Gtk
from smoke_zephyr.requirements import check_requirements
from smoke_zephyr.utilities import unique
# Probe for a usable matplotlib installation at import time.
try:
	import matplotlib
	# Select the GTK3 Cairo backend before any pyplot use can lock in a default.
	matplotlib.rcParams['backend'] = 'GTK3Cairo'
	from matplotlib import dates
	from matplotlib import patches
	from matplotlib import pyplot
	from matplotlib import ticker
	from matplotlib import lines
	from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
	from matplotlib.backends.backend_gtk3cairo import FigureManagerGTK3Cairo as FigureManager
	from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
except ImportError:
	has_matplotlib = False
	"""Whether the :py:mod:`matplotlib` module is available."""
else:
	# A truthy check_requirements() result is treated as "version requirement
	# not met" and disables matplotlib support. Frozen (packaged) builds skip
	# the version check entirely.
	if not its.frozen and check_requirements(['matplotlib>=1.5.1']):
		has_matplotlib = False
	else:
		has_matplotlib = True
# Probe for a usable basemap toolkit (used for geographic graphs) at import time.
try:
	import mpl_toolkits.basemap
except ImportError:
	has_matplotlib_basemap = False
	"""Whether the :py:mod:`mpl_toolkits.basemap` module is available."""
else:
	# Same convention as the matplotlib probe above: a truthy
	# check_requirements() result disables basemap support; frozen builds
	# skip the version check.
	if not its.frozen and check_requirements(['basemap>=1.0.7']):
		has_matplotlib_basemap = False
	else:
		has_matplotlib_basemap = True
EXPORTED_GRAPHS = {}
MPL_COLOR_NULL = 'darkcyan'
__all__ = ('export_graph_provider', 'get_graph', 'get_graphs', 'CampaignGraph')
def export_graph_provider(cls):
	"""
	Decorator to mark classes as valid graph providers. This decorator also sets
	the :py:attr:`~.CampaignGraph.name` attribute.

	:param class cls: The class to mark as a graph provider.
	:return: The *cls* parameter is returned when the provider is available,
		otherwise None is returned and the provider is not registered.
	:rtype: class
	"""
	if not issubclass(cls, CampaignGraph):
		raise RuntimeError("{0} is not a subclass of CampaignGraph".format(cls.__name__))
	if not cls.is_available:
		# the provider's backend requirements are missing so do not register
		# it; note that the decorated name is bound to None in this case
		return None
	# strip the common 'CampaignGraph' class name prefix to derive the
	# short registration name (was a magic slice of 13 characters)
	graph_name = cls.__name__[len('CampaignGraph'):]
	cls.name = graph_name
	EXPORTED_GRAPHS[graph_name] = cls
	return cls
def get_graph(graph_name):
	"""
	Return the class which provides the graph identified by *graph_name*. The
	class must have been previously registered with
	:py:func:`.export_graph_provider`.

	:param str graph_name: The name of the graph provider.
	:return: The graph provider class.
	:rtype: :py:class:`.CampaignGraph`
	"""
	try:
		return EXPORTED_GRAPHS[graph_name]
	except KeyError:
		return None
def get_graphs():
	"""
	Get a sorted list of the names of all registered graph providers.

	:return: All registered graph providers.
	:rtype: list
	"""
	# iterating a dict yields its keys, so sorted() gives the sorted names
	return sorted(EXPORTED_GRAPHS)
class GraphBase(object):
	"""
	A basic graph provider for using :py:mod:`matplotlib` to create graph
	representations of campaign data. This class is meant to be subclassed
	by real providers.
	"""
	name = 'Unknown'
	"""The name of the graph provider."""
	name_human = 'Unknown'
	"""The human readable name of the graph provider used for UI identification."""
	graph_title = 'Unknown'
	"""The title that will be given to the graph."""
	table_subscriptions = []
	"""A list of tables from which information is needed to produce the graph."""
	# whether this provider's backend requirements are satisfied; subclasses
	# override this (e.g. to require the optional basemap toolkit)
	is_available = True
	def __init__(self, application, size_request=None, style_context=None):
		"""
		:param application: The application instance which owns this graph.
		:param tuple size_request: The size to set for the canvas.
		:param style_context: An optional style context used to resolve theme colors.
		"""
		self.application = application
		self.style_context = style_context
		self.config = application.config
		"""A reference to the King Phisher client configuration."""
		self.figure, _ = pyplot.subplots()
		self.figure.set_facecolor(self.get_color('bg', ColorHexCode.WHITE))
		self.axes = self.figure.get_axes()
		self.canvas = FigureCanvas(self.figure)
		# the figure manager is created lazily by make_window()
		self.manager = None
		self.minimum_size = (380, 200)
		"""An absolute minimum size for the canvas."""
		if size_request is not None:
			self.resize(*size_request)
		# wire up right-click support for the context menu
		self.canvas.mpl_connect('button_press_event', self.mpl_signal_canvas_button_pressed)
		self.canvas.show()
		self.navigation_toolbar = NavigationToolbar(self.canvas, self.application.get_active_window())
		# build the right-click context menu (export / refresh / toolbar toggle)
		self.popup_menu = Gtk.Menu.new()
		menu_item = Gtk.MenuItem.new_with_label('Export')
		menu_item.connect('activate', self.signal_activate_popup_menu_export)
		self.popup_menu.append(menu_item)
		menu_item = Gtk.MenuItem.new_with_label('Refresh')
		menu_item.connect('activate', self.signal_activate_popup_refresh)
		self.popup_menu.append(menu_item)
		menu_item = Gtk.CheckMenuItem.new_with_label('Show Toolbar')
		menu_item.connect('toggled', self.signal_toggled_popup_menu_show_toolbar)
		self._menu_item_show_toolbar = menu_item
		self.popup_menu.append(menu_item)
		self.popup_menu.show_all()
		# the toolbar stays hidden until enabled through the context menu
		self.navigation_toolbar.hide()
		self._legend = None
	@property
	def rpc(self):
		# the client's RPC connection to the King Phisher server
		return self.application.rpc
	@staticmethod
	def _ax_hide_ticks(ax):
		# hide the tick marks on both sides of the y-axis
		for tick in ax.yaxis.get_major_ticks():
			tick.tick1On = False
			tick.tick2On = False
	@staticmethod
	def _ax_set_spine_color(ax, spine_color):
		# apply *spine_color* to all four spines of the axis
		for pos in ('top', 'right', 'bottom', 'left'):
			ax.spines[pos].set_color(spine_color)
	def add_legend_patch(self, legend_rows, fontsize=None):
		"""
		Add a legend of color patches to the figure, removing any previously
		added legend first.

		:param legend_rows: An iterable of (color, label) pairs.
		:param fontsize: An optional font size, defaulting to :py:attr:`.fontsize_scale`.
		"""
		if self._legend is not None:
			self._legend.remove()
			self._legend = None
		fontsize = fontsize or self.fontsize_scale
		legend_bbox = self.figure.legend(
			tuple(patches.Patch(color=patch_color) for patch_color, _ in legend_rows),
			tuple(label for _, label in legend_rows),
			borderaxespad=1.25,
			fontsize=fontsize,
			frameon=True,
			handlelength=1.5,
			handletextpad=0.75,
			labelspacing=0.3,
			loc='lower right'
		)
		legend_bbox.legendPatch.set_linewidth(0)
		self._legend = legend_bbox
	def get_color(self, color_name, default):
		"""
		Get a color by its style name such as 'fg' for foreground. If the
		specified color does not exist, default will be returned. The underlying
		logic for this function is provided by
		:py:func:`~.gui_utilities.gtk_style_context_get_color`.

		:param str color_name: The style name of the color.
		:param default: The default color to return if the specified one was not found.
		:return: The desired color if it was found.
		:rtype: tuple
		"""
		color_name = 'theme_color_graph_' + color_name
		sc_color = gui_utilities.gtk_style_context_get_color(self.style_context, color_name, default)
		# repack into an (red, green, blue) tuple for matplotlib
		return (sc_color.red, sc_color.green, sc_color.blue)
	def make_window(self):
		"""
		Create a window from the figure manager.

		:return: The graph in a new, dedicated window.
		:rtype: :py:class:`Gtk.Window`
		"""
		if self.manager is None:
			# first call: create the manager and adopt its toolbar
			self.manager = FigureManager(self.canvas, 0)
			self.navigation_toolbar.destroy()
			self.navigation_toolbar = self.manager.toolbar
			self._menu_item_show_toolbar.set_active(True)
		window = self.manager.window
		window.set_transient_for(self.application.get_active_window())
		window.set_title(self.graph_title)
		return window
	@property
	def fontsize_scale(self):
		# pick a named matplotlib font size appropriate for the canvas size
		scale = self.markersize_scale
		if scale < 5:
			fontsize = 'xx-small'
		elif scale < 7:
			fontsize = 'x-small'
		elif scale < 9:
			fontsize = 'small'
		else:
			fontsize = 'medium'
		return fontsize
	@property
	def markersize_scale(self):
		# roughly one percent of the primary axis width in pixels
		bbox = self.axes[0].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())
		return bbox.width * self.figure.dpi * 0.01
	def mpl_signal_canvas_button_pressed(self, event):
		# only handle right clicks (button 3) which open the context menu
		if event.button != 3:
			return
		self.popup_menu.popup(None, None, None, None, event.button, Gtk.get_current_event_time())
		return True
	def signal_activate_popup_menu_export(self, action):
		# prompt for a destination and export the figure as a png image
		dialog = extras.FileChooserDialog('Export Graph', self.application.get_active_window())
		file_name = self.config['campaign_name'] + '.png'
		response = dialog.run_quick_save(file_name)
		dialog.destroy()
		if not response:
			# the user cancelled the save dialog
			return
		destination_file = response['target_path']
		self.figure.savefig(destination_file, format='png')
	def signal_activate_popup_refresh(self, event):
		# refresh() is provided by subclasses
		self.refresh()
	def signal_toggled_popup_menu_show_toolbar(self, widget):
		# show or hide the navigation toolbar to match the check menu item
		if widget.get_property('active'):
			self.navigation_toolbar.show()
		else:
			self.navigation_toolbar.hide()
	def resize(self, width=0, height=0):
		"""
		Attempt to resize the canvas. Regardless of the parameters the canvas
		will never be resized to be smaller than :py:attr:`.minimum_size`.

		:param int width: The desired width of the canvas.
		:param int height: The desired height of the canvas.
		"""
		min_width, min_height = self.minimum_size
		width = max(width, min_width)
		height = max(height, min_height)
		self.canvas.set_size_request(width, height)
class CampaignGraph(GraphBase):
	"""
	Graph format used for the graphs generated in the dashboard and
	in the create graphs tab.
	"""
	def _load_graph(self, info_cache):
		# subclasses implement this to draw themselves from the cached tables
		raise NotImplementedError()
	def load_graph(self):
		"""Load the graph information via :py:meth:`.refresh`."""
		self.refresh()
	def refresh(self, info_cache=None, stop_event=None):
		"""
		Refresh the graph data by retrieving the information from the
		remote server.

		:param dict info_cache: An optional cache of data tables.
		:param stop_event: An optional object indicating that the operation should stop.
		:type stop_event: :py:class:`threading.Event`
		:return: A dictionary of cached tables from the server.
		:rtype: dict
		"""
		info_cache = (info_cache or {})
		if not self.rpc:
			# no server connection is available so nothing can be fetched
			return info_cache
		# fetch any subscribed tables which are not already cached
		for table in self.table_subscriptions:
			if stop_event and stop_event.is_set():
				return info_cache
			if not table in info_cache:
				query_filter = None
				# restrict the query to the current campaign when supported
				if 'campaign_id' in client_rpc.database_table_objects[table].__slots__:
					query_filter = {'campaign_id': self.config['campaign_id']}
				info_cache[table] = tuple(self.rpc.remote_table(table, query_filter=query_filter))
		# clear the previous rendering before drawing the new data
		for ax in self.axes:
			ax.clear()
		if self._legend is not None:
			self._legend.remove()
			self._legend = None
		self._load_graph(info_cache)
		self.figure.suptitle(
			self.graph_title,
			color=self.get_color('fg', ColorHexCode.BLACK),
			size=14,
			weight='bold',
			y=0.97
		)
		self.canvas.draw()
		return info_cache
class CampaignBarGraph(CampaignGraph):
	"""A base class for campaign graphs rendered as horizontal bar charts."""
	# format string applied to the value labels shown beside each bar
	yticklabel_fmt = "{0:,}"
	def __init__(self, *args, **kwargs):
		super(CampaignBarGraph, self).__init__(*args, **kwargs)
		self.figure.subplots_adjust(top=0.85, right=0.85, bottom=0.05, left=0.225)
		ax = self.axes[0]
		ax.tick_params(
			axis='both',
			top='off',
			right='off',
			bottom='off',
			left='off',
			labelbottom='off'
		)
		# draw the first bar at the top instead of the bottom
		ax.invert_yaxis()
		# the twin axis carries the per-bar value labels on the right side
		self.axes.append(ax.twinx())
	def _barh(self, ax, bars, height, max_bars=None):
		# NOTE(review): max_bars is unused here; it appears to exist only for
		# interface symmetry with graph_bar -- confirm before removing
		# define the necessary colors
		color_bg = self.get_color('bg', ColorHexCode.WHITE)
		color_bar_bg = self.get_color('bar_bg', ColorHexCode.GRAY)
		color_bar_fg = self.get_color('bar_fg', ColorHexCode.BLACK)
		ax.set_axis_bgcolor(color_bg)
		# grow the canvas with the number of bars
		self.resize(height=60 + 20 * len(bars))
		# draw the foreground / filled bar
		bar_container = ax.barh(
			range(len(bars)),
			bars,
			height=height,
			color=color_bar_fg,
			linewidth=0
		)
		# draw the background / unfilled bar
		largest_bar = (max(bars) if len(bars) else 0)
		ax.barh(
			range(len(bars)),
			[largest_bar - bar for bar in bars],
			left=bars,
			height=height,
			color=color_bar_bg,
			linewidth=0
		)
		return bar_container
	def _load_graph(self, info_cache):
		raise NotImplementedError()
	def _graph_null_bar(self, title):
		# draw a single empty bar, used when there is no data to display
		return self.graph_bar([0], 1, [''], xlabel=title)
	def graph_bar(self, bars, max_bars, yticklabels, xlabel=None):
		"""
		Create a horizontal bar graph with better defaults for the standard use
		cases.

		:param list bars: The values of the bars to graph.
		:param int max_bars: The number to treat as the logical maximum number of plotted bars.
		:param list yticklabels: The labels to use on the y-axis.
		:param str xlabel: The label to give to the x-axis.
		:return: The bars created using :py:mod:`matplotlib`
		:rtype: `matplotlib.container.BarContainer`
		"""
		height = 0.275
		color_bg = self.get_color('bg', ColorHexCode.WHITE)
		color_fg = self.get_color('fg', ColorHexCode.BLACK)
		ax1, ax2 = self.axes  # primary axis
		bar_container = self._barh(ax1, bars, height, max_bars)
		# center the tick labels vertically on the bars
		yticks = [float(y) + (height / 2) for y in range(len(bars))]
		# this makes the top bar shorter than the rest
		# ax1.set_ybound(0, max(len(bars), max_bars))
		ax1.set_yticks(yticks)
		ax1.set_yticklabels(yticklabels, color=color_fg, size=10)
		ax2.set_yticks(yticks)
		# show the formatted value of each bar on the twin axis
		ax2.set_yticklabels([self.yticklabel_fmt.format(bar) for bar in bars], color=color_fg, size=12)
		ax2.set_ylim(ax1.get_ylim())
		# remove the y-axis tick marks
		self._ax_hide_ticks(ax1)
		self._ax_hide_ticks(ax2)
		self._ax_set_spine_color(ax1, color_bg)
		self._ax_set_spine_color(ax2, color_bg)
		if xlabel:
			ax1.set_xlabel(xlabel, color=color_fg, size=12)
		return bar_container
class CampaignLineGraph(CampaignGraph):
	"""A base class for campaign graphs rendered as line plots."""
	# the redundant __init__ override which only forwarded to super() was
	# removed; construction is inherited unchanged from CampaignGraph
	def _load_graph(self, info_cache):
		# subclasses implement this to draw themselves from the cached tables
		raise NotImplementedError()
class CampaignPieGraph(CampaignGraph):
	"""A base class for campaign graphs rendered as pie charts."""
	def __init__(self, *args, **kwargs):
		super(CampaignPieGraph, self).__init__(*args, **kwargs)
		self.figure.subplots_adjust(top=0.85, right=0.75, bottom=0.05, left=0.05)
	def _load_graph(self, info_cache):
		raise NotImplementedError()
	def _graph_null_pie(self, title):
		# draw a single full wedge, used when there is no data to display
		ax = self.axes[0]
		ax.pie(
			(100,),
			autopct='%1.0f%%',
			colors=(self.get_color('pie_low', ColorHexCode.GRAY),),
			labels=(title,),
			shadow=True,
			startangle=225,
			textprops={'color': self.get_color('fg', ColorHexCode.BLACK)}
		)
		# keep the pie circular regardless of the canvas aspect ratio
		ax.axis('equal')
		return
	def graph_pie(self, parts, autopct=None, labels=None, legend_labels=None):
		"""
		Draw a pie graph of *parts*.

		:param parts: The sizes of the wedges to draw.
		:param autopct: Optional format string or callable for wedge value labels.
		:param labels: Optional wedge labels, defaulting to each part rendered as a percentage.
		:param legend_labels: Optional labels used to also add a legend to the figure.
		:return: The value returned by matplotlib's pie function.
		"""
		# shade the wedges from the low to the high theme color
		colors = color.get_scale(
			self.get_color('pie_low', ColorHexCode.BLACK),
			self.get_color('pie_high', ColorHexCode.GRAY),
			len(parts),
			ascending=False
		)
		ax = self.axes[0]
		pie = ax.pie(
			parts,
			autopct=autopct,
			colors=colors,
			explode=[0.1] + ([0] * (len(parts) - 1)),  # offset only the first wedge
			labels=labels or tuple("{0:.1f}%".format(p) for p in parts),
			labeldistance=1.15,
			shadow=True,
			startangle=45,
			textprops={'color': self.get_color('fg', ColorHexCode.BLACK)},
			wedgeprops={'linewidth': 0}
		)
		ax.axis('equal')
		if legend_labels is not None:
			self.add_legend_patch(tuple(zip(colors, legend_labels)), fontsize='x-small')
		return pie
@export_graph_provider
class CampaignGraphDepartmentComparison(CampaignBarGraph):
	"""Display a graph which compares the different departments."""
	graph_title = 'Department Comparison'
	name_human = 'Bar - Department Comparison'
	table_subscriptions = ('company_departments', 'messages', 'visits')
	# the bars are percentages, so format the value labels accordingly
	yticklabel_fmt = "{0:.01f}%"
	def _load_graph(self, info_cache):
		departments = info_cache['company_departments']
		# map department ids to their human readable names
		departments = dict((department.id, department.name) for department in departments)
		messages = info_cache['messages']
		# map message ids to the recipient's department name, skipping
		# messages which are not associated with any department
		message_departments = dict((message.id, departments[message.company_department_id]) for message in messages if message.company_department_id is not None)
		if not len(message_departments):
			self._graph_null_bar('')
			return
		messages = [message for message in messages if message.id in message_departments]
		visits = info_cache['visits']
		visits = [visit for visit in visits if visit.message_id in message_departments]
		# count at most one visit per message
		visits = unique(visits, key=lambda visit: visit.message_id)
		department_visits = collections.Counter()
		department_visits.update(message_departments[visit.message_id] for visit in visits)
		department_totals = collections.Counter()
		department_totals.update(message_departments[message.id] for message in messages)
		# score each department as the percentage of its messages which were visited
		department_scores = dict((department, (float(department_visits[department]) / float(total)) * 100) for department, total in department_totals.items())
		# sort by score (then name) so the highest scoring department comes first
		department_scores = sorted(department_scores.items(), key=lambda x: (x[1], x[0]), reverse=True)
		department_scores = collections.OrderedDict(department_scores)
		yticklabels, bars = zip(*department_scores.items())
		self.graph_bar(bars, len(yticklabels), yticklabels)
		return
@export_graph_provider
class CampaignGraphOverview(CampaignBarGraph):
	"""Display a graph which represents an overview of the campaign."""
	graph_title = 'Campaign Overview'
	name_human = 'Bar - Campaign Overview'
	table_subscriptions = ('credentials', 'visits')
	def _load_graph(self, info_cache):
		rpc = self.rpc
		visits = info_cache['visits']
		creds = info_cache['credentials']
		# count the messages server side instead of fetching the whole table
		messages_count = rpc('db/table/count', 'messages', query_filter={'campaign_id': self.config['campaign_id']})
		messages_not_opened = rpc('db/table/count', 'messages', query_filter={'campaign_id': self.config['campaign_id'], 'opened': None})
		bars = []
		bars.append(messages_count)
		bars.append(messages_count - messages_not_opened)
		bars.append(len(visits))
		# unique visits / credentials are counted per originating message
		bars.append(len(unique(visits, key=lambda visit: visit.message_id)))
		# only include the credential bars when credentials were collected
		if len(creds):
			bars.append(len(creds))
			bars.append(len(unique(creds, key=lambda cred: cred.message_id)))
		yticklabels = ('Messages', 'Opened', 'Visits', 'Unique\nVisits', 'Credentials', 'Unique\nCredentials')
		# trim the labels to match the number of bars actually present
		self.graph_bar(bars, len(yticklabels), yticklabels[:len(bars)])
		return
@export_graph_provider
class CampaignGraphVisitorInfo(CampaignBarGraph):
	"""Display a graph which shows the different operating systems seen from visitors."""
	graph_title = 'Campaign Visitor OS Information'
	name_human = 'Bar - Visitor OS Information'
	table_subscriptions = ('visits',)
	def _load_graph(self, info_cache):
		visits = info_cache['visits']
		operating_systems = collections.Counter()
		for visit in visits:
			user_agent = None
			if visit.visitor_details:
				user_agent = ua_parser.parse_user_agent(visit.visitor_details)
			# tally visits whose user agent could not be parsed as 'Unknown OS'
			operating_systems.update([user_agent.os_name if user_agent and user_agent.os_name else 'Unknown OS'])
		os_names = sorted(operating_systems.keys())
		bars = [operating_systems[os_name] for os_name in os_names]
		self.graph_bar(bars, len(OSFamily), os_names)
		return
@export_graph_provider
class CampaignGraphVisitorInfoPie(CampaignPieGraph):
	"""Display a graph which compares the different operating systems seen from visitors."""
	graph_title = 'Campaign Visitor OS Information'
	name_human = 'Pie - Visitor OS Information'
	table_subscriptions = ('visits',)
	def _load_graph(self, info_cache):
		visits = info_cache['visits']
		if not len(visits):
			self._graph_null_pie('No Visitor Information')
			return
		operating_systems = collections.Counter()
		for visit in visits:
			ua = ua_parser.parse_user_agent(visit.visitor_details)
			# parses as (ua.os_name or 'Unknown OS') when ua is truthy
			operating_systems.update([ua.os_name or 'Unknown OS' if ua else 'Unknown OS'])
		# sort by occurrence count, most common first
		(os_names, count) = tuple(zip(*reversed(sorted(operating_systems.items(), key=lambda item: item[1]))))
		# the wedge labels show the counts while the legend shows the OS names
		self.graph_pie(count, labels=tuple("{0:,}".format(os) for os in count), legend_labels=os_names)
		return
@export_graph_provider
class CampaignGraphVisitsTimeline(CampaignLineGraph):
	"""Display a graph which represents the visits of a campaign over time."""
	graph_title = 'Campaign Visits Timeline'
	name_human = 'Line - Visits Timeline'
	table_subscriptions = ('visits',)
	def _load_graph(self, info_cache):
		# define the necessary colors
		color_bg = self.get_color('bg', ColorHexCode.WHITE)
		color_fg = self.get_color('fg', ColorHexCode.BLACK)
		color_line_bg = self.get_color('line_bg', ColorHexCode.WHITE)
		color_line_fg = self.get_color('line_fg', ColorHexCode.BLACK)
		visits = info_cache['visits']
		# convert the visit timestamps from UTC into the local timezone
		first_visits = [utilities.datetime_utc_to_local(visit.first_visit) for visit in visits]
		ax = self.axes[0]
		ax.tick_params(
			axis='both',
			which='both',
			colors=color_fg,
			top='off',
			bottom='off'
		)
		ax.set_axis_bgcolor(color_line_bg)
		ax.set_ylabel('Number of Visits', color=self.get_color('fg', ColorHexCode.WHITE), size=10)
		self._ax_hide_ticks(ax)
		self._ax_set_spine_color(ax, color_bg)
		if not len(first_visits):
			# no visits yet, draw an empty plot
			ax.set_yticks((0,))
			ax.set_xticks((0,))
			return
		first_visits.sort()
		# plot the cumulative number of visits over time
		ax.plot_date(
			first_visits,
			range(1, len(first_visits) + 1),
			'-',
			color=color_line_fg,
			linewidth=6
		)
		self.figure.autofmt_xdate()
		self.figure.subplots_adjust(top=0.85, right=0.95, bottom=0.25, left=0.1)
		# let matplotlib choose sensible date tick positions and labels
		locator = dates.AutoDateLocator()
		ax.xaxis.set_major_locator(locator)
		ax.xaxis.set_major_formatter(dates.AutoDateFormatter(locator))
		return
@export_graph_provider
class CampaignGraphMessageResults(CampaignPieGraph):
	"""Display the percentage of messages which resulted in a visit."""
	graph_title = 'Campaign Message Results'
	name_human = 'Pie - Message Results'
	table_subscriptions = ('credentials', 'visits')
	def _load_graph(self, info_cache):
		rpc = self.rpc
		messages_count = rpc('db/table/count', 'messages', query_filter={'campaign_id': self.config['campaign_id']})
		if not messages_count:
			self._graph_null_pie('No Messages Sent')
			return
		# count each message at most once for both visits and credentials
		visits_count = len(unique(info_cache['visits'], key=lambda visit: visit.message_id))
		credentials_count = len(unique(info_cache['credentials'], key=lambda cred: cred.message_id))
		# sanity check: credentials imply a visit which implies a message
		if not credentials_count <= visits_count <= messages_count:
			raise ValueError('credential visit and message counts are inconsistent')
		labels = ['Without Visit', 'With Visit', 'With Credentials']
		sizes = []
		sizes.append((float(messages_count - visits_count) / float(messages_count)) * 100)
		sizes.append((float(visits_count - credentials_count) / float(messages_count)) * 100)
		sizes.append((float(credentials_count) / float(messages_count)) * 100)
		# drop the empty trailing wedges so they are not drawn
		if not credentials_count:
			labels.pop()
			sizes.pop()
		if not visits_count:
			labels.pop()
			sizes.pop()
		self.graph_pie(sizes, legend_labels=labels)
		return
class CampaignGraphVisitsMap(CampaignGraph):
	"""A base class to display a map which shows the locations of visit origins."""
	graph_title = 'Campaign Visit Locations'
	table_subscriptions = ('credentials', 'visits')
	# this provider requires the optional basemap toolkit
	is_available = has_matplotlib_basemap
	# whether to draw US state boundaries; enabled by the USA subclass
	draw_states = False
	def _load_graph(self, info_cache):
		# count each message at most once regardless of repeat visits
		visits = unique(info_cache['visits'], key=lambda visit: visit.message_id)
		# collect the message ids which submitted credentials, then convert
		# them to the originating visitor ip addresses
		cred_ips = set(cred.message_id for cred in info_cache['credentials'])
		cred_ips = set([visit.visitor_ip for visit in visits if visit.message_id in cred_ips])
		color_fg = self.get_color('fg', ColorHexCode.BLACK)
		color_land = self.get_color('map_land', ColorHexCode.GRAY)
		color_water = self.get_color('map_water', ColorHexCode.WHITE)
		ax = self.axes[0]
		# basemap_args is supplied by subclasses to pick the projection / region
		bm = mpl_toolkits.basemap.Basemap(resolution='c', ax=ax, **self.basemap_args)
		if self.draw_states:
			bm.drawstates()
		bm.drawcoastlines()
		bm.drawcountries()
		bm.fillcontinents(color=color_land, lake_color=color_water)
		# draw and recolor the latitude / longitude grid lines
		parallels = bm.drawparallels(
			(-60, -30, 0, 30, 60),
			labels=(1, 1, 0, 0)
		)
		self._map_set_line_color(parallels, color_fg)
		meridians = bm.drawmeridians(
			(0, 90, 180, 270),
			labels=(0, 0, 0, 1)
		)
		self._map_set_line_color(meridians, color_fg)
		bm.drawmapboundary(
			fill_color=color_water,
			linewidth=0
		)
		if not visits:
			return
		# tally the number of visits per visitor ip address
		ctr = collections.Counter()
		ctr.update([visit.visitor_ip for visit in visits])
		# clamp the base marker size into a sane range for the current canvas
		base_markersize = self.markersize_scale
		base_markersize = max(base_markersize, 3.05)
		base_markersize = min(base_markersize, 9)
		self._plot_visitor_map_points(bm, ctr, base_markersize, cred_ips)
		self.add_legend_patch(((self.color_with_creds, 'With Credentials'), (self.color_without_creds, 'Without Credentials')))
		return
	def _resolve_geolocations(self, all_ips):
		"""Look up the geographic locations of the public addresses in *all_ips*."""
		geo_locations = {}
		public_ips = []
		for visitor_ip in all_ips:
			ip = ipaddress.ip_address(visitor_ip)
			# private and loopback addresses can not be geolocated
			if ip.is_private or ip.is_loopback:
				continue
			public_ips.append(visitor_ip)
		public_ips.sort()
		# resolve the addresses through the server in batches of 100
		for ip_chunk in iterutils.chunked(public_ips, 100):
			geo_locations.update(self.rpc.geoip_lookup_multi(ip_chunk))
		return geo_locations
	def _plot_visitor_map_points(self, bm, ctr, base_markersize, cred_ips):
		# place one marker per visitor ip, scaled by its share of occurrences
		o_high = float(max(ctr.values()))
		o_low = float(min(ctr.values()))
		color_with_creds = self.color_with_creds
		color_without_creds = self.color_without_creds
		geo_locations = self._resolve_geolocations(ctr.keys())
		for visitor_ip, geo_location in geo_locations.items():
			if not (geo_location.coordinates.longitude and geo_location.coordinates.latitude):
				continue
			occurrences = ctr[visitor_ip]
			# project the coordinates into the map's coordinate system
			pts = bm(geo_location.coordinates.longitude, geo_location.coordinates.latitude)
			if o_high == o_low:
				markersize = 2.0
			else:
				# scale between 1x and 2x of the base marker size
				markersize = 1.0 + (float(occurrences) - o_low) / (o_high - o_low)
			markersize = markersize * base_markersize
			bm.plot(
				pts[0],
				pts[1],
				'o',
				markeredgewidth=0,
				markerfacecolor=(color_with_creds if visitor_ip in cred_ips else color_without_creds),
				markersize=markersize
			)
		return
	def _map_set_line_color(self, map_lines, line_color):
		# recolor both the grid lines and their text labels
		for sub_lines, texts in map_lines.values():
			for line in sub_lines:
				line.set_color(line_color)
			for text in texts:
				text.set_color(line_color)
	@property
	def color_with_creds(self):
		# marker color for visitors which submitted credentials
		return self.get_color('map_marker1', ColorHexCode.RED)
	@property
	def color_without_creds(self):
		# marker color for visitors which did not submit credentials
		return self.get_color('map_marker2', ColorHexCode.YELLOW)
@export_graph_provider
class CampaignGraphVisitsMapUSA(CampaignGraphVisitsMap):
	"""Display a map of the USA which shows the locations of visit origins."""
	name_human = 'Map - Visit Locations (USA)'
	draw_states = True
	# Lambert conformal projection cropped to the continental United States
	basemap_args = dict(projection='lcc', lat_1=30, lon_0=-90, llcrnrlon=-122.5, llcrnrlat=12.5, urcrnrlon=-45, urcrnrlat=50)
@export_graph_provider
class CampaignGraphVisitsMapWorld(CampaignGraphVisitsMap):
	"""Display a map of the world which shows the locations of visit origins."""
	name_human = 'Map - Visit Locations (World)'
	# Kavrayskiy VII projection centered on the prime meridian
	basemap_args = dict(projection='kav7', lon_0=0)
@export_graph_provider
class CampaignGraphPasswordComplexityPie(CampaignPieGraph):
	"""Display a graph which displays the number of passwords which meet standard complexity requirements."""
	graph_title = 'Campaign Password Complexity'
	name_human = 'Pie - Password Complexity'
	table_subscriptions = ('credentials',)
	def _load_graph(self, info_cache):
		# evaluate each distinct password once
		passwords = set(cred.password for cred in info_cache['credentials'])
		if not passwords:
			self._graph_null_pie('No Credential Information')
			return
		tally = collections.Counter(self._check_complexity(password) for password in passwords)
		self.graph_pie((tally[True], tally[False]), autopct='%1.1f%%', legend_labels=('Complex', 'Not Complex'))
		return
	def _check_complexity(self, password):
		# a password is complex when it is at least 8 characters long and draws
		# from at least three of the four standard character classes
		if len(password) < 8:
			return False
		character_classes = (string.ascii_uppercase, string.ascii_lowercase, string.digits, string.punctuation)
		classes_met = sum(1 for char_set in character_classes if any(char in char_set for char in password))
		return classes_met >= 3
class CampaignCompGraph(GraphBase):
	"""Display selected campaigns data by order of campaign start date."""
	graph_title = 'Campaign Comparison Graph'
	name_human = 'Graph'
	def __init__(self, *args, **kwargs):
		super(CampaignCompGraph, self).__init__(*args, **kwargs)
		ax = self.axes[0]
		# the second (twin) y-axis carries the absolute message counts
		self.axes.append(ax.twinx())
		ax2 = self.axes[1]
		self._config_axes(ax, ax2)
		# the campaign ids of the most recently loaded comparison
		self._campaigns = []
	def _calc(self, stats, key, comp_key='messages'):
		# percentage of stats[key] relative to stats[comp_key], guarding
		# against division by zero
		return 0 if stats[comp_key] == 0 else (float(stats[key]) / stats[comp_key]) * 100
	def _config_axes(self, ax, ax2):
		# define the necessary colors
		color_bg = self.get_color('bg', ColorHexCode.WHITE)
		color_fg = self.get_color('fg', ColorHexCode.BLACK)
		color_line_bg = self.get_color('line_bg', ColorHexCode.WHITE)
		ax.tick_params(
			axis='both',
			which='both',
			colors=color_fg,
			top='off',
			bottom='off'
		)
		ax2.tick_params(
			axis='both',
			which='both',
			colors=color_fg,
			top='off',
			bottom='off'
		)
		ax.set_axis_bgcolor(color_line_bg)
		ax2.set_axis_bgcolor(color_line_bg)
		title = pyplot.title('Campaign Comparison', color=color_fg, size=self.markersize_scale * 1.75, loc='left')
		title.set_position([0.075, 1.05])
		ax.set_ylabel('Percent Visits/Credentials', color=color_fg, size=self.markersize_scale * 1.5)
		ax.set_xlabel('Campaign Name', color=color_fg, size=self.markersize_scale * 1.5)
		self._ax_hide_ticks(ax)
		self._ax_hide_ticks(ax2)
		ax2.set_ylabel('Messages', color=color_fg, size=self.markersize_scale * 1.25, rotation=270, labelpad=20)
		self._ax_set_spine_color(ax, color_bg)
		self._ax_set_spine_color(ax2, color_bg)
		# message counts are whole numbers so restrict ticks to integers
		ax2.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
		ax.tick_params(axis='x', labelsize=10, pad=5)
	def load_graph(self, campaigns):
		"""
		Load the information to compare the specified and paint it to the
		canvas. Campaigns are graphed on the X-axis in the order that they are
		provided. No sorting of campaigns is done by this method.

		:param tuple campaigns: A tuple containing campaign IDs to compare.
		"""
		ax = self.axes[0]
		ax2 = self.axes[1]
		ax.clear()
		ax2.clear()
		self._config_axes(ax, ax2)
		rpc = self.rpc
		# shorten long campaign names so the tick labels stay readable
		ellipsize = lambda text: (text if len(text) < 20 else text[:17] + '...')
		visits_line_color = self.get_color('line_fg', ColorHexCode.RED)
		creds_line_color = self.get_color('map_marker1', ColorHexCode.BLACK)
		messages_color = '#046D8B'
		trained_color = '#77c67f'
		ax.grid(True)
		ax.set_xticks(range(len(campaigns)))
		ax.set_xticklabels([ellipsize(rpc.remote_table_row('campaigns', cid).name) for cid in campaigns])
		for tick in ax.xaxis.get_major_ticks():
			tick.label.set_fontsize(self.markersize_scale * 1.25)
		labels = ax.get_xticklabels()
		pyplot.setp(labels, rotation=15)
		# remember the ids so refresh() can reload the same comparison
		self._campaigns = campaigns
		# rebind campaigns from ids to their statistics dictionaries
		campaigns = [rpc('/campaign/stats', cid) for cid in campaigns]
		ax2.plot([stats['messages'] for stats in campaigns], label='Messages', color=messages_color, lw=3)
		# only plot the optional series when they contain any data
		if sum(stats['messages-trained'] for stats in campaigns):
			ax.plot([self._calc(stats, 'messages-trained', 'visits-unique') for stats in campaigns], label='Trained (Visited)', color=trained_color, lw=3)
			ax.plot([self._calc(stats, 'messages-trained') for stats in campaigns], label='Trained (All)', color=trained_color, lw=3, ls='dashed')
		ax.plot([self._calc(stats, 'visits') for stats in campaigns], label='Visits', color=visits_line_color, lw=3)
		ax.plot([self._calc(stats, 'visits-unique') for stats in campaigns], label='Unique Visits', color=visits_line_color, lw=3, ls='dashed')
		if sum(stats['credentials'] for stats in campaigns):
			ax.plot([self._calc(stats, 'credentials') for stats in campaigns], label='Credentials', color=creds_line_color, lw=3)
			ax.plot([self._calc(stats, 'credentials-unique') for stats in campaigns], label='Unique Credentials', color=creds_line_color, lw=3, ls='dashed')
		# the primary axis shows percentages, so fix its range to 0-100
		ax.set_ylim((0, 100))
		ax2.set_ylim(bottom=0)
		# widen the canvas as more campaigns are added to the comparison
		self.canvas.set_size_request(500 + 50 * (len(campaigns) - 1), 500)
		legend_patch = [
			(visits_line_color, 'solid', 'Visits'),
			(visits_line_color, 'dotted', 'Unique Visits')
		]
		if sum(stats['credentials'] for stats in campaigns):
			legend_patch.extend([
				(creds_line_color, 'solid', 'Credentials'),
				(creds_line_color, 'dotted', 'Unique Credentials')
			])
		if sum(stats['messages-trained'] for stats in campaigns):
			legend_patch.extend([
				(trained_color, 'solid', 'Trained (Visited)'),
				(trained_color, 'dotted', 'Trained (All)')
			])
		legend_patch.append(
			(messages_color, 'solid', 'Messages')
		)
		self.add_legend_patch(legend_patch)
		pyplot.tight_layout()
	def add_legend_patch(self, legend_rows, fontsize=None):
		"""
		Add a legend of line samples to the figure. Unlike the base class
		implementation, *legend_rows* contains (color, line-style, label)
		triples; *fontsize* is accepted for interface compatibility but the
		scaled default is always used.
		"""
		if self._legend is not None:
			self._legend.remove()
			self._legend = None
		legend_bbox = self.figure.legend(
			tuple(lines.Line2D([], [], color=patch_color, lw=3, ls=style) for patch_color, style, _ in legend_rows),
			tuple(label for _, _, label in legend_rows),
			borderaxespad=1,
			columnspacing=1.5,
			fontsize=self.fontsize_scale,
			ncol=3,
			frameon=True,
			handlelength=2,
			handletextpad=0.5,
			labelspacing=0.5,
			loc='upper right'
		)
		legend_bbox.get_frame().set_facecolor(self.get_color('line_bg', ColorHexCode.GRAY))
		for text in legend_bbox.get_texts():
			text.set_color('white')
		legend_bbox.legendPatch.set_linewidth(0)
		self._legend = legend_bbox
	def refresh(self):
		"""Redraw the comparison using the last loaded campaign ids."""
		self.load_graph(self._campaigns)
|
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Blink.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
# Paths excluded from the canned PanProjectChecks (currently none).
_EXCLUDED_PATHS = ()
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckForVersionControlConflicts(input_api, output_api):
  """Usually this is not intentional and will cause a compile failure."""
  errors = []
  for affected_file in input_api.AffectedFiles():
    errors.extend(
        _CheckForVersionControlConflictsInFile(input_api, affected_file))
  if not errors:
    return []
  return [output_api.PresubmitError(
      'Version control conflict markers found, please resolve.', errors)]
def _CheckWatchlist(input_api, output_api):
  """Check that the WATCHLIST file parses correctly."""
  errors = []
  for f in input_api.AffectedFiles():
    if f.LocalPath() != 'WATCHLISTS':
      continue
    # Deferred imports: these are only needed when the WATCHLISTS file is
    # actually part of the change.
    import StringIO
    import logging
    import watchlists
    # Capture everything the watchlists parser logs so problems can be
    # reported as presubmit errors instead of reaching the console.
    log_buffer = StringIO.StringIO()
    log_handler = logging.StreamHandler(log_buffer)
    log_handler.setFormatter(
        logging.Formatter('%(levelname)s: %(message)s'))
    logger = logging.getLogger()
    logger.addHandler(log_handler)
    # Constructing Watchlists parses the file; the instance itself is unused
    # and the assignment exists only for the side effect.
    wl = watchlists.Watchlists(input_api.change.RepositoryRoot())
    logger.removeHandler(log_handler)
    log_handler.flush()
    log_buffer.flush()
    # Any captured log output indicates a parse problem.
    if log_buffer.getvalue():
      errors.append(output_api.PresubmitError(
          'Cannot parse WATCHLISTS file, please resolve.',
          log_buffer.getvalue().splitlines()))
  # Note: despite the name, this is a list of presubmit result objects.
  return errors
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # We should figure out what license checks we actually want to use.
  license_header = r'.*'
  results = list(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS,
      maxlen=800, license_header=license_header))
  # Run the Blink specific checks in order, collecting all of the results.
  for check in (
      _CheckForVersionControlConflicts,
      _CheckPatchFiles,
      _CheckTestExpectations,
      _CheckUnwantedDependencies,
      _CheckChromiumPlatformMacros,
      _CheckWatchlist,
      _CheckFilePermissions,
  ):
    results.extend(check(input_api, output_api))
  return results
def _CheckSubversionConfig(input_api, output_api):
"""Verifies the subversion config file is correctly setup.
Checks that autoprops are enabled, returns an error otherwise.
"""
join = input_api.os_path.join
if input_api.platform == 'win32':
appdata = input_api.environ.get('APPDATA', '')
if not appdata:
return [output_api.PresubmitError('%APPDATA% is not configured.')]
path = join(appdata, 'Subversion', 'config')
else:
home = input_api.environ.get('HOME', '')
if not home:
return [output_api.PresubmitError('$HOME is not configured.')]
path = join(home, '.subversion', 'config')
error_msg = (
'Please look at http://dev.chromium.org/developers/coding-style to\n'
'configure your subversion configuration file. This enables automatic\n'
'properties to simplify the project maintenance.\n'
'Pro-tip: just download and install\n'
'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
try:
lines = open(path, 'r').read().splitlines()
# Make sure auto-props is enabled and check for 2 Chromium standard
# auto-prop.
if (not '*.cc = svn:eol-style=LF' in lines or
not '*.pdf = svn:mime-type=application/pdf' in lines or
not 'enable-auto-props = yes' in lines):
return [
output_api.PresubmitNotifyResult(
'It looks like you have not configured your subversion config '
'file or it is not up-to-date.\n' + error_msg)
]
except (OSError, IOError):
return [
output_api.PresubmitNotifyResult(
'Can\'t find your subversion config file.\n' + error_msg)
]
return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _CheckTestExpectations(input_api, output_api):
    # Only worth linting when something under LayoutTests changed.
    touched = [affected.LocalPath() for affected in input_api.AffectedFiles()]
    if not any(path.startswith('LayoutTests') for path in touched):
        return []
    lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
                                       'Tools', 'Scripts',
                                       'lint-test-expectations')
    linter = input_api.subprocess.Popen(
        [input_api.python_executable, lint_path],
        stdout=input_api.subprocess.PIPE,
        stderr=input_api.subprocess.PIPE)
    _, errs = linter.communicate()
    # The linter reports its status on stderr; silence means it broke.
    if not errs:
        return [output_api.PresubmitError(
            "lint-test-expectations failed "
            "to produce output; check by hand. ")]
    if errs.strip() != 'Lint succeeded.':
        return [output_api.PresubmitError(errs)]
    return []
def _CheckStyle(input_api, output_api):
    # Run check-webkit-style over the changed files only.
    checker_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
                                          'Tools', 'Scripts',
                                          'check-webkit-style')
    command = [input_api.python_executable, checker_path, '--diff-files']
    command += [affected.LocalPath() for affected in input_api.AffectedFiles()]
    findings = []
    try:
        child = input_api.subprocess.Popen(command,
                                           stderr=input_api.subprocess.PIPE)
        _, stderrdata = child.communicate()
        if child.returncode != 0:
            findings.append(output_api.PresubmitError(
                'check-webkit-style failed', [stderrdata]))
    except Exception as e:
        # Failure to launch the checker is only a notification, not an error.
        findings.append(output_api.PresubmitNotifyResult(
            'Could not run check-webkit-style', [str(e)]))
    return findings
def _CheckUnwantedDependencies(input_api, output_api):
    """Runs checkdeps on #include statements added in this
    change. Breaking - rules is an error, breaking ! rules is a
    warning.
    """
    # We need to wait until we have an input_api object and use this
    # roundabout construct to import checkdeps because this file is
    # eval-ed and thus doesn't have __file__.
    # NOTE(review): relies on a module-level 'import sys' that is outside
    # this chunk -- confirm it exists at the top of the file.
    original_sys_path = sys.path
    try:
        sys.path = sys.path + [input_api.os_path.realpath(input_api.os_path.join(
            input_api.PresubmitLocalPath(), '..', '..', 'buildtools', 'checkdeps'))]
        import checkdeps
        from cpp_checker import CppChecker
        from rules import Rule
    finally:
        # Restore sys.path to what it was before.
        sys.path = original_sys_path
    # Gather (path, changed lines) pairs for every affected C++ file;
    # checkdeps inspects the added #include lines.
    added_includes = []
    for f in input_api.AffectedFiles():
        if not CppChecker.IsCppFile(f.LocalPath()):
            continue
        changed_lines = [line for line_num, line in f.ChangedContents()]
        added_includes.append([f.LocalPath(), changed_lines])
    deps_checker = checkdeps.DepsChecker(
        input_api.os_path.join(input_api.PresubmitLocalPath()))
    # DISALLOW ('-') rules are hard errors; everything else ('!') is a warning.
    error_descriptions = []
    warning_descriptions = []
    for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
            added_includes):
        description_with_path = '%s\n %s' % (path, rule_description)
        if rule_type == Rule.DISALLOW:
            error_descriptions.append(description_with_path)
        else:
            warning_descriptions.append(description_with_path)
    results = []
    if error_descriptions:
        results.append(output_api.PresubmitError(
            'You added one or more #includes that violate checkdeps rules.',
            error_descriptions))
    if warning_descriptions:
        results.append(output_api.PresubmitPromptOrNotify(
            'You added one or more #includes of files that are temporarily\n'
            'allowed but being removed. Can you avoid introducing the\n'
            '#include? See relevant DEPS file(s) for details and contacts.',
            warning_descriptions))
    return results
def _CheckChromiumPlatformMacros(input_api, output_api, source_file_filter=None):
    """Ensures that Blink code uses WTF's platform macros instead of
    Chromium's. Using the latter has resulted in at least one subtle
    build breakage."""
    os_macro_re = input_api.re.compile(r'^\s*#(el)?if.*\bOS_')
    violations = input_api.canned_checks._FindNewViolationsOfRule(
        lambda _, line: not os_macro_re.search(line),
        input_api, source_file_filter)
    messages = ['Found use of Chromium OS_* macro in %s. '
                'Use WTF platform macros instead.' % violation
                for violation in violations]
    if not messages:
        return []
    return [output_api.PresubmitPromptWarning('\n'.join(messages))]
def _CheckForPrintfDebugging(input_api, output_api):
"""Generally speaking, we'd prefer not to land patches that printf
debug output."""
printf_re = input_api.re.compile(r'^\s*printf\(')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not printf_re.search(x),
input_api, None)
errors = [' * %s' % violation for violation in errors]
if errors:
return [output_api.PresubmitPromptOrNotify(
'printf debugging is best debugging! That said, it might '
'be a good idea to drop the following occurances from '
'your patch before uploading:\n%s' % '\n'.join(errors))]
return []
def _CheckForDangerousTestFunctions(input_api, output_api):
"""Tests should not be using serveAsynchronousMockedRequests, since it does
not guarantee that the threaded HTML parser will have completed."""
serve_async_requests_re = input_api.re.compile(
r'serveAsynchronousMockedRequests')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not serve_async_requests_re.search(x),
input_api, None)
errors = [' * %s' % violation for violation in errors]
if errors:
return [output_api.PresubmitError(
'You should be using FrameTestHelpers::'
'pumpPendingRequests() instead of '
'serveAsynchronousMockedRequests() in the following '
'locations:\n%s' % '\n'.join(errors))]
return []
def _CheckForFailInFile(input_api, f):
pattern = input_api.re.compile('^FAIL')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckFilePermissions(input_api, output_api):
    """Check that all files have their permissions properly set."""
    # checkperms.py inspects POSIX permission bits, which do not exist on
    # Windows, so the check is skipped there.
    if input_api.platform == 'win32':
        return []
    path = input_api.os_path.join(
        '..', '..', 'tools', 'checkperms', 'checkperms.py')
    # NOTE(review): relies on a module-level 'import sys' that is outside
    # this chunk -- confirm it exists at the top of the file.
    args = [sys.executable, path, '--root', input_api.change.RepositoryRoot()]
    for f in input_api.AffectedFiles():
        args += ['--file', f.LocalPath()]
    checkperms = input_api.subprocess.Popen(
        args, stdout=input_api.subprocess.PIPE)
    # Any stdout produced by checkperms.py describes permission problems.
    errors = checkperms.communicate()[0].strip()
    if errors:
        return [output_api.PresubmitError(
            'checkperms.py failed.', errors.splitlines())]
    return []
def _CheckForInvalidPreferenceError(input_api, output_api):
pattern = input_api.re.compile('Invalid name for preference: (.+)')
results = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('-expected.txt'):
continue
for line_num, line in f.ChangedContents():
error = pattern.search(line)
if error:
results.append(output_api.PresubmitError('Found an invalid preference %s in expected result %s:%s' % (error.group(1), f, line_num)))
return results
def CheckChangeOnUpload(input_api, output_api):
    # Upload-time checks, in the same order as before.
    upload_checks = (
        _CommonChecks,
        _CheckStyle,
        _CheckForPrintfDebugging,
        _CheckForDangerousTestFunctions,
        _CheckForInvalidPreferenceError,
    )
    results = []
    for check in upload_checks:
        results += check(input_api, output_api)
    return results
def CheckChangeOnCommit(input_api, output_api):
    # Commit-time checks: common checks plus tree/description/svn config.
    results = list(_CommonChecks(input_api, output_api))
    results += input_api.canned_checks.CheckTreeIsOpen(
        input_api, output_api,
        json_url='http://blink-status.appspot.com/current?format=json')
    results += input_api.canned_checks.CheckChangeHasDescription(
        input_api, output_api)
    results += _CheckSubversionConfig(input_api, output_api)
    return results
def GetPreferredTryMasters(project, change):
    import json
    import os.path
    import platform
    import subprocess
    cq_config_path = os.path.join(
        change.RepositoryRoot(), 'infra', 'config', 'cq.cfg')
    # commit_queue.py below is a script in depot_tools directory, which has a
    # 'builders' command to retrieve a list of CQ builders from the CQ config.
    is_win = platform.system() == 'Windows'
    masters = json.loads(subprocess.check_output(
        ['commit_queue', 'builders', cq_config_path], shell=is_win))
    # Do not trigger presubmit builders, since they're likely to fail
    # (e.g. OWNERS checks before finished code review), and we're
    # running local presubmit anyway. Masters whose builders are all
    # presubmit still get an (empty) entry, as before.
    return {
        master: {builder: ['defaulttests']
                 for builder in builders
                 if 'presubmit' not in builder}
        for master, builders in masters.items()
    }
|
|
# smartmirror.py
# requirements
# requests, feedparser, traceback, Pillow
from Tkinter import *
import locale
import threading
import time
import locale
import requests
import json
import traceback
import feedparser
import serial
import subprocess #checks if camera connected
from time import sleep
#from serial import SerialException
from PIL import Image, ImageTk
from contextlib import contextmanager
from facematch.FaceMatch import FaceMatch
from contextlib import contextmanager #check http://preshing.com/20110920/the-python-with-statement-by-example/
from serial import SerialException
# --- User-tunable configuration --------------------------------------------
# Guards locale changes, which are process-wide and not thread safe.
LOCALE_LOCK = threading.Lock()
ui_locale = "en_US.utf8" # e.g. 'fr_FR' for French, '' as default
time_format = 12 # 12 or 24
date_format = "%b %d, %Y" # check python doc for strftime() for options
news_country_code = 'us'
# NOTE(review): secret API key checked into source -- consider moving it to
# an environment variable or a config file outside version control.
weather_api_token = 'fbbdfae3c5f26c016c543398fc7f8cbf' # create account at https://darksky.net/dev/
weather_lang = 'en' # see https://darksky.net/dev/docs/forecast for full list of language parameters values
weather_unit = 'us' # see https://darksky.net/dev/docs/forecast for full list of unit parameters values
latitude = None # Set this if IP location lookup does not work for you (must be a string)
longitude = None # Set this if IP location lookup does not work for you (must be a string)
# Font sizes (points) used by the Tkinter widgets below.
xlarge_text_size = 94
large_text_size = 48
medium_text_size = 28
small_text_size = 18
# Serial port parameters for the bluetooth link to the sensor board.
serial_speed = 9600
serial_port = '/dev/rfcomm0'
##### Bluetooth-Serial connection check
# 'send' is a module-wide flag: 1 if the serial link is usable, else 0.
send = 1
try:
    ser = serial.Serial(serial_port, serial_speed, timeout=1)
    print "Bluetooth Connected"
except serial.SerialException:
    print "No connection to the bluetooth device could be established"
    send=0
#CAMERA CHECK#
##############
# Ask the Pi firmware whether a camera module is attached; the reply looks
# like "supported=1 detected=1".
camera = subprocess.check_output(["vcgencmd","get_camera"])
#int(camera.strip()[-1]) #gets only 0 or 1 from detected status
section = camera.split(" ")[1]
# 'detector' is "1" when a camera was detected; FaceRec checks it later.
detector = section.split("=")[-1]
#print detector
if (int(detector)==1):
    print "Camera Detected"
    print camera
else:
    print "Camera not detected"
    print camera
@contextmanager
def setlocale(name):
    """Thread-safe context manager that temporarily switches the locale.

    The @contextmanager decorator had been commented out, which made
    'with setlocale(name):' fail -- a bare generator is not a context
    manager. It is restored here; the previous locale is always put back.
    """
    print("locale " + name)
    with LOCALE_LOCK:
        saved = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            locale.setlocale(locale.LC_ALL, saved)
# Maps Dark Sky icon ids to local image assets.
# Icon id reading is not impacted by the 'lang' parameter.
icon_lookup = {
    'clear-day': "assets/Sun.png",  # clear sky day
    'wind': "assets/Wind.png",  # wind
    'cloudy': "assets/Cloud.png",  # cloudy day
    'partly-cloudy-day': "assets/PartlySunny.png",  # partly cloudy day
    'rain': "assets/Rain.png",  # rain day
    'snow': "assets/Snow.png",  # snow day
    'snow-thin': "assets/Snow.png",  # sleet day
    'fog': "assets/Haze.png",  # fog day
    'clear-night': "assets/Moon.png",  # clear sky night
    'partly-cloudy-night': "assets/PartlyMoon.png",  # scattered clouds night
    'thunderstorm': "assets/Storm.png",  # thunderstorm
    'tornado': "assets/Tornado.png",  # tornado (fixed "assests" path typo)
    'hail': "assets/Hail.png"  # hail (fixed "assests" path typo)
}
class FaceRec(Frame):
    """Widget meant to greet the recognized user by name.

    NOTE(review): this class looks half-finished: Frame.__init__ and
    createWidgets() are commented out, so find() raises AttributeError
    on self.face and the Frame base is never initialized. Its
    instantiation in FullscreenWindow is also commented out -- confirm
    intent before enabling.
    """
    def __init__(self, parent, *args, **kwargs):
        # Frame.__init__(self, parent, *args, **kwargs)
        self.name = StringVar()
        # self.createWidgets()
        # self.pack()
        self.find()
    def find(self):
        # Only attempt recognition when the Pi camera was detected at
        # startup (module-level 'detector' flag).
        if (int(detector)==1):
            fm=FaceMatch("test")
            name=fm.getName()
        else:
            name=("camera not connected")
        self.name.set("Hello " + name)
        # NOTE(review): self.face is only created by createWidgets(),
        # which is never called -- this line will fail as written.
        self.face.pack(side=LEFT, anchor=W)
        # return name
    # Create display elements
    def createWidgets(self):
        self.face = Label(self, textvariable=self.name, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.face.pack(side=TOP, anchor=W)
class TempTest(Frame):
    """Widget that requests a temperature reading over the serial link."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.temp_data = StringVar()
        called = StringVar() # NOTE(review): local, never used afterwards
        self.createWidgets()
        # NOTE(review): passes the Frame *class*, not a parent widget
        # instance, and FaceRec itself is half-disabled -- confirm.
        of = FaceRec(Frame)
        # self.pack()
        # self.called = names
        #try:
        # of = FaceRec(Frame)
        # of.find()
        # called = FaceRec.find(self) #calls fuction from FaceRe of.find()
        #o print("name is " + of.find())
        #except:
        # self.called = "no name"
        # Only query the sensor when the serial link was opened at startup.
        if(int(send)==1): #if bluetooth connected
            self.measure()
            print "reading data"
        #if self.name !="":
        # try:
        # storeData.record(self.name, self.temp_data) #calls function record from storeData
        # except Exception as e:
        # traceback.print_exc()
        # print "Error: %s. Cannot store data." % e
    def measure(self):
        """Send the 't' command over serial and display the reply."""
        # Request data and read the answer
        ser.write("t")
        print("this is t")
        data = ser.readline()
        print str(data)
        # If the answer is not empty, process & display data
        if (data != ""):
            processed_data = data.split(",") # NOTE(review): unused -- confirm
            self.temp_data.set("Temperature: " + str(data))
            self.face.pack(side=LEFT, anchor=W)
            self.temperature.pack(side=LEFT, anchor=W)
        # Wait 1 second between each measurement
        #self.after(1000,self.measure)
    # Create display elements
    # def record(self, storeData):
    # storeData.write(self.name, self.temp_data)
    def createWidgets(self):
        # Both labels track temp_data: 'face' is the large headline and
        # 'temperature' the smaller detail line beneath it.
        self.face = Label(self, textvariable=self.temp_data, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.face.pack(side=TOP, anchor=W)
        self.temperature = Label(self, textvariable=self.temp_data, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.temperature.pack(side=TOP, anchor=W)
#class storeData(Frame):
# def __init__(self, parent, name, weight, *args, **kwargs):
# Frame.__init__(self, parent, *args, **kwargs)
# ID = StringVar()
# Weight = IntVar()
# data = {'People': [{'name': ID, 'weight':[weight]}]}
# ID = name
# weight = weight
# #self.write()
# def write(self):
# with open('data.json') as outfile_r:
# try:
# source_data = json.load(outfile_r)
# new_name = True
# for ppl in source_data['People']:
# if ppl['name']==ID:
# ppl['weight'].append(weight)
# new_name = False
# if new_name:
# source_data['People'].append({'name': ID, 'weight':[weight]})
# storage = source_data
# except:
# storage = data
# with open('data.json','w') as outfile_w:
# json.dump(storage, outfile_w, sort_keys=True, indent=4)
class Clock(Frame):
    """Top-right widget showing the current time, weekday and date."""

    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        # Cached display strings; a label is only re-configured on change.
        self.time1 = ''
        self.day_of_week1 = ''
        self.date1 = ''
        # Widgets are packed top-to-bottom: time, weekday, date.
        self.timeLbl = Label(self, font=('Helvetica', large_text_size), fg="white", bg="black")
        self.timeLbl.pack(side=TOP, anchor=E)
        self.dayOWLbl = Label(self, text=self.day_of_week1, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.dayOWLbl.pack(side=TOP, anchor=E)
        self.dateLbl = Label(self, text=self.date1, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.dateLbl.pack(side=TOP, anchor=E)
        self.tick()

    def tick(self):
        """Refresh the three labels if needed, then re-schedule itself."""
        hour_fmt = '%I:%M %p' if time_format == 12 else '%H:%M'
        now = time.strftime(hour_fmt)
        weekday = time.strftime('%A')
        today = time.strftime(date_format)
        if now != self.time1:
            self.time1 = now
            self.timeLbl.config(text=now)
        if weekday != self.day_of_week1:
            self.day_of_week1 = weekday
            self.dayOWLbl.config(text=weekday)
        if today != self.date1:
            self.date1 = today
            self.dateLbl.config(text=today)
        # Re-run every 200 ms; a longer period makes the display jerky.
        self.timeLbl.after(200, self.tick)
class Weather(Frame):
    """Top-left widget showing current conditions fetched from Dark Sky.

    Location is resolved from the public IP unless the module-level
    latitude/longitude overrides are set. Refreshes itself every 10 min.
    """
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, bg='black')
        # Cached display values; labels are only updated on change.
        self.temperature = ''
        self.forecast = ''
        self.location = ''
        self.currently = ''
        self.icon = ''
        self.degreeFrm = Frame(self, bg="black")
        self.degreeFrm.pack(side=TOP, anchor=W)
        self.temperatureLbl = Label(self.degreeFrm, font=('Helvetica', xlarge_text_size), fg="white", bg="black")
        self.temperatureLbl.pack(side=LEFT, anchor=N)
        self.iconLbl = Label(self.degreeFrm, bg="black")
        self.iconLbl.pack(side=LEFT, anchor=N, padx=20)
        self.currentlyLbl = Label(self, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.currentlyLbl.pack(side=TOP, anchor=W)
        self.forecastLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.forecastLbl.pack(side=TOP, anchor=W)
        self.locationLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.locationLbl.pack(side=TOP, anchor=W)
        self.get_weather()
    def get_ip(self):
        """Return this machine's public IP, or an error string on failure."""
        try:
            ip_url = "http://jsonip.com/"
            req = requests.get(ip_url)
            ip_json = json.loads(req.text)
            return ip_json['ip']
        except Exception as e:
            traceback.print_exc()
            return "Error: %s. Cannot get ip." % e
    def get_weather(self):
        """Fetch location + weather, update labels, reschedule in 10 min."""
        try:
            if latitude is None and longitude is None:
                # get location
                location_req_url = "http://freegeoip.net/json/%s" % self.get_ip()
                r = requests.get(location_req_url)
                location_obj = json.loads(r.text)
                lat = location_obj['latitude']
                lon = location_obj['longitude']
                location2 = "%s, %s" % (location_obj['city'], location_obj['region_code'])
                # get weather
                weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, lat,lon,weather_lang,weather_unit)
            else:
                location2 = ""
                # get weather
                weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, latitude, longitude, weather_lang, weather_unit)
            r = requests.get(weather_req_url)
            weather_obj = json.loads(r.text)
            degree_sign= u'\N{DEGREE SIGN}'
            temperature2 = "%s%s" % (str(int(weather_obj['currently']['temperature'])), degree_sign)
            currently2 = weather_obj['currently']['summary']
            forecast2 = weather_obj["hourly"]["summary"]
            icon_id = weather_obj['currently']['icon']
            icon2 = None
            if icon_id in icon_lookup:
                icon2 = icon_lookup[icon_id]
            if icon2 is not None:
                if self.icon != icon2:
                    self.icon = icon2
                    image = Image.open(icon2)
                    image = image.resize((100, 100), Image.ANTIALIAS)
                    image = image.convert('RGB')
                    photo = ImageTk.PhotoImage(image)
                    self.iconLbl.config(image=photo)
                    # Keep a reference so Tkinter doesn't drop the image.
                    self.iconLbl.image = photo
            else:
                # remove image
                self.iconLbl.config(image='')
            if self.currently != currently2:
                self.currently = currently2
                self.currentlyLbl.config(text=currently2)
            if self.forecast != forecast2:
                self.forecast = forecast2
                self.forecastLbl.config(text=forecast2)
            if self.temperature != temperature2:
                self.temperature = temperature2
                self.temperatureLbl.config(text=temperature2)
            if self.location != location2:
                # ", " means geolocation returned empty city/region fields.
                if location2 == ", ":
                    self.location = "Cannot Pinpoint Location"
                    self.locationLbl.config(text="Cannot Pinpoint Location")
                else:
                    self.location = location2
                    self.locationLbl.config(text=location2)
        except Exception as e:
            traceback.print_exc()
            print "Error: %s. Cannot get weather." % e
        # Refresh every 10 minutes.
        self.after(600000, self.get_weather)
    @staticmethod
    def convert_kelvin_to_fahrenheit(kelvin_temp):
        # NOTE(review): unused here -- Dark Sky already returns units per
        # weather_unit; confirm before removing.
        return 1.8 * (kelvin_temp - 273) + 32
class News(Frame):
    """Bottom widget showing the top five Google News RSS headlines."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.config(bg='black')
        self.title = 'News!' # 'News' is more internationally generic
        self.newsLbl = Label(self, text=self.title, font=('Helvetica', medium_text_size), fg="white", bg="black")
        self.newsLbl.pack(side=TOP, anchor=W)
        self.headlinesContainer = Frame(self, bg="black")
        self.headlinesContainer.pack(side=TOP, anchor=S)
        self.get_headlines()
    def get_headlines(self):
        """Rebuild the headline rows; reschedules itself every 10 min."""
        try:
            # remove all children
            for widget in self.headlinesContainer.winfo_children():
                widget.destroy()
            if news_country_code == None:
                headlines_url = "https://news.google.com/news?ned=us&output=rss"
            else:
                headlines_url = "https://news.google.com/news?ned=%s&output=rss" % news_country_code
            feed = feedparser.parse(headlines_url)
            # Show only the top five entries.
            for post in feed.entries[0:5]:
                headline = NewsHeadline(self.headlinesContainer, post.title)
                headline.pack(side=TOP, anchor=W)
        except Exception as e:
            traceback.print_exc()
            print "Error: %s. Cannot get news." % e
        # Refresh every 10 minutes.
        self.after(600000, self.get_headlines)
class NewsHeadline(Frame):
    """A single headline row: newspaper icon followed by the headline text."""

    def __init__(self, parent, event_name=""):
        Frame.__init__(self, parent, bg='black')
        icon = Image.open("assets/Newspaper.png")
        icon = icon.resize((25, 25), Image.ANTIALIAS)
        icon = icon.convert('RGB')
        rendered = ImageTk.PhotoImage(icon)
        self.iconLbl = Label(self, bg='black', image=rendered)
        # Keep a reference so Tkinter does not garbage-collect the image.
        self.iconLbl.image = rendered
        self.iconLbl.pack(side=LEFT, anchor=W)
        self.eventName = event_name
        self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
        self.eventNameLbl.pack(side=LEFT, anchor=N)
class SplashScreen(Frame):
    """Centered borderless splash window.

    NOTE(review): this class references names not defined anywhere in this
    file -- useFactor, width, height (in __init__) and sp, root (in
    createWidget). As written it raises NameError when instantiated; all
    call sites are commented out. Confirm before re-enabling.
    """
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.createWidget()
        self.pack(side=TOP, fill=BOTH, expand=YES)
        # get screen width and height
        ws = self.master.winfo_screenwidth()
        hs = self.master.winfo_screenheight()
        w = (useFactor and ws*width) or width
        h = (useFactor and ws*height) or height
        # calculate position x, y
        x = (ws/2) - (w/2)
        y = (hs/2) - (h/2)
        self.master.geometry('%dx%d+%d+%d' % (w, h, x, y))
        # Borderless window: no title bar or decorations.
        self.master.overrideredirect(True)
        self.lift()
    def createWidget(self):
        sp.config(bg="#3366ff")
        m = Label(sp, text="This is a test of the splash screen\n\n\nThis is only a test.\nwww.sunjay-varma.com")
        m.pack(side=TOP, expand=YES)
        m.config(bg="#3366ff", justify=CENTER, font=("calibri", 29))
        Button(sp, text="Press this button to kill the program", bg='red', command=root.destroy).pack(side=BOTTOM, fill=X)
class FullscreenWindow:
    """Main mirror window assembling clock, weather, temperature and news."""
    def __init__(self):
        self.tk = Tk()
        self.tk.configure(background='black')
        # Three stacked bands: top (clock/weather), center (temp), bottom (news).
        self.topFrame = Frame(self.tk, background = 'black')
        self.centerFrame = Frame(self.tk, background = 'black')
        self.bottomFrame = Frame(self.tk, background = 'black')
        self.topFrame.pack(side = TOP, fill=BOTH, expand = YES)
        self.centerFrame.pack(side = TOP, fill=BOTH, expand = YES)
        self.bottomFrame.pack(side = BOTTOM, fill=BOTH, expand = YES)
        # Fullscreen state: <Return> toggles, <Escape> always exits.
        self.state = False
        self.tk.bind("<Return>", self.toggle_fullscreen)
        self.tk.bind("<Escape>", self.end_fullscreen)
        # Name
        # Facial Recognition
        # self.facerec = FaceRec(self.topFrame)
        # self.facerec.pack(side=TOP, anchor=N, padx=0, pady=0)
        # clock
        self.clock = Clock(self.topFrame)
        self.clock.pack(side=RIGHT, anchor=N, padx=10, pady=60)
        # weather
        self.weather = Weather(self.topFrame)
        self.weather.pack(side=LEFT, anchor=N, padx=10, pady=60)
        # temperature readout from the bluetooth sensor
        self.temp = TempTest(self.centerFrame)
        self.temp.pack(side=TOP, anchor=N, padx=10)
        # news
        self.news = News(self.bottomFrame)
        self.news.pack(side=LEFT, anchor=S, padx=10, pady=60)
        # self.weight = Weight(self.centerFrame)
        # self.weight.pack(side=LEFT, anchor=W, padx=10)
    def toggle_fullscreen(self, event=None):
        """Flip fullscreen on/off; 'break' stops further event handling."""
        self.state = not self.state # Just toggling the boolean
        self.tk.attributes("-fullscreen", self.state)
        return "break"
    def end_fullscreen(self, event=None):
        """Unconditionally leave fullscreen mode."""
        self.state = False
        self.tk.attributes("-fullscreen", False)
        return "break"
if __name__ == '__main__':
    #w = FullscreenWindow()
    #new code
    # root = Tk()
    # sp = SplashScreen(root)
    # sp.config(bg="#3366ff")
    # m = Label(sp, text="This is a test of the splash screen\n\n\nThis is only a test.\nwww.sunjay-varma.com")
    # m.pack(side=TOP, expand=YES)
    # m.config(bg="#3366ff", justify=CENTER, font=("calibri", 29))
    # Button(sp, text="Press this button to kill the program", bg='red', command=root.destroy).pack(side=BOTTOM, fill=X)
    #end new code
    # Build the mirror UI and block in the Tk event loop.
    w = FullscreenWindow()
    w.tk.mainloop()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest.mock as mock
import numpy as np
import coords
import go
from go import Position
from tests import test_utils
from mcts import MCTSNode, MAX_DEPTH
from strategies import MCTSPlayerMixin, time_recommendation
# 9x9 board a handful of moves from the end; used for endgame behaviour tests.
ALMOST_DONE_BOARD = test_utils.load_board('''
.XO.XO.OO
X.XXOOOO.
XXXXXOOOO
XXXXXOOOO
.XXXXOOO.
XXXXXOOOO
.XXXXOOO.
XXXXXOOOO
XXXXOOOOO
''')
# Tromp-Taylor scoring means black can win if we hit the move limit.
TT_FTW_BOARD = test_utils.load_board('''
.XXOOOOOO
X.XOO...O
.XXOO...O
X.XOO...O
.XXOO..OO
X.XOOOOOO
.XXOOOOOO
X.XXXXXXX
XXXXXXXXX
''')
# Late-game position (move 70), black to play, white just played (0, 8).
SEND_TWO_RETURN_ONE = go.Position(
    board=ALMOST_DONE_BOARD,
    n=70,
    komi=2.5,
    caps=(1, 4),
    ko=None,
    recent=(go.PlayerMove(go.BLACK, (0, 1)),
            go.PlayerMove(go.WHITE, (0, 8))),
    to_play=go.BLACK
)
class DummyNet():
    """Stand-in for the policy/value network returning fixed priors/value."""

    def __init__(self, fake_priors=None, fake_value=0):
        if fake_priors is None:
            # Uniform priors over every board point plus the pass move.
            fake_priors = np.ones((go.N ** 2) + 1) / (go.N ** 2 + 1)
        self.fake_priors = fake_priors
        self.fake_value = fake_value

    def run(self, position):
        """Return (priors, value) for a single position."""
        return self.fake_priors, self.fake_value

    def run_many(self, positions):
        """Return ([priors...], [values...]), mirroring the batch API.

        Raises ValueError on an empty batch, like the real network would.
        """
        if not positions:
            # Fixed the unbalanced parenthesis in this message.
            raise ValueError(
                "No positions passed! (Tensorflow would have failed here.)")
        return [self.fake_priors] * len(positions), [self.fake_value] * len(positions)
def initialize_basic_player():
    """Build an MCTSPlayerMixin over DummyNet with the first leaf expanded."""
    player = MCTSPlayerMixin(DummyNet())
    player.initialize_game()
    leaf = player.root.select_leaf()
    priors, value = player.network.run(player.root.position)
    leaf.incorporate_results(priors, value, up_to=player.root)
    return player
def initialize_almost_done_player():
    """Player whose net strongly prefers the top-edge moves and passing."""
    priors = np.full(go.N * go.N + 1, .001)
    priors[2:5] = 0.2  # some legal moves along the top.
    priors[-1] = 0.2  # passing is also ok
    player = MCTSPlayerMixin(DummyNet(fake_priors=priors))
    # root position is white to play with no history == white passed.
    player.initialize_game(SEND_TWO_RETURN_ONE)
    return player
class TestMCTSPlayerMixin(test_utils.MiniGoUnitTest):
def test_time_controls(self):
    """Total time over a worst-case 1000-move game stays within budget."""
    secs_per_move = 5
    for time_limit in (10, 100, 1000):
        # in the worst case imaginable, let's say a game goes 1000 moves long
        spent = sum(
            time_recommendation(move_num, secs_per_move,
                                time_limit=time_limit)
            for move_num in range(0, 1000, 2))
        # we should not exceed available game time
        self.assertLess(spent, time_limit)
        # but we should have used at least 95% of our time by the end.
        self.assertGreater(spent, time_limit * 0.95)
def test_inject_noise(self):
    """Dirichlet noise keeps priors normalized but concentrates mass."""
    player = initialize_basic_player()
    sum_priors = np.sum(player.root.child_prior)
    # dummyNet should return normalized priors.
    self.assertAlmostEqual(sum_priors, 1)
    # Before noise, all children share an identical exploration term.
    self.assertTrue(np.all(player.root.child_U == player.root.child_U[0]))
    player.root.inject_noise()
    new_sum_priors = np.sum(player.root.child_prior)
    # priors should still be normalized after injecting noise
    self.assertAlmostEqual(sum_priors, new_sum_priors)
    # With dirichelet noise, majority of density should be in one node.
    max_p = np.max(player.root.child_prior)
    self.assertGreater(max_p, 3/(go.N ** 2 + 1))
def test_pick_moves(self):
    """Move selection is deterministic late game, sampled early game."""
    player = initialize_basic_player()
    root = player.root
    # Seed visit counts so (2, 0) is the most-visited child.
    root.child_N[coords.to_flat((2, 0))] = 10
    root.child_N[coords.to_flat((1, 0))] = 5
    root.child_N[coords.to_flat((3, 0))] = 1
    root.position.n = go.N ** 2 # move 81, or 361, or... Endgame.
    # Assert we're picking deterministically
    self.assertTrue(root.position.n > player.temp_threshold)
    move = player.pick_move()
    self.assertEqual(move, (2, 0))
    # But if we're in the early part of the game, pick randomly
    root.position.n = 3
    self.assertFalse(player.root.position.n > player.temp_threshold)
    # 0.5 falls inside the most-visited child's probability mass...
    with mock.patch('random.random', lambda: .5):
        move = player.pick_move()
        self.assertEqual(move, (2, 0))
    # ...while 0.99 lands in the tail, selecting the least-visited move.
    with mock.patch('random.random', lambda: .99):
        move = player.pick_move()
        self.assertEqual(move, (3, 0))
def test_dont_pass_if_losing(self):
    """When passing would lose, search must converge on the winning move."""
    player = initialize_almost_done_player()
    # check -- white is losing.
    self.assertEqual(player.root.position.score(), -0.5)
    for i in range(20):
        player.tree_search()
    # uncomment to debug this test
    # print(player.root.describe())
    # Search should converge on D9 as only winning move.
    flattened = coords.to_flat(coords.from_kgs('D9'))
    best_move = np.argmax(player.root.child_N)
    self.assertEqual(best_move, flattened)
    # D9 should have a positive value
    self.assertGreater(player.root.children[flattened].Q, 0)
    self.assertGreaterEqual(player.root.N, 20)
    # passing should be ineffective.
    self.assertLess(player.root.child_Q[-1], 0)
    # no virtual losses should be pending
    self.assertNoPendingVirtualLosses(player.root)
def test_parallel_tree_search(self):
    """Virtual losses let parallel searches reach the same conclusion."""
    player = initialize_almost_done_player()
    # check -- white is losing.
    self.assertEqual(player.root.position.score(), -0.5)
    # initialize the tree so that the root node has populated children.
    player.tree_search(num_parallel=1)
    # virtual losses should enable multiple searches to happen simultaneously
    # without throwing an error...
    for i in range(5):
        player.tree_search(num_parallel=4)
    # uncomment to debug this test
    # print(player.root.describe())
    # Search should converge on D9 as only winning move.
    flattened = coords.to_flat(coords.from_kgs('D9'))
    best_move = np.argmax(player.root.child_N)
    self.assertEqual(best_move, flattened)
    # D9 should have a positive value
    self.assertGreater(player.root.children[flattened].Q, 0)
    self.assertGreaterEqual(player.root.N, 20)
    # passing should be ineffective.
    self.assertLess(player.root.child_Q[-1], 0)
    # no virtual losses should be pending
    self.assertNoPendingVirtualLosses(player.root)
def test_ridiculously_parallel_tree_search(self):
    """Parallelism larger than the number of legal moves must not wedge."""
    player = initialize_almost_done_player()
    # Test that an almost complete game
    # will tree search with # parallelism > # legal moves.
    for _ in range(10):
        player.tree_search(num_parallel=50)
    self.assertNoPendingVirtualLosses(player.root)
def test_long_game_tree_search(self):
    """Near the move limit, black should still read out a win (Tromp-Taylor)."""
    player = MCTSPlayerMixin(DummyNet())
    # Position two moves shy of the hard search depth limit.
    endgame = go.Position(
        board=TT_FTW_BOARD,
        n=MAX_DEPTH-2,
        komi=2.5,
        ko=None,
        recent=(go.PlayerMove(go.BLACK, (0, 1)),
                go.PlayerMove(go.WHITE, (0, 8))),
        to_play=go.BLACK
    )
    player.initialize_game(endgame)
    # Test that an almost complete game
    for i in range(10):
        player.tree_search(num_parallel=8)
    self.assertNoPendingVirtualLosses(player.root)
    # Black is ahead under Tromp-Taylor scoring, so Q should be positive.
    self.assertGreater(player.root.Q, 0)
def test_cold_start_parallel_tree_search(self):
    """Parallel tree search must not trip on a completely empty tree."""
    player = MCTSPlayerMixin(DummyNet(fake_value=0.17))
    player.initialize_game()
    root = player.root
    self.assertEqual(root.N, 0)
    self.assertFalse(root.is_expanded)
    player.tree_search(num_parallel=4)
    self.assertNoPendingVirtualLosses(player.root)
    # The root gets selected 4 times by tree search, but it should only be
    # expanded -- and counted -- once.
    self.assertEqual(player.root.N, 1)
    # 0.085 = average(0, 0.17): 0 is the prior on the root and 0.17 is the
    # dummy network's value.
    self.assertAlmostEqual(player.root.Q, 0.085)
def test_tree_search_failsafe(self):
    """The failsafe must cope with MCTS revisiting a finished game state."""
    # A net that always wants to pass drives the search into the
    # already-terminal double-pass state over and over.
    pass_priors = np.array([.001] * (go.N * go.N + 1))
    pass_priors[-1] = 1
    player = MCTSPlayerMixin(DummyNet(fake_priors=pass_priors))
    player.initialize_game(go.Position().pass_move())
    player.tree_search(num_parallel=1)
    self.assertNoPendingVirtualLosses(player.root)
def test_only_check_game_end_once(self):
    """The game-end (pass-reply) node must be expanded exactly once."""
    # When presented with a situation where the last move was a pass,
    # and we have to decide whether to pass, it should be the first thing
    # we check, but not more than that.
    white_passed_pos = go.Position(
    ).play_move((3, 3)  # b plays
    ).play_move((3, 4)  # w plays
    ).play_move((4, 3)  # b plays
    ).pass_move()  # w passes - if B passes too, B would lose by komi.

    player = MCTSPlayerMixin(DummyNet())
    player.initialize_game(white_passed_pos)
    # initialize the root
    player.tree_search()
    # explore a child - should be a pass move.
    player.tree_search()
    pass_move = go.N * go.N
    # The pass child has been visited exactly once so far.
    self.assertEqual(player.root.children[pass_move].N, 1)
    self.assertEqual(player.root.child_N[pass_move], 1)
    player.tree_search()
    # check that we didn't visit the pass node any more times.
    self.assertEqual(player.root.child_N[pass_move], 1)
def test_extract_data_normal_end(self):
    """A double-pass game should yield one training example per move."""
    player = MCTSPlayerMixin(DummyNet())
    player.initialize_game()
    # Both players pass, ending the game normally.
    for _ in range(2):
        player.tree_search()
        player.play_move(None)
    self.assertTrue(player.root.is_done())
    player.set_result(player.root.position.result(), was_resign=False)

    data = list(player.extract_data())
    self.assertEqual(len(data), 2)
    _position, _pi, result = data[0]
    # White wins by komi
    self.assertEqual(result, go.WHITE)
    self.assertEqual(player.result_string,
                     "W+{}".format(player.root.position.komi))
def test_extract_data_resign_end(self):
    """A resignation overrides the on-board result in extracted data."""
    player = MCTSPlayerMixin(DummyNet())
    player.initialize_game()
    player.tree_search()
    player.play_move((0, 0))
    player.tree_search()
    player.play_move(None)
    player.tree_search()
    # Black is winning on the board...
    self.assertEqual(player.root.position.result(), go.BLACK)
    # ...but Black resigns.
    player.set_result(go.WHITE, was_resign=True)

    first_example = list(player.extract_data())[0]
    _position, _pi, result = first_example
    # The recorded result must say White won.
    self.assertEqual(result, go.WHITE)
    self.assertEqual(player.result_string, "W+R")
|
|
"""
A collection of functions to simplify common VSX operations.
"""
import logging
import os
import re
from otto.lib.otypes import ReturnCode
from otto.appliances.vsx import Vsx
from otto.lib.decorators import wait_until
instance = os.environ.get('instance') or ''
logger = logging.getLogger('otto' + instance + '.lib')
logger.addHandler(logging.NullHandler())
def pv_is_empty(vsx, pv=None):  # TODO: use pv to filter results
    """
    Check if most of the metadata is empty and that extents are
    correctly accounted for. pv parameter is ignored for now

    NOTE(review): total/free/dirty/meta are initialized to 0 and never
    parsed out of the `pvs` output below, so the dirty-extent check
    compares against calculate_metaext(0, 4096) -- confirm whether a
    parsing step is missing here.
    """
    if not isinstance(vsx, Vsx):
        e = ReturnCode(False)
        e.message = "object is not a Vsx instance"
        return e
    total = 0
    free = 0
    dirty = 0
    meta = 0
    sh = vsx.shelf
    ret = vsx.pvs
    if not ret.status:
        return ret
    # raw `pvs` output, echoed into every error message below
    p = ret.message
    ret = ReturnCode(True)
    # expected number of metadata extents for `total` extents, 4096 per block
    used = calculate_metaext(total, 4096)
    if used != dirty:
        e = "pv has too many dirty extents, should be %d on VSX %s:\n%s" % (used, sh, p)
        logger.error(e)
        ret.status = False
        ret.message = e
    if meta:
        e = "meta extents not zero on VSX %s:\n%s" % (sh, p)
        logger.error(e)
        ret.status = False
        ret.message += "\n%s" % e
    if total != free + dirty:
        e = "pv accounting error on VSX %s:\n%s" % (sh, p)
        logger.error(e)
        ret.status = False
        ret.message += "\n%s" % e
    return ret
def pool_is_empty(vsx, pool):
    """
    Verify that the given pool has no extents allocated.

    Returns a ReturnCode whose status is False if any extent counts are
    non-zero or if the pool still owns PVs or LVs.
    """
    if not isinstance(vsx, Vsx):
        err = ReturnCode(False)
        err.message = "object is not a Vsx instance"
        return err

    shelf = vsx.shelf
    ret = vsx.run_and_check("pools -a %s" % pool)  # TODO: teach this that pools has -a
    if not ret.status:
        return ret
    p = ret.message
    ret = ReturnCode(True)

    def fail(e):
        # Record one verification failure without aborting the rest.
        logger.error(e)
        ret.status = False
        ret.message += "\n%s" % e

    def ext_count(label):
        # Pull an integer field such as "Total  Exts : 42" out of the output.
        m = re.search(label + r"[ \t]+Exts[ \t]+:[ \t]+([0-9]+)[ \t]+", p)
        return int(m.group(1)) if m else 0

    total = ext_count("Total")
    free = ext_count("Free")
    unique = ext_count("Unique")

    if total:
        fail("Empty pool %s on %s has %d total extents:\n%s" % (pool, shelf, total, p))
    if free:
        fail("Empty pool %s on %s has %d free extents:\n%s" % (pool, shelf, free, p))
    if unique:
        fail("Empty pool %s on %s has %d unique extents:\n%s" % (pool, shelf, unique, p))
    if re.search(r"PVs[ \t]+:[ \t]+[0-9]+\.[0-9]+", p):
        fail("Empty pool %s on %s has PVs:\n%s" % (pool, shelf, p))
    if re.search(r"LVs[ \t]+:[ \t]+(.*)[ \t]+", p):
        fail("Empty pool %s on %s has LVs:\n%s" % (pool, shelf, p))
    return ret
def lv_is_empty(vsx, lv):
    """
    Verify there are no snap extents left on the given LV.
    """
    result = ReturnCode(False)
    snap_used = get_snap_used(vsx, lv)
    if snap_used == "0.000":
        result.status = True
        return result
    result.message = "Stray snap extents on LV %s: %s" % (lv, snap_used)
    logger.error(result.message)
    return result
def get_snap_used(vsx, lv):
    """
    Return the space used by an LV's snapshots as a string of GB.

    Returns "0.000" when this VSX release lacks the snaplimit command or
    when the LV has no snapshots.
    """
    if vsx.run("help").find("snaplimit") == -1:
        # snaplimit is not implemented in this release of the VSX
        return "0.000"
    snap_entries = vsx.snaplimit([lv])
    if not len(snap_entries):
        # the LV has no snaps
        return "0.000"
    return snap_entries[lv].get('used')
def pv_is_mirrored(vsx, pv):
    """
    Check whether or not a pv is mirrored and is done silvering.

    Inspects the pv's xlate status file via expertmode; status is True
    only when the file reports 'mirrored'. The prompt is always restored
    and expertmode exited, even on early failure (the original code left
    the session in expertmode when the directory listing failed).
    """
    if not isinstance(vsx, Vsx):
        e = ReturnCode(False)
        e.message = "Object is not a Vsx instance"
        return e
    pvinfo = vsx.pvs.get(pv)
    if not pvinfo:
        e = ReturnCode(False)
        e.message = "pv not found"
        return e
    if pvinfo.get('stat') != 'mirrored':
        e = ReturnCode(False)
        e.message = "pv not mirrored"
        return e
    pool = pvinfo.get('pool')
    pv = pvinfo.get('pv')
    tprompt = vsx.prompt
    vsx.prompt = 'VSX EXPERTMODE# '
    vsx.run_and_check('/expertmode')
    try:
        ls = vsx.run_and_check("ls /n/xlate/pool/%s/pv/" % pool)
        if not ls:
            return ls
        ret = ReturnCode(False)
        # default message in case no status line matches this pv
        ret.message = "pv %s not found under pool %s" % (pv, pool)
        for fname in ls.message.split('\r\n'):
            status = vsx.run_and_check("cat %s/status" % fname)
            fields = status.message.split()
            if len(fields) < 6:
                # malformed or partial status line; record it and move on
                # (previously this could raise IndexError)
                ret.message = str(fields)
                continue
            if fields[0] == "single" and fields[5] == pv:
                ret = ReturnCode(False, fields[0])
                break
            elif fields[0] == "mirrored" and fields[5] == pv:
                ret = ReturnCode(True, fields[0])
                break
            else:
                ret.message = str(fields)
        return ret
    finally:
        # Always leave expertmode and restore the original prompt, even
        # when the directory listing or status reads fail.
        vsx.prompt = tprompt
        vsx.run("exit")
@wait_until()
def wait_pv_is_mirrored(vsx, pv):
    """Poll pv_is_mirrored until the wait_until decorator is satisfied."""
    return pv_is_mirrored(vsx, pv)
def pvs_available(vsx, pvs):
    """
    Verify that all pvs passed in are found in aoestat.

    Parameters
    ----------
    vsx : object exposing aoeflush(), aoediscover() and aoestat
    pvs : str or iterable of str
        a single target like '33.3', or any iterable of such targets
        (previously only str and list were handled; tuples, sets and
        generators now work too)

    Returns
    -------
    bool : True when every requested target appears in vsx.aoestat

    assuming::
        pvs = ['33.3', '35.0', '35.2', '35.3'] and
        >>> v.aoestat.keys()
        ['33.3', '35.2', '35.3', '35.0']
        >>> pvs_available(v, pvs)
        True
        >>> pvs.append('44.8')
        >>> pvs_available(v, pvs)
        False
    """
    # refresh the AoE target list before checking
    vsx.aoeflush()
    vsx.aoediscover()
    targets = set(vsx.aoestat.keys())
    # isinstance instead of `type(...) ==` so str subclasses work as well
    if isinstance(pvs, str):
        pvs = {pvs}
    else:
        pvs = set(pvs)
    return pvs.issubset(targets)
@wait_until()
def wait_pv_available(vsx, pv):
    """Poll pvs_available until the wait_until decorator is satisfied."""
    return pvs_available(vsx, pv)
def calculate_metaext(total, perblk):
    """
    Given number of total extents and extents per block, return the
    estimated number of meta extents.

    Taken from metaextents() in xlate source.

    Uses floor division throughout so the result is an exact integer on
    both Python 2 and Python 3 (a plain `/` would produce a float count
    on Python 3).
    """
    # number of metadata blocks, rounded up
    blocks = (total + perblk - 1) // perblk
    # 8192 bytes of metadata per block, plus an 8192-byte header
    nbytes = blocks * 8192 + 8192
    # number of 4 MiB meta extents, rounded up
    return (nbytes + 4194304 - 1) // 4194304
|
|
"""Demo platform for the cover component."""
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverDevice,
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Demo covers."""
    entities = [
        DemoCover(hass, "Kitchen Window"),
        DemoCover(hass, "Hall Window", 10),
        DemoCover(hass, "Living Room Window", 70, 50),
        DemoCover(
            hass,
            "Garage Door",
            device_class="garage",
            supported_features=(SUPPORT_OPEN | SUPPORT_CLOSE),
        ),
    ]
    add_entities(entities)
class DemoCover(CoverDevice):
    """Representation of a demo cover.

    Movement is simulated: while the cover is "moving", a time-change
    listener adjusts the position by 10 per callback until it reaches
    0, 100, or the requested target position.
    """

    def __init__(
        self,
        hass,
        name,
        position=None,
        tilt_position=None,
        device_class=None,
        supported_features=None,
    ):
        """Initialize the cover.

        position/tilt_position run 0 (closed) .. 100 (open); None means
        the cover reports no position and only tracks open/closed.
        """
        self.hass = hass
        self._name = name
        self._position = position
        self._device_class = device_class
        self._supported_features = supported_features
        # Target positions requested via set_cover_(tilt_)position.
        self._set_position = None
        self._set_tilt_position = None
        self._tilt_position = tilt_position
        # Direction of the current simulated movement (True = closing).
        self._requested_closing = True
        self._requested_closing_tilt = True
        # Unsubscribe callbacks for the simulated-motion time listeners.
        self._unsub_listener_cover = None
        self._unsub_listener_cover_tilt = None
        self._is_opening = False
        self._is_closing = False
        if position is None:
            self._closed = True
        else:
            self._closed = self.current_cover_position <= 0

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed for a demo cover."""
        return False

    @property
    def current_cover_position(self):
        """Return the current position of the cover (0..100 or None)."""
        return self._position

    @property
    def current_cover_tilt_position(self):
        """Return the current tilt position of the cover (0..100 or None)."""
        return self._tilt_position

    @property
    def is_closed(self):
        """Return if the cover is closed."""
        return self._closed

    @property
    def is_closing(self):
        """Return if the cover is closing."""
        return self._is_closing

    @property
    def is_opening(self):
        """Return if the cover is opening."""
        return self._is_opening

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return self._device_class

    @property
    def supported_features(self):
        """Flag supported features; fall back to the base class default."""
        if self._supported_features is not None:
            return self._supported_features
        return super().supported_features

    def close_cover(self, **kwargs):
        """Close the cover."""
        if self._position == 0:
            return
        if self._position is None:
            # Position-less cover: flip straight to closed.
            self._closed = True
            self.schedule_update_ha_state()
            return

        self._is_closing = True
        self._listen_cover()
        self._requested_closing = True
        self.schedule_update_ha_state()

    def close_cover_tilt(self, **kwargs):
        """Close the cover tilt."""
        if self._tilt_position in (0, None):
            return

        self._listen_cover_tilt()
        self._requested_closing_tilt = True

    def open_cover(self, **kwargs):
        """Open the cover."""
        if self._position == 100:
            return
        if self._position is None:
            # Position-less cover: flip straight to open.
            self._closed = False
            self.schedule_update_ha_state()
            return

        self._is_opening = True
        self._listen_cover()
        self._requested_closing = False
        self.schedule_update_ha_state()

    def open_cover_tilt(self, **kwargs):
        """Open the cover tilt."""
        if self._tilt_position in (100, None):
            return

        self._listen_cover_tilt()
        self._requested_closing_tilt = False

    def set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        position = kwargs.get(ATTR_POSITION)
        # The simulation moves in steps of 10, so track a rounded target.
        self._set_position = round(position, -1)
        if self._position == position:
            return

        self._listen_cover()
        self._requested_closing = position < self._position

    def set_cover_tilt_position(self, **kwargs):
        """Move the cover tilt to a specific position."""
        tilt_position = kwargs.get(ATTR_TILT_POSITION)
        # The simulation moves in steps of 10, so track a rounded target.
        self._set_tilt_position = round(tilt_position, -1)
        if self._tilt_position == tilt_position:
            return

        self._listen_cover_tilt()
        self._requested_closing_tilt = tilt_position < self._tilt_position

    def stop_cover(self, **kwargs):
        """Stop the cover."""
        self._is_closing = False
        self._is_opening = False

        if self._position is None:
            return
        if self._unsub_listener_cover is not None:
            # Cancel the simulated-motion listener and clear the target.
            self._unsub_listener_cover()
            self._unsub_listener_cover = None
            self._set_position = None

    def stop_cover_tilt(self, **kwargs):
        """Stop the cover tilt."""
        if self._tilt_position is None:
            return

        if self._unsub_listener_cover_tilt is not None:
            # Cancel the simulated-motion listener and clear the target.
            self._unsub_listener_cover_tilt()
            self._unsub_listener_cover_tilt = None
            self._set_tilt_position = None

    def _listen_cover(self):
        """Start the simulated-motion time listener, if not already running."""
        if self._unsub_listener_cover is None:
            self._unsub_listener_cover = track_utc_time_change(
                self.hass, self._time_changed_cover
            )

    def _time_changed_cover(self, now):
        """Advance the simulated cover position by one 10-unit step."""
        if self._requested_closing:
            self._position -= 10
        else:
            self._position += 10

        # Stop at either end of travel or at the requested target.
        if self._position in (100, 0, self._set_position):
            self.stop_cover()

        self._closed = self.current_cover_position <= 0
        self.schedule_update_ha_state()

    def _listen_cover_tilt(self):
        """Start the simulated tilt-motion time listener, if not running."""
        if self._unsub_listener_cover_tilt is None:
            self._unsub_listener_cover_tilt = track_utc_time_change(
                self.hass, self._time_changed_cover_tilt
            )

    def _time_changed_cover_tilt(self, now):
        """Advance the simulated tilt position by one 10-unit step."""
        if self._requested_closing_tilt:
            self._tilt_position -= 10
        else:
            self._tilt_position += 10

        # Stop at either end of travel or at the requested target.
        if self._tilt_position in (100, 0, self._set_tilt_position):
            self.stop_cover_tilt()

        self.schedule_update_ha_state()
|
|
"""
Pygam utilities
"""
from __future__ import division
from copy import deepcopy
import numbers
import sys
import warnings
import scipy as sp
from scipy import sparse
import numpy as np
from numpy.linalg import LinAlgError
try:
from sksparse.cholmod import cholesky as spcholesky
from sksparse.test_cholmod import CholmodNotPositiveDefiniteError
SKSPIMPORT = True
except ImportError:
SKSPIMPORT = False
class NotPositiveDefiniteError(ValueError):
    """Raised when a matrix that must be positive definite is not."""
class OptimizationError(ValueError):
    """Raised when PIRLS optimization fails to converge."""
def cholesky(A, sparse=True, verbose=True):
    """
    Choose the best possible cholesky factorizor.

    if possible, import the Scikit-Sparse sparse Cholesky method.
    Permutes the output L to ensure A = L.H . L

    otherwise defaults to numpy's non-sparse version

    Parameters
    ----------
    A : array-like
        array to decompose
    sparse : boolean, default: True
        whether to return a sparse array
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    upper-triangular Cholesky factor of A: a scipy.sparse.csc_matrix when
    sparse=True, otherwise a dense array.

    Raises
    ------
    NotPositiveDefiniteError : if A is not positive definite
    """
    # SKSPIMPORT is set at module import time: True iff scikit-sparse's
    # CHOLMOD bindings were importable.
    if SKSPIMPORT:
        A = sp.sparse.csc_matrix(A)
        try:
            F = spcholesky(A)

            # CHOLMOD factors a permuted matrix; build the permutation
            # matrix P from F.P() so we can undo that permutation.
            P = sp.sparse.lil_matrix(A.shape)
            p = F.P()
            P[np.arange(len(p)), p] = 1

            # permute
            L = F.L()
            L = P.T.dot(L)
        except CholmodNotPositiveDefiniteError as e:
            raise NotPositiveDefiniteError('Matrix is not positive definite')

        if sparse:
            return L.T  # upper triangular factorization
        return L.T.A  # upper triangular factorization

    else:
        msg = 'Could not import Scikit-Sparse or Suite-Sparse.\n'\
              'This will slow down optimization for models with '\
              'monotonicity/convexity penalties and many splines.\n'\
              'See installation instructions for installing '\
              'Scikit-Sparse and Suite-Sparse via Conda.'
        if verbose:
            warnings.warn(msg)

        # the dense fallback cannot exploit sparsity; densify if needed
        if sp.sparse.issparse(A):
            A = A.A

        try:
            L = sp.linalg.cholesky(A, lower=False)
        except LinAlgError as e:
            raise NotPositiveDefiniteError('Matrix is not positive definite')

        if sparse:
            return sp.sparse.csc_matrix(L)
        return L
def make_2d(array, verbose=True):
    """
    Ensure an array is 2-dimensional, expanding 1D input into a column.

    Parameters
    ----------
    array : array-like
    verbose : bool, default: True
        whether to warn when the input has to be expanded

    Returns
    -------
    np.array with ndim = 2
    """
    result = np.asarray(array)
    if result.ndim >= 2:
        return result

    if verbose:
        msg = 'Expected 2D input data array, but found {}D. '\
              'Expanding to 2D.'.format(result.ndim)
        warnings.warn(msg)
    # promote scalars/vectors to a single-column 2D array
    return np.atleast_1d(result)[:, None]
def check_array(array, force_2d=False, n_feats=None, ndim=None,
                min_samples=1, name='Input data', verbose=True):
    """
    Perform basic validation on a data array.

    Called by check_X and check_y. Ensures that the data:
    - is ndim dimensional
    - contains float-compatible data-types
    - has at least min_samples
    - has n_feats
    - is finite

    Parameters
    ----------
    array : array-like
    force_2d : boolean, default: False
        whether to force a 2d array. Setting to True forces ndim = 2
    n_feats : int, default: None
        represents number of features that the array should have.
        not enforced if n_feats is None.
    ndim : int default: None
        number of dimensions expected in the array
    min_samples : int, default: 1
    name : str, default: 'Input data'
        name to use when referring to the array
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    array : validated array
    """
    # coerce to ndarray, optionally forcing a 2D shape
    if force_2d:
        array = make_2d(array, verbose=verbose)
        ndim = 2
    else:
        array = np.array(array)

    # cast non-numeric dtypes to float
    dtype = array.dtype
    if dtype.kind not in ['i', 'f']:
        try:
            array = array.astype('float')
        except ValueError:
            raise ValueError('{} must be type int or float, '\
                             'but found type: {}\n'\
                             'Try transforming data with a LabelEncoder first.'\
                             .format(name, dtype.type))

    # reject Inf and NaN
    if not np.isfinite(array).all():
        raise ValueError('{} must not contain Inf nor NaN'.format(name))

    # enforce dimensionality
    if ndim is not None and array.ndim != ndim:
        raise ValueError('{} must have {} dimensions. '\
                         'found shape {}'.format(name, ndim, array.shape))

    # enforce feature count
    if n_feats is not None:
        found_feats = array.shape[1]
        if found_feats != n_feats:
            raise ValueError('{} must have {} features, '\
                             'but found {}'.format(name, n_feats, found_feats))

    # enforce minimum number of samples
    n_samples = array.shape[0]
    if n_samples < min_samples:
        raise ValueError('{} should have at least {} samples, '\
                         'but found {}'.format(name, min_samples, n_samples))

    return array
def check_y(y, link, dist, min_samples=1, verbose=True):
    """
    tool to ensure that the targets:
    - are in the domain of the link function
    - are numerical
    - have at least min_samples
    - is finite

    Parameters
    ----------
    y : array-like
    link : Link object
    dist : Distribution object
    min_samples : int, default: 1
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    y : array containing validated y-data

    Raises
    ------
    ValueError : if y is non-numeric, non-finite, or produces NaN under
        the link function (i.e. lies outside the link's domain)
    """
    y = np.ravel(y)

    y = check_array(y, force_2d=False, min_samples=min_samples, ndim=1,
                    name='y data', verbose=verbose)

    with warnings.catch_warnings():
        # link.link may emit RuntimeWarnings while probing (e.g. log of 0);
        # only the NaN-ness of the result matters here.
        warnings.simplefilter("ignore")
        if np.any(np.isnan(link.link(y, dist))):
            raise ValueError('y data is not in domain of {} link function. ' \
                             'Expected domain: {}, but found {}' \
                             .format(link, get_link_domain(link, dist),
                                     [float('%.2f'%np.min(y)),
                                      float('%.2f'%np.max(y))]))
    return y
def check_X(X, n_feats=None, min_samples=1, edge_knots=None, dtypes=None,
            features=None, verbose=True):
    """
    tool to ensure that X:
    - is 2 dimensional
    - contains float-compatible data-types
    - has at least min_samples
    - has n_feats
    - has categorical features in the right range
    - is finite

    Parameters
    ----------
    X : array-like
    n_feats : int. default: None
              represents number of features that X should have.
              not enforced if n_feats is None.
    min_samples : int, default: 1
    edge_knots : list of arrays, default: None
    dtypes : list of strings, default: None
    features : list of ints,
        which features are considered by the model
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    X : array with ndims == 2 containing validated X-data
    """
    # check all features are there
    if bool(features):
        features = flatten(features)
        max_feat = max(flatten(features))

        if n_feats is None:
            n_feats = max_feat

        n_feats = max(n_feats, max_feat)

    # basic diagnostics
    X = check_array(X, force_2d=True, n_feats=n_feats, min_samples=min_samples,
                    name='X data', verbose=verbose)

    # check our categorical data has no new categories
    if (edge_knots is not None) and (dtypes is not None) and (features is not None):
        # get a flattened list of tuples
        # (reversed so the .pop() calls below rebuild pairs in original order)
        edge_knots = flatten(edge_knots)[::-1]
        dtypes = flatten(dtypes)
        assert len(edge_knots) % 2 == 0  # sanity check

        # form pairs
        n = len(edge_knots) // 2
        edge_knots = [(edge_knots.pop(), edge_knots.pop()) for _ in range(n)]

        # check each categorical term
        for i, ek in enumerate(edge_knots):
            dt = dtypes[i]
            feature = features[i]
            x = X[:, feature]

            if dt == 'categorical':
                # categorical edge knots are padded by 0.5 on each side (see
                # gen_edge_knots), so the raw knots bracket the category range
                min_ = ek[0]
                max_ = ek[-1]
                if (np.unique(x) < min_).any() or \
                   (np.unique(x) > max_).any():
                    # shrink back by 0.5 to report the actual category range
                    min_ += .5
                    max_ -= 0.5
                    raise ValueError('X data is out of domain for categorical '\
                                     'feature {}. Expected data on [{}, {}], '\
                                     'but found data on [{}, {}]'\
                                     .format(i, min_, max_, x.min(), x.max()))

    return X
def check_X_y(X, y):
    """
    Ensure input and output data have the same number of samples.

    Parameters
    ----------
    X : array-like
    y : array-like

    Returns
    -------
    None

    Raises
    ------
    ValueError : if len(X) != len(y)
    """
    if len(X) != len(y):
        # np.shape works for lists/tuples as well as ndarrays; the previous
        # `.shape` attribute access raised AttributeError on plain lists,
        # masking the intended ValueError.
        raise ValueError('Inconsistent input and output data shapes. '\
                         'found X: {} and y: {}'.format(np.shape(X),
                                                        np.shape(y)))
def check_lengths(*arrays):
    """
    Ensure all given arrays have the same number of samples.

    Parameters
    ----------
    *arrays : iterable of arrays to be checked

    Returns
    -------
    None

    Raises
    ------
    ValueError : if the arrays differ in length
    """
    lengths = [len(a) for a in arrays]
    # more than one distinct length means a mismatch somewhere
    if len(set(lengths)) > 1:
        raise ValueError('Inconsistent data lengths: {}'.format(lengths))
def check_param(param, param_name, dtype, constraint=None, iterable=True,
                max_depth=2):
    """
    checks the dtype of a parameter,
    and whether it satisfies a numerical contraint

    Parameters
    ---------
    param : object
    param_name : str, name of the parameter
    dtype : str, desired dtype of the parameter
    contraint : str, default: None
        numerical constraint of the parameter.
        if None, no constraint is enforced.
        NOTE(review): the constraint string is eval()'d below -- only
        trusted, code-defined constraint strings must ever be passed here.
    iterable : bool, default: True
        whether to allow iterable param
    max_depth : int, default: 2
        maximum nesting of the iterable.
        only used if iterable == True

    Returns
    -------
    list of validated and converted parameter(s)

    Raises
    ------
    TypeError : if param has the wrong dtype or nesting depth
    ValueError : if param violates the numerical constraint
    """
    # build the error message up front; it is shared by all failure modes
    msg = []
    msg.append(param_name + " must be "+ dtype)
    if iterable:
        msg.append(" or nested iterable of depth " + str(max_depth) +
                   " containing " + dtype + "s")

    msg.append(", but found " + param_name + " = {}".format(repr(param)))

    if constraint is not None:
        msg = (" " + constraint).join(msg)
    else:
        msg = ''.join(msg)

    # check param is numerical
    try:
        param_dt = np.array(flatten(param))# + np.zeros_like(flatten(param), dtype='int')
        # param_dt = np.array(param).astype(dtype)
    except (ValueError, TypeError):
        raise TypeError(msg)

    # check iterable
    if iterable:
        if check_iterable_depth(param) > max_depth:
            raise TypeError(msg)
    if (not iterable) and isiterable(param):
        raise TypeError(msg)

    # check param is correct dtype
    if not (param_dt == np.array(flatten(param)).astype(float)).all():
        raise TypeError(msg)

    # check constraint
    if constraint is not None:
        # NOTE(review): this eval assumes repr(param_dt) round-trips and that
        # `constraint` is a trusted expression fragment like ' > 0'.
        if not (eval('np.' + repr(param_dt) + constraint)).all():
            raise ValueError(msg)

    return param
def get_link_domain(link, dist):
    """
    Identify the domain of a monotonic link function by probing it.

    Parameters
    ----------
    link : Link object
    dist : Distribution object

    Returns
    -------
    domain : list of length 2, representing the interval of the domain.
    """
    # probe a few canonical points; NaN marks points outside the domain
    probes = np.array([-np.inf, -1, 0, 1, np.inf])
    valid = probes[~np.isnan(link.link(probes, dist))]
    return [valid[0], valid[-1]]
def load_diagonal(cov, load=None):
    """Return the given square matrix with a small amount added to the
    diagonal to make it positive semi-definite.

    Parameters
    ----------
    cov : np.ndarray of shape (n, n)
        square matrix to load
    load : float, default: None
        amount added to the diagonal; defaults to sqrt(machine epsilon)

    Returns
    -------
    np.ndarray of shape (n, n)

    Raises
    ------
    ValueError : if cov is not square
    """
    n, m = cov.shape
    if n != m:
        # raise instead of assert so the check survives `python -O`
        raise ValueError("matrix must be square, but found shape {}".format((n, m)))
    if load is None:
        load = np.sqrt(np.finfo(np.float64).eps)  # sqrt of machine epsilon
    return cov + np.eye(n) * load
def round_to_n_decimal_places(array, n=3):
    """
    Round an array to n decimal places (3 by default).

    Floats already rendered in scientific notation are returned untouched.

    Parameters
    ----------
    array : np.array
    n : int. number of decimal places to keep

    Returns
    -------
    array : rounded np.array
    """
    # leave scientific-notation floats alone
    if issubclass(array.__class__, float) and '%.e' % array == str(array):
        return array

    original_shape = np.shape(array)
    scale = 10. ** n
    rounded = (np.atleast_1d(array) * scale).round().astype('int') / scale
    return rounded.reshape(original_shape)
# Credit to Hugh Bothwell from http://stackoverflow.com/questions/5084743/how-to-print-pretty-string-output-in-python
class TablePrinter(object):
    "Print a list of dicts as a table"

    def __init__(self, fmt, sep=' ', ul=None):
        """
        @param fmt: list of tuple(heading, key, width)
        heading: str, column label
        key: dictionary key to value to print
        width: int, column width in chars
        @param sep: string, separation between columns
        @param ul: string, character to underline column label, or None for no underlining
        """
        super(TablePrinter,self).__init__()
        # builds a format string like '{name:10} {age:3}', one field per column
        self.fmt = str(sep).join('{lb}{0}:{1}{rb}'.format(key, width, lb='{', rb='}') for heading,key,width in fmt)
        self.head = {key:heading for heading,key,width in fmt}
        # underline row: each column filled with the ul character
        self.ul = {key:str(ul)*width for heading,key,width in fmt} if ul else None
        self.width = {key:width for heading,key,width in fmt}

    def row(self, data):
        """Format one dict as a single table row, truncating each value
        to its column width; missing keys render as empty strings."""
        # iteritems() is the Python 2 spelling of items()
        if sys.version_info < (3,):
            return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.iteritems() })
        else:
            return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.items() })

    def __call__(self, dataList):
        """Render a list of dicts as a multi-line table string, with the
        header row first and an optional underline row second."""
        _r = self.row
        res = [_r(data) for data in dataList]
        res.insert(0, _r(self.head))
        if self.ul:
            res.insert(1, _r(self.ul))
        return '\n'.join(res)
def space_row(left, right, filler=' ', total_width=-1):
    """Join left- and right-aligned text with optional filling.

    Arguments
    ---------
    left : str, to be aligned left
    right : str, to be aligned right
    filler : str, default ' '.
        must be of length 1
    total_width : int, width of line.
        if negative number is specified,
        then that number of spaces is used between the left and right text

    Returns
    -------
    str
    """
    left, right = str(left), str(right)
    pad = str(filler)[:1]

    if total_width < 0:
        gap = -total_width
    else:
        gap = total_width - len(left) - len(right)

    return left + pad * gap + right
def sig_code(p_value):
    """Return a significance code in the style of R's lm.

    Arguments
    ---------
    p_value : float on [0, 1]

    Returns
    -------
    str
    """
    assert 0 <= p_value <= 1, 'p_value must be on [0, 1]'
    # thresholds checked in increasing order; first match wins
    for threshold, code in ((0.001, '***'), (0.01, '**'),
                            (0.05, '*'), (0.1, '.')):
        if p_value < threshold:
            return code
    return ' '
def gen_edge_knots(data, dtype, verbose=True):
    """
    Generate uniform knots from data, including the edges of the data.

    For discrete data, assumes k categories in [0, k-1] interval.

    Parameters
    ----------
    data : array-like with one dimension
    dtype : str in {'categorical', 'numerical'}
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    np.array containing ordered knots
    """
    if dtype not in ['categorical', 'numerical']:
        raise ValueError('unsupported dtype: {}'.format(dtype))

    lo, hi = np.min(data), np.max(data)
    if dtype == 'categorical':
        # pad by half a category so the edge categories fall inside the knots
        return np.r_[lo - 0.5, hi + 0.5]

    knots = np.r_[lo, hi]
    if knots[0] == knots[1] and verbose:
        warnings.warn('Data contains constant feature. '\
                      'Consider removing and setting fit_intercept=True',
                      stacklevel=2)
    return knots
def b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True,
                   periodic=True, verbose=True):
    """
    tool to generate b-spline basis using vectorized De Boor recursion
    the basis functions extrapolate linearly past the end-knots.

    Parameters
    ----------
    x : array-like, with ndims == 1.
    edge_knots : array-like contaning locations of the 2 edge knots.
    n_splines : int. number of splines to generate. must be >= spline_order+1
                default: 20
    spline_order : int. order of spline basis to create
                   default: 3
    sparse : boolean. whether to return a sparse basis matrix or not.
             default: True
    periodic: bool, default: True
        whether to repeat basis functions (True) or linearly extrapolate (False).
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    basis : sparse csc matrix or array containing b-spline basis functions
            with shape (len(x), n_splines)

    Raises
    ------
    ValueError : if x is not 1-D or the spline parameters are invalid
    """
    if np.ravel(x).ndim != 1:
        raise ValueError('Data must be 1-D, but found {}'\
                         .format(np.ravel(x).ndim))

    if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):
        raise ValueError('n_splines must be int >= 1')

    if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):
        raise ValueError('spline_order must be int >= 1')

    if n_splines < spline_order + 1:
        raise ValueError('n_splines must be >= spline_order + 1. '\
                         'found: n_splines = {} and spline_order = {}'\
                         .format(n_splines, spline_order))

    # fixed: this previously tested `n_splines == 0`, which is unreachable
    # because n_splines < 1 raises above; the intended case is exactly 1.
    if n_splines == 1 and verbose:
        warnings.warn('Requested 1 spline. This is equivalent to '\
                      'fitting an intercept', stacklevel=2)

    # periodic bases need spline_order extra splines to wrap the domain
    n_splines += spline_order * periodic

    # rescale edge_knots to [0,1], and generate boundary knots
    edge_knots = np.sort(deepcopy(edge_knots))
    offset = edge_knots[0]
    scale = edge_knots[-1] - edge_knots[0]
    if scale == 0:
        scale = 1
    boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)
    diff = np.diff(boundary_knots[:2])[0]

    # rescale x as well
    x = (np.ravel(deepcopy(x)) - offset) / scale

    # wrap periodic values
    if periodic:
        x = x % (1 + 1e-9)

    # append 0 and 1 in order to get derivatives for extrapolation
    x = np.r_[x, 0., 1.]

    # determine extrapolation indices
    x_extrapolte_l = (x < 0)
    x_extrapolte_r = (x > 1)
    x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)

    # formatting
    x = np.atleast_2d(x).T

    # augment knots
    aug = np.arange(1, spline_order + 1) * diff
    aug_knots = np.r_[-aug[::-1],
                      boundary_knots,
                      1 + aug]
    aug_knots[-1] += 1e-9  # want last knot inclusive

    # prepare Haar Basis
    # note: builtin `int` replaces the np.int alias removed in NumPy 1.24
    bases = (x >= aug_knots[:-1]).astype(int) * \
            (x < aug_knots[1:]).astype(int)
    bases[-1] = bases[-2][::-1]  # force symmetric bases at 0 and 1

    # do recursion from Hastie et al. vectorized
    maxi = len(aug_knots) - 1
    for m in range(2, spline_order + 2):
        maxi -= 1

        # left sub-basis
        num = (x - aug_knots[:maxi])
        num *= bases[:, :maxi]
        denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]
        left = num/denom

        # right sub-basis
        num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]
        denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]
        right = num/denom

        # track previous bases and update
        prev_bases = bases[-2:]
        bases = left + right

    if periodic and spline_order > 0:
        # make spline domain periodic
        bases[:, :spline_order] = np.max([bases[:, :spline_order],
                                          bases[:, -spline_order:]],
                                         axis=0)
        # remove extra splines used only for ensuring correct domain
        bases = bases[:, :-spline_order]

    # extrapolate
    # since we have repeated end-knots, only the last 2 basis functions are
    # non-zero at the end-knots, and they have equal and opposite gradient.
    if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order>0:
        bases[~x_interpolate] = 0.

        denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])
        left = prev_bases[:, :-1] / denom

        denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])
        right = prev_bases[:, 1:] / denom

        grads = (spline_order) * (left - right)

        if any(x_extrapolte_l):
            val = grads[0] * x[x_extrapolte_l] + bases[-2]
            bases[x_extrapolte_l] = val
        if any(x_extrapolte_r):
            val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1]
            bases[x_extrapolte_r] = val

    # get rid of the added values at 0, and 1
    bases = bases[:-2]

    if sparse:
        return sp.sparse.csc_matrix(bases)

    return bases
def ylogydu(y, u):
    """
    Compute y * log(y/u) elementwise, returning 0 for the limit y -> 0.

    Parameters
    ----------
    y : array-like of len(n)
    u : array-like of len(n)

    Returns
    -------
    np.array len(n)
    """
    # coerce to float arrays so that:
    # - boolean-mask indexing works on plain lists and scalars
    #   (previously `y[mask]` raised TypeError for a list input), and
    # - an integer `u` no longer silently truncates the float output
    #   (np.zeros_like used to inherit u's integer dtype)
    y = np.atleast_1d(np.asarray(y, dtype='float'))
    u = np.atleast_1d(np.asarray(u, dtype='float'))
    mask = (y != 0.)
    out = np.zeros_like(u)
    out[mask] = y[mask] * np.log(y[mask] / u[mask])
    return out
def combine(*args):
    """
    Build all combinations of the input lists via tree recursion.

    Useful for developing the grid in a grid search.

    Parameters
    ----------
    args : list of lists

    Returns
    -------
    list of all the combinations of the elements in the input lists
    """
    # base case: a single list becomes a list of one-element combinations
    if not (hasattr(args, '__iter__') and len(args) > 1):
        return [[item] for item in args[0]]

    combos = []
    for prefix in combine(*args[:-1]):
        for tail in args[-1]:
            if hasattr(prefix, '__iter__'):
                combos.append(prefix + [tail])
            else:
                combos.append([prefix] + [tail])
    return combos
def isiterable(obj, reject_string=True):
    """convenience tool to detect if something is iterable.
    in python3, strings count as iterables so we have the option to exclude them

    Parameters:
    -----------
    obj : object to analyse
    reject_string : bool, whether to ignore strings

    Returns:
    --------
    bool, if the object is iterable.
    """
    # strings are screened out first when requested
    if reject_string and isinstance(obj, str):
        return False
    # __len__ is used as the proxy for iterability (sized containers),
    # so generators are deliberately not counted
    return hasattr(obj, '__len__')
def check_iterable_depth(obj, max_depth=100):
    """find the maximum depth of nesting of the iterable

    Parameters
    ----------
    obj : iterable
    max_depth : int, default: 100
        maximum depth beyond which we stop counting

    Returns
    -------
    int
    """
    depth = 0
    current = obj
    # peel one level of nesting per pass; stop when nothing iterable remains
    # (or the safety cap is reached)
    while depth < max_depth and isiterable(current) and len(current) > 0:
        depth += 1
        # gather the flattened contents of every iterable child
        current = [grandchild
                   for child in current if isiterable(child)
                   for grandchild in child]
    return depth
def flatten(iterable):
    """convenience tool to flatten any nested iterable

    example:

    flatten([[[],[4]],[[[5,[6,7, []]]]]])
    >>> [4, 5, 6, 7]

    flatten('hello')
    >>> 'hello'

    Parameters
    ----------
    iterable

    Returns
    -------
    flattened object
    """
    # non-iterables (including strings, per isiterable) pass through untouched
    if not isiterable(iterable):
        return iterable
    result = []
    for element in list(iterable):
        flattened = flatten(element)
        # wrap scalars so concatenation is uniform
        result += flattened if isiterable(flattened) else [flattened]
    return result
def tensor_product(a, b, reshape=True):
    """
    compute the tensor product of two matrices a and b

    if a is (n, m_a), b is (n, m_b),
    then the result is
        (n, m_a * m_b) if reshape = True.
    or
        (n, m_a, m_b) otherwise

    Parameters
    ---------
    a : array-like of shape (n, m_a)
    b : array-like of shape (n, m_b)
    reshape : bool, default True
        whether to reshape the result to be 2-dimensional ie
        (n, m_a * m_b)
        or return a 3-dimensional tensor ie
        (n, m_a, m_b)

    Returns
    -------
    dense np.ndarray of shape
        (n, m_a * m_b) if reshape = True.
    or
        (n, m_a, m_b) otherwise

    Raises
    ------
    ValueError
        if a or b is not 2-dimensional, or if they do not have the same
        number of rows
    """
    # raise ValueError instead of assert: asserts are stripped under
    # `python -O`, and ValueError matches the row-mismatch check below
    if a.ndim != 2:
        raise ValueError('matrix a must be 2-dimensional, '
                         'but found {} dimensions'.format(a.ndim))
    if b.ndim != 2:
        raise ValueError('matrix b must be 2-dimensional, '
                         'but found {} dimensions'.format(b.ndim))

    na, ma = a.shape
    nb, mb = b.shape

    if na != nb:
        raise ValueError('both arguments must have the same number of samples')

    # densify sparse inputs; `.toarray()` replaces the deprecated `.A` shortcut
    if sp.sparse.issparse(a):
        a = a.toarray()

    if sp.sparse.issparse(b):
        b = b.toarray()

    # batched outer product along the feature axes:
    # (n, m_a, 1) * (n, 1, m_b) -> (n, m_a, m_b)
    tensor = a[..., :, None] * b[..., None, :]

    if reshape:
        return tensor.reshape(na, ma * mb)

    return tensor
|
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from tcp import TCPCollector
################################################################################
class TestTCPCollector(CollectorTestCase):
    """Unit tests for TCPCollector, driving collect() with mocked /proc data."""

    def setUp(self, allowed_names=None):
        # Rebuild the collector with a specific 'allowed_names' whitelist;
        # tests call setUp() again mid-test to reconfigure.
        if not allowed_names:
            allowed_names = []
        config = get_collector_config('TCPCollector', {
            'allowed_names': allowed_names,
            'interval': 1
        })
        self.collector = TCPCollector(config, None)

    def test_import(self):
        # sanity check: the collector class imported successfully
        self.assertTrue(TCPCollector)

    @patch('os.access', Mock(return_value=True))
    @patch('__builtin__.open')
    @patch('diamond.collector.Collector.publish')
    def test_should_open_proc_net_netstat(self, publish_mock, open_mock):
        # collect() must read exactly the configured /proc source
        TCPCollector.PROC = ['/proc/net/netstat']
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/net/netstat')

    @patch('os.access', Mock(return_value=True))
    @patch('__builtin__.open')
    @patch('diamond.collector.Collector.publish')
    def test_should_work_with_synthetic_data(self, publish_mock, open_mock):
        TCPCollector.PROC = ['/proc/net/netstat']
        # restrict publishing to counters A and C only
        self.setUp(['A', 'C'])

        open_mock.return_value = StringIO('''
TcpExt: A B C
TcpExt: 0 0 0
'''.strip())

        # first collect() only primes the counters; nothing is published
        self.collector.collect()

        self.assertPublishedMany(publish_mock, {})

        open_mock.return_value = StringIO('''
TcpExt: A B C
TcpExt: 0 1 2
'''.strip())

        self.collector.collect()

        # only the two allowed names should have been published
        self.assertEqual(len(publish_mock.call_args_list), 2)

        metrics = {
            'A': 0,
            'C': 2,
        }

        self.assertPublishedMany(publish_mock, metrics)

    @patch('diamond.collector.Collector.publish')
    def test_should_work_with_real_data(self, publish_mock):
        # whitelist a handful of counters and replay two real fixture snapshots
        self.setUp(['ListenOverflows', 'ListenDrops', 'TCPLoss', 'TCPTimeouts'])
        TCPCollector.PROC = [self.getFixturePath('proc_net_netstat_1')]
        self.collector.collect()

        # first pass primes the counters
        self.assertPublishedMany(publish_mock, {})

        TCPCollector.PROC = [self.getFixturePath('proc_net_netstat_2')]
        self.collector.collect()

        # expected deltas between the two fixture snapshots
        metrics = {
            'ListenOverflows': 0,
            'ListenDrops': 0,
            'TCPLoss': 188,
            'TCPTimeouts': 15265
        }

        self.assertPublishedMany(publish_mock, metrics)

    @patch('diamond.collector.Collector.publish')
    def test_should_work_with_all_data(self, publish_mock):
        # empty whitelist -> every counter from both netstat and snmp is published
        self.setUp([])
        TCPCollector.PROC = [
            self.getFixturePath('proc_net_netstat_1'),
            self.getFixturePath('proc_net_snmp_1'),
        ]
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})

        TCPCollector.PROC = [
            self.getFixturePath('proc_net_netstat_2'),
            self.getFixturePath('proc_net_snmp_2'),
        ]
        self.collector.collect()

        # expected deltas for the full counter set between the two snapshots
        metrics = {
            'TCPMD5Unexpected': 0.0,
            'ArpFilter': 0.0,
            'TCPBacklogDrop': 0.0,
            'TCPDSACKRecv': 1580.0,
            'TCPDSACKIgnoredOld': 292.0,
            'MaxConn': (-1.0),
            'RcvPruned': 0.0,
            'TCPSackMerged': 1121.0,
            'OutOfWindowIcmps': 10.0,
            'TCPDeferAcceptDrop': 0.0,
            'TCPLossUndo': 6538.0,
            'TCPHPHitsToUser': 5667.0,
            'TCPTimeouts': 15265.0,
            'TCPForwardRetrans': 41.0,
            'TCPTSReorder': 0.0,
            'RtoMin': 0.0,
            'TCPAbortOnData': 143.0,
            'TCPFullUndo': 0.0,
            'TCPSackRecoveryFail': 13.0,
            'InErrs': 0.0,
            'TCPAbortOnClose': 38916.0,
            'TCPAbortOnTimeout': 68.0,
            'TCPFACKReorder': 0.0,
            'LockDroppedIcmps': 4.0,
            'RtoMax': 0.0,
            'TCPSchedulerFailed': 0.0,
            'EstabResets': 0.0,
            'DelayedACKs': 125491.0,
            'TCPSACKReneging': 0.0,
            'PruneCalled': 0.0,
            'OutRsts': 0.0,
            'TCPRenoRecoveryFail': 0.0,
            'TCPSackShifted': 2356.0,
            'DelayedACKLocked': 144.0,
            'TCPHPHits': 10361792.0,
            'EmbryonicRsts': 0.0,
            'TCPLossFailures': 7.0,
            'TWKilled': 0.0,
            'TCPSACKDiscard': 0.0,
            'TCPAbortFailed': 0.0,
            'TCPSackRecovery': 364.0,
            'TCPDirectCopyFromBacklog': 35660.0,
            'TCPFastRetrans': 1184.0,
            'TCPPartialUndo': 0.0,
            'TCPMinTTLDrop': 0.0,
            'SyncookiesSent': 0.0,
            'OutSegs': 0.0,
            'TCPSackShiftFallback': 3091.0,
            'RetransSegs': 0.0,
            'IPReversePathFilter': 0.0,
            'TCPRcvCollapsed': 0.0,
            'TCPDSACKUndo': 2448.0,
            'SyncookiesFailed': 9.0,
            'TCPSACKReorder': 0.0,
            'TCPDSACKOldSent': 10175.0,
            'TCPAbortOnLinger': 0.0,
            'TCPSpuriousRTOs': 9.0,
            'TCPRenoRecovery': 0.0,
            'TCPPrequeued': 114232.0,
            'TCPLostRetransmit': 7.0,
            'TCPLoss': 188.0,
            'TCPHPAcks': 12673896.0,
            'TCPDSACKOfoRecv': 0.0,
            'TWRecycled': 0.0,
            'TCPRenoFailures': 0.0,
            'OfoPruned': 0.0,
            'TCPMD5NotFound': 0.0,
            'ActiveOpens': 0.0,
            'TCPDSACKIgnoredNoUndo': 1025.0,
            'TCPPrequeueDropped': 0.0,
            'RtoAlgorithm': 0.0,
            'TCPAbortOnMemory': 0.0,
            'TCPTimeWaitOverflow': 0.0,
            'TCPAbortOnSyn': 0.0,
            'TCPDirectCopyFromPrequeue': 19340531.0,
            'DelayedACKLost': 10118.0,
            'PassiveOpens': 0.0,
            'InSegs': 1.0,
            'PAWSPassive': 0.0,
            'TCPRenoReorder': 0.0,
            'CurrEstab': 3.0,
            'TW': 89479.0,
            'AttemptFails': 0.0,
            'PAWSActive': 0.0,
            'ListenDrops': 0.0,
            'SyncookiesRecv': 0.0,
            'TCPDSACKOfoSent': 0.0,
            'TCPSlowStartRetrans': 2540.0,
            'TCPMemoryPressures': 0.0,
            'PAWSEstab': 0.0,
            'TCPSackFailures': 502.0,
            'ListenOverflows': 0.0,
            'TCPPureAcks': 1003528.0,
        }

        # record this run as the documentation example for the collector
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
|
|
import json
import io
import pytest
import GoogleSheets
import os
from googleapiclient.discovery import build
from googleapiclient.http import HttpMock
from googleapiclient.http import HttpMockSequence
def util_load_json(path):
    """Read the UTF-8 file at *path* and return its parsed JSON content."""
    with io.open(path, mode='r', encoding='utf-8') as handle:
        return json.load(handle)
# HELPER FUNCTIONS
def test_parse_sheets_for_get_response():
    '''
    Given:
        - The sheets list from the Google api response and an argument include_grid_data = True
    When:
        - We want to process and filter the sheets data given by the get_spreadsheet command
    Then:
        - return a filtered list of the sheets with the relevant data
    '''
    path = 'test_data/helper_functions/test_parse_sheets_for_get_response/'
    result = util_load_json(os.path.join(path, 'res_parse.json'))
    # include_grid_data=True, so the parsed result is expected to keep cell data
    assert result == GoogleSheets.parse_sheets_for_get_response(util_load_json(path + 'sheets.json'), True)
def test_make_markdown_matrix():
    '''
    Given:
        - The sheets after they have been processed by context_single_get_parse
    When:
        - we prepare the human-readable response and include_grid_data is True
    Then:
        - return a Markdown table with the headers of the sheets and the data inside them
    '''
    path = 'test_data/helper_functions/test_make_markdown_matrix/'
    # expected markdown is stored as a fixture file next to the input sheets
    with open(path + 'result.md', 'r') as file:
        result = file.read()
    assert GoogleSheets.make_markdown_matrix(util_load_json(os.path.join(path, 'sheets.json'))) == result
def test_prepare_result_with_echo(mocker):
    '''
    Given:
        - a response from google api and the demisto args
    When:
        - when echo_spreadsheet is true, we want to prepare the command result
    Then:
        - return the command result
    '''
    # stub the sheet-id/title summary so the markdown output is deterministic
    mocker.patch.object(GoogleSheets, 'create_list_id_title',
                        return_value=[{'SheetId': 783932040, 'Sheet title': "new sheet"},
                                      {'SheetId': 0, 'Sheet title': "Sheet1"}])
    response = util_load_json('test_data/helper_functions/test_prepare_result_echo/response.json')
    command_result = GoogleSheets.prepare_result(response, {"echo_spreadsheet": "true"}, "")
    with open('test_data/helper_functions/test_prepare_result_echo/markdown_result.md', 'r') as file:
        markdown_assert = file.read()
    # with echo enabled the raw response is echoed back as the context output
    assert command_result.readable_output == markdown_assert
    assert command_result.outputs == response
def test_prepare_result_without_echo():
    '''
    Given:
        - a response from google api and the demisto args
    When:
        - when echo spreadsheet is false and we want to prepare the command result
    Then:
        - return the command result
    '''
    command_result = GoogleSheets.prepare_result({}, {"echo_spreadsheet": "false"}, "")
    assert command_result.readable_output == '### Successfully \n'
def test_create_list_id_title():
    '''
    Given:
        - a sheets array from the google-sheets api response
    When:
        - a command is being called with echo_spreadsheet argument is true
    Then:
        - return a proper markdown format string
    '''
    sheets = util_load_json('test_data/helper_functions/test_create_list_id_title/sheets.json')
    assert GoogleSheets.create_list_id_title(sheets) == util_load_json(
        'test_data/helper_functions/test_create_list_id_title/create_list_id_title_response.json')
# (raw user input, expected parsed matrix) pairs for handle_values_input
handle_values_input_parametrized = [
    ("[1,2,3],[4,5,6]", [['1', '2', '3'], ['4', '5', '6']]),
    ("[1,2]", [['1', '2']]),
    ("[1]", [['1']]),
    ("[]", [['']])
]


@pytest.mark.parametrize("test_input,expected", handle_values_input_parametrized)
def test_handle_values_input(test_input, expected):
    '''
    Given:
        - an input of values from the user in the format of [x,y,z],[1,2,3]
    When:
        - we want to apply one of the update function with values input
    Then:
        - return the values in a way suitable for the google-sheets api request
          in the format of [[1,2,3],[4,5,6]...]
    '''
    assert GoogleSheets.handle_values_input(test_input) == expected
# malformed value strings that handle_values_input must reject
handle_values_input_parametrized_exception = [
    ("1,2,3"),
    ("[4,5"),
    ("{3,4}"),
    ("(1,2,3)"),
    (None)
]


@pytest.mark.parametrize("test_input", handle_values_input_parametrized_exception)
def test_handle_values_input_exception(test_input):
    '''
    Given:
        - an input of values from the user in the wrong format
    When:
        - we want to apply one of the update function with values input
    Then:
        - a ValueError exception with a message for the user
    '''
    with pytest.raises(ValueError) as exc_info:
        GoogleSheets.handle_values_input(test_input)
    assert str(exc_info.value) == 'Wrong format of values entered, please check the documentation'
def test_markdown_single_get(mocker):
    '''
    Given:
        - a response from the google api of a get spreadsheet
    When:
        - we want to process the human readable for the war room
    Then:
        - return the markdown format
    '''
    path = 'test_data/helper_functions/test_markdown_single_get/'
    # stub the id/title summary so the generated markdown is deterministic
    mocker.patch.object(GoogleSheets, 'create_list_id_title', return_value=[{'SheetId': 0, 'Sheet title': 'Sheet1'}])
    response = util_load_json(os.path.join(path, 'get_response.json'))
    markdown = GoogleSheets.markdown_single_get(response)
    with open(os.path.join(path, 'markdown_assert.md'), 'r') as file:
        markdown_assert = file.read()
    assert markdown == markdown_assert
# (include_grid_data, ranges argument, expected effective ranges) combinations
grid_ranges_combinations = [(True, "Sheet1!A1:D5", "Sheet1!A1:D5"),
                            (True, None, "new sheet!A1:T500"),
                            (False, "Sheet1!A1:D5", "Sheet1!A1:D5"),
                            (False, None, None)]


@pytest.mark.parametrize("include_grid_data ,ranges, expected", grid_ranges_combinations)
def test_default_ranges_if_not_specified(include_grid_data, ranges, expected):
    '''
    Given:
        - spreadsheetId, ranges, include_grid_data Google service
    When:
        - we want to check if include_grid_data was specified but not the ranges argument
    Then:
        - if include_grid_data is true and ranges not specified return default ranges else return ranges
    '''
    path = 'test_data/helper_functions/test_default_ranges_if_not_specified/'
    # HttpMock serves the canned api response instead of hitting the network
    http = HttpMock(os.path.join(path, 'response.json'), {'status': '200'})
    api_key = 'your_api_key'
    service = build('sheets', 'v4', http=http, developerKey=api_key)
    res = GoogleSheets.default_ranges_if_not_specified("fake", ranges, include_grid_data, service)
    assert res == expected
# CREATE SPREADSHEET TEST
def test_create_spreadsheet():
    '''
    Given:
        - 'google-sheets-spreadsheet-create' is called to be executed with args to the api
    When:
        - the command is being called from main
    Then:
        - return a command result with the proper readable output and context outputs
    '''
    path = "test_data/create_spreadsheet/"
    # HttpMock serves the canned api response instead of hitting the network
    http = HttpMock(os.path.join(path, 'response.json'), {'status': '200'})
    api_key = 'your_api_key'
    service = build('sheets', 'v4', http=http, developerKey=api_key)
    args = util_load_json(path + 'args.json')
    command_result = GoogleSheets.create_spreadsheet(service, args)
    with open(os.path.join(path, 'command_results_readable_output.md'), 'r') as file:
        markdown_assert = file.read()
    assert command_result.readable_output == markdown_assert
    assert command_result.outputs == util_load_json(os.path.join(path, 'command_results_outputs.json'))
# GET SPREADSHEET TESTS
# exercised once without grid data and once with include_grid_data fixtures
@pytest.mark.parametrize("path", ['test_data/get_spreadsheet/single_spreadsheet/',
                                  'test_data/get_spreadsheet/single_spreadsheet_include_grid_data/'])
def test_get_single_spreadsheet(path):
    '''
    Given:
        - 'google-sheets-spreadsheet-get' is being called to be executed with a single id in the args
    When:
        - the command is being called from main
    Then:
        - return the proper readable output and context upon failure an exception will be raised from google.
    '''
    http = HttpMock(path + 'response.json', {'status': '200'})
    api_key = 'your_api_key'
    service = build('sheets', 'v4', http=http, developerKey=api_key)
    command_result = GoogleSheets.get_spreadsheet(service, util_load_json(os.path.join(path, 'args.json')))
    with open(path + 'markdown.md', 'r') as file:
        markdown_assert = file.read()
    assert command_result.readable_output == markdown_assert
    assert command_result.outputs == util_load_json(os.path.join(path, 'output.json'))
def test_get_multiple_spreadsheets():
    '''
    Given:
        - 'google-sheets-spreadsheet-get' is being called to be executed with multiple ids in the args
    When:
        - the command is being called from main
    Then:
        - return the proper readable output and context upon failure an exception will be raised from google.
    '''
    path = 'test_data/get_spreadsheet/multiple_spreadsheet/'
    args = {
        'spreadsheet_id': "13YRXawxY54RI0uPjD_BQmw31zwaAYQ53I0mxbWlhTy8,1btQWA8icPTiVd-HIXOLpzetcoXFo77deZ3tExukEk-w"
    }
    # one canned response per requested spreadsheet id, served in order
    http = HttpMockSequence([
        ({'status': '200'}, json.dumps(util_load_json(os.path.join(path, 'response1.json')))),
        ({'status': '200'}, json.dumps(util_load_json(os.path.join(path, 'response2.json'))))])
    api_key = 'your_api_key'
    service = build('sheets', 'v4',
                    http=http,
                    developerKey=api_key)
    command_result = GoogleSheets.get_spreadsheet(service, args)
    with open(os.path.join(path, 'markdown.md'), 'r') as file:
        markdown_assert = file.read()
    assert command_result.readable_output == markdown_assert
    # multi-id gets produce readable output only; no context outputs
    assert command_result.outputs is None
# UPDATE SPREADSHEET TESTS
def test_value_update():
    '''
    Given:
        - 'google-sheets-value-update' is being called to be executed with args to the api
    When:
        - the command is being called from main
    Then:
        - if successful return the proper readable output else google api will throw an error
    '''
    http = HttpMock('test_data/update_spreadsheet/test_value_update/response.json', {'status': '200'})
    api_key = 'your_api_key'
    service = build('sheets', 'v4', http=http, developerKey=api_key)
    args = util_load_json("test_data/update_spreadsheet/test_value_update/command_mock.json")
    command_result = GoogleSheets.value_update_sheets(service, args)
    assert command_result.readable_output == '### Successfully updated sheet values'
# exercised once with echo_spreadsheet=true and once with echo_spreadsheet=false
@pytest.mark.parametrize("path", ['test_data/update_spreadsheet/test_sheet_create/',
                                  'test_data/update_spreadsheet/test_sheet_create_no_echo/'])
def test_sheet_create_both_ways(path):
    '''
    Given:
        - 'google-sheets-sheet-create' is called to be executed with args to the api
          test1: echo_spreadsheet argument = true
          test2: echo_spreadsheet argument = false
    When:
        - the command is being called from main
    Then:
        - returns a command result with the proper readable output and context
    '''
    http = HttpMock(path + 'response.json', {'status': '200'})
    api_key = 'your_api_key'
    service = build('sheets', 'v4', http=http, developerKey=api_key)
    args = util_load_json(os.path.join(path, 'args.json'))
    command_result = GoogleSheets.create_sheet(service, args)
    assert command_result.outputs == util_load_json(os.path.join(path, 'command_result_output.json'))
    with open(os.path.join(path, 'readable_output.md'), 'r') as file:
        markdown_assert = file.read()
    assert command_result.readable_output == markdown_assert
|
|
#
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Created on 7 Jun 2014
'''
from actuator import (Var, NamespaceModel, with_variables, NamespaceException,
Role, with_roles, MultiResource,
MultiResourceGroup, ctxt, ActuatorException,
StaticServer)
from actuator.namespace import RoleGroup, MultiRole, MultiRoleGroup
from actuator.infra import InfraModel
from actuator.modeling import AbstractModelReference
from actuator.provisioners.example_resources import Server
MyNS = None
def setup():
    """Build the shared MyNamespaceLocal model class used by the tests below."""
    global MyNS

    class FakeLogicalRef(AbstractModelReference):
        # minimal stand-in for a model reference; just stores one value
        def __init__(self, v=None):
            self.v = v
            self._name = "v"
            self._obj = self

        def value(self):
            # bypass any attribute interception on AbstractModelReference
            return object.__getattribute__(self, "v")

    class FakeInfra(object):
        # returns refs unchanged; enough to satisfy the namespace machinery
        def get_inst_ref(self, fakeref):
            return fakeref

    class MyNamespaceLocal(NamespaceModel):
        # ONE/TWO/THREE deliberately form a replacement cycle for the cycle tests
        with_variables(Var("HOST", "wibble"),
                       Var("PORT", "1234"),
                       Var("REGION", "NY"),
                       Var("SIMPLE", "!{REGION}"),
                       Var("HOST-REGION", "!{REGION}-!{HOST}"),
                       Var("ONE", "!{TWO}"),
                       Var("TWO", "!{THREE}"),
                       Var("THREE", "!{ONE}"),
                       Var("EMBEDDED", "some text with !{REGION} in it"),
                       Var("REPEATED", "!{HOST} is !{HOST}"),
                       Var("INCOMPLETE", "this won't expand; !{SORRY}"),
                       Var("NONE", None),
                       Var("REF_TEST_NONE", FakeLogicalRef(None)),
                       Var("REF_TEST_VALUE", FakeLogicalRef("gabagabahey")))

        def __init__(self):
            super(MyNamespaceLocal, self).__init__()
            self.infra = FakeInfra()

    MyNS = MyNamespaceLocal
def test001():
    """A globally-declared Var (HOST) is found on the namespace itself."""
    inst = MyNS()
    v, p = inst.find_variable("HOST")
    assert v and p == inst, "failed to find the global HOST variable"
def test002():
    """A simple !{VAR} reference expands to the referenced value."""
    inst = MyNS()
    v, p = inst.find_variable("SIMPLE")
    assert v.get_value(p) == "NY", "variable replacement failed"
def test003():
    """Multiple !{VAR} references in one value all expand."""
    inst = MyNS()
    v, p = inst.find_variable("HOST-REGION")
    assert v.get_value(p) == "NY-wibble", "multiple variable replacement failed"
def test004():
    """A replacement cycle (ONE->TWO->THREE->ONE) must raise NamespaceException."""
    inst = MyNS()
    v, p = inst.find_variable("ONE")
    try:
        _ = v.get_value(p)
        assert False, "Replacement cycle was not detected"
    except NamespaceException, _:
        pass
def test005():
    """Expansion preserves the literal text surrounding a !{VAR} reference."""
    inst = MyNS()
    v, p = inst.find_variable("EMBEDDED")
    assert v.get_value(p) == "some text with NY in it", "replacement didn't preserve text"
def test006():
    """Repeating the same !{VAR} twice is not mistaken for a cycle."""
    inst = MyNS()
    v, p = inst.find_variable("REPEATED")
    try:
        assert v.get_value(p) == "wibble is wibble"
    except NamespaceException, _:
        assert False, "This doesn't contain a cycle, just a repeated variable"
def test007():
    """A value referencing an undefined Var expands to None by default."""
    inst = MyNS()
    v, p = inst.find_variable("INCOMPLETE")
    assert v.get_value(p) is None, "an incomplete expansion returned a value when it shouldn't"
def test008():
    """allow_unexpanded=True returns the raw string with the unresolved reference."""
    inst = MyNS()
    v, p = inst.find_variable("INCOMPLETE")
    assert v.get_value(p, allow_unexpanded=True) == "this won't expand; !{SORRY}", \
        "allowing unexpanded returns didn't yield the expected value"
def test009():
    """add_variable rejects arguments that are not Var instances."""
    inst = MyNS()
    try:
        inst.add_variable(("YEP", "NOPE"))
        assert False, "Was allowed to add something that isn't a Var"
    except NamespaceException, _:
        pass
def test010():
    """with_variables rejects arguments that are not Var instances."""
    try:
        class MyNamespaceLocal(NamespaceModel):
            with_variables(("YEP", "NOPE"))
        _ = MyNamespaceLocal()
        assert False, "Was allowed to use with_variables with something not a Var"
    except NamespaceException, _:
        pass
def test011():
    """An override on one link of a Var cycle breaks the cycle."""
    inst = MyNS()
    inst.add_override(Var("THREE", "BROKENLOOP"))
    try:
        v, p = inst.find_variable("ONE")
        _ = v.get_value(p)
    except NamespaceException, _:
        assert False, "Override should have broken the cycle"
def test012():
    """add_variable replaces an existing Var of the same name."""
    inst = MyNS()
    inst.add_variable(Var("TWO", "and a half"))
    try:
        v, p = inst.find_variable("ONE")
        _ = v.get_value(p)
    except NamespaceException, _:
        assert False, "New Var should have replaced the old one"
def test013():
    """A Var declared with value None yields None."""
    inst = MyNS()
    v, p = inst.find_variable("NONE")
    assert v.get_value(p) is None, "Did not return None for an unset variable"
def test014():
    """A plain None value is not classified as an external reference."""
    inst = MyNS()
    v, _ = inst.find_variable("NONE")
    assert not v.value_is_external(), "None value is being identified as external"
def test015():
    """A logical ref holding None expands to None."""
    inst = MyNS()
    v, p = inst.find_variable("REF_TEST_NONE")
    assert v.get_value(p) is None
def test016():
    """A Var backed by a logical ref is classified as external even when None."""
    inst = MyNS()
    v, _ = inst.find_variable("REF_TEST_NONE")
    assert v.value_is_external()
def test017():
    """REF_TEST_VALUE expands to the value held by its logical ref."""
    inst = MyNS()
    v, p = inst.find_variable("REF_TEST_VALUE")
    # compare with == rather than `is`: identity of equal strings is an
    # interpreter implementation detail and must not be relied upon
    assert v.get_value(p) == "gabagabahey"
def test018():
    """A logical ref with a real value is classified as external."""
    inst = MyNS()
    v, _ = inst.find_variable("REF_TEST_VALUE")
    assert v.value_is_external()
def test019():
    """The instance _roles mapping agrees with the role attribute reference."""
    class NS19(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"))
        app_server = Role("app_server")

    inst = NS19()
    assert inst._roles["app_server"] == inst.app_server.value()
def test020():
    """Each namespace instance gets its own copy of the class-level role."""
    class NS20(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"))
        app_server = Role("app_server")

    inst = NS20()
    assert inst.app_server is not NS20.app_server
def test021():
    """Roles can be declared in bulk via with_roles(**dict) in the class body."""
    class NS21(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"))
        queries = {}
        for i in range(5):
            queries["query_%d" % i] = Role("query_%d" % i)
        with_roles(**queries)
        # remove the helper dict so it doesn't become a model attribute
        del queries

    assert NS21.query_1
def test022():
    """Multiple with_variables calls in one class body accumulate."""
    class NS22(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"))
        with_variables(Var("THIS", "TOO"))

    inst = NS22()
    v1, p1 = inst.find_variable("QUERY_PORT")
    v2, p2 = inst.find_variable("THIS")
    assert v1.get_value(p1) and v2.get_value(p2)
def test023():
    """Bulk roles may carry host_refs into an infra model's MultiResource."""
    class Infra23(InfraModel):
        app = Server("app")
        query = MultiResource(Server("query", mem="8GB"))
        grid = MultiResourceGroup("grid",
                                  handler=Server("handler", mem="8GB"),
                                  compute=Server("compute", mem="16GB"))

    class NS23(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"))
        app_server = Role("app_server", host_ref=Infra23.app)
        queries = {}
        for i in range(5):
            queries["query_%d" % i] = Role("query_%d" % i, host_ref=Infra23.query[i])
        with_roles(**queries)
        # remove the loop helpers so they don't become model attributes
        del i, queries

    assert NS23.query_0
def test24():
    """compute_provisioning_for_environ materializes one component per host_ref."""
    class Infra24(InfraModel):
        app = Server("app")
        query = MultiResource(Server("query", mem="8GB"))
        grid = MultiResourceGroup("grid",
                                  handler=Server("handler", mem="8GB"),
                                  compute=Server("compute", mem="16GB"))

    class NS24(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"))
        app_server = Role("app_server", host_ref=Infra24.app)
        queries = {}
        for i in range(5):
            queries["query_%d" % i] = Role("query_%d" % i, host_ref=Infra24.query[i])
        with_roles(**queries)
        del i, queries

    infra = Infra24("infra24")
    env = NS24()
    env.compute_provisioning_for_environ(infra)
    # 1 app server + 5 query servers
    assert len(infra.components()) == 6
def test25():
    """An infra ref used inside a Var value also drives provisioning."""
    class Infra25(InfraModel):
        app = Server("app")
        query = MultiResource(Server("query", mem="8GB"))
        grid = MultiResourceGroup("grid",
                                  handler=Server("handler", mem="8GB"),
                                  compute=Server("compute", mem="16GB"))

    class NS25(NamespaceModel):
        # APP_HOST references query[10], adding a 7th component beyond test24's 6
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"),
                       Var("APP_HOST", Infra25.query[10].provisionedName))
        app_server = Role("app_server", host_ref=Infra25.app)
        queries = {}
        for i in range(5):
            queries["query_%d" % i] = Role("query_%d" % i, host_ref=Infra25.query[i])
        with_roles(**queries)
        del i, queries

    infra = Infra25("infra25")
    env = NS25()
    env.compute_provisioning_for_environ(infra)
    assert len(infra.components()) == 7
def test26():
    """Overriding a Var that held an infra ref removes that ref's provisioning."""
    class Infra26(InfraModel):
        app = Server("app")
        query = MultiResource(Server("query", mem="8GB"))
        grid = MultiResourceGroup("grid",
                                  handler=Server("handler", mem="8GB"),
                                  compute=Server("compute", mem="16GB"))

    class NS26(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"),
                       Var("APP_HOST", Infra26.app.provisionedName),
                       Var("QUERY_HOST", Infra26.query[0]))
        app_server = Role("app_server", host_ref=Infra26.app)

    infra = Infra26("infra26")
    env = NS26()
    # replace the ref-valued QUERY_HOST with a static string before provisioning
    env.add_override(Var("QUERY_HOST", "staticHostName"))
    env.compute_provisioning_for_environ(infra)
    assert len(infra.components()) == 1, "override didn't wipe out ref to a new query server"
def test27():
    """exclude_refs suppresses provisioning for the excluded model refs."""
    class Infra27(InfraModel):
        app = Server("app")
        query = MultiResource(Server("query", mem="8GB"))
        grid = MultiResourceGroup("grid",
                                  handler=Server("handler", mem="8GB"),
                                  compute=Server("compute", mem="16GB"))

    class NS27(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"),
                       Var("APP_HOST", Infra27.app.provisionedName),
                       Var("QUERY_HOST", Infra27.query[0]))
        app_server = Role("app_server", host_ref=Infra27.app)

    # fixed copy-paste from test26: the instance was mislabelled "infra26"
    infra = Infra27("infra27")
    env = NS27()
    env.add_override(Var("QUERY_HOST", "staticHostName"))
    provs = env.compute_provisioning_for_environ(infra, exclude_refs=[Infra27.query[0], Infra27.app])
    assert len(provs) == 0, "exclusions didn't wipe out the provisioning"
def test28():
    """MultiResource keys may be strings, and bulk roles can attach per-role Vars."""
    class Infra28(InfraModel):
        regional_server = MultiResource(Server("regional_server", mem="16GB"))

    nf = lambda x: "reg_srvr_%d" % x

    class NS28(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"),
                       Var("SERVER_ID", "server_!{ID}")
                       )
        # each role gets its own ID Var so SERVER_ID expands per-role
        servers = {nf(i): Role(nf(i),
                               host_ref=Infra28.regional_server[nf(i)])
                   .add_variable(Var("ID", str(i)))
                   for i in range(5)}
        with_roles(**servers)
        del servers

    ns = NS28()
    assert ns.reg_srvr_0 is not None
def test29():
    """future() on a role expands Vars using that role's own variables."""
    class Infra29(InfraModel):
        regional_server = MultiResource(Server("regional_server", mem="16GB"))

    nf = lambda x: "reg_srvr_%d" % x

    class NS29(NamespaceModel):
        with_variables(Var("APP_PORT", "8080"),
                       Var("QUERY_PORT", "8081"),
                       Var("GRID_PORT", "8082"),
                       Var("SERVER_ID", "server_!{ID}")
                       )
        servers = {nf(i): Role(nf(i),
                               host_ref=Infra29.regional_server[nf(i)])
                   .add_variable(Var("ID", str(i)))
                   for i in range(5)}
        with_roles(**servers)
        del servers

    ns = NS29()
    # role reg_srvr_0 carries ID=0, so SERVER_ID expands to "server_0"
    assert ns.reg_srvr_0.future("SERVER_ID").value() == "server_0"
def test30():
    """Nested expansion resolves per-role NAME and ID through a shared template."""
    class Infra30(InfraModel):
        regional_server = Server("regional_server", mem="16GB")

    nf = lambda x: "reg_srvr_%d" % x

    class NS30(NamespaceModel):
        with_variables(Var("TRICKY", "!{NAME} with id !{SERVER_ID}"),
                       Var("SERVER_ID", "server_!{ID}")
                       )
        server1 = (Role(nf(1), host_ref=Infra30.regional_server)
                   .add_variable(Var("ID", str(1)), Var("NAME", nf(1))))
        server2 = (Role(nf(2), host_ref=Infra30.regional_server)
                   .add_variable(Var("ID", str(2)), Var("NAME", nf(2))))

    ns = NS30()
    assert ns.server2.future("TRICKY").value() == "reg_srvr_2 with id server_2"
def test31():
    """get_visible_vars returns each Var with the role-local value winning."""
    nf = lambda x: "reg_srvr_%d" % x

    class NS31(NamespaceModel):
        # the global NAME is shadowed by each role's own NAME Var
        with_variables(Var("TRICKY", "!{NAME} with id !{SERVER_ID}"),
                       Var("SERVER_ID", "server_!{ID}"),
                       Var("NAME", "--WRONG!!")
                       )
        server1 = (Role(nf(1))
                   .add_variable(Var("ID", str(1)),
                                 Var("NAME", nf(1))))
        server2 = (Role(nf(2))
                   .add_variable(Var("ID", str(2)),
                                 Var("NAME", nf(2))))

    ns = NS31()
    expected = set([("NAME", "reg_srvr_2"),
                    ("ID", "2"),
                    ("TRICKY", "reg_srvr_2 with id server_2"),
                    ("SERVER_ID", "server_2")])
    results = set([(k, v.get_value(ns.server2))
                   for k, v in ns.server2.get_visible_vars().items()])
    assert expected == results
def test32():
    """After provisioning, namespace and roles both resolve the same infra model."""
    class Infra32(InfraModel):
        regional_server = Server("regional_server", mem="16GB")

    nf = lambda x: "reg_srvr_%d" % x

    class NS32(NamespaceModel):
        with_variables(Var("TRICKY", "!{NAME} with id !{SERVER_ID}"),
                       Var("SERVER_ID", "server_!{ID}")
                       )
        server1 = (Role(nf(1), host_ref=Infra32.regional_server)
                   .add_variable(Var("ID", str(1)), Var("NAME", nf(1))))

    ns = NS32()
    infra = Infra32("32")
    ns.compute_provisioning_for_environ(infra)
    assert ns.find_infra_model() is infra and ns.server1.find_infra_model() is infra
def test33():
    """Roles added post-construction via add_roles shadow model-level Vars."""
    class NS33(NamespaceModel):
        with_variables(Var("TEST", "NOPE"))

    ns = NS33()
    ns.add_roles(server1=Role("server1").add_variable(Var("TEST", "YEP")))
    assert ns.server1.future("TEST").value() == "YEP"
def test34():
    """add_roles copies the role: mutating the original afterwards has no effect."""
    class NS34(NamespaceModel):
        with_variables(Var("TEST", "NOPE"))

    ns = NS34()
    server1 = Role("server1").add_variable(Var("TEST", "YEP"))
    ns.add_roles(server1=server1)
    # this change lands on the detached original, not the namespace's copy
    server1.add_variable(Var("TEST", "--REALLY NOPE--"))
    assert ns.server1.future("TEST").value() == "YEP"
def test35():
    """Mutating via the namespace's own role reference does take effect."""
    class NS35(NamespaceModel):
        with_variables(Var("TEST", "NOPE"))

    ns = NS35()
    server1 = Role("server1").add_variable(Var("TEST", "YEP"))
    ns.add_roles(server1=server1)
    ns.server1.add_variable(Var("TEST", "YEP YEP"))
    assert ns.server1.future("TEST").value() == "YEP YEP"
def test36():
    """add_override on a role instance ref wins over the role's own Var."""
    class NS36(NamespaceModel):
        with_variables(Var("TEST", "NOPE"))

    ns = NS36()
    server1 = Role("server1").add_variable(Var("TEST", "YEP"))
    ns.add_roles(server1=server1, server2=Role("server2"))
    ns.server1.add_override(Var("TEST", "YEP YEP YEP"))
    assert ns.server1.future("TEST").value() == "YEP YEP YEP"
def test37():
    """A role-level Var shadows the model-level Var of the same name."""
    class NS37(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        daddy = Role("daddy").add_variable(Var("MYSTERY", "RIGHT!"))
        kid = Role("kid")

    ns = NS37()
    assert ns.daddy.future("MYSTERY").value() == "RIGHT!"
def test38():
    """Class-level role attributes are wrapped; not raw Role instances."""
    class NS38(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        daddy = Role("daddy").add_variable(Var("MYSTERY", "RIGHT!"))
        kid = Role("kid")

    assert not isinstance(NS38.daddy, Role)
def test39():
    """Class-level and instance-level role references are distinct objects."""
    class NS39(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        daddy = Role("daddy").add_variable(Var("MYSTERY", "RIGHT!"))
        kid = Role("kid")

    ns = NS39()
    assert NS39.daddy is not ns.daddy
def test40():
    # get_inst_ref() maps a class-level reference to the corresponding
    # per-instance object.
    class NS40(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        daddy = Role("daddy").add_variable(Var("MYSTERY", "RIGHT!"))
        kid = Role("kid")
    ns = NS40()
    assert ns.daddy is ns.get_inst_ref(NS40.daddy)
def test41():
    # The role's .name attribute is itself a reference object, not a raw
    # string (basestring: this module targets Python 2).
    class NS41(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        daddy = Role("daddy").add_variable(Var("MYSTERY", "RIGHT!"))
        kid = Role("kid")
    ns = NS41()
    assert not isinstance(ns.daddy.name, basestring)
def test42():
    # ...but the reference resolves to the role's name via value().
    class NS42(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        daddy = Role("daddy").add_variable(Var("MYSTERY", "RIGHT!"))
        kid = Role("kid")
    ns = NS42()
    assert ns.daddy.name.value() == "daddy"
def test43():
    # Roles nested inside a RoleGroup are reachable as chained attributes.
    class NS43(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        family = RoleGroup("family", daddy=Role("daddy").add_variable(Var("MYSTERY", "RIGHT!")),
                           kid=Role("kid"))
    ns = NS43()
    assert ns.family.daddy.name.value() == "daddy"
def test44():
    # A Var added on the enclosing RoleGroup is visible from its member
    # roles via find_variable().
    class NS44(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        family = RoleGroup("family",
                           daddy=Role("daddy"),
                           kid=Role("kid")).add_variable(Var("MYSTERY", "RIGHT!"))
    ns = NS44()
    var, _ = ns.family.kid.find_variable("MYSTERY")
    assert var.get_value(ns.family.kid.value()) == "RIGHT!"
def test45():
    # A Var added on a MultiRole container is found at the container level.
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        kids = MultiRole(Role("kid")).add_variable(Var("MYSTERY", "RIGHT!"))
    ns = NS()
    var, _ = ns.kids.find_variable("MYSTERY")
    assert var.get_value(ns.kids.value()) == "RIGHT!"
def test46():
    # The container's Var is also visible from an indexed element.
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        kids = MultiRole(Role("kid")).add_variable(Var("MYSTERY", "RIGHT!"))
    ns = NS()
    var, _ = ns.kids[0].find_variable("MYSTERY")
    assert var.get_value(ns.kids[0].value()) == "RIGHT!"
def test47():
    # Indexing a MultiRole twice with the same key returns the same object.
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        kids = MultiRole(Role("kid")).add_variable(Var("MYSTERY", "RIGHT!"))
    ns = NS()
    assert ns.kids[0] is ns.kids[0]
def test48():
    # ...but elements are per model instance, never shared across instances.
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        kids = MultiRole(Role("kid")).add_variable(Var("MYSTERY", "RIGHT!"))
    ns1 = NS()
    ns2 = NS()
    assert ns1.kids[0] is not ns2.kids[0]
def test49():
    # A Var on the template Role inside MultiRole shadows the container's.
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        kids = MultiRole(Role("kid").add_variable(Var("MYSTERY", "maybe..."))).add_variable(Var("MYSTERY", "RIGHT!"))
    ns = NS()
    var, _ = ns.kids[0].find_variable("MYSTERY")
    assert var.get_value(ns.kids[0].value()) == "maybe..."
def test52():
    # Context-expression host_refs drive provisioning: touching 5 pods
    # yields 5 infra pods and 11 components (5 pods x 2 servers + the
    # controller).
    class Infra(InfraModel):
        controller = Server("controller", mem="16GB")
        grid = MultiResourceGroup("pod", foreman=Server("foreman", mem="8GB"),
                                  worker=Server("grid-node", mem="8GB"))
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        grid = MultiRoleGroup("pod", foreman=Role("foreman", host_ref=ctxt.model.infra.grid[ctxt.comp.container._name].foreman),
                              worker=Role("grid-node", host_ref=ctxt.model.infra.grid[ctxt.comp.container._name].worker)).add_variable(Var("MYSTERY", "RIGHT!"))
    infra = Infra("mcg")
    ns = NS()
    for i in range(5):
        _ = ns.grid[i]
    ns.compute_provisioning_for_environ(infra)
    assert len(infra.grid) == 5 and len(infra.components()) == 11
def test53():
    # Nested MultiRole inside a MultiRoleGroup: pods 2 and 4 get 2 and 4
    # workers respectively, so 2 foremen + 6 workers = 8 infra components.
    class Infra(InfraModel):
        grid = MultiResourceGroup("grid",
                                  foreman=Server("foreman", mem="8GB"),
                                  workers=MultiResource(Server("grid-node", mem="8GB")))
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        grid = MultiRoleGroup("pod",
                              foreman=Role("foreman",
                                           host_ref=ctxt.model.infra.grid[ctxt.comp.container._name].foreman),
                              workers=MultiRole(Role("grid-node",
                                                     host_ref=ctxt.model.infra.grid[ctxt.comp.container.container._name].workers[ctxt.name]))).add_variable(Var("MYSTERY", "RIGHT!"))
    infra = Infra("mcg")
    ns = NS()
    for i in [2,4]:
        grid = ns.grid[i]
        for j in range(i):
            _ = grid.workers[j]
    ns.compute_provisioning_for_environ(infra)
    assert len(infra.grid) == 2 and len(infra.grid[2].workers) == 2 and len(infra.grid[4].workers) == 4 and len(infra.components()) == 8
def test54():
    # A host_ref with a constant index ([0]) makes every role map to the
    # same single server, so only one resource is provisioned.
    class Infra(InfraModel):
        grid = MultiResource(Server("foreman", mem="8GB"))
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        grid = MultiRole(Role("foreman", host_ref=ctxt.model.infra.grid[0])).add_variable(Var("MYSTERY", "RIGHT!"))
    infra = Infra("mcg")
    ns = NS()
    for i in [2,4]:
        _ = ns.grid[i]
    ns.compute_provisioning_for_environ(infra)
    assert len(infra.grid) == 1 and len(infra.components()) == 1
def test56():
    # A host_ref callable that raises (AttributeError here) must surface
    # as an ActuatorException mentioning "Callable arg failed".
    class Infra(InfraModel):
        grid = MultiResource(Server("node", mem="8GB"))
    def bad_comp(ctxt):
        #generate an attribute error
        [].wibble
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        grid = MultiRole(Role("foreman", host_ref=bad_comp)).add_variable(Var("MYSTERY", "RIGHT!"))
    infra = Infra("mcg")
    ns = NS()
    for i in [2,4]:
        _ = ns.grid[i]
    try:
        ns.compute_provisioning_for_environ(infra)
        assert False, "Should have complained about the back host_ref callable"
    except ActuatorException, e:
        assert "Callable arg failed" in e.message
def test57():
    # _ComputableValue rejects value types it does not recognize.
    from actuator.namespace import _ComputableValue
    try:
        _ = _ComputableValue(object())
        assert False, "_ComputableValue should have complained about the value supplied"
    except NamespaceException, e:
        assert "unrecognized" in e.message.lower()
def test58():
    # The variables= kwarg populates .variables keyed by Var name.
    from actuator.namespace import VariableContainer
    vc = VariableContainer(variables=[Var("ONE", "1"), Var("TWO", "2")])
    assert set(vc.variables.keys()) == set(["ONE", "TWO"])
def test59():
    # The overrides= kwarg populates .overrides keyed by Var name.
    from actuator.namespace import VariableContainer
    vc = VariableContainer(overrides=[Var("ONE", "1"), Var("TWO", "2")])
    assert set(vc.overrides.keys()) == set(["ONE", "TWO"])
def test60():
    # Anything that is not a Var in overrides must raise TypeError.
    from actuator.namespace import VariableContainer
    try:
        _ = VariableContainer(overrides=[{"ONE":"1"}])
        assert False, "Should have got an exception on a bad Var"
    except TypeError, e:
        assert "is not a var" in e.message.lower()
def test63():
    # clone() of a MultiRole preserves the elements realized so far.
    class NS(NamespaceModel):
        with_variables(Var("MYSTERY", "WRONG!"))
        grid = MultiRole(Role("foreman")).add_variable(Var("MYSTERY", "RIGHT!"))
    ns = NS()
    for i in range(5):
        _ = ns.grid[i]
    clone = ns.grid.clone()
    assert len(clone) == 5
def test64():
    # A per-element Var bound to ctxt.name (here "5") composes into the
    # model-level replacement pattern for NODE_NAME.
    class NS(NamespaceModel):
        with_variables(Var("NODE_NAME", "!{BASE_NAME}-!{NODE_ID}"))
        grid = (MultiRole(Role("worker",
                               variables=[Var("NODE_ID", ctxt.name)]))
                .add_variable(Var("BASE_NAME", "Grid")))
    ns = NS()
    value = ns.grid[5].var_value("NODE_NAME")
    assert value and value == "Grid-5"
def test65():
    #why does this work just like test64()? Because of where we ask for the
    #var_value(); even though NODE_ID is defined on the model class itself,
    #it gets evaluated in the context of ns.grid[5]. Since the value is
    #a context expression, it gets evaluated relative to the context that
    #needs its value, and the name in this context is '5'
    class NS(NamespaceModel):
        with_variables(Var("NODE_NAME", "!{BASE_NAME}-!{NODE_ID}"),
                       Var("NODE_ID", ctxt.name))
        grid = (MultiRole(Role("worker"))
                .add_variable(Var("BASE_NAME", "Grid")))
    ns = NS()
    value = ns.grid[5].var_value("NODE_NAME")
    assert value and value == "Grid-5"
def test66():
    # Same resolution as test64, but after provisioning against an infra;
    # refs_for_components() is also smoke-called along the way.
    class Infra(InfraModel):
        grid_i = MultiResource(StaticServer("node", "127.0.0.1"))
    class NS(NamespaceModel):
        with_variables(Var("NODE_NAME", "!{BASE_NAME}-!{NODE_ID}"))
        grid = (MultiRole(Role("worker",
                               variables=[Var("NODE_ID", ctxt.name)]))
                .add_variable(Var("BASE_NAME", "Grid")))
    infra = Infra("66")
    ns = NS()
    ns.compute_provisioning_for_environ(infra)
    _ = infra.refs_for_components()
    value = ns.grid[5].var_value("NODE_NAME")
    assert value and value == "Grid-5"
# def test67():
# class NS(NamespaceModel):
# with_variables(Var("ONE", "1"),
# Var("TWO", "2"))
# ns = NS()
# assert ns.v.ONE() == "1"
def do_all():
    # Run setup() once, then every module-level callable whose name starts
    # with "test" (discovery order follows the globals() dict).
    setup()
    for k, v in globals().items():
        if k.startswith("test") and callable(v):
            v()


if __name__ == "__main__":
    do_all()
|
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core brax structs and some conversion and slicing functions."""
import os
from typing import Optional, Sequence, Tuple
from brax import jumpy as jp
from brax import math
from brax.io import file
from brax.physics import config_pb2
from flax import struct
from trimesh.exchange.load import load_mesh
@struct.dataclass
class Q(object):
  """Coordinates: position and rotation.

  Attributes:
    pos: Location of center of mass.
    rot: Rotation about center of mass, represented as a quaternion.
  """
  pos: jp.ndarray
  rot: jp.ndarray

  def __add__(self, o):
    """Combines this coordinate frame with a P, Q, or QP.

    Q + P promotes to a full QP frame; Q + Q / Q + QP sums components.
    """
    if isinstance(o, QP):
      return QP(self.pos + o.pos, self.rot + o.rot, o.vel, o.ang)
    if isinstance(o, Q):
      return Q(self.pos + o.pos, self.rot + o.rot)
    if isinstance(o, P):
      return QP(self.pos, self.rot, o.vel, o.ang)
    raise ValueError("add only supported for P, Q, QP")
@struct.dataclass
class P(object):
  """Time derivatives: velocity and angular velocity.

  Attributes:
    vel: Velocity.
    ang: Angular velocity about center of mass.
  """
  vel: jp.ndarray
  ang: jp.ndarray

  def __add__(self, o):
    """Sums derivatives; P + Q / P + QP promotes to a full QP frame."""
    if isinstance(o, QP):
      return QP(o.pos, o.rot, self.vel + o.vel, self.ang + o.ang)
    if isinstance(o, Q):
      return QP(o.pos, o.rot, self.vel, self.ang)
    if isinstance(o, P):
      return P(self.vel + o.vel, self.ang + o.ang)
    raise ValueError("add only supported for P, Q, QP")

  def __mul__(self, o):
    """Scales both derivative components by o."""
    return P(self.vel * o, self.ang * o)
@struct.dataclass
class QP(object):
  """A coordinate and time derivative frame for a brax body.

  Attributes:
    pos: Location of center of mass.
    rot: Rotation about center of mass, represented as a quaternion.
    vel: Velocity.
    ang: Angular velocity about center of mass.
  """
  pos: jp.ndarray
  rot: jp.ndarray
  vel: jp.ndarray
  ang: jp.ndarray

  def __add__(self, o):
    """Adds the matching components of a P, Q, or QP to this frame."""
    if isinstance(o, QP):
      return QP(self.pos + o.pos, self.rot + o.rot, self.vel + o.vel,
                self.ang + o.ang)
    if isinstance(o, Q):
      return QP(self.pos + o.pos, self.rot + o.rot, self.vel, self.ang)
    if isinstance(o, P):
      return QP(self.pos, self.rot, self.vel + o.vel, self.ang + o.ang)
    raise ValueError("add only supported for P, Q, QP")

  def __mul__(self, o):
    """Scales every component of the frame by o."""
    scaled = (self.pos * o, self.rot * o, self.vel * o, self.ang * o)
    return QP(*scaled)

  @classmethod
  def zero(cls, shape=()):
    """Returns a motionless frame at the origin with unit quaternion."""
    unit_quat = jp.tile(jp.array([1., 0., 0., 0]), reps=shape + (1,))
    return cls(
        pos=jp.zeros(shape + (3,)),
        rot=unit_quat,
        vel=jp.zeros(shape + (3,)),
        ang=jp.zeros(shape + (3,)))

  def to_world(self, rpos: jp.ndarray) -> Tuple[jp.ndarray, jp.ndarray]:
    """Returns world information about a point relative to a part.

    Args:
      rpos: Point relative to center of mass of part.

    Returns:
      A 2-tuple containing:
        * World-space coordinates of rpos
        * World-space velocity of rpos
    """
    offset = math.rotate(rpos, self.rot)
    world_pos = self.pos + offset
    world_vel = self.vel + jp.cross(self.ang, offset)
    return (world_pos, world_vel)

  def world_velocity(self, pos: jp.ndarray) -> jp.ndarray:
    """Returns the velocity of the point on a rigidbody in world space.

    Args:
      pos: World space position which to use for velocity calculation.
    """
    lever_arm = pos - self.pos
    return self.vel + jp.cross(self.ang, lever_arm)
@struct.dataclass
class Info(object):
  """Auxilliary data calculated during the dynamics of each physics step.

  Attributes:
    contact: External contact forces applied at a step
    joint: Joint constraint forces applied at a step
    actuator: Actuator forces applied at a step
  """
  # Each field is a P, carrying the linear (vel) and angular (ang)
  # components of the forces applied during the step.
  contact: P
  joint: P
  actuator: P
def validate_config(
    config: config_pb2.Config,
    resource_paths: Optional[Sequence[str]] = None) -> config_pb2.Config:
  """Validate and normalize config settings for use in systems.

  Mutates `config` in place: rejects a non-positive dt, checks for duplicate
  names, inlines mesh geometry read from disk, and reifies frozen dimensions
  onto each body.  Returns the same (mutated) config object.

  Args:
    config: The system config proto to validate and normalize.
    resource_paths: Directories searched, in order, for mesh files.
      Defaults to [""] so mesh paths are used as-is.

  Raises:
    RuntimeError: If dt <= 0, or a duplicate body/joint/actuator/mesh name
      is found.
  """
  if config.dt <= 0:
    raise RuntimeError("config.dt must be positive")

  # substeps == 0 is the proto default; treat it as "no subdivision".
  if config.substeps == 0:
    config.substeps = 1

  def find_dupes(objs):
    # Names must be unique within each category; they are used as keys.
    names = set()
    for obj in objs:
      if obj.name in names:
        raise RuntimeError(f"duplicate name in config: {obj.name}")
      names.add(obj.name)

  find_dupes(config.bodies)
  find_dupes(config.joints)
  find_dupes(config.actuators)
  find_dupes(config.mesh_geometries)

  # Load the meshes.
  if resource_paths is None:
    resource_paths = [""]
  for mesh_geom in config.mesh_geometries:
    if mesh_geom.path:
      # Clear the vertices and faces, if any.
      del mesh_geom.vertices[:]
      del mesh_geom.faces[:]
      found = False
      for resource_path in resource_paths:
        path = os.path.join(resource_path, mesh_geom.path)
        if not file.Exists(path):
          continue
        with file.File(path, "rb") as f:
          # file_type is the original path so trimesh can pick a loader
          # from the file extension.
          trimesh = load_mesh(f, file_type=str(mesh_geom.path))
          for v in trimesh.vertices:
            mesh_geom.vertices.add(x=v[0], y=v[1], z=v[2])
          mesh_geom.faces.extend(trimesh.faces.flatten())
          for v in trimesh.vertex_normals:
            mesh_geom.vertex_normals.add(x=v[0], y=v[1], z=v[2])
          for v in trimesh.face_normals:
            mesh_geom.face_normals.add(x=v[0], y=v[1], z=v[2])
          found = True
          break
      assert found, f"{mesh_geom.path} is missing."
      mesh_geom.ClearField("path")  # Clear the path.

  # TODO: more config validation

  # reify all frozen dimensions in the system
  allvec = config_pb2.Vector3(x=1.0, y=1.0, z=1.0)
  frozen = config.frozen
  if frozen.all:
    frozen.position.CopyFrom(allvec)
    frozen.rotation.CopyFrom(allvec)
  if all([
      frozen.position.x, frozen.position.y, frozen.position.z,
      frozen.rotation.x, frozen.rotation.y, frozen.rotation.z
  ]):
    config.frozen.all = True
  for b in config.bodies:
    inertia = b.inertia
    # A zero inertia (proto default) is replaced with the identity diagonal.
    if inertia.x == 0 and inertia.y == 0 and inertia.z == 0:
      b.inertia.x, b.inertia.y, b.inertia.z = 1, 1, 1

    # Per-body frozen flags inherit any system-wide frozen dimensions.
    b.frozen.position.x = b.frozen.position.x or frozen.position.x
    b.frozen.position.y = b.frozen.position.y or frozen.position.y
    b.frozen.position.z = b.frozen.position.z or frozen.position.z
    b.frozen.rotation.x = b.frozen.rotation.x or frozen.rotation.x
    b.frozen.rotation.y = b.frozen.rotation.y or frozen.rotation.y
    b.frozen.rotation.z = b.frozen.rotation.z or frozen.rotation.z
    if b.frozen.all:
      b.frozen.position.CopyFrom(allvec)
      b.frozen.rotation.CopyFrom(allvec)
    if all([
        b.frozen.position.x, b.frozen.position.y, b.frozen.position.z,
        b.frozen.rotation.x, b.frozen.rotation.y, b.frozen.rotation.z
    ]):
      b.frozen.all = True

    # insert material properties to colliders
    for c in b.colliders:
      if not c.HasField("material"):
        c.material.friction = config.friction
        c.material.elasticity = config.elasticity

  # The whole system is frozen only if every body is frozen.
  frozen.all = all(b.frozen.all for b in config.bodies)

  return config
def vec_to_arr(vec: config_pb2.Vector3) -> jp.ndarray:
  """Converts a Vector3 proto message into a length-3 array."""
  x, y, z = vec.x, vec.y, vec.z
  return jp.array([x, y, z])
|
|
class BSTNode(object):
    """A single node of a binary search tree.

    Holds a key/value pair plus links to left/right children and parent.
    """

    def __init__(self, key, value=None, left=None, right=None, parent=None):
        self.key = key
        self.value = value
        self.left = left
        self.right = right
        self.parent = parent

    def is_leaf(self):
        """True when the node has no children."""
        return not self.has_any_children()

    def is_root(self):
        """True when the node has no parent."""
        return self.parent is None

    def is_left_child(self):
        """True when this node is its parent's left child."""
        return self.parent is not None and self.parent.left is self

    def is_right_child(self):
        """True when this node is its parent's right child."""
        return self.parent is not None and self.parent.right is self

    def has_any_children(self):
        return self.has_left_child() or self.has_right_child()

    def has_both_children(self):
        return self.has_left_child() and self.has_right_child()

    def has_left_child(self):
        return self.left is not None

    def has_right_child(self):
        return self.right is not None

    def get_left_child(self):
        """Left child, or None."""
        return self.left

    def get_right_child(self):
        """Right child, or None."""
        return self.right

    def get_parent(self):
        """Parent node, or None for the root."""
        return self.parent

    def get_subtree_size(self):
        """Number of nodes in the subtree rooted here (including self)."""
        size = 1
        if self.has_left_child():
            size += self.left.get_subtree_size()
        if self.has_right_child():
            size += self.right.get_subtree_size()
        return size

    def get_subtree_height(self):
        """Edge-count height of the subtree rooted here (leaf -> 0)."""
        left = 1 + self.left.get_subtree_height() if self.has_left_child() else 0
        right = 1 + self.right.get_subtree_height() if self.has_right_child() else 0
        return max(left, right)

    def __repr__(self):
        return "'{0}' : {1}".format(self.key, self.value)

    def __iter__(self):
        """Yields the existing children (left first, then right)."""
        if self.has_left_child():
            yield self.left
        if self.has_right_child():
            yield self.right


class BinarySearchTree(object):
    """An unbalanced binary search tree mapping keys to values.

    Supports dict-style access (t[k], del t[k], k in t, len(t)) and
    in-order iteration over the nodes.
    """

    def __init__(self):
        self.root = None
        self.count = 0  # number of distinct keys currently stored

    def put(self, key, value):
        """Inserts key/value, or updates the value if key already exists."""
        if self.root is None:
            self.root = BSTNode(key, value)
            self.count = 1
            return
        current = self.root
        while True:
            if key == current.key:
                # BUGFIX: updating an existing key must not grow the count
                # (the old code incremented count on every put).
                current.value = value
                return
            elif key < current.key:
                if current.has_left_child():
                    current = current.left
                else:
                    current.left = BSTNode(key, value, parent=current)
                    self.count += 1
                    return
            else:
                if current.has_right_child():
                    current = current.right
                else:
                    current.right = BSTNode(key, value, parent=current)
                    self.count += 1
                    return

    def get(self, key):
        """Returns the node holding key, or None when absent.

        BUGFIX: the old code dereferenced an undefined/None node when the
        tree was empty; an empty tree (or key of None) now returns None.
        """
        if key is None or self.root is None:
            return None
        current = self.root
        while current is not None:
            if key == current.key:
                return current
            current = current.left if key < current.key else current.right
        return None

    def remove(self, key):
        """Deletes key from the tree if present (no-op otherwise)."""
        node = self.get(key)
        if node is None:
            return
        if node.has_both_children():
            # Copy the in-order successor's payload here, then delete the
            # successor node (which has at most one child).  The recursive
            # call performs the single count decrement -- BUGFIX: the old
            # code decremented count twice for two-child removals.
            successor = self.get_successor(node)
            new_key, new_value = successor.key, successor.value
            self.remove(successor.key)
            node.key = new_key
            node.value = new_value
            return
        # Zero or one child: splice the node out of the tree.
        child = node.left if node.has_left_child() else node.right
        if node is self.root:
            self.root = child
        elif node.is_left_child():
            node.parent.left = child
        else:
            node.parent.right = child
        if child is not None:
            child.parent = node.parent
        self.count -= 1

    def get_min(self):
        """Node with the smallest key, or None for an empty tree."""
        node = self.root
        if node is None:
            return None
        while node.has_left_child():
            node = node.left
        return node

    def get_max(self):
        """Node with the largest key, or None for an empty tree."""
        node = self.root
        if node is None:
            return None
        while node.has_right_child():
            node = node.right
        return node

    def get_successor(self, start_node):
        """Smallest node of start_node's right subtree (must exist)."""
        successor = start_node.get_right_child()
        while successor.has_left_child():
            successor = successor.left
        return successor

    def get_predecessor(self, start_node):
        """Largest node of start_node's left subtree (must exist)."""
        predecessor = start_node.get_left_child()
        while predecessor.has_right_child():
            predecessor = predecessor.right
        return predecessor

    def __getitem__(self, item):
        """Returns the value for item, or None when absent."""
        result = self.get(item)
        if result:
            return result.value

    def __setitem__(self, key, value):
        self.put(key, value)

    def __delitem__(self, key):
        self.remove(key)

    def __len__(self):
        return self.count

    def __contains__(self, key):
        return self.get(key) is not None

    def __repr__(self):
        # In-order listing of the nodes, "[]" when empty.
        if self.root is None:
            return "[]"
        return str(list(self))

    def __iter__(self):
        """Yields the nodes in ascending key order."""
        def walk(node):
            if node is not None:
                for item in walk(node.left):
                    yield item
                yield node
                for item in walk(node.right):
                    yield item
        return walk(self.root)

    def visualize_horizontal(self, level=0, node=None):
        """Prints the tree sideways, one node per line, indented by depth."""
        if node is None:
            node = self.root
        adder = "Root = "
        if node.is_left_child():
            adder = "Left = "
        elif node.is_right_child():
            adder = "Right = "
        print('\t' * level + adder + repr(node))
        for child in node:
            # BUGFIX: recurse into this method; the old code called the
            # nonexistent self.visualize().
            self.visualize_horizontal(level + 1, child)

    def visualize_vertical(self):
        """Prints an ASCII rendering of the tree, root at the top."""
        if self.root is None: return '<empty tree>'
        def recurse(node):
            # Returns (lines, root column, total width) for node's subtree.
            if node is None: return [], 0, 0
            label = str(node.key)
            left_lines, left_pos, left_width = recurse(node.get_left_child())
            right_lines, right_pos, right_width = recurse(node.get_right_child())
            middle = max(right_pos + left_width - left_pos + 1, len(label), 2)
            pos = left_pos + middle // 2
            width = left_pos + middle + right_width - right_pos
            while len(left_lines) < len(right_lines):
                left_lines.append(' ' * left_width)
            while len(right_lines) < len(left_lines):
                right_lines.append(' ' * right_width)
            if (middle - len(label)) % 2 == 1 and node.parent is not None and \
               node is node.parent.get_left_child() and len(label) < middle:
                label += '.'
            label = label.center(middle, '.')
            if label[0] == '.': label = ' ' + label[1:]
            if label[-1] == '.': label = label[:-1] + ' '
            lines = [' ' * left_pos + label + ' ' * (right_width - right_pos),
                     ' ' * left_pos + '/' + ' ' * (middle-2) +
                     '\\' + ' ' * (right_width - right_pos)] + \
                    [left_line + ' ' * (width - left_width - right_width) +
                     right_line
                     for left_line, right_line in zip(left_lines, right_lines)]
            return lines, pos, width
        print("\n")
        print('\n'.join(recurse(self.root)[0]))
        print("\n")
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of simple lock using datastore.
These locks rely on datastore's optimistic concurrency control to ensure
atomicity and support the semantics of a basic threading.Lock object.
This library differs from memcache_locks in that it guarantees the persistence
of locks and of mutual exclusion. It also offers an async api.
"""
import datetime
import logging
import uuid
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
# Datastore kind under which lock entities are stored.
_DATASTORE_LOCK_TYPE = u'ApphostingContribDatastoreLock2'
# Seconds before the first retry when blocking on acquire; each subsequent
# retry waits one second longer (see AcquireAsync).
_INITIAL_DELAY = .3
_USE_DEFAULT_TIMEOUT = object()  # Sentinel for using default timeout.
# Exception hierarchy: catch Error to handle any failure from this module.
class Error(Exception):
  """Base class for all errors raised from this module."""


class AcquireLockError(Error):
  """Raised on error acquiring lock."""


class RefreshLockError(Error):
  """Raised on error refreshing lock."""


class ReleaseLockError(Error):
  """Raised on error releasing lock."""
class _DatastoreLockEntity(ndb.Model):
  """Datastore representation of a single lock."""
  # Seconds after which an acquired lock is considered orphaned; a falsy
  # value (None/0) means the lock never expires.
  timeout = ndb.IntegerProperty()
  # auto_now: every put() of this entity refreshes the timestamp, which is
  # what makes RefreshAsync's bare put() extend the lease.
  acquired_at = ndb.DateTimeProperty(auto_now=True)
  acquired = ndb.BooleanProperty(default=False)
  # Ownership token generated per-acquire (uuid4); distinct from the
  # entity's key, which identifies the lock itself.
  lock_id = ndb.StringProperty()

  @classmethod
  def _get_kind(cls):
    # Store under a stable kind name, independent of the class name.
    return _DATASTORE_LOCK_TYPE

  @property
  def lock_held(self):
    """Whether the lock is acquired and not orphaned."""
    return self.acquired and (not self.timeout or
                              ((datetime.datetime.utcnow() - self.acquired_at)
                               <= datetime.timedelta(seconds=self.timeout)))
# Defaults applied when DatastoreLock callers do not override them.
_LOCK_DEFAULT_TIMEOUT = 60
_LOCK_DEFAULT_MAX_ACQUIRE_ATTEMPTS = 20
class DatastoreLock(object):
"""Implementation of basic (non-reentrant) lock using datastore.
Like MemcacheLock, this lock supports a timeout, after which the lock will be
considered "orphaned" and may be acquired. Once a lock is orphaned, all
methods on the original lock object (like Release) will result in undefined
behavior. As a result, it is good practice to set the timeout to greater than
the life of the HTTP request using the lock, so as to guarantee the original
lock holder will never use the lock after it is orphaned.
This lock offers asynchronous versions of Acquire and Release that should be
used in ndb tasklets in place of Acquire, Release, and context manager api.
Attributes:
_lock_id: A unique id for the lock, automatically generated on acquire. This
id is separate from the lock's primary identifier, and should be used to
verify ownership of the lock.
default_timeout: Default timeout in seconds for how long this lock will
be held if not explicitly released.
default_max_acquire_attempts: Default maximum number of attempts allowed to
acquire the lock.
"""
_use_cache = False
_use_memcache = False
def __init__(
self, id_, default_timeout=_LOCK_DEFAULT_TIMEOUT,
default_max_acquire_attempts=_LOCK_DEFAULT_MAX_ACQUIRE_ATTEMPTS):
"""Init for DatastoreLock.
Args:
id_: The ID of the lock entity stored in ndb.
default_timeout: Timeout in seconds for ths lock, used when 'timeout' is
not provided in the Acquire method. If set to None and the lock holder
dies before releasing the lock, it will be in a perpetual acquired
state.
default_max_acquire_attempts: Maximum number of attempts for acquiring the
lock, used when 'max_acquire_attempts' is not provided in the Acquire
method.
Raises:
ValueError: If default_max_acquire_attempts < 1.
"""
if default_max_acquire_attempts < 1:
raise ValueError(u'default_max_acquire_attempts must be >= 1')
self._id = id_
self._acquired = False
self._lock_id = None
self.default_timeout = default_timeout
self.default_max_acquire_attempts = default_max_acquire_attempts
def __getstate__(self):
return {
'_id': self._id,
'_acquired': self._acquired,
'_lock_id': self._lock_id,
'default_timeout': self.default_timeout,
'default_max_acquire_attempts': self.default_max_acquire_attempts,
}
def __setstate__(self, state):
# NOTE: When changing format, be sure to add a test
# to confirm old pickle objects can be deserialized and unlocked.
# e.g. testLoadOldPickledLockAfterNewCodeRelease
self._id = state['_id']
self._acquired = state['_acquired']
self._lock_id = state['_lock_id']
if 'default_timeout' in state:
self.default_timeout = state['default_timeout']
else:
self.default_timeout = _LOCK_DEFAULT_TIMEOUT
if 'default_max_acquire_attempts' in state:
self.default_max_acquire_attempts = state['default_max_acquire_attempts']
else:
self.default_max_acquire_attempts = _LOCK_DEFAULT_MAX_ACQUIRE_ATTEMPTS
@ndb.tasklet
def AcquireAsync(self,
blocking=True,
max_acquire_attempts=None,
timeout=_USE_DEFAULT_TIMEOUT):
"""Acquires a lock asynchronously, blocking or non-blocking.
If non-blocking, a single attempt will be made to acquire the lock;
otherwise, max_acquire_attempts will be made.
Args:
blocking: Whether to block waiting for the lock.
max_acquire_attempts: Maximum number of attempts to make in order to
acquire the lock if blocking. If None, default_max_acquire_attempts
will be used instead.
timeout: Optional timeout for the lock in seconds, after which it will be
assumed to be free (even if never explicitly released). Defaults to the
timeout value set during initialization. If this value is set to None
and the lock holder dies before releasing the lock, it will be in a
perpetual acquired state.
Returns:
True if the lock was acquired, or False if the lock was not acquired and
blocking=False.
Raises:
AcquireLockError: If the lock is already acquired via this lock object,
or if max_acquire_attempts is exceeded.
ValueError: If max_acquire_attempts < 1.
"""
if self._acquired:
raise AcquireLockError(u'Lock already acquired')
if max_acquire_attempts is None:
max_acquire_attempts = self.default_max_acquire_attempts
if max_acquire_attempts < 1:
raise ValueError(u'max_acquire_attempts must be >= 1')
if timeout is _USE_DEFAULT_TIMEOUT:
timeout = self.default_timeout
self._lock_id = str(uuid.uuid4())
self._acquired = yield self._AcquireAsync(timeout)
if self._acquired:
raise ndb.Return(True)
elif not blocking:
raise ndb.Return(False)
intervals = [_INITIAL_DELAY + i for i in xrange(max_acquire_attempts - 1)]
for sleep_time in intervals:
yield ndb.sleep(sleep_time)
self._acquired = yield self._AcquireAsync(timeout)
if self._acquired:
raise ndb.Return(True)
raise AcquireLockError(
u'Failed to acquire lock [{}] after {} tries.'.format(
self._id, max_acquire_attempts))
@ndb.tasklet
def ReacquireAsync(self, lock_id):
"""Asynchronously reacquires the lock.
Lock reacquisition should be intentional. An example use case is that a lock
is acquired in an API endpoint and then reacquired and released in
a backend process, with the endpoint passing the lock_id and id to the
backend process.
Args:
lock_id: The lock_id attribute of the original lock. Note that lock_id is
different from the id used to create the lock.
Returns:
True iff the lock was reacquired.
Raises:
AcquireLockError: If the lock owner tries to reacquire it again.
"""
if self._acquired:
raise AcquireLockError(u'Lock already acquired')
self._lock_id = lock_id
self._acquired = yield self._ReacquireAsync()
raise ndb.Return(self._acquired)
def RefreshAsync(self, timeout=_USE_DEFAULT_TIMEOUT):
"""Asynchronously refreshes the lock by resetting timeout.
Args:
timeout: Same as the timeout argument in AcquireAsync method.
Returns:
True iff the lock was successfully refreshed.
Raises:
RefreshLockError: If the lock was never acquired.
"""
if timeout is _USE_DEFAULT_TIMEOUT:
timeout = self.default_timeout
if not self._acquired:
raise RefreshLockError(u'Lock [{}] never acquired'.format(self._id))
return self._RefreshAsync(timeout)
@ndb.tasklet
def ReleaseAsync(self):
"""Releases the held lock asynchronously.
Returns:
True iff the lock was successfully released. The lock may not be
successfully released if it has timed out and was subsequently acquired
by another user.
Raises:
ReleaseLockError: If the lock was never acquired.
"""
if not self._acquired:
raise ReleaseLockError(u'Lock [{}] never acquired'.format(self._id))
status = yield self._ReleaseAsync()
self._acquired = False
self._lock_id = None
raise ndb.Return(status)
def Acquire(self, *args, **kwargs):
"""Synchronous version of AcquireAsync."""
return self.AcquireAsync(*args, **kwargs).get_result()
def Reacquire(self, *args, **kwargs):
"""Synchronous version of ReacquireAsync."""
return self.ReacquireAsync(*args, **kwargs).get_result()
def Refresh(self, *args, **kwargs):
"""Synchronous version of RefreshAsync."""
return self.RefreshAsync(*args, **kwargs).get_result()
def Release(self, *args, **kwargs):
"""Synchronous version of ReleaseAsync."""
return self.ReleaseAsync(*args, **kwargs).get_result()
@ndb.tasklet
def _AcquireAsync(self, timeout):
"""Acquires the lock via datastore or returns False."""
@ndb.transactional_tasklet(retries=0)
def _TransactionalAcquireAsync():
lock_entity = yield _DatastoreLockEntity.get_or_insert_async(self._id)
if lock_entity.lock_held:
raise ndb.Return(False)
lock_entity.lock_id = self._lock_id
lock_entity.acquired = True
lock_entity.timeout = timeout
yield lock_entity.put_async()
raise ndb.Return(True)
try:
raise ndb.Return((yield _TransactionalAcquireAsync()))
except datastore_errors.Error:
raise ndb.Return(False)
@ndb.tasklet
def _ReacquireAsync(self):
lock_entity = yield _DatastoreLockEntity.get_by_id_async(self._id)
if lock_entity.lock_id != self._lock_id:
logging.warning(u'Invalid lock ID detected when reacquiring the '
u'lock [%s]', self._id)
raise ndb.Return(False)
raise ndb.Return(True)
@ndb.transactional_tasklet
def _RefreshAsync(self, timeout):
    """Refreshes the lock via datastore.

    Args:
        timeout: New timeout value (seconds) to store on the lock entity.

    Returns:
        (via ndb.Return) True if the lock was still owned and refreshed;
        False if the stored lock ID no longer matches ours.
    """
    lock_entity = yield _DatastoreLockEntity.get_by_id_async(self._id)
    if lock_entity.lock_id != self._lock_id:
        logging.warning(u'Invalid lock ID detected when refreshing the '
                        u'lock [%s]', self._id)
        raise ndb.Return(False)
    lock_entity.timeout = timeout
    yield lock_entity.put_async()  # This also resets acquired_at.
    raise ndb.Return(True)
@ndb.transactional_tasklet(retries=10)
def _ReleaseAsync(self):
    """Marks the lock entity as released if this instance still owns it.

    Returns:
        (via ndb.Return) True if released; False if the stored lock ID no
        longer matches (i.e. the lock expired and was taken by another
        holder).
    """
    lock_entity = yield _DatastoreLockEntity.get_by_id_async(self._id)
    if lock_entity.lock_id != self._lock_id:
        logging.warning('lock acquired by someone else')
        raise ndb.Return(False)
    lock_entity.acquired = False
    yield lock_entity.put_async()
    raise ndb.Return(True)
# Add pep-8 aliases and allow lock to be used as context manager.
acquire_async = AcquireAsync
reacquire_async = ReacquireAsync
refresh_async = RefreshAsync
release_async = ReleaseAsync
acquire = Acquire
reacquire = Reacquire
refresh = Refresh
release = Release

# NOTE(review): __enter__ returns Acquire()'s boolean rather than raising on
# failure, so a `with lock:` body still runs when acquisition fails — confirm
# this is the intended contract for context-manager use.
__enter__ = Acquire

def __exit__(self, *unused_args):
    # Exception details (type, value, traceback) are ignored; the lock is
    # always released on scope exit.
    self.Release()
|
|
""" CSV wrapper objects. """
import codecs
import csv
import io
import os
from .pyver import PY3
DEFAULT_CHARSET = "utf-8"
CSV_CHARSET = "utf-8"
def set_global_csv_field_size_limit(limit):
    """ Wrapper function. Applies *limit* as the process-wide csv field size
    limit via csv.field_size_limit(). """
    csv.field_size_limit(limit)
def csv_open(file_name, mode, **kwargs):
    """ Wrapper function for opening a file for use by CSV reader/writer.

    Returns a file IO object; not a CSV object.
    Attempts to handle mode and encoding options on the file object
    so that they will be compatible with a CSV reader or writer.
    It is intended to be a drop-in replacement for open() or io.open(),
    with the exception that
    the newline option should not be specified when using this function.

    :param file_name: path of the file to open.
    :param mode: "r" or "w"; None defaults to "r". Any other mode raises.
    :param kwargs: passed through to io.open() (e.g. encoding, errors).
    :raises LookupError: if mode is not "r" or "w".
    :raises AttributeError: if a newline option is supplied.
    """
    if mode is None:
        mode = "r"
    if mode not in ("r", "w"):
        raise LookupError("mode must be 'r' or 'w'")
    if "newline" in kwargs:
        raise AttributeError("newline parameter is not allowed for CSV")
    # The csv module requires newline="" so that it can interpret quoted
    # embedded line breaks itself instead of the text layer translating them.
    #
    # Fix: the former Python-2 fallback branch here was unreachable (guarded
    # by `if True:`) and has been removed; behavior is unchanged.
    kwargs = dict(kwargs)  # copy so the caller's dict is not mutated
    kwargs["newline"] = ""
    return io.open(file_name, mode, **kwargs)
def lookup_delimiter(delimiter_name):
    """ Maps a delimiter name (e.g. "tab") to a delimter value (e.g. "\t")

    This is mostly useful for tabs since Windows commandline
    makes it nearly impossible to specify a tab without an alias name.
    Unrecognized names (and the empty string) are returned unchanged;
    None stays None.
    """
    if delimiter_name is None:
        return None
    aliases = {
        "tab": "\t",
        "\\t": "\t",
        "space": " ",
        "sp": " ",
        "comma": ",",
        "pipe": "|",
        "vbar": "|",
        "verticalbar": "|",
        "semicolon": ";",
    }
    # Lookup is case-insensitive, but an unmatched name keeps its original case.
    return aliases.get(delimiter_name.lower(), delimiter_name)
def lookup_charset(in_charset_name):
    """ Provides some additional aliases for text encoding names.

    Lowercases the name, replaces dashes with underscores, and maps the
    Windows-1252 alias to the codec name Python expects.

    :param in_charset_name: charset name, or None.
    :return: normalized charset name (None stays None).
    """
    out_charset_name = in_charset_name
    if out_charset_name is not None:
        out_charset_name = out_charset_name.lower()
        out_charset_name = out_charset_name.replace("-", "_")
        # BUG FIX: compare against the underscore form — the dashes were
        # already replaced above, so the old check for "windows-1252" could
        # never match and the alias was silently ignored.
        if out_charset_name == "windows_1252":
            out_charset_name = "cp1252"
    return out_charset_name
def lookup_newline(in_newline_name):
    """ Provides commandline-friendly aliases for newline character names.

    None stays None; unrecognized names are returned (lowercased, with
    backslash escapes expanded) unchanged.
    """
    if in_newline_name is None:
        return None
    name = in_newline_name.lower()
    # Expand backslash escapes typed on a commandline into real characters.
    name = name.replace("\\r", "\r").replace("\\n", "\n").replace("\\r\\n", "\r\n")
    if name == "sys":
        return os.linesep
    if name == "std":
        # 'std' newline convention is the "standard" for this toolset;
        # LF was chosen to try and accommodate some pipe scenarios,
        # but this might change.
        # The RFC-4180 recommendation is to use \r\n.
        return "\n"
    if name in ("cr", "macintosh", "mac", "\\r"):
        return "\r"
    if name in ("lf", "unix", "posix", "\\n"):
        return "\n"
    if name in ("crlf", "windows", "win", "dos", "\\r\\n"):
        return "\r\n"
    return name
def lookup_quote_symbol(quote_symbol_name):
    ''' Provides some commandline friendly aliases for quote symbols.

    Unrecognized names (and the empty string) are returned unchanged;
    None stays None.
    '''
    if quote_symbol_name is None:
        return None
    aliases = {
        "quot": '"',
        "dquote": '"',
        "double_quote": '"',
        "double-quote": '"',
        "apos": "'",
        "squote": "'",
        "single_quote": "'",
        "single-quote": "'",
    }
    # Case-insensitive lookup; an unmatched name keeps its original case.
    return aliases.get(quote_symbol_name.lower(), quote_symbol_name)
def normalize_column_name(column_name):
    """ puts a CSV column name into a "normalized" form for comparison:
    surrounding whitespace stripped, lowercased. None stays None. """
    if column_name is None:
        return None
    return column_name.strip().lower()
class CsvRecoder:
    """ Wraps an iterator of unicode to re-encode it for use with csv module.

    Python-2 only (uses the py2 `next()` iterator protocol): the py2 csv
    module consumes byte strings, so each input line is (optionally) decoded
    from the source encoding and re-encoded as UTF-8 bytes for the reader.
    """

    encoding = CSV_CHARSET  # target encoding

    def __init__(
            self,
            in_iter,
            encoding,  # source encoding
    ):
        # With no source encoding, pass the iterator through untouched
        # (it is assumed to already yield text).
        if not encoding:
            self.in_iter = in_iter
        else:
            decoder = codecs.getreader(encoding)
            self.in_iter = decoder(in_iter)

    def __iter__(self):
        return self

    def next(self):
        # Py2 iterator protocol: yield the next line as target-encoded bytes.
        line_ustr = self.in_iter.next()
        return line_ustr.encode(self.encoding)

    def decode(self, s):
        # Convert a csv cell (target-encoded bytes) back to unicode.
        return s.decode(self.encoding)
class UnicodeCsvReader:
    """ Wraps a csv.reader with a text encoder.

    Python-2 only (py2 `next()` protocol): feeds the byte-oriented py2
    csv.reader through a CsvRecoder and decodes each resulting cell back
    to unicode.
    """

    def __init__(
            self,
            in_io,
            **kwargs):
        # NOTE(review): the source encoding is hard-wired to None, so the
        # recoder does not decode in_io — presumably in_io already yields
        # text; confirm against callers.
        encoding = None
        self.recoder = CsvRecoder(in_io, encoding)
        self.reader = csv.reader(self.recoder, **kwargs)

    def next(self):
        # Decode every cell of the next parsed row back to unicode.
        row = self.reader.next()
        return [self.recoder.decode(cell) for cell in row]

    def __iter__(self):
        return self
class UnicodeCsvWriter:
    """ Wraps a csv.writer to write to a unicode stream.

    Python-2 only (imports cStringIO): rows are encoded to UTF-8 bytes,
    written through the byte-oriented py2 csv module into an in-memory
    buffer, then decoded back to unicode and written to the output stream.
    """

    def __init__(
            self,
            out_io,
            **kwargs):
        # cStringIO (py2-only) serves as the intermediate byte buffer.
        from cStringIO import StringIO
        self.buf_io = StringIO()
        self.writer = csv.writer(self.buf_io, **kwargs)
        self.out_io = out_io

    def writerow(self, row):
        self.writer.writerow([cell_ustr.encode(CSV_CHARSET) for cell_ustr in row])
        row_str = self.buf_io.getvalue()
        # NOTE(review): relies on cStringIO.truncate(0) also resetting the
        # stream position (py2 behavior); an io.* buffer would need seek(0).
        self.buf_io.truncate(0)
        row_ustr = row_str.decode(CSV_CHARSET)
        self.out_io.write(row_ustr)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
# Pick the implementations for this interpreter: on py3 the stdlib csv
# module handles unicode natively; on py2 use the wrapper classes above.
if PY3:
    CsvReader = csv.reader
    CsvWriter = csv.writer
else:
    CsvReader = UnicodeCsvReader
    CsvWriter = UnicodeCsvWriter
|
|
"""Utilities for writing code that runs on Python 2 and 3"""
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"


# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3

# Version-dependent aliases for the fundamental text/number/class types,
# plus MAXSIZE (the largest container index the interpreter supports).
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform == "java":
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                # len() must fit in Py_ssize_t; OverflowError here means
                # the platform is 32-bit.
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first attribute access.

    Subclasses implement _resolve(). On first access the resolved object is
    cached on the accessing instance and the descriptor removes itself from
    the owning class so later accesses are plain attribute lookups.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        # Cache the result on the instance so the descriptor machinery is
        # bypassed from now on.
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between py2 and py3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # The new name defaults to the move's own name.
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between modules.

    Records the (module, attribute) pair appropriate for the running Python
    version; _resolve() imports that module and fetches the attribute.
    """

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Attribute name precedence: explicit new_attr, then old_attr,
            # then the move's own name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects.

    Acts as the `six.moves` pseudo-module; each entry below is installed on
    the class as a lazy descriptor and resolved on first access.
    """


# Renamed attributes first, then renamed modules.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),

    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Install every move as a lazy descriptor on _MovedItems, then register the
# resulting pseudo-module in sys.modules under the six.moves name.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr

moves = sys.modules["django.utils.six.moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves."""
    name = move.name
    setattr(_MovedItems, name, move)
def remove_move(name):
    """Remove item from six.moves.

    Tries the class-level descriptor first, then the moves module's __dict__
    (where resolved values are cached by _LazyDescr.__get__).

    Raises:
        AttributeError: if no move by that name exists in either place.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Version-specific names of the method/function introspection attributes and
# of the dict iteration methods used by iterkeys/itervalues/iteritems below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_code = "__code__"
    _func_defaults = "__defaults__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_code = "func_code"
    _func_defaults = "func_defaults"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"


# A next()-like callable that works on both versions; the builtin is shadowed
# on purpose so `next` can be imported from this module.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator


if PY3:
    def get_unbound_function(unbound):
        # py3 has no unbound methods; the function is returned as-is.
        return unbound

    Iterator = object

    def callable(obj):
        # Emulates the builtin callable() (absent in py3.0/3.1) by checking
        # the MRO for __call__.
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    class Iterator(object):
        # Base class mapping the py2 next() protocol onto __next__.
        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    method = getattr(d, _iterkeys)
    return iter(method())
def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    method = getattr(d, _itervalues)
    return iter(method())
def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    method = getattr(d, _iteritems)
    return iter(method())
# Byte/text literal helpers and StringIO/BytesIO aliases per version.
if PY3:
    def b(s):
        # latin-1 maps code points 0-255 straight to byte values.
        return s.encode("latin-1")

    def u(s):
        return s

    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s

    def u(s):
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# exec / reraise / print compatibility. The py2 reraise must be built with
# exec_ because `raise tp, value, tb` is a syntax error under py3.
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        # Re-raise `value`, attaching `tb` when it differs from the
        # exception's current traceback.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    print_ = getattr(builtins, "print")
    del builtins

else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's globals/locals, mirroring the py2
            # exec statement.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")

    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")

    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)

        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            # If any argument is unicode, write everything as unicode so the
            # separator/newline mix cleanly.
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    namespace = {}
    return meta("NewBase", (base,), namespace)
### Additional customizations for Django ###

# Name of the MultiValueDict method that iterates (key, value-list) pairs.
if PY3:
    _iterlists = "lists"
else:
    _iterlists = "iterlists"


def iterlists(d):
    """Return an iterator over the values of a MultiValueDict."""
    return getattr(d, _iterlists)()
# Django-specific extra moves for the low-level thread modules.
add_move(MovedModule("_dummy_thread", "dummy_thread"))
add_move(MovedModule("_thread", "thread"))
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import groupby
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import (
index_at_dt,
)
from zipline.utils.data import RollingPanel
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(field,
                                   buffer_frame,
                                   digest_frame,
                                   pre_digest_values):
    """
    Forward-fill a buffer frame, falling back to the end-of-period values of a
    digest frame if the buffer frame has leading NaNs.

    Args:
        field: name of the field being filled (e.g. 'price').
        buffer_frame: DataFrame of raw minutes (index=dt, columns=sids).
        digest_frame: DataFrame of digested bars, or None when this
            frequency keeps no digest panel.
        pre_digest_values: dict of field -> sid -> {'dt', 'value'} holding
            the last values seen before the current window.

    Returns:
        The buffer frame, forward-filled.
    """
    # Get values which are NaN at the beginning of the period.
    first_bar = buffer_frame.iloc[0]

    def iter_nan_sids():
        """
        Helper for iterating over the remaining nan sids in first_bar.
        """
        # NOTE(review): first_bar is captured once above; whether the fills
        # below are reflected here depends on pandas view semantics for
        # iloc/chained assignment — TODO confirm on the pinned pandas.
        return (sid for sid in first_bar[first_bar.isnull()].index)

    # Try to fill with the last entry from the digest frame.
    if digest_frame is not None:
        # We don't store a digest frame for frequencies that only have a bar
        # count of 1.
        for sid in iter_nan_sids():
            buffer_frame[sid][0] = digest_frame.ix[-1, sid]

    # If we still have nan sids, try to fill with pre_digest_values.
    for sid in iter_nan_sids():
        prior_sid_value = pre_digest_values[field].get(sid)
        if prior_sid_value:
            # If the prior value is greater than the timestamp of our first
            # bar.
            if prior_sid_value.get('dt', first_bar.name) > first_bar.name:
                buffer_frame[sid][0] = prior_sid_value.get('value', np.nan)

    return buffer_frame.ffill()
def ffill_digest_frame_from_prior_values(field, digest_frame, prior_values):
    """
    Forward-fill a digest frame, falling back to the last known prior values
    if necessary.

    Args:
        field: name of the field being filled.
        digest_frame: DataFrame of digested bars, or None when only
            length-1 history specs exist for this frequency.
        prior_values: dict of field -> sid -> {'dt', 'value'}.

    Returns:
        The forward-filled digest frame (or None unchanged).
    """
    if digest_frame is not None:
        # Digest frame is None in the case that we only have length 1 history
        # specs for a given frequency.

        # It's possible that the first bar in our digest frame is storing NaN
        # values. If so, check if we've tracked an older value and use that as
        # an ffill value for the first bar.
        first_bar = digest_frame.ix[0]
        nan_sids = first_bar[first_bar.isnull()].index
        for sid in nan_sids:
            try:
                # Only use prior value if it is before the index,
                # so that a backfill does not accidentally occur.
                if prior_values[field][sid]['dt'] <= digest_frame.index[0]:
                    digest_frame[sid][0] = prior_values[field][sid]['value']
            except KeyError:
                # Allow case where there is no previous value.
                # e.g. with leading nans.
                pass
        digest_frame = digest_frame.ffill()
    return digest_frame
def freq_str_and_bar_count(history_spec):
    """
    Helper for getting the frequency string and bar count from a history spec.
    Used as a sort key by group_by_frequency.
    """
    frequency = history_spec.frequency
    return (frequency.freq_str, history_spec.bar_count)
def group_by_frequency(history_specs):
    """
    Takes an iterable of history specs and returns a dictionary mapping unique
    frequencies to a list of specs with that frequency.

    Within each list, the HistorySpecs are sorted by ascending bar count.

    Example:

    [HistorySpec(3, '1d', 'price', True),
     HistorySpec(2, '2d', 'open', True),
     HistorySpec(2, '1d', 'open', False),
     HistorySpec(5, '1m', 'open', True)]

    yields

    {Frequency('1d') : [HistorySpec(2, '1d', 'open', False)],
                        HistorySpec(3, '1d', 'price', True),
     Frequency('2d') : [HistorySpec(2, '2d', 'open', True)],
     Frequency('1m') : [HistorySpec(5, '1m', 'open', True)]}
    """
    # Sorting by (freq_str, bar_count) both orders each group by ascending
    # bar count and makes equal frequencies adjacent for groupby.
    ordered_specs = sorted(history_specs, key=freq_str_and_bar_count)
    grouped = {}
    for frequency, specs in groupby(ordered_specs,
                                    key=lambda spec: spec.frequency):
        grouped[frequency] = list(specs)
    return grouped
class HistoryContainer(object):
    """
    Container for all history panels and frames used by an algoscript.

    To be used internally by TradingAlgorithm, but *not* passed directly to the
    algorithm.

    Entry point for the algoscript is the result of `get_history`.
    """

    def __init__(self, history_specs, initial_sids, initial_dt):
        # history_specs: dict of key_str -> HistorySpec.
        # initial_sids: iterable of sids to pre-allocate panel columns for.
        # initial_dt: datetime at which the container starts tracking.

        # History specs to be served by this container.
        self.history_specs = history_specs
        self.frequency_groups = \
            group_by_frequency(itervalues(self.history_specs))

        # The set of fields specified by all history specs
        self.fields = set(spec.field for spec in itervalues(history_specs))

        # This panel contains raw minutes for periods that haven't been fully
        # completed. When a frequency period rolls over, these minutes are
        # digested using some sort of aggregation call on the panel (e.g. `sum`
        # for volume, `max` for high, `min` for low, etc.).
        self.buffer_panel = self.create_buffer_panel(
            initial_sids,
            initial_dt,
        )

        # Dictionaries with Frequency objects as keys.
        self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
            self.create_digest_panels(initial_sids, initial_dt)

        # Populating initial frames here, so that the cost of creating the
        # initial frames does not show up when profiling. These frames are
        # cached since mid-stream creation of containing data frames on every
        # bar is expensive.
        self.create_return_frames(initial_dt)

        # Helps prop up the prior day panel against having a nan, when the data
        # has been seen.
        self.last_known_prior_values = {field: {} for field in self.fields}

    @property
    def unique_frequencies(self):
        """
        Return an iterator over all the unique frequencies serviced by this
        container.
        """
        return iterkeys(self.frequency_groups)

    def create_digest_panels(self, initial_sids, initial_dt):
        """
        Initialize a RollingPanel for each unique panel frequency being stored
        by this container. Each RollingPanel pre-allocates enough storage
        space to service the highest bar-count of any history call that it
        serves.

        Relies on the fact that group_by_frequency sorts the value lists by
        ascending bar count.

        Returns:
            (panels, first_window_starts, first_window_closes), all dicts
            keyed by Frequency. Frequencies whose largest spec has
            bar_count == 1 get no panel entry.
        """
        # Map from frequency -> first/last minute of the next digest to be
        # rolled for that frequency.
        first_window_starts = {}
        first_window_closes = {}

        # Map from frequency -> digest_panels.
        panels = {}
        for freq, specs in iteritems(self.frequency_groups):

            # Relying on the sorting of group_by_frequency to get the spec
            # requiring the largest number of bars.
            largest_spec = specs[-1]
            if largest_spec.bar_count == 1:

                # No need to allocate a digest panel; this frequency will only
                # ever use data drawn from self.buffer_panel.
                first_window_starts[freq] = freq.window_open(initial_dt)
                first_window_closes[freq] = freq.window_close(
                    first_window_starts[freq]
                )
                continue

            initial_dates = index_at_dt(largest_spec, initial_dt)

            # Set up dates for our first digest roll, which is keyed to the
            # close of the first entry in our initial index.
            first_window_closes[freq] = initial_dates[0]
            first_window_starts[freq] = freq.window_open(initial_dates[0])

            # bar_count - 1 digested bars; the most recent bar always comes
            # from the buffer panel.
            rp = RollingPanel(len(initial_dates) - 1,
                              self.fields,
                              initial_sids)

            panels[freq] = rp

        return panels, first_window_starts, first_window_closes

    def create_buffer_panel(self, initial_sids, initial_dt):
        """
        Initialize a RollingPanel containing enough minutes to service all our
        frequencies.
        """
        max_bars_needed = max(freq.max_minutes
                              for freq in self.unique_frequencies)
        rp = RollingPanel(
            max_bars_needed,
            self.fields,
            initial_sids,
            # Restrict the initial data down to just the fields being used in
            # this container.
        )
        return rp

    def convert_columns(self, values):
        """
        If columns have a specific type you want to enforce, overwrite this
        method and return the transformed values.
        """
        return values

    def create_return_frames(self, algo_dt):
        """
        Populates the return frame cache.

        Called during init and at universe rollovers.
        """
        self.return_frames = {}
        for spec_key, history_spec in iteritems(self.history_specs):
            index = pd.to_datetime(index_at_dt(history_spec, algo_dt))
            frame = pd.DataFrame(
                index=index,
                columns=self.convert_columns(
                    self.buffer_panel.minor_axis.values),
                dtype=np.float64)
            self.return_frames[spec_key] = frame

    def buffer_panel_minutes(self,
                             buffer_panel=None,
                             earliest_minute=None,
                             latest_minute=None):
        """
        Get the minutes in @buffer_panel between @earliest_minute and
        @last_minute, inclusive.

        @buffer_panel can be a RollingPanel or a plain Panel. If a
        RollingPanel is supplied, we call `get_current` to extract a Panel
        object.

        If no panel is supplied, we use self.buffer_panel.

        If no value is specified for @earliest_minute, use all the minutes we
        have up until @latest minute.

        If no value for @latest_minute is specified, use all values up until
        the latest minute.
        """
        buffer_panel = buffer_panel or self.buffer_panel
        if isinstance(buffer_panel, RollingPanel):
            buffer_panel = buffer_panel.get_current()

        # Label-based slicing on the major (time) axis; endpoints inclusive.
        return buffer_panel.ix[:, earliest_minute:latest_minute, :]

    def update(self, data, algo_dt):
        """
        Takes the bar at @algo_dt's @data, checks to see if we need to roll any
        new digests, then adds new data to the buffer panel.
        """
        self.update_digest_panels(algo_dt, self.buffer_panel)

        fields = self.fields
        frame = pd.DataFrame(
            {sid: {field: bar[field] for field in fields}
             for sid, bar in data.iteritems()
             if (bar
                 and
                 bar['dt'] == algo_dt
                 and
                 # Only use data which is keyed in the data panel.
                 # Prevents crashes due to custom data.
                 sid in self.buffer_panel.minor_axis)})
        self.buffer_panel.add_frame(algo_dt, frame)

    def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
        """
        Check whether @algo_dt is greater than cur_window_close for any of our
        frequencies.  If so, roll a digest for that frequency using data drawn
        from @buffer panel and insert it into the appropriate digest panels.

        If @freq_filter is specified, only use the given data to update
        frequencies on which the filter returns True.
        """
        for frequency in self.unique_frequencies:
            if freq_filter is not None and not freq_filter(frequency):
                continue

            # We don't keep a digest panel if we only have a length-1 history
            # spec for a given frequency
            digest_panel = self.digest_panels.get(frequency, None)

            # A loop (not a single roll) so that a data gap spanning several
            # windows emits one digest per elapsed window.
            while algo_dt > self.cur_window_closes[frequency]:

                earliest_minute = self.cur_window_starts[frequency]
                latest_minute = self.cur_window_closes[frequency]
                minutes_to_process = self.buffer_panel_minutes(
                    buffer_panel,
                    earliest_minute=earliest_minute,
                    latest_minute=latest_minute,
                )

                # Create a digest from minutes_to_process and add it to
                # digest_panel.
                self.roll(frequency,
                          digest_panel,
                          minutes_to_process,
                          latest_minute)

                # Update panel start/close for this frequency.
                self.cur_window_starts[frequency] = \
                    frequency.next_window_start(latest_minute)
                self.cur_window_closes[frequency] = \
                    frequency.window_close(self.cur_window_starts[frequency])

    def roll(self, frequency, digest_panel, buffer_minutes, digest_dt):
        """
        Package up minutes in @buffer_minutes insert that bar into
        @digest_panel at index @last_minute, and update
        self.cur_window_{starts|closes} for the given frequency.
        """
        if digest_panel is None:
            # This happens if the only spec we have at this frequency has a bar
            # count of 1.
            return

        rolled = pd.DataFrame(
            index=self.fields,
            columns=buffer_minutes.minor_axis)

        for field in self.fields:

            if field in CLOSING_PRICE_FIELDS:
                # Use the last close, or NaN if we have no minutes.
                try:
                    prices = buffer_minutes.loc[field].ffill().iloc[-1]
                except IndexError:
                    # Scalar assignment sets the value for all entries.
                    prices = np.nan
                rolled.ix[field] = prices

            elif field == 'open_price':
                # Use the first open, or NaN if we have no minutes.
                try:
                    opens = buffer_minutes.loc[field].bfill().iloc[0]
                except IndexError:
                    # Scalar assignment sets the value for all entries.
                    opens = np.nan
                rolled.ix['open_price'] = opens

            elif field == 'volume':
                # Volume is the sum of the volumes during the
                # course of the period.
                volumes = buffer_minutes.ix['volume'].sum().fillna(0)
                rolled.ix['volume'] = volumes

            elif field == 'high':
                # Use the highest high.
                highs = buffer_minutes.ix['high'].max()
                rolled.ix['high'] = highs

            elif field == 'low':
                # Use the lowest low.
                lows = buffer_minutes.ix['low'].min()
                rolled.ix['low'] = lows

            # Record the non-NaN digest values so later frames can ffill
            # from them. (.iterkv() is the legacy pandas spelling of
            # iteritems on a Series.)
            for sid, value in rolled.ix[field].iterkv():
                if not np.isnan(value):
                    try:
                        prior_values = \
                            self.last_known_prior_values[field][sid]
                    except KeyError:
                        prior_values = {}
                        self.last_known_prior_values[field][sid] = \
                            prior_values
                    prior_values['dt'] = digest_dt
                    prior_values['value'] = value

        digest_panel.add_frame(digest_dt, rolled)

    def get_history(self, history_spec, algo_dt):
        """
        Main API used by the algoscript is mapped to this function.

        Selects from the overarching history panel the values for the
        @history_spec at the given @algo_dt.
        """
        field = history_spec.field
        bar_count = history_spec.bar_count
        do_ffill = history_spec.ffill

        index = pd.to_datetime(index_at_dt(history_spec, algo_dt))

        return_frame = self.return_frames[history_spec.key_str]

        # Overwrite the index.
        # Not worrying about values here since the values are overwritten
        # in the next step.
        return_frame.index = index

        if bar_count > 1:
            # Get the last bar_count - 1 frames from our stored historical
            # frames.
            digest_panel = self.digest_panels[history_spec.frequency]\
                .get_current()
            digest_frame = digest_panel[field].copy().ix[1 - bar_count:]
        else:
            digest_frame = None

        # Get minutes from our buffer panel to build the last row.
        buffer_frame = self.buffer_panel_minutes(
            earliest_minute=self.cur_window_starts[history_spec.frequency],
        )[field]

        if do_ffill:
            digest_frame = ffill_digest_frame_from_prior_values(
                field,
                digest_frame,
                self.last_known_prior_values,
            )
            buffer_frame = ffill_buffer_from_prior_values(
                field,
                buffer_frame,
                digest_frame,
                self.last_known_prior_values,
            )

        if digest_frame is not None:
            return_frame.ix[:-1] = digest_frame.ix[:]

        # The final (current) bar is aggregated from the raw buffer minutes
        # according to the field's semantics.
        if field == 'volume':
            return_frame.ix[algo_dt] = buffer_frame.fillna(0).sum()
        elif field == 'high':
            return_frame.ix[algo_dt] = buffer_frame.max()
        elif field == 'low':
            return_frame.ix[algo_dt] = buffer_frame.min()
        elif field == 'open_price':
            return_frame.ix[algo_dt] = buffer_frame.iloc[0]
        else:
            return_frame.ix[algo_dt] = buffer_frame.loc[algo_dt]

        # Returning a copy of the DataFrame so that we don't crash if the user
        # adds columns to the frame.  Ideally we would just drop any added
        # columns, but pandas 0.12.0 doesn't support in-place dropping of
        # columns.  We should re-evaluate this implementation once we're on a
        # more up-to-date pandas.
        return return_frame.copy()
|
|
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Flirtcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
    """Truncate x to an unsigned 32-bit value.

    Fix: the 'L' long-literal suffix (0xffffffffL) was dropped — it is
    unnecessary on py2 (ints auto-promote to long) and a syntax error on py3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Reverse the byte order of a 32-bit word."""
    b0 = x << 24
    b1 = (x << 8) & 0x00ff0000
    b2 = (x >> 8) & 0x0000ff00
    b3 = x >> 24
    return uint32(b0 | b1 | b2 | b3)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of in_buf (length must be a multiple of 4)."""
    swapped = []
    for offset in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[offset:offset + 4])
        swapped.append(struct.pack('@I', bytereverse(word)))
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in in_buf (bytes within each
    word are untouched)."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
def calc_hdr_hash(blk_hdr):
    """Return the double-SHA256 digest of a serialized block header."""
    first_pass = hashlib.sha256(blk_hdr).digest()
    second_pass = hashlib.sha256(first_pass).digest()
    return second_pass
def calc_hash_str(blk_hdr):
    """Return the conventional hex display form of a block header's hash.

    Python-2 only: str.encode('hex') does not exist on py3.
    """
    hash = calc_hdr_hash(blk_hdr)
    # Swap bytes within each 32-bit word, then reverse the word order, to
    # convert the internal digest layout into the displayed byte order.
    hash = bufreverse(hash)
    hash = wordreverse(hash)
    hash_str = hash.encode('hex')
    return hash_str
def get_blk_dt(blk_hdr):
	"""Extract the timestamp from a block header.

	Returns ``(month_start, nTime)`` where ``nTime`` is the raw 32-bit
	little-endian timestamp at header offset 68 and ``month_start`` is a
	datetime for the first day of that (local-time) month.
	"""
	nTime = struct.unpack("<I", blk_hdr[68:72])[0]
	stamp = datetime.datetime.fromtimestamp(nTime)
	month_start = datetime.datetime(stamp.year, stamp.month, 1)
	return (month_start, nTime)
def get_block_hashes(settings):
	"""Read the ordered block-hash list from settings['hashlist'].

	Returns one hash string per line, with trailing whitespace stripped.
	"""
	blkindex = []
	# BUGFIX: the original opened the file without ever closing it;
	# the with-block guarantees the handle is released.
	with open(settings['hashlist'], "r") as f:
		for line in f:
			blkindex.append(line.rstrip())
	print("Read " + str(len(blkindex)) + " hashes")
	return blkindex
def mkblockmap(blkindex):
	"""Map each block hash to its chain height (its index in blkindex)."""
	return {blk_hash: height for height, blk_hash in enumerate(blkindex)}
# Block header and extent on disk
# fn:     input file number the block was found in
# offset: byte position of the block payload (just past the 80-byte header)
# inhdr:  the 8-byte magic + length preamble read from the input file
# blkhdr: the 80-byte block header itself
# size:   payload length in bytes (declared block length minus the header)
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
	"""Reads raw block files and re-writes the blocks in chain (height) order.

	Blocks encountered ahead of their height are parked in ``blockExtents``
	(and, cache space permitting, ``outOfOrderData``) and flushed as soon as
	all preceding heights have been written.
	"""

	def __init__(self, settings, blkindex, blkmap):
		self.settings = settings
		self.blkindex = blkindex  # ordered list of block hashes
		self.blkmap = blkmap      # hash -> chain height
		self.inFn = 0             # current input file number
		self.inF = None           # current input file handle
		self.outFn = 0            # current output file number
		self.outsz = 0            # bytes written to the current output file
		self.outF = None          # current output file handle
		self.outFname = None
		self.blkCountIn = 0
		self.blkCountOut = 0
		self.lastDate = datetime.datetime(2000, 1, 1)
		self.highTS = 1408893517 - 315360000
		self.timestampSplit = False
		self.fileOutput = True
		self.setFileTime = False
		self.maxOutSz = settings['max_out_sz']
		if 'output' in settings:
			self.fileOutput = False
		if settings['file_timestamp'] != 0:
			self.setFileTime = True
		if settings['split_timestamp'] != 0:
			self.timestampSplit = True
		# Extents and cache for out-of-order blocks
		self.blockExtents = {}
		self.outOfOrderData = {}
		self.outOfOrderSize = 0 # running total size for items in outOfOrderData

	def writeBlock(self, inhdr, blk_hdr, rawblock):
		"""Append one block (preamble + header + payload) to the output,
		rotating output files on size or month boundaries as configured."""
		blockSize = len(inhdr) + len(blk_hdr) + len(rawblock)
		# Rotate the output file when the size cap would be exceeded.
		# BUGFIX: the original read the never-assigned ``self.inLen`` and
		# the bare locals outFname/highTS/outFn, so rotation crashed.
		if not self.fileOutput and ((self.outsz + blockSize) > self.maxOutSz):
			self.outF.close()
			if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
			self.outF = None
			self.outFname = None
			self.outFn = self.outFn + 1
			self.outsz = 0

		(blkDate, blkTS) = get_blk_dt(blk_hdr)
		if self.timestampSplit and (blkDate > self.lastDate):
			# BUGFIX: the original referenced the undefined ``hash_str`` and
			# bare locals (lastDate/outF/...), crashing on every month split.
			print("New month " + blkDate.strftime("%Y-%m"))
			self.lastDate = blkDate
			if self.outF:
				self.outF.close()
				if self.setFileTime:
					os.utime(self.outFname, (int(time.time()), self.highTS))
				self.outF = None
				self.outFname = None
				self.outFn = self.outFn + 1
				self.outsz = 0

		if not self.outF:
			if self.fileOutput:
				self.outFname = self.settings['output_file']
			else:
				self.outFname = "%s/blk%05d.dat" % (self.settings['output'], self.outFn)
			# BUGFIX: remember the name so the os.utime calls above work.
			print("Output file" + self.outFname)
			self.outF = open(self.outFname, "wb")

		self.outF.write(inhdr)
		self.outF.write(blk_hdr)
		self.outF.write(rawblock)
		self.outsz = self.outsz + blockSize

		self.blkCountOut = self.blkCountOut + 1
		if blkTS > self.highTS:
			self.highTS = blkTS

		if (self.blkCountOut % 1000) == 0:
			print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
					(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

	def inFileName(self, fn):
		"""Path of input block file number ``fn``."""
		return "%s/blk%05d.dat" % (self.settings['input'], fn)

	def fetchBlock(self, extent):
		'''Fetch block contents from disk given extents'''
		with open(self.inFileName(extent.fn), "rb") as f:
			f.seek(extent.offset)
			return f.read(extent.size)

	def copyOneBlock(self):
		'''Find the next block to be written in the input, and copy it to the output.'''
		extent = self.blockExtents.pop(self.blkCountOut)
		if self.blkCountOut in self.outOfOrderData:
			# If the data is cached, use it from memory and remove from the cache
			rawblock = self.outOfOrderData.pop(self.blkCountOut)
			self.outOfOrderSize -= len(rawblock)
		else: # Otherwise look up data on disk
			rawblock = self.fetchBlock(extent)
		self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

	def run(self):
		"""Scan the input files and emit all known blocks in height order."""
		while self.blkCountOut < len(self.blkindex):
			if not self.inF:
				fname = self.inFileName(self.inFn)
				print("Input file" + fname)
				try:
					self.inF = open(fname, "rb")
				except IOError:
					print("Premature end of block data")
					return

			inhdr = self.inF.read(8)
			# A leading NUL marks preallocated-but-unused space; advance to
			# the next input file. (The 1-byte slice keeps this bytes-safe.)
			if (not inhdr or (inhdr[0:1] == b"\0")):
				self.inF.close()
				self.inF = None
				self.inFn = self.inFn + 1
				continue

			inMagic = inhdr[:4]
			if (inMagic != self.settings['netmagic']):
				print("Invalid magic:" + inMagic)
				return
			inLenLE = inhdr[4:]
			su = struct.unpack("<I", inLenLE)
			inLen = su[0] - 80 # length without header
			blk_hdr = self.inF.read(80)
			inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

			hash_str = calc_hash_str(blk_hdr)
			# BUGFIX: the original consulted the module-global ``blkmap``
			# here (which only exists when run as a script) instead of the
			# instance's own map.
			if not hash_str in self.blkmap:
				print("Skipping unknown block " + hash_str)
				self.inF.seek(inLen, os.SEEK_CUR)
				continue

			blkHeight = self.blkmap[hash_str]
			self.blkCountIn += 1

			if self.blkCountOut == blkHeight:
				# If in-order block, just copy
				rawblock = self.inF.read(inLen)
				self.writeBlock(inhdr, blk_hdr, rawblock)

				# See if we can catch up to prior out-of-order blocks
				while self.blkCountOut in self.blockExtents:
					self.copyOneBlock()

			else: # If out-of-order, skip over block data for now
				self.blockExtents[blkHeight] = inExtent
				if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
					# If there is space in the cache, read the data
					# Reading the data in file sequence instead of seeking and fetching it later is preferred,
					# but we don't want to fill up memory
					self.outOfOrderData[blkHeight] = self.inF.read(inLen)
					self.outOfOrderSize += inLen
				else: # If no space in cache, seek forward
					self.inF.seek(inLen, os.SEEK_CUR)

		print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
	# Local import: only needed to decode the netmagic hex string below.
	import binascii

	if len(sys.argv) != 2:
		print("Usage: linearize-data.py CONFIG-FILE")
		sys.exit(1)

	# Parse the key=value config file, skipping comments and blank lines.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines (raw strings avoid invalid-escape warnings)
		m = re.search(r'^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Defaults for any keys missing from the config file.
	if 'netmagic' not in settings:
		settings['netmagic'] = 'f9beb4d9'
	if 'input' not in settings:
		settings['input'] = 'input'
	if 'hashlist' not in settings:
		settings['hashlist'] = 'hashlist.txt'
	if 'file_timestamp' not in settings:
		settings['file_timestamp'] = 0
	if 'split_timestamp' not in settings:
		settings['split_timestamp'] = 0
	if 'max_out_sz' not in settings:
		# 1000L long literal replaced: same value, no longer a SyntaxError
		# under Python 3; Python 2 promotes to long automatically.
		settings['max_out_sz'] = 1000 * 1000 * 1000
	if 'out_of_order_cache_sz' not in settings:
		settings['out_of_order_cache_sz'] = 100 * 1000 * 1000

	# Normalize the string values read from the config file.
	# int() auto-promotes on Python 2 where long() was used before.
	settings['max_out_sz'] = int(settings['max_out_sz'])
	settings['split_timestamp'] = int(settings['split_timestamp'])
	settings['file_timestamp'] = int(settings['file_timestamp'])
	# binascii.unhexlify is byte-identical to the Python-2-only
	# str.decode('hex') used before.
	settings['netmagic'] = binascii.unhexlify(settings['netmagic'])
	settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])

	if 'output_file' not in settings and 'output' not in settings:
		print("Missing output file / directory")
		sys.exit(1)

	blkindex = get_block_hashes(settings)
	blkmap = mkblockmap(blkindex)

	# Sanity check: this hash (presumably the genesis block) must appear
	# in the supplied hash list before any copying starts.
	if not "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" in blkmap:
		print("not found")
	else:
		BlockDataCopier(settings, blkindex, blkmap).run()
|
|
"""
The ``zen.io.gml`` module (available as ``zen.gml``) supports the reading and
writing network data in the `Graph Modeling Language (GML)
<http://en.wikipedia.org/wiki/Graph_Modelling_Language>`_. Both reading and
writing GML are supported (see ``read`` and ``write``).
GML is a flexible language for specifying the structure of a network (nodes
and edges) that can be annotated with arbitrary attributes and information.
This module provides a full implementation of the GML file format as specified
in the `technical report
<http://www.fim.uni-passau.de/fileadmin/files/lehrstuhl/brandenburg/projekte/gml/gml-technical-report.pdf>`_:
.. note::
Source: Michael Himsolt. *GML: A portable Graph File Format*. Technical
Report, Universitat Passau.
Functions
---------
.. autofunction:: zen.io.gml.read(fname[,weight_fxn=None])
.. autofunction:: zen.io.gml.write(G, filename, **kwargs)
"""
from zen.exceptions import *
from zen.graph import Graph
from zen.digraph import DiGraph
from zen.bipartite import BipartiteGraph
from gml_codec import BasicGMLCodec, ZenGMLCodec
from gml_interpreter import GMLInterpreter
from gml_tokenizer import GMLTokenizer
from collections import Iterable, Hashable
import os
import cgi
import re
import codecs
import pdb
__all__ = ['read','write']
# Characters a GML numeric value may legally start with (digits plus the
# sign characters), and the same set extended with '"' for quoted strings.
# Used by format_zen_data() to sanity-check encoder output.
DIGITS = tuple(['%d' % x for x in range(10)]) + ('+','-')
DIGITS_AND_QUOTES = DIGITS + ('"',)
def write(G, filename, **kwargs):
	"""
	Writes graph to file using Graph Modeling Language (gml). Node / Edge /
	Graph objects, if not None, are stored in the `name` attribute, and are
	restricted to numeric (but not complex), string, and boolean data types,
	otherwise an exception is raised.

	Node / Edge / Graph data, if not None, are stored in a zen_data attribute
	and are similarly restricted. Support may be added later for serialization
	of arbitrary objects / data and associated to zen graphs.

	see
	http://www.fim.uni-passau.de/fileadmin/files/lehrstuhl/brandenburg/projekte/gml/gml-technical-report.pdf
	for more info about gml.

	**Args**

		* ``G`` (:py:class:`zen.Graph`, :py:class:'zen.Digraph`,
			:py:class:`zen.BipartiteGraph): Graph object to be written to
			file. Hypergraphs are not supported.
		* ``filename`` (str): Absolute path for the file to be written.

	**KwArgs**

		* ``write-data`` (bool | (bool, bool)): If 2-tuple of booleans
			supplied, first indicates whether to write out node data second
			whether to write out edge data. If bool provided, it is applied
			for both node and edge data.
		* ``use-zen-data`` (bool | (bool, bool)): Indicates whether to write
			out If 2-tuple of booleans supplied, first indicates whether to
			write out node data second whether to write out edge data. If bool
			provided, it is applied for both node and edge data.

	**Returns**:
		* None
	"""
	# Determine the write mode. There are various options the user can specify
	# which are reconsiled and validated here
	write_node_data, write_edge_data, use_node_zen_data, use_edge_zen_data = resolve_write_mode(kwargs)

	# Get the encoder to use. This call resolves the various ways the user can
	# specify encoding, and does basic checks that the encoder is valid
	enc = resolve_codec(kwargs)

	fh = open(filename, 'w')
	fh.write('# This is a graph object in gml file format\n')
	fh.write('# produced by the zen graph library\n\n')

	# Describe the encoding method used to generate the file.
	# NOTE(review): assumes the codec instance exposes __name__ both on
	# itself and on its encode_str function -- confirm against the codecs.
	fh.write('ZenCodec "%s"\n' % enc.__name__)
	fh.write('ZenStringEncoder "%s"\n\n' % enc.encode_str.__name__)

	# Start writing the graph
	fh.write('graph [\n')
	if G.is_directed():
		fh.write('\tdirected 1\n')
	else:
		fh.write('\tdirected 0\n')
	if isinstance(G, BipartiteGraph):
		is_bipartite = True
		fh.write('\tbipartite 1\n')
	else:
		is_bipartite = False
		fh.write('\tbipartite 0\n')

	# iterate over nodes, writing them to the new gml file
	for nidx, nobj, ndata in G.nodes_iter_(obj=True, data=True):
		fh.write('\tnode [\n')
		fh.write('\t\tid ' + str(nidx) + '\n')
		# record node-set membership so a read can round-trip bipartiteness
		if is_bipartite:
			fh.write(format_zen_data('isInU', G.is_in_U_(nidx), 2, enc))
		if nobj != None:
			fh.write(format_zen_data('name', nobj, 2, enc))
		if ndata != None and write_node_data:
			if use_node_zen_data:
				fh.write(format_zen_data('zenData', ndata, 2, enc))
			else: # expects zenData to be dict
				for key, val in ndata.items():
					fh.write(format_zen_data(key, val, 2, enc))
		fh.write('\t]\n')

	# iterate over edges
	for eidx, edata, weight in G.edges_iter_(data=True, weight=True):
		fh.write('\tedge [\n')
		# for digraphs, assumes endpoints order [source, target]
		fh.write('\t\tsource ' + str(G.endpoints_(eidx)[0]) + '\n')
		fh.write('\t\ttarget ' + str(G.endpoints_(eidx)[1]) + '\n')
		fh.write('\t\tweight ' + str(weight) + '\n')
		if edata != None and write_edge_data:
			if use_edge_zen_data:
				fh.write(format_zen_data('zenData', edata, 2, enc))
			else: #expects zenData to be dict
				for key, val in edata.items():
					fh.write(format_zen_data(key, val, 2, enc))
		fh.write('\t]\n')

	fh.write(']\n')
	fh.close()
def format_zen_data(keyname, data, tab_depth, encoder, strict=True):
	"""
	Reformats supplied data to use gml. Enforces restrictions on the types of
	data that can be written to gml.

	**Args**

		* keyname (str): key to be used in gml. Needed here because in gml
			lists are made by repeating the key in front of each value. Must
			be strictly alphanumeric when ``strict``.
		* data (bool | int | long | float | str | dict | list): object to be
			written in gml
		* tab_depth (int): number of tab characters to add to the beginning
			of each line for nice formatting.
		* encoder: codec whose ``encode()`` turns ``data`` into a gml value.
		* strict (bool): when True, raise ZenException instead of writing
			output that violates the gml specification.

	**Returns**
		* formatted_data (str): gml representation of data
	"""
	# Validation: key names must be strictly alphanumeric
	if re.search('[^a-zA-Z0-9]', keyname) and strict:
		raise ZenException(
			'gml supports only characters from [a-zA-Z0-9] in keys')

	formatted_data = ''
	tabs = '\t' * tab_depth

	if not isinstance(data, (dict, list, tuple)):
		encoded_data = encoder.encode(data)
		# Validate encoder output
		if strict:
			try:
				assert(isinstance(encoded_data, basestring))
				assert(encoded_data.startswith(DIGITS_AND_QUOTES))
				encoded_data.encode('ascii')
				if encoded_data.startswith(DIGITS):
					num = float(encoded_data)
					# BUGFIX: the original used ``or`` here, which made
					# the 32-bit range check a no-op (always true).
					assert(num < 2147483647 and num > -2147483648)
			# BUGFIX: ``except A, B:`` is Python 2 syntax that catches only
			# A and binds it to the *name* B -- UnicodeEncodeError was never
			# actually caught. A tuple catches both.
			except (AssertionError, UnicodeEncodeError):
				raise ZenException('GML Encoder has violated gml specifications. '\
					'see <http://www.fim.uni-passau.de/fileadmin/files/lehrstuhl/brandenburg/projekte/gml/gml-technical-report.pdf>. \n Use '\
					'gml.write(..., strict=False) to force writing.')

		# The encoded data is legal for gml. Append extras.
		formatted_data = tabs + keyname + ' ' + encoded_data + '\n'

	# Recursive call for dicts
	elif isinstance(data, dict):
		formatted_data += tabs + keyname + ' [\n'
		for key, val in data.items():
			# BUGFIX: propagate ``strict`` into the recursion (it was
			# silently reset to True before).
			formatted_data += format_zen_data(key, val, tab_depth + 1,
				encoder, strict)
		formatted_data += tabs + ']\n'

	# Recursive call for lists. GML represents lists by repeating the
	# key with different values
	else:
		assert(isinstance(data, (list, tuple)))
		for val in data:
			formatted_data += format_zen_data(keyname, val, tab_depth,
				encoder, strict)

	return formatted_data
def make_tree(fname, **kwargs):
	"""Tokenize and interpret the GML file ``fname``.

	Returns the raw document tree produced by GMLInterpreter; the codec
	is resolved from ``kwargs`` exactly as for read()/write().
	"""
	# resolve the codec. The user can specify the codec in various ways.
	codec = resolve_codec(kwargs)

	# read the file; the with-block guarantees the handle is closed even
	# if read() raises (the original leaked it in that case).
	with open(fname, 'r') as fh:
		gml_str = fh.read()

	# tokenize the gml string
	tok = GMLTokenizer()
	tokens = tok.tokenize(gml_str)

	# interpret the gml document into an internal datastructure
	# NOTE(review): the tokenizer instance is passed as the interpreter's
	# second argument -- confirm GMLInterpreter expects (codec, tokenizer).
	interpreter = GMLInterpreter(codec, tok)
	gml_tree = interpreter.interpret(tokens)

	return gml_tree
def read(fname,**kwargs):
	"""
	Read GML-formatted network data stored in file named ``fname``.

	The node's ``id`` attribute is used to specify the node index. The node's
	``name`` attribute is preferably used as the node object.
	However, if the ``name`` attribute is missing and the ``label`` is present,
	then the node's ``label`` attribute will be used as the node object. If
	both are missing, then the node id will be used as the node object.

	.. note::
		Currently graph attributes are not supported by the reader. If
		encountered, they will simply be skipped over and not added to the
		final graph. This is simply because graph objects don't support
		arbitrary data yet.

	**KwArgs**:

		* ``weight_fxn [=None]``: derive weight assignments from edge data. If
			specified, this function is called with one parameter: the full
			set of attributes that were specified for the edge.
	"""
	# extract keyword arguments
	weight_fxn = kwargs.pop('weight_fxn',None)

	# parse the gml into a python dict structure
	gml_tree = make_tree(fname, **kwargs)

	if 'graph' not in gml_tree:
		return None

	graph_tree = gml_tree['graph']
	# BUGFIX: the original tested ``gml_tree`` (always a dict) instead of
	# ``graph_tree`` and then indexed the dict with [0], so files holding
	# several graphs crashed instead of returning the first one.
	if isinstance(graph_tree, list):
		graph_tree = graph_tree[0]
		print('Warning: multiple graphs stored in this file. Use '
			'gml.read_all(fname, [...]) to get list of all graphs')
	return build_graph(graph_tree, weight_fxn)
def read_all(fname, **kwargs):
	"""Read every graph stored in the GML file ``fname``.

	Returns a list of graphs (even when the file holds just one), or None
	when the document contains no ``graph`` entry. Accepts the same
	keyword arguments as read().
	"""
	# extract keyword arguments
	weight_fxn = kwargs.pop('weight_fxn',None)

	# parse the gml into a python dict structure
	gml_tree = make_tree(fname, **kwargs)

	if 'graph' not in gml_tree:
		return None

	# BUGFIX: the original unconditionally evaluated ``gml_tree[0]`` (the
	# document dict, not the graph list), which raised for every input,
	# and returned a single graph rather than the promised list.
	graph_trees = gml_tree['graph']
	if not isinstance(graph_trees, list):
		graph_trees = [graph_trees]
	return [build_graph(tree, weight_fxn) for tree in graph_trees]
def build_graph(graph_tree, weight_fxn):
	"""Construct a zen Graph/DiGraph/BipartiteGraph from an interpreted
	GML ``graph`` subtree.

	``weight_fxn``, when not None, is called with each edge's attribute
	dict and overrides any 'weight' attribute found in the file.
	"""
	# What kind of graph is being built?
	is_bipartite = bool('bipartite' in graph_tree and graph_tree['bipartite'])
	is_directed = bool('directed' in graph_tree and graph_tree['directed'])
	if is_bipartite:
		G = BipartiteGraph()
	elif is_directed:
		G = DiGraph()
	else:
		G = Graph()

	# Build the nodes
	if 'node' in graph_tree:
		# get the list of nodes
		nodes = graph_tree['node']

		# ensure the node-list is a list (needed if there's only one node)
		if not isinstance(nodes, list):
			nodes = [ nodes ]

		# Build each node and add to the graph
		for node in nodes:
			# We can only use non-negative integer node ids as graph indices.
			# If that's not the case, the id is treated like any other
			# attribute. BUGFIX: the original read node['id'] unconditionally,
			# raising KeyError for nodes without an id.
			has_id = 'id' in node
			has_valid_id = has_id and isinstance(node['id'], int) and node['id'] >= 0
			node_idx = node['id'] if has_valid_id else None

			# For bipartite graphs determine which node set this belongs to
			# NOTE(review): assumes every node carries an 'isInU' entry --
			# write() always emits it, but foreign GML files may not.
			if is_bipartite:
				is_in_U = node['isInU']

			# collect and verify all the node properties
			node_data = {}
			node_obj = None
			zen_data = None
			for key, val in node.items():
				if key == 'name':
					node_obj = val
				# give preference to 'name' as source of node_obj
				elif key == 'label' and node_obj is None:
					node_obj = val
				elif key == 'zenData':
					zen_data = val
				# node_data is dict of all other attributes (incl. 'id',
				# matching the original behaviour)
				else:
					node_data[key] = val

			# if zenData is the only other attribute, _set_ it as node_data,
			# else _append_ it
			if zen_data is not None:
				if len(node_data) == 0:
					node_data = zen_data
				else:
					node_data['zenData'] = zen_data
			elif len(node_data) == 0:
				node_data = None

			# make sure that the node object is hashable; otherwise fall back
			# to a tuple (for iterables) or drop it.
			# BUGFIX: a stray line-continuation backslash after the Hashable
			# test made this whole section a SyntaxError.
			if not isinstance(node_obj, basestring) and node_obj is not None:
				if not isinstance(node_obj, Hashable):
					if isinstance(node_obj, Iterable):
						node_obj = tuple(node_obj)
					else:
						node_obj = None

			# For bipartite graph, this insertion method does not guarantee
			# that indices will be unchanged after a read-write cycle
			if is_bipartite:
				G.add_node_by_class(is_in_U, node_obj, node_data)
			elif has_valid_id:
				if is_directed:
					G.add_node_x(node_idx, G.edge_list_capacity,
						G.edge_list_capacity, node_obj, node_data)
				else:
					G.add_node_x(node_idx, G.edge_list_capacity, node_obj,
						node_data)
			else:
				# BUGFIX: the original branched on ``G.is_directed`` (the
				# bound method object, always truthy) -- harmless only
				# because both branches were identical.
				G.add_node(nobj=node_obj, data=node_data)

	# add edges
	if 'edge' in graph_tree:
		# ensure edge list is a list (needed if there is only one edge)
		edges = graph_tree['edge']
		if not isinstance(edges, list):
			edges = [ edges ]

		# iterate over the edges, add each one to the graph
		for edge in edges:
			# make sure source and target are specified
			source = None
			target = None
			if 'source' not in edge:
				raise ZenException('Edge is missing the source attribute '\
					'(edge = %s)' % str(edge))
			if 'target' not in edge:
				raise ZenException('Edge is missing the target attribute '\
					'(edge = %s)' % str(edge))

			weight = 1
			edge_idx = None
			zen_data = None
			edge_data = {}
			for key, val in edge.items():
				if key == 'id':
					edge_idx = val
					if type(val) != int:
						raise ZenException('Edge id attribute must be a '\
							'positive integer (edge = %s)' % str(edge))
				elif key == 'source':
					source = val
					if type(val) != int or val < 0:
						raise ZenException('Edge source attribute must be a '\
							'positive integer (edge = %s)' % str(edge))
				elif key == 'target':
					target = val
					if type(val) != int or val < 0:
						raise ZenException('Edge target attribute must be a '\
							'positive integer (edge = %s)' % str(edge))
				elif key == 'weight':
					weight = float(val)
				elif key == 'zenData':
					zen_data = val
				# edge_data is dict of all other attributes
				else:
					edge_data[key] = val

			# give precedence to a weight-getting function if provided
			if weight_fxn is not None:
				weight = weight_fxn(edge)

			# if zenData is the only attribute aside from those handled above,
			# _set_ it as edge_data, else _append_ it
			if zen_data is not None:
				if len(edge_data) == 0:
					edge_data = zen_data
				else:
					edge_data['zenData'] = zen_data
			elif len(edge_data) == 0:
				edge_data = None

			if edge_idx is not None:
				G.add_edge_x(edge_idx, source, target, edge_data, weight)
			else:
				G.add_edge_(source, target, edge_data, weight)

	return G
def resolve_write_mode(kwargs):
	"""Work out from ``kwargs`` whether node / edge data should be written
	and whether each should be written as a single ``zenData`` entry.

	Returns (write_node_data, write_edge_data, use_node_zen_data,
	use_edge_zen_data). Raises ZenException when a supplied value is not
	a bool or a 2-tuple of bools.
	"""
	# Decide whether to write node and edge data. Default is to write both.
	write_node_data, write_edge_data = True, True
	if 'write_data' in kwargs:
		write_data = kwargs['write_data']
		# write_data might be a tuple ...
		if type(write_data) == tuple:
			write_node_data, write_edge_data = write_data
		# ... or just a single bool
		else:
			write_node_data = write_edge_data = write_data
		# validation
		# BUGFIX: the original called the misspelled ``isintstance`` and
		# ``zenException`` and referenced an undefined local in the message,
		# so the error path itself raised NameError.
		if (not isinstance(write_node_data, bool) or
				not isinstance(write_edge_data, bool)):
			raise ZenException('write_data keyword argument takes bool or'
				' 2-tuple of bools. Found: %s (%s)' % (
				write_data, type(write_data)))

	# Decide *how* to write node / edge data to file.
	# Default is to use zen_data for both.
	use_node_zen_data, use_edge_zen_data = True, True
	if 'use_zen_data' in kwargs:
		use_zen_data = kwargs['use_zen_data']
		if type(use_zen_data) == tuple:
			use_node_zen_data, use_edge_zen_data = use_zen_data
		else:
			use_node_zen_data = use_edge_zen_data = use_zen_data
		# validation
		# BUGFIX: this used to re-validate the write_* values (and report
		# them as write_data) instead of checking use_zen_data.
		if (not isinstance(use_node_zen_data, bool) or
				not isinstance(use_edge_zen_data, bool)):
			raise ZenException('use_zen_data keyword argument takes bool or'
				' 2-tuple of bools. Found: %s (%s)' % (
				use_zen_data, type(use_zen_data)))

	return write_node_data, write_edge_data, use_node_zen_data, use_edge_zen_data
def resolve_codec(kwargs):
	"""Select and validate the GML data codec based on ``kwargs``.

	Accepted keywords: ``gml_codec`` (a codec instance), ``gml_coding``
	('basic' or 'zen'), and ``string_encoder`` / ``string_decoder``
	(plain str->str functions patched onto the chosen codec).
	"""
	# Resolve the data encoder; check if user passed an encoder *instance*.
	if 'gml_codec' in kwargs:
		# BUGFIX: this read kwargs['encoder'], so passing the documented
		# ``gml_codec`` keyword always raised KeyError.
		enc = kwargs['gml_codec']
		try:
			assert(isinstance(enc.encode(''), basestring))
			enc.__name__.encode('ascii')
		except (AttributeError, AssertionError):
			raise ZenException('encoder must define encode() to take type '\
				'basestring and return type basestring containing only ascii-'\
				'encodable characers. It must also provide an ascii-encodable '\
				'__name__ attribute.')

	# Resolve the data encoder; check if user passed an encoder by *name*.
	elif 'gml_coding' in kwargs:
		# BUGFIX: this read kwargs['encoding'] (a key never checked for),
		# so selecting a codec by name always raised KeyError.
		if kwargs['gml_coding'] == 'basic':
			enc = BasicGMLCodec()
		elif kwargs['gml_coding'] == 'zen':
			enc = ZenGMLCodec()
		else:
			raise ZenException('encoding must be string equal to "basic" or'\
				' "zen"')

	# default encoder
	else:
		enc = BasicGMLCodec()

	# User can also just pass a string-encoder.
	# (The full-fledged gml encoders handle various data types)
	if 'string_encoder' in kwargs:
		str_enc = kwargs['string_encoder']
		try:
			assert(isinstance(str_enc(''), basestring))
		except (TypeError, AssertionError):
			raise ZenException('string_encoder must be a function that takes '\
				'basestring and returns basestring.')
		enc.encode_str = str_enc

	# User can also just pass a string-decoder.
	# (The full-fledged gml encoders handle various data types)
	if 'string_decoder' in kwargs:
		str_dec = kwargs['string_decoder']
		try:
			assert(isinstance(str_dec(''), basestring))
		except (TypeError, AssertionError):
			raise ZenException('string_decoder must be a function that takes '\
				'basestring and returns basestring.')
		enc.decode_str = str_dec

	return enc
|
|
"""
Collection of views related to how talks and events in general are scheduled
on barcamps.
"""
import datetime
import collections
import logging
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from .. import forms, models, utils
from .base import BarcampBaseView, APP_NAME
LOG = logging.getLogger(__name__)
class BarcampScheduleView(BarcampBaseView):
    """
    The schedule view presents all the talks and side events of the
    barcamp. The normal web version renders a grid with slots and
    places (i.e. rooms) as dimensions while the iphone version just
    provides a simple listening per day and room.
    """
    template_name = 'barcamp/barcamp-schedule.html'
    template_name_iphone = 'barcamp/iphone/barcamp-schedule.html'

    def __init__(self, *args, **kwargs):
        super(BarcampScheduleView, self).__init__(*args, **kwargs)
        # Pre-declare attributes that prepare()/view() fill in later.
        self.dict_grid = None
        self.open_slots = None
        self.days = None
        self.rooms = None

    def prepare(self, *args, **kwargs):
        """Resolve the barcamp's day range and its session rooms."""
        super(BarcampScheduleView, self).prepare(*args, **kwargs)
        self.days = utils.get_days(self.barcamp.start, self.barcamp.end)
        self.rooms = self.barcamp.places.filter(is_sessionroom=True)

    def view(self, *args, **kwargs):
        """Provide a full grid of talks as well as the side events."""
        rooms = self.rooms
        # Talks without a timeslot are listed separately from the grid.
        detached_talks = models.Talk.objects.filter(timeslot=None,
            barcamp=self.barcamp).all()
        sideevents = models.SideEvent.objects.filter(barcamp=self.barcamp)\
            .order_by('start')
        # Group the side events by calendar day for the template.
        se_per_day = collections.defaultdict(list)
        for event in sideevents:
            se_per_day[event.start.date()].append(event)
        utils.mark_talklist_permissions(detached_talks, self.request.user,
            self.barcamp)
        self.data['detached_talks'] = detached_talks
        # Emit sorted (day, events) pairs; dict ordering is not guaranteed.
        self.data['sideevents'] = [
            (k, se_per_day[k]) for k in sorted(se_per_day.keys())
        ]
        # create_from_barcamp returns a tuple; element 0 is the grid used
        # by the template.
        self.data['grid'] = utils.SlotGrid.create_from_barcamp(self.barcamp,
            list(rooms), per_day=True, mark_for_user=self.request.user)[0]
        return self.render()

    def view_iphone(self, *args, **kwargs):
        """
        Here we only require a simple list of talks per room per day
        """
        # Build a day -> room -> [talks] skeleton covering every day/room.
        days = {}
        for day in self.days:
            days[day.date()] = {}
            for room in self.rooms:
                days[day.date()][room] = []
        self.data['days'] = self.days
        for talk in models.Talk.objects.filter(barcamp=self.barcamp)\
            .order_by('start'):
            days[talk.start.date()][talk.place].append(talk)
        self.data['talks_per_day'] = [
            {'day': day, 'rooms': rooms} for day, rooms in days.items()
        ]
        return self.render()
class BarcampNowView(BarcampBaseView):
    """
    iPhone-oriented view listing every event of the barcamp that is
    running right now.
    """
    template_name_iphone = 'barcamp/iphone/now.html'

    def view(self, *args, **kwargs):
        """Expose all events whose time span covers the current moment."""
        current_moment = datetime.datetime.now()
        active_events = self.barcamp.events.filter(
            start__lte=current_moment, end__gte=current_moment)
        self.data['events'] = active_events
        return self.render()
class BarcampUpcomingView(BarcampBaseView):
    """
    iPhone-oriented view listing the events coming up next. The number
    of listed events equals the number of places the barcamp has.
    """
    template_name_iphone = 'barcamp/iphone/upcoming.html'

    def view(self, *args, **kwargs):
        """Expose the next events, capped at the number of places."""
        current_moment = datetime.datetime.now()
        limit = self.barcamp.places.count()
        upcoming = self.barcamp.events.filter(start__gte=current_moment) \
            .order_by('start')[:limit]
        self.data['events'] = upcoming
        return self.render()
class BarcampCreateSlotView(BarcampBaseView):
    """
    Lets organizers add a new timeslot to one of the barcamp's days.
    """
    template_name = 'barcamp/create-slot.html'

    def view(self, *args, **kwargs):
        """Render the slot form (GET) or create the slot (POST)."""
        if self.request.method != 'POST':
            self.data['form'] = forms.CreateSlotForm(barcamp=self.barcamp)
            return self.render()
        form = forms.CreateSlotForm(self.request.POST, barcamp=self.barcamp)
        if form.is_valid():
            slot = models.TimeSlot()
            slot.barcamp = self.barcamp
            slot.start = form.get_start()
            slot.end = form.get_end()
            # Room value u'0' means "no specific room".
            room_pk = form.cleaned_data['room']
            if room_pk != u'0':
                slot.place = self.barcamp.places.get(pk=room_pk)
            slot.save()
            return HttpResponseRedirect(reverse('barcamp:schedule',
                current_app=APP_NAME, args=[self.barcamp.slug]))
        self.data['form'] = form
        return self.render()
class BarcampDeleteSlotView(BarcampBaseView):
    """
    Asks for confirmation (GET) and then removes a timeslot (POST).
    """
    template_name = 'barcamp/confirm-delete-slot.html'

    def view(self, *args, **kwargs):
        slot = get_object_or_404(self.barcamp.slots, pk=kwargs.get('slot_pk'))
        if self.request.method == 'POST':
            # Unlink the talks from the slot before deleting it.
            slot.talks.clear()
            slot.delete()
            return HttpResponseRedirect(reverse('barcamp:schedule',
                current_app=APP_NAME, args=[self.barcamp.slug]))
        self.data['slot'] = slot
        return self.render()
class BarcampCreateTalkView(BarcampBaseView):
    """
    Create a talk within a given slot.
    """
    template_name = 'barcamp/create-talk-for-slot.html'

    def view(self, *args, **kwargs):
        """Render/handle the talk-creation form for a slot+room pair."""
        slot = get_object_or_404(self.barcamp.slots.select_related(),
            pk=kwargs['slot_pk'])
        room = get_object_or_404(self.barcamp.places, pk=kwargs['room_pk'])
        # Make sure, that the room is still free
        if 0 < models.Talk.objects.filter(place=room, timeslot=slot).count():
            return render_to_response('barcamp/slot-taken.html', self.data,
                context_instance=RequestContext(self.request))
        if self.request.method == 'POST':
            form = forms.TalkForSlotForm(self.request.POST)
            # Attributes set before is_valid() -- presumably consulted by
            # the form's validation; confirm in forms.TalkForSlotForm.
            form.barcamp = self.barcamp
            form.timeslot = slot
            form.room = room
            if form.is_valid():
                talk = form.save(commit=False)
                # Attach the talk to the chosen slot/room and copy the
                # slot's time range onto the talk.
                talk.barcamp = self.barcamp
                talk.timeslot = slot
                talk.place = room
                talk.start = slot.start
                talk.end = slot.end
                talk.save()
                return HttpResponseRedirect(reverse('barcamp:schedule',
                    current_app=APP_NAME, args=[self.barcamp.slug]))
        else:
            form = forms.TalkForSlotForm()
        self.data.update({
            'form': form,
            'slot': slot,
            'room': room
        })
        return self.render(self.template_name)
class BarcampEditTalkView(BarcampBaseView):
    """
    Edit a talk without changing its slot, start and end time.
    """
    template_name = 'barcamp/edit-talk-for-slot.html'

    def view(self, *args, **kwargs):
        """Render/handle the edit form for an existing talk."""
        talk = get_object_or_404(models.Talk.objects.select_related(),
            pk=kwargs['talk_pk'], barcamp=self.barcamp)
        # Only staff/superusers, the talk's speakers and the barcamp's
        # organizers may edit; everyone else receives a 404.
        if not (self.request.user.is_staff or self.request.user.is_superuser
                or self.request.user in talk.speakers.all()
                or self.request.user in self.organizers):
            raise Http404
        if self.request.method == 'POST':
            form = forms.TalkForSlotForm(self.request.POST, instance=talk)
            # Keep the talk bound to its current slot/room while editing.
            form.barcamp = self.barcamp
            form.timeslot = talk.timeslot
            form.room = talk.place
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(reverse('barcamp:schedule',
                    args=[self.barcamp.slug], current_app=APP_NAME))
        else:
            form = forms.TalkForSlotForm(instance=talk)
        self.data.update({
            'talk': talk,
            'form': form,
        })
        return self.render(self.template_name)
class BarcampDeleteTalkView(BarcampBaseView):
    """
    Asks for confirmation (GET) and deletes a talk (POST).
    """
    template_name = 'barcamp/confirm-delete-talk.html'

    def view(self, *args, **kwargs):
        talk = get_object_or_404(models.Talk.objects.select_related(),
            pk=kwargs['talk_pk'], barcamp=self.barcamp)
        user = self.request.user
        allowed = (user.is_staff or user.is_superuser
                   or user in talk.speakers.all()
                   or user in self.organizers)
        if not allowed:
            raise Http404
        if self.request.method == 'POST':
            talk.delete()
            return HttpResponseRedirect(reverse('barcamp:schedule',
                args=[self.barcamp.slug], current_app=APP_NAME))
        self.data['talk'] = talk
        return self.render(self.template_name)
class BarcampDetachTalkView(BarcampBaseView):
    """
    Removes a talk from its timeslot without deleting the talk itself.
    """
    template_name = 'barcamp/confirm-detach-talk.html'

    def view(self, *args, **kwargs):
        talk = get_object_or_404(models.Talk.objects.select_related(),
            pk=kwargs['talk_pk'], barcamp=self.barcamp)
        user = self.request.user
        may_detach = (user.is_staff or user.is_superuser
                      or user in talk.speakers.all()
                      or user in self.organizers)
        if not may_detach:
            raise Http404
        if self.request.method == 'POST':
            talk.timeslot = None
            talk.save()
            return HttpResponseRedirect(reverse('barcamp:schedule',
                args=[self.barcamp.slug], current_app=APP_NAME))
        self.data['talk'] = talk
        return self.render(self.template_name)
class BarcampMoveTalkView(BarcampBaseView):
    """
    Move talks between (available) timeslots.
    """
    template_name = 'barcamp/move-talk.html'

    def view(self, *args, **kwargs):
        """Render/handle the form that reassigns a talk to a free slot."""
        talk = get_object_or_404(models.Talk.objects.select_related(),
            pk=kwargs['talk_pk'], barcamp=self.barcamp)
        # Only staff/superusers, the talk's speakers and the organizers
        # may move talks; everyone else receives a 404.
        if not (self.request.user.is_staff or self.request.user.is_superuser
                or self.request.user in talk.speakers.all()
                or self.request.user in self.organizers):
            raise Http404
        # create_from_barcamp returns a tuple; element 1 is used here as
        # the collection of still-open slots.
        open_slots = utils.SlotGrid.create_from_barcamp(
            self.barcamp)[1]
        if self.request.method == 'POST':
            form = forms.MoveTalkForm(self.request.POST, instance=talk,
                open_slots=open_slots)
            if form.is_valid():
                slot = form.open_slots[form.cleaned_data['slot']]
                # Move the talk and copy the new slot's time range.
                talk.place = slot.place
                talk.timeslot = slot.slot
                talk.start = slot.slot.start
                talk.end = slot.slot.end
                talk.save()
                return HttpResponseRedirect(reverse('barcamp:schedule',
                    args=[self.barcamp.slug], current_app=APP_NAME))
        else:
            form = forms.MoveTalkForm(instance=talk,
                open_slots=open_slots)
        self.data.update({
            'form': form,
            'talk': talk,
        })
        return self.render(self.template_name)
class BarcampEventView(BarcampBaseView):
    """Render the detail page for a single event of the barcamp."""
    template_name = 'barcamp/barcamp-event.html'
    template_name_iphone = 'barcamp/iphone/barcamp-event.html'

    def view(self, *args, **kwargs):
        self.data['event'] = get_object_or_404(
            models.Event.objects.select_related(),
            pk=kwargs['event_pk'], barcamp=self.barcamp)
        return self.render()
class BarcampCreateSideEventView(BarcampBaseView):
    """Create a side event."""
    template_name = 'barcamp/create-sideevent.html'
    def view(self, *args, **kwargs):
        if self.request.method == 'POST':
            form = forms.CreateSideEventForm(self.request.POST,
                    barcamp=self.barcamp)
            if form.is_valid():
                # Bind the new event to this barcamp before persisting.
                event = form.save(commit=False)
                event.barcamp = self.barcamp
                event.save()
                return self.redirect_to_schedule()
        else:
            # GET: show an unbound form.
            form = forms.CreateSideEventForm(barcamp=self.barcamp)
        self.data['form'] = form
        return self.render()
class BarcampEditSideEventView(BarcampBaseView):
    """Edit the details of a side event."""
    template_name = 'barcamp/edit-sideevent.html'
    def view(self, *args, **kwargs):
        # Scope the lookup to this barcamp so foreign event pks 404.
        event = get_object_or_404(models.SideEvent.objects.filter(
            barcamp=self.barcamp, pk=kwargs['event_pk']))
        form = forms.CreateSideEventForm(instance=event)
        if self.request.method == 'POST':
            form = forms.CreateSideEventForm(self.request.POST, instance=event)
            # NOTE(review): the create view passes barcamp= into the form
            # constructor, while here it is assigned as an attribute after
            # construction -- confirm CreateSideEventForm treats both the
            # same way (e.g. for validation that depends on the barcamp).
            form.barcamp = self.barcamp
            if form.is_valid():
                event = form.save(commit=False)
                event.barcamp = self.barcamp
                event.save()
                return self.redirect_to_schedule()
        self.data['form'] = form
        return self.render()
class BarcampDeleteSideEventView(BarcampDeleteTalkView):
    """Delete a side event."""
    def view(self, *args, **kwargs):
        # Scope the lookup to this barcamp so foreign event pks 404.
        event = get_object_or_404(models.SideEvent.objects.filter(
            barcamp=self.barcamp, pk=kwargs['event_pk']))
        if self.request.method != 'POST':
            # GET: render the confirmation page.
            self.data['event'] = event
            return self.render()
        event.delete()
        return self.redirect_to_schedule()
|
|
import imp
import re
from functools import partial
from inspect import getargspec
from django.conf import settings
from django.template.context import Context, RequestContext, ContextPopException
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.utils.functional import Promise
from django.utils.text import smart_split, unescape_string_literal, get_text_list
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.translation import ugettext_lazy
from django.utils.safestring import SafeData, EscapeData, mark_safe, mark_for_escaping
from django.utils.formats import localize
from django.utils.html import escape
from django.utils.module_loading import module_has_submodule
# Token types emitted by the Lexer.
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
# Comments containing this marker have their content preserved for translators.
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
ALLOWED_VARIABLE_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.'
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end delimiters
tag_re = re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
                                          re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
                                          re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END)))
# global dictionary of libraries that have been loaded using get_library
libraries = {}
# global list of libraries to load by default for a new parser
builtins = []
# True if TEMPLATE_STRING_IF_INVALID contains a format string (%s). None means
# uninitialised.
invalid_var_format_string = None
class TemplateSyntaxError(Exception):
    """Raised when a template contains invalid tag or filter syntax."""
class TemplateDoesNotExist(Exception):
    """Raised when no loader can find the requested template."""
class TemplateEncodingError(Exception):
    """Raised when template source cannot be decoded to unicode."""
class VariableDoesNotExist(Exception):
    """Raised by Variable resolution when a lookup fails.

    ``msg`` is a %-style format string and ``params`` holds the values
    interpolated into it when the exception is displayed.
    """
    def __init__(self, msg, params=()):
        self.msg = msg
        self.params = params
    def __str__(self):
        # Python 2: delegate to __unicode__ and encode for byte-string contexts.
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        # Decode params with errors='replace' so displaying the error never raises.
        return self.msg % tuple([force_unicode(p, errors='replace') for p in self.params])
class InvalidTemplateLibrary(Exception):
    """Raised when a tag library cannot be imported or is malformed."""
class Origin(object):
    """Abstract description of where a template's source came from."""
    def __init__(self, name):
        self.name = name
    def reload(self):
        # Subclasses must return the template source string.
        raise NotImplementedError
    def __str__(self):
        return self.name
class StringOrigin(Origin):
    """Origin for templates compiled directly from a string."""
    def __init__(self, source):
        # String templates have no meaningful name; use the sentinel.
        super(StringOrigin, self).__init__(UNKNOWN_SOURCE)
        self.source = source
    def reload(self):
        # The "reloaded" source is just the original string.
        return self.source
class Template(object):
    """A compiled template: a NodeList plus identifying metadata."""
    def __init__(self, template_string, origin=None, name='<Unknown Template>'):
        try:
            template_string = smart_unicode(template_string)
        except UnicodeDecodeError:
            raise TemplateEncodingError("Templates can only be constructed from unicode or UTF-8 strings.")
        # Keep a StringOrigin in debug mode so errors can show the source.
        if settings.TEMPLATE_DEBUG and origin is None:
            origin = StringOrigin(template_string)
        self.nodelist = compile_string(template_string, origin)
        self.name = name
    def __iter__(self):
        # Flatten: yield every subnode of every top-level node.
        for node in self.nodelist:
            for subnode in node:
                yield subnode
    def _render(self, context):
        return self.nodelist.render(context)
    def render(self, context):
        "Display stage -- can be called many times"
        # Push/pop render_context so per-render node state does not leak
        # between successive render() calls of the same template.
        context.render_context.push()
        try:
            return self._render(context)
        finally:
            context.render_context.pop()
def compile_string(template_string, origin):
    """Compile template_string into a NodeList ready for rendering."""
    # Use the debugging lexer/parser when TEMPLATE_DEBUG is on so errors
    # can be annotated with source positions.
    if settings.TEMPLATE_DEBUG:
        from debug import DebugLexer, DebugParser
        lexer_class, parser_class = DebugLexer, DebugParser
    else:
        lexer_class, parser_class = Lexer, Parser
    tokens = lexer_class(template_string, origin).tokenize()
    return parser_class(tokens).parse()
class Token(object):
    """A lexed chunk of template source (text, variable, block or comment)."""
    def __init__(self, token_type, contents):
        # token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK or TOKEN_COMMENT.
        self.token_type, self.contents = token_type, contents
        # Filled in by the lexer after construction.
        self.lineno = None
    def __str__(self):
        return '<%s token: "%s...">' % \
            ({TOKEN_TEXT: 'Text', TOKEN_VAR: 'Var', TOKEN_BLOCK: 'Block', TOKEN_COMMENT: 'Comment'}[self.token_type],
            self.contents[:20].replace('\n', ''))
    def split_contents(self):
        # Split contents on whitespace, but keep _("...")/_('...') translation
        # markers (which may themselves contain spaces) together as one bit.
        split = []
        bits = iter(smart_split(self.contents))
        for bit in bits:
            # Handle translation-marked template pieces
            if bit.startswith('_("') or bit.startswith("_('"):
                # Consume following bits until the closing quote+paren.
                sentinal = bit[2] + ')'
                trans_bit = [bit]
                while not bit.endswith(sentinal):
                    bit = bits.next()
                    trans_bit.append(bit)
                bit = ' '.join(trans_bit)
            split.append(bit)
        return split
class Lexer(object):
    """Splits a raw template string into a stream of Tokens."""
    def __init__(self, template_string, origin):
        self.template_string = template_string
        self.origin = origin
        self.lineno = 1
    def tokenize(self):
        "Return a list of tokens from a given template_string."
        # tag_re.split() alternates literal text and tag matches, so every
        # second (non-empty) bit is a tag.
        in_tag = False
        result = []
        for bit in tag_re.split(self.template_string):
            if bit:
                result.append(self.create_token(bit, in_tag))
            in_tag = not in_tag
        return result
    def create_token(self, token_string, in_tag):
        """
        Convert the given token string into a new Token object and return it.
        If in_tag is True, we are processing something that matched a tag,
        otherwise it should be treated as a literal string.
        """
        if in_tag:
            if token_string.startswith(VARIABLE_TAG_START):
                token = Token(TOKEN_VAR, token_string[len(VARIABLE_TAG_START):-len(VARIABLE_TAG_END)].strip())
            elif token_string.startswith(BLOCK_TAG_START):
                token = Token(TOKEN_BLOCK, token_string[len(BLOCK_TAG_START):-len(BLOCK_TAG_END)].strip())
            elif token_string.startswith(COMMENT_TAG_START):
                content = ''
                # BUGFIX: str.find() returns -1 (truthy) when the marker is
                # absent, so the previous bare truthiness test kept the
                # content of every comment that did NOT contain the
                # translators marker, inverting the intent. Compare
                # explicitly so only comments actually containing the
                # marker keep their content.
                if token_string.find(TRANSLATOR_COMMENT_MARK) >= 0:
                    content = token_string[len(COMMENT_TAG_START):-len(COMMENT_TAG_END)].strip()
                token = Token(TOKEN_COMMENT, content)
        else:
            token = Token(TOKEN_TEXT, token_string)
        # Track line numbers so error messages can point at the source line.
        token.lineno = self.lineno
        self.lineno += token_string.count('\n')
        return token
class Parser(object):
    """Turns a token stream into a NodeList, dispatching block tags to the
    compilation functions registered in self.tags."""
    def __init__(self, tokens):
        self.tokens = tokens
        self.tags = {}
        self.filters = {}
        # Preload the default tag/filter libraries.
        for lib in builtins:
            self.add_library(lib)
    def parse(self, parse_until=None):
        # parse_until lists block-tag names that terminate this nodelist
        # (e.g. ['endif']); the terminating token is pushed back for the caller.
        if parse_until is None: parse_until = []
        nodelist = self.create_nodelist()
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_TEXT:
                self.extend_nodelist(nodelist, TextNode(token.contents), token)
            elif token.token_type == TOKEN_VAR:
                if not token.contents:
                    self.empty_variable(token)
                filter_expression = self.compile_filter(token.contents)
                var_node = self.create_variable_node(filter_expression)
                self.extend_nodelist(nodelist, var_node,token)
            elif token.token_type == TOKEN_BLOCK:
                if token.contents in parse_until:
                    # put token back on token list so calling code knows why it terminated
                    self.prepend_token(token)
                    return nodelist
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    self.empty_block_tag(token)
                # execute callback function for this tag and append resulting node
                self.enter_command(command, token)
                try:
                    compile_func = self.tags[command]
                except KeyError:
                    self.invalid_block_tag(token, command, parse_until)
                try:
                    compiled_result = compile_func(self, token)
                except TemplateSyntaxError, e:
                    # compile_function_error() may swallow the error (DebugParser).
                    if not self.compile_function_error(token, e):
                        raise
                self.extend_nodelist(nodelist, compiled_result, token)
                self.exit_command()
        if parse_until:
            # Ran out of tokens with block tags still open.
            self.unclosed_block_tag(parse_until)
        return nodelist
    def skip_past(self, endtag):
        # Discard tokens until the given end tag is seen.
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
                return
        self.unclosed_block_tag([endtag])
    def create_variable_node(self, filter_expression):
        return VariableNode(filter_expression)
    def create_nodelist(self):
        return NodeList()
    def extend_nodelist(self, nodelist, node, token):
        # must_be_first nodes may only be preceded by text nodes.
        if node.must_be_first and nodelist:
            try:
                if nodelist.contains_nontext:
                    raise AttributeError
            except AttributeError:
                raise TemplateSyntaxError("%r must be the first tag in the template." % node)
        if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
            nodelist.contains_nontext = True
        nodelist.append(node)
    def enter_command(self, command, token):
        pass
    def exit_command(self):
        pass
    def error(self, token, msg):
        # Hook: subclasses (e.g. DebugParser) attach source details here.
        return TemplateSyntaxError(msg)
    def empty_variable(self, token):
        raise self.error(token, "Empty variable tag")
    def empty_block_tag(self, token):
        raise self.error(token, "Empty block tag")
    def invalid_block_tag(self, token, command, parse_until=None):
        if parse_until:
            raise self.error(token, "Invalid block tag: '%s', expected %s" % (command, get_text_list(["'%s'" % p for p in parse_until])))
        raise self.error(token, "Invalid block tag: '%s'" % command)
    def unclosed_block_tag(self, parse_until):
        raise self.error(None, "Unclosed tags: %s " % ', '.join(parse_until))
    def compile_function_error(self, token, e):
        pass
    def next_token(self):
        return self.tokens.pop(0)
    def prepend_token(self, token):
        self.tokens.insert(0, token)
    def delete_first_token(self):
        del self.tokens[0]
    def add_library(self, lib):
        self.tags.update(lib.tags)
        self.filters.update(lib.filters)
    def compile_filter(self, token):
        "Convenient wrapper for FilterExpression"
        return FilterExpression(token, self)
    def find_filter(self, filter_name):
        if filter_name in self.filters:
            return self.filters[filter_name]
        else:
            raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
class TokenParser(object):
    """
    Subclass this and implement the top() method to parse a template line. When
    instantiating the parser, pass in the line from the Django template parser.
    The parser's "tagname" instance-variable stores the name of the tag that
    the filter was called with.
    """
    def __init__(self, subject):
        self.subject = subject
        # Current scan position within subject.
        self.pointer = 0
        # Stack of previous pointer positions, consumed by back().
        self.backout = []
        self.tagname = self.tag()
    def top(self):
        "Overload this method to do the actual parsing and return the result."
        raise NotImplementedError()
    def more(self):
        "Returns True if there is more stuff in the tag."
        return self.pointer < len(self.subject)
    def back(self):
        "Undoes the last microparser. Use this for lookahead and backtracking."
        if not len(self.backout):
            raise TemplateSyntaxError("back called without some previous parsing")
        self.pointer = self.backout.pop()
    def tag(self):
        "A microparser that just returns the next tag from the line."
        subject = self.subject
        i = self.pointer
        if i >= len(subject):
            raise TemplateSyntaxError("expected another tag, found end of string: %s" % subject)
        p = i
        # Consume the non-whitespace run, then skip trailing whitespace.
        while i < len(subject) and subject[i] not in (' ', '\t'):
            i += 1
        s = subject[p:i]
        while i < len(subject) and subject[i] in (' ', '\t'):
            i += 1
        self.backout.append(self.pointer)
        self.pointer = i
        return s
    def value(self):
        "A microparser that parses for a value: some string constant or variable name."
        subject = self.subject
        i = self.pointer
        def next_space_index(subject, i):
            "Increment pointer until a real space (i.e. a space not within quotes) is encountered"
            while i < len(subject) and subject[i] not in (' ', '\t'):
                if subject[i] in ('"', "'"):
                    # Skip over the whole quoted section.
                    c = subject[i]
                    i += 1
                    while i < len(subject) and subject[i] != c:
                        i += 1
                    if i >= len(subject):
                        raise TemplateSyntaxError("Searching for value. Unexpected end of string in column %d: %s" % (i, subject))
                i += 1
            return i
        if i >= len(subject):
            raise TemplateSyntaxError("Searching for value. Expected another value but found end of string: %s" % subject)
        if subject[i] in ('"', "'"):
            # Quoted value: scan to the matching closing quote first.
            p = i
            i += 1
            while i < len(subject) and subject[i] != subject[p]:
                i += 1
            if i >= len(subject):
                raise TemplateSyntaxError("Searching for value. Unexpected end of string in column %d: %s" % (i, subject))
            i += 1
            # Continue parsing until next "real" space, so that filters are also included
            i = next_space_index(subject, i)
            res = subject[p:i]
            while i < len(subject) and subject[i] in (' ', '\t'):
                i += 1
            self.backout.append(self.pointer)
            self.pointer = i
            return res
        else:
            # Unquoted value: read up to the next unquoted space.
            p = i
            i = next_space_index(subject, i)
            s = subject[p:i]
            while i < len(subject) and subject[i] in (' ', '\t'):
                i += 1
            self.backout.append(self.pointer)
            self.pointer = i
            return s
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open' : re.escape("_("),
'i18n_close' : re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:%(filter_sep)s
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': "\w\." ,
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE|re.VERBOSE)
class FilterExpression(object):
    r"""
    Parses a variable token and its optional filters (all as a single string),
    and return a list of tuples of the filter name and arguments.
    Sample:
        >>> token = 'variable|default:"Default value"|date:"Y-m-d"'
        >>> p = Parser('')
        >>> fe = FilterExpression(token, p)
        >>> len(fe.filters)
        2
        >>> fe.var
        <Variable: 'variable'>
    This class should never be instantiated outside of the
    get_filters_from_token helper function.
    """
    def __init__(self, token, parser):
        self.token = token
        matches = filter_re.finditer(token)
        var_obj = None
        filters = []
        upto = 0
        for match in matches:
            start = match.start()
            # Matches must be contiguous; a gap means unparsable characters.
            if upto != start:
                raise TemplateSyntaxError("Could not parse some characters: %s|%s|%s" % \
                        (token[:upto], token[upto:start], token[start:]))
            if var_obj is None:
                # The first match is the variable itself (or a constant literal).
                var, constant = match.group("var", "constant")
                if constant:
                    try:
                        var_obj = Variable(constant).resolve({})
                    except VariableDoesNotExist:
                        var_obj = None
                elif var is None:
                    raise TemplateSyntaxError("Could not find variable at start of %s." % token)
                else:
                    var_obj = Variable(var)
            else:
                # Subsequent matches are |filter[:argument] segments.
                filter_name = match.group("filter_name")
                args = []
                constant_arg, var_arg = match.group("constant_arg", "var_arg")
                if constant_arg:
                    # (False, value): already-resolved constant argument.
                    args.append((False, Variable(constant_arg).resolve({})))
                elif var_arg:
                    # (True, Variable): resolved against the context at render time.
                    args.append((True, Variable(var_arg)))
                filter_func = parser.find_filter(filter_name)
                self.args_check(filter_name, filter_func, args)
                filters.append((filter_func, args))
            upto = match.end()
        if upto != len(token):
            raise TemplateSyntaxError("Could not parse the remainder: '%s' from '%s'" % (token[upto:], token))
        self.filters = filters
        self.var = var_obj
    def resolve(self, context, ignore_failures=False):
        if isinstance(self.var, Variable):
            try:
                obj = self.var.resolve(context)
            except VariableDoesNotExist:
                if ignore_failures:
                    obj = None
                else:
                    if settings.TEMPLATE_STRING_IF_INVALID:
                        global invalid_var_format_string
                        # Cache whether the setting contains a %s placeholder.
                        if invalid_var_format_string is None:
                            invalid_var_format_string = '%s' in settings.TEMPLATE_STRING_IF_INVALID
                        if invalid_var_format_string:
                            return settings.TEMPLATE_STRING_IF_INVALID % self.var
                        return settings.TEMPLATE_STRING_IF_INVALID
                    else:
                        obj = settings.TEMPLATE_STRING_IF_INVALID
        else:
            obj = self.var
        # Apply each filter in turn, resolving variable arguments first.
        for func, args in self.filters:
            arg_vals = []
            for lookup, arg in args:
                if not lookup:
                    arg_vals.append(mark_safe(arg))
                else:
                    arg_vals.append(arg.resolve(context))
            if getattr(func, 'needs_autoescape', False):
                new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
            else:
                new_obj = func(obj, *arg_vals)
            # Propagate safe/needs-escaping status through the filter chain.
            if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
                obj = mark_safe(new_obj)
            elif isinstance(obj, EscapeData):
                obj = mark_for_escaping(new_obj)
            else:
                obj = new_obj
        return obj
    def args_check(name, func, provided):
        # Validates that `provided` is compatible with func's signature; the
        # filter's first positional argument is the input and excluded.
        provided = list(provided)
        plen = len(provided)
        # Check to see if a decorator is providing the real function.
        func = getattr(func, '_decorated_function', func)
        args, varargs, varkw, defaults = getargspec(func)
        # First argument is filter input.
        args.pop(0)
        if defaults:
            nondefs = args[:-len(defaults)]
        else:
            nondefs = args
        # Args without defaults must be provided.
        try:
            for arg in nondefs:
                provided.pop(0)
        except IndexError:
            # Not enough
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" % (name, len(nondefs), plen))
        # Defaults can be overridden.
        defaults = defaults and list(defaults) or []
        try:
            for parg in provided:
                defaults.pop(0)
        except IndexError:
            # Too many.
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" % (name, len(nondefs), plen))
        return True
    args_check = staticmethod(args_check)
    def __str__(self):
        return self.token
def resolve_variable(path, context):
    """
    Resolve ``path`` (which may contain attribute syntax) against ``context``.

    Deprecated; use the Variable class instead.
    """
    variable = Variable(path)
    return variable.resolve(context)
class Variable(object):
    r"""
    A template variable, resolvable against a given context. The variable may be
    a hard-coded string (if it begins and ends with single or double quote
    marks)::
        >>> c = {'article': {'section':u'News'}}
        >>> Variable('article.section').resolve(c)
        u'News'
        >>> Variable('article').resolve(c)
        {'section': u'News'}
        >>> class AClass: pass
        >>> c = AClass()
        >>> c.article = AClass()
        >>> c.article.section = u'News'
    (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
    """
    def __init__(self, var):
        self.var = var
        self.literal = None      # set for numeric/string literals
        self.lookups = None      # tuple of lookup bits for real variables
        self.translate = False   # wrap the resolved value in ugettext_lazy
        try:
            # First try to treat this variable as a number.
            #
            # Note that this could cause an OverflowError here that we're not
            # catching. Since this should only happen at compile time, that's
            # probably OK.
            self.literal = float(var)
            # So it's a float... is it an int? If the original value contained a
            # dot or an "e" then it was a float, not an int.
            if '.' not in var and 'e' not in var.lower():
                self.literal = int(self.literal)
            # "2." is invalid
            if var.endswith('.'):
                raise ValueError
        except ValueError:
            # A ValueError means that the variable isn't a number.
            if var.startswith('_(') and var.endswith(')'):
                # The result of the lookup should be translated at rendering
                # time.
                self.translate = True
                var = var[2:-1]
            # If it's wrapped with quotes (single or double), then
            # we're also dealing with a literal.
            try:
                self.literal = mark_safe(unescape_string_literal(var))
            except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bonafide variable
                if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                    raise TemplateSyntaxError("Variables and attributes may not begin with underscores: '%s'" % var)
                self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
    def resolve(self, context):
        """Resolve this variable against a given context."""
        if self.lookups is not None:
            # We're dealing with a variable that needs to be resolved
            value = self._resolve_lookup(context)
        else:
            # We're dealing with a literal, so it's already been "resolved"
            value = self.literal
        if self.translate:
            return ugettext_lazy(value)
        return value
    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.var)
    def __str__(self):
        return self.var
    def _resolve_lookup(self, context):
        """
        Performs resolution of a real variable (i.e. not a literal) against the
        given context.
        As indicated by the method's name, this method is an implementation
        detail and shouldn't be called by external code. Use Variable.resolve()
        instead.
        """
        # Lookup order per bit: dict key, then attribute, then list index.
        current = context
        try: # catch-all for silent variable failures
            for bit in self.lookups:
                try: # dictionary lookup
                    current = current[bit]
                except (TypeError, AttributeError, KeyError):
                    try: # attribute lookup
                        current = getattr(current, bit)
                    except (TypeError, AttributeError):
                        try: # list-index lookup
                            current = current[int(bit)]
                        except (IndexError, # list index out of range
                                ValueError, # invalid literal for int()
                                KeyError, # current is a dict without `int(bit)` key
                                TypeError, # unsubscriptable object
                                ):
                            raise VariableDoesNotExist("Failed lookup for key [%s] in %r", (bit, current)) # missing attribute
                if callable(current):
                    if getattr(current, 'do_not_call_in_templates', False):
                        pass
                    elif getattr(current, 'alters_data', False):
                        current = settings.TEMPLATE_STRING_IF_INVALID
                    else:
                        try: # method call (assuming no args required)
                            current = current()
                        except TypeError: # arguments *were* required
                            # GOTCHA: This will also catch any TypeError
                            # raised in the function itself.
                            current = settings.TEMPLATE_STRING_IF_INVALID # invalid method call
        except Exception, e:
            if getattr(e, 'silent_variable_failure', False):
                current = settings.TEMPLATE_STRING_IF_INVALID
            else:
                raise
        return current
class Node(object):
    """Base class for all template nodes."""
    # Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes).
    must_be_first = False
    child_nodelists = ('nodelist',)
    def render(self, context):
        "Return the node rendered as a string"
        pass
    def __iter__(self):
        yield self
    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes (within this node and its nodelist) of the given type"
        matches = []
        if isinstance(self, nodetype):
            matches.append(self)
        # Recurse into any child nodelists this node type declares.
        for attr in self.child_nodelists:
            children = getattr(self, attr, None)
            if children:
                matches.extend(children.get_nodes_by_type(nodetype))
        return matches
class NodeList(list):
    """A list of Nodes that knows how to render itself."""
    # Set to True the first time a non-TextNode is inserted by
    # extend_nodelist().
    contains_nontext = False
    def render(self, context):
        # Non-Node entries are rendered verbatim; Nodes go through
        # render_node() so subclasses (DebugNodeList) can hook in.
        rendered = []
        for item in self:
            if isinstance(item, Node):
                rendered.append(self.render_node(item, context))
            else:
                rendered.append(item)
        return mark_safe(''.join([force_unicode(piece) for piece in rendered]))
    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes of the given type"
        found = []
        for item in self:
            found.extend(item.get_nodes_by_type(nodetype))
        return found
    def render_node(self, node, context):
        return node.render(context)
class TextNode(Node):
    """A literal chunk of template text."""
    def __init__(self, s):
        self.s = s
    def __repr__(self):
        return "<Text Node: '%s'>" % smart_str(self.s[:25], 'ascii',
                                               errors='replace')
    def render(self, context):
        # Literal text renders as itself, regardless of context.
        return self.s
def _render_value_in_context(value, context):
    """
    Converts any value to a string to become part of a rendered template. This
    means escaping, if required, and conversion to a unicode object. If value
    is a string, it is expected to have already been translated.
    """
    value = force_unicode(localize(value, use_l10n=context.use_l10n))
    needs_escape = ((context.autoescape and not isinstance(value, SafeData))
                    or isinstance(value, EscapeData))
    if needs_escape:
        return escape(value)
    return value
class VariableNode(Node):
    """Renders a FilterExpression (a ``{{ var|filters }}`` tag)."""
    def __init__(self, filter_expression):
        self.filter_expression = filter_expression
    def __repr__(self):
        return "<Variable Node: %s>" % self.filter_expression
    def render(self, context):
        # Unicode conversion can fail sometimes for reasons out of our
        # control (e.g. exception rendering); in that case, fail quietly
        # with empty output.
        try:
            resolved = self.filter_expression.resolve(context)
        except UnicodeDecodeError:
            return ''
        return _render_value_in_context(resolved, context)
def generic_tag_compiler(params, defaults, name, node_class, parser, token):
    "Returns a template.Node subclass."
    # All bits after the tag name are the tag's arguments.
    tag_args = token.split_contents()[1:]
    bmax = len(params)
    bmin = bmax - (defaults and len(defaults) or 0)
    if not (bmin <= len(tag_args) <= bmax):
        if bmin == bmax:
            message = "%s takes %s arguments" % (name, bmin)
        else:
            message = "%s takes between %s and %s arguments" % (name, bmin, bmax)
        raise TemplateSyntaxError(message)
    return node_class(tag_args)
class Library(object):
    """A registry of template tags and filters, plus the decorators
    (tag, filter, simple_tag, assignment_tag, inclusion_tag) that
    register them."""
    def __init__(self):
        self.filters = {}
        self.tags = {}
    def tag(self, name=None, compile_function=None):
        # Supports all decorator call styles; see the inline comments.
        if name == None and compile_function == None:
            # @register.tag()
            return self.tag_function
        elif name != None and compile_function == None:
            if callable(name):
                # @register.tag
                return self.tag_function(name)
            else:
                # @register.tag('somename') or @register.tag(name='somename')
                def dec(func):
                    return self.tag(name, func)
                return dec
        elif name != None and compile_function != None:
            # register.tag('somename', somefunc)
            self.tags[name] = compile_function
            return compile_function
        else:
            raise InvalidTemplateLibrary("Unsupported arguments to Library.tag: (%r, %r)", (name, compile_function))
    def tag_function(self,func):
        self.tags[getattr(func, "_decorated_function", func).__name__] = func
        return func
    def filter(self, name=None, filter_func=None):
        # Mirrors tag() for filters.
        if name == None and filter_func == None:
            # @register.filter()
            return self.filter_function
        elif filter_func == None:
            if callable(name):
                # @register.filter
                return self.filter_function(name)
            else:
                # @register.filter('somename') or @register.filter(name='somename')
                def dec(func):
                    return self.filter(name, func)
                return dec
        elif name != None and filter_func != None:
            # register.filter('somename', somefunc)
            self.filters[name] = filter_func
            return filter_func
        else:
            raise InvalidTemplateLibrary("Unsupported arguments to Library.filter: (%r, %r)", (name, filter_func))
    def filter_function(self, func):
        self.filters[getattr(func, "_decorated_function", func).__name__] = func
        return func
    def simple_tag(self, func=None, takes_context=None, name=None):
        # Registers a function as a tag whose arguments are resolved
        # variables; the function's return value becomes the output.
        def dec(func):
            params, xx, xxx, defaults = getargspec(func)
            if takes_context:
                if params[0] == 'context':
                    params = params[1:]
                else:
                    raise TemplateSyntaxError("Any tag function decorated with takes_context=True must have a first argument of 'context'")
            class SimpleNode(Node):
                def __init__(self, vars_to_resolve):
                    self.vars_to_resolve = map(Variable, vars_to_resolve)
                def render(self, context):
                    resolved_vars = [var.resolve(context) for var in self.vars_to_resolve]
                    if takes_context:
                        func_args = [context] + resolved_vars
                    else:
                        func_args = resolved_vars
                    return func(*func_args)
            function_name = name or getattr(func, '_decorated_function', func).__name__
            compile_func = partial(generic_tag_compiler, params, defaults, function_name, SimpleNode)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        if func is None:
            # @register.simple_tag(...)
            return dec
        elif callable(func):
            # @register.simple_tag
            return dec(func)
        else:
            raise TemplateSyntaxError("Invalid arguments provided to simple_tag")
    def assignment_tag(self, func=None, takes_context=None, name=None):
        # Like simple_tag, but stores the result in the context variable
        # named by the trailing "as <var>" clause instead of outputting it.
        def dec(func):
            params, xx, xxx, defaults = getargspec(func)
            if takes_context:
                if params[0] == 'context':
                    params = params[1:]
                else:
                    raise TemplateSyntaxError("Any tag function decorated with takes_context=True must have a first argument of 'context'")
            class AssignmentNode(Node):
                def __init__(self, params_vars, target_var):
                    self.params_vars = map(Variable, params_vars)
                    self.target_var = target_var
                def render(self, context):
                    resolved_vars = [var.resolve(context) for var in self.params_vars]
                    if takes_context:
                        func_args = [context] + resolved_vars
                    else:
                        func_args = resolved_vars
                    context[self.target_var] = func(*func_args)
                    return ''
            def compile_func(parser, token):
                bits = token.split_contents()
                tag_name = bits[0]
                bits = bits[1:]
                params_max = len(params)
                defaults_length = defaults and len(defaults) or 0
                params_min = params_max - defaults_length
                if (len(bits) < 2 or bits[-2] != 'as'):
                    raise TemplateSyntaxError(
                        "'%s' tag takes at least 2 arguments and the "
                        "second last argument must be 'as'" % tag_name)
                params_vars = bits[:-2]
                target_var = bits[-1]
                if (len(params_vars) < params_min or
                        len(params_vars) > params_max):
                    if params_min == params_max:
                        raise TemplateSyntaxError(
                            "%s takes %s arguments" % (tag_name, params_min))
                    else:
                        raise TemplateSyntaxError(
                            "%s takes between %s and %s arguments"
                            % (tag_name, params_min, params_max))
                return AssignmentNode(params_vars, target_var)
            function_name = name or getattr(func, '_decorated_function', func).__name__
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        if func is None:
            # @register.assignment_tag(...)
            return dec
        elif callable(func):
            # @register.assignment_tag
            return dec(func)
        else:
            raise TemplateSyntaxError("Invalid arguments provided to assignment_tag")
    def inclusion_tag(self, file_name, context_class=Context, takes_context=False, name=None):
        # Registers a function whose returned dict is rendered through the
        # template named by file_name.
        def dec(func):
            params, xx, xxx, defaults = getargspec(func)
            if takes_context:
                if params[0] == 'context':
                    params = params[1:]
                else:
                    raise TemplateSyntaxError("Any tag function decorated with takes_context=True must have a first argument of 'context'")
            class InclusionNode(Node):
                def __init__(self, vars_to_resolve):
                    self.vars_to_resolve = map(Variable, vars_to_resolve)
                def render(self, context):
                    resolved_vars = [var.resolve(context) for var in self.vars_to_resolve]
                    if takes_context:
                        args = [context] + resolved_vars
                    else:
                        args = resolved_vars
                    dict = func(*args)
                    # Lazily load and cache the template on first render.
                    if not getattr(self, 'nodelist', False):
                        from django.template.loader import get_template, select_template
                        if isinstance(file_name, Template):
                            t = file_name
                        elif not isinstance(file_name, basestring) and is_iterable(file_name):
                            t = select_template(file_name)
                        else:
                            t = get_template(file_name)
                        self.nodelist = t.nodelist
                    new_context = context_class(dict, **{
                        'autoescape': context.autoescape,
                        'current_app': context.current_app,
                        'use_l10n': context.use_l10n,
                    })
                    # Copy across the CSRF token, if present, because inclusion
                    # tags are often used for forms, and we need instructions
                    # for using CSRF protection to be as simple as possible.
                    csrf_token = context.get('csrf_token', None)
                    if csrf_token is not None:
                        new_context['csrf_token'] = csrf_token
                    return self.nodelist.render(new_context)
            function_name = name or getattr(func, '_decorated_function', func).__name__
            compile_func = partial(generic_tag_compiler, params, defaults, function_name, InclusionNode)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        return dec
def import_library(taglib_module):
    """Load a template tag library module.
    Verifies that the library contains a 'register' attribute, and
    returns that attribute as the representation of the library
    """
    app_path, taglib = taglib_module.rsplit('.',1)
    app_module = import_module(app_path)
    try:
        mod = import_module(taglib_module)
    except ImportError, e:
        # If the ImportError is because the taglib submodule does not exist, that's not
        # an error that should be raised. If the submodule exists and raised an ImportError
        # on the attempt to load it, that we want to raise.
        if not module_has_submodule(app_module, taglib):
            return None
        else:
            raise InvalidTemplateLibrary("ImportError raised loading %s: %s" % (taglib_module, e))
    try:
        # The module-level `register` Library instance is the library.
        return mod.register
    except AttributeError:
        raise InvalidTemplateLibrary("Template library %s does not have a variable named 'register'" % taglib_module)
# Per-process cache of importable "<app>.templatetags" dotted paths; filled
# lazily by get_templatetags_modules().
templatetags_modules = []
def get_templatetags_modules():
    """Return the list of all available template tag modules.

    Caches the result for faster access.
    """
    global templatetags_modules
    if not templatetags_modules:
        # Built into a local first so a partially-filled list is never visible.
        _templatetags_modules = []
        # Populate list once per thread.
        for app_module in ['django'] + list(settings.INSTALLED_APPS):
            try:
                templatetag_module = '%s.templatetags' % app_module
                import_module(templatetag_module)
                _templatetags_modules.append(templatetag_module)
            except ImportError:
                # Apps without a templatetags package are simply skipped.
                continue
        templatetags_modules = _templatetags_modules
    return templatetags_modules
def get_library(library_name):
    """
    Load the template library module with the given name.

    Looks the library up in the ``libraries`` cache first; on a miss, every
    ``<app>.templatetags`` package is searched for a submodule named
    ``library_name``. The first hit is cached, so subsequent
    ``{% load somelib %}`` tags in the same process reuse the module.
    """
    lib = libraries.get(library_name, None)
    if lib:
        return lib
    tried_modules = []
    for templatetags_module in get_templatetags_modules():
        candidate = '%s.%s' % (templatetags_module, library_name)
        tried_modules.append(candidate)
        lib = import_library(candidate)
        if lib:
            libraries[library_name] = lib
            return lib
    raise InvalidTemplateLibrary("Template library %s not found, tried %s" % (library_name, ','.join(tried_modules)))
def add_to_builtins(module):
    # Import the library and make its tags/filters available to every template
    # without an explicit {% load %}.
    builtins.append(import_library(module))
# The default tags and filters are always available.
add_to_builtins('django.template.defaulttags')
add_to_builtins('django.template.defaultfilters')
|
|
#!/usr/bin/env python3
# -*-coding:utf-8-*-
"""
This module is used to extract features from the data
"""
import numpy as np
from scipy.fftpack import fft
from scipy.fftpack.realtransforms import dct
import python_speech_features
eps = 0.00000001
def file_length(soundParams):
    """Return the file length in seconds (frame count divided by frame rate)."""
    framerate, nframes = soundParams[2], soundParams[3]
    return nframes / framerate
def zcr(frame):
    """Zero-crossing rate: sign flips between consecutive samples, normalized
    by the number of sample pairs in the frame."""
    n = len(frame)
    # |diff(sign)| is 2 at every sign change; halving counts the crossings.
    crossings = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
    return crossings / (n - 1)
def energy(frame):
    """Short-term energy of the frame: mean of the squared amplitudes."""
    return np.mean(frame ** 2)
def energy_entropy(frame, numOfShortBlocks=10):
    """Entropy of energy: how evenly the frame's energy is spread over
    ``numOfShortBlocks`` equal sub-frames (higher = more uniform)."""
    # Total energy is taken over the *whole* frame, before any truncation.
    total_energy = np.sum(frame ** 2)
    n = len(frame)
    sub_len = int(np.floor(n / numOfShortBlocks))
    # Drop trailing samples so the frame splits into equal sub-frames.
    if n != sub_len * numOfShortBlocks:
        frame = frame[0:sub_len * numOfShortBlocks]
    # Fortran order puts sub-frame i into column i: shape (sub_len, blocks).
    sub_frames = frame.reshape(sub_len, numOfShortBlocks, order='F').copy()
    # Normalized per-sub-frame energies act as a probability distribution.
    probs = np.sum(sub_frames ** 2, axis=0) / (total_energy + eps)
    return -np.sum(probs * np.log2(probs + eps))
def spectral_centroid_and_spread(X, fs):
    """Spectral centroid and spread of a frame, given abs(FFT).

    Both values are normalized by the Nyquist frequency (fs/2) before being
    returned as a ``(centroid, spread)`` tuple.
    """
    nyquist = fs / 2.0
    # 1-based bin center frequencies, so the first bin has non-zero weight.
    bin_freqs = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))
    spectrum = X.copy()
    spectrum = spectrum / spectrum.max()
    total = np.sum(spectrum) + eps
    centroid = np.sum(bin_freqs * spectrum) / total
    # Spread uses the *unnormalized* centroid, matching the usual definition.
    spread = np.sqrt(np.sum(((bin_freqs - centroid) ** 2) * spectrum) / total)
    return (centroid / nyquist, spread / nyquist)
def avg_mfcc(sound_obj, avg=True):
    """Extract the MFCC from the sound object.

    Uses python_speech_features with 25 ms analysis windows and a 1 second
    hop (winstep is in seconds).  When ``avg`` is True the MFCCs are averaged
    over all frames into a single 13-element vector; otherwise the full
    (num_frames x 13) matrix is returned.
    """
    soundD = sound_obj["sound"]  # raw data
    sr = sound_obj["params"][2]  # samplerate
    # nf = sound_obj["params"][3]  # nframes
    all_mfcc = python_speech_features.mfcc(soundD, samplerate=sr, winlen=0.025, winstep=1)
    if avg:
        return np.mean(all_mfcc, axis=0)
    return all_mfcc
def mfcc_init_filter_banks(fs, nfft):
    """Computes the triangular filterbank for MFCC computation.

    Parameters
    ----------
    fs : sampling rate in Hz
    nfft : number of FFT bins the filterbank spans

    Returns
    -------
    (fbank, freqs)
        fbank is a (40, nfft) array of triangular filters (13 linearly
        spaced below ~1 kHz, 27 logarithmically spaced above); freqs holds
        the 42 triangle edge frequencies in Hz.
    """
    # filter bank params:
    lowfreq = 133.33
    linsc = 200/3.
    logsc = 1.0711703
    numLinFiltTotal = 13
    numLogFilt = 27
    # Total number of filters
    nFiltTotal = numLinFiltTotal + numLogFilt
    # Compute frequency points of the triangle:
    freqs = np.zeros(nFiltTotal+2)
    freqs[:numLinFiltTotal] = lowfreq + np.arange(numLinFiltTotal) * linsc
    freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** np.arange(1, numLogFilt + 3)
    # Each triangle is scaled so its area is constant (height ~ 2/width).
    heights = 2./(freqs[2:] - freqs[0:-2])
    # Compute filterbank coeff (in fft domain, in bins)
    fbank = np.zeros((nFiltTotal, nfft))
    nfreqs = np.arange(nfft) / (1. * nfft) * fs
    for i in range(nFiltTotal):
        lowTrFreq = freqs[i]
        cenTrFreq = freqs[i+1]
        highTrFreq = freqs[i+2]
        # Use the builtin `int` dtype: the `np.int` alias was deprecated in
        # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
        lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1, np.floor(cenTrFreq * nfft / fs) + 1, dtype=int)
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1, np.floor(highTrFreq * nfft / fs) + 1, dtype=int)
        rslope = heights[i] / (highTrFreq - cenTrFreq)
        # Rising edge then falling edge of triangle i.
        fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
        fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
    return fbank, freqs
def mfcc(X, fbank, nceps=13):
    """Compute the first ``nceps`` MFCCs of a frame from its FFT magnitude
    ``X`` using the triangular filterbank ``fbank``."""
    # Log filterbank energies, then a type-II orthonormal DCT decorrelates them.
    log_energies = np.log10(np.dot(X, fbank.T) + eps)
    return dct(log_energies, type=2, norm='ortho', axis=-1)[:nceps]
def extract_all_features0(sound_obj):
    """Extract the feature vector (frame-averaged MFCCs) from the sound object."""
    # Earlier versions also prepended file_length(sound_obj["params"]).
    averaged_mfcc = avg_mfcc(sound_obj)
    return averaged_mfcc
def features_labels0():
    """Return the 13 MFCC feature names, in order."""
    return [f"mfcc{i}" for i in range(13)]
def extract_all_features(sound_obj, wins=None, steps=None):
    """Extract the features from the sound object.

    Slides a short-term window over the normalized signal and computes, per
    window, 5 time/spectral features plus 13 MFCCs (18 values), then returns
    all per-window vectors flattened into one 1-D array.

    sound_obj: dict with "sound" (raw samples) and "params" (wave params
        tuple; index 2 = samplerate, index 3 = number of frames).
    wins: window size in samples (default: 50 ms worth of samples).
    steps: hop size in samples (default sized to yield roughly 15 windows).
    """
    sr = sound_obj["params"][2]  # samplerate
    nbs = sound_obj["params"][3]  # number of samples
    if wins is None:
        wins = int(0.050 * sr)
    if steps is None:
        steps = int(nbs/15 - wins)
    # Signal normalization
    signal = sound_obj["sound"]
    signal = signal / (2.0 ** 15)  # assumes 16-bit PCM input -- TODO confirm
    DC = signal.mean()
    MAX = (np.abs(signal)).max()
    # Remove DC offset and scale to ~[-1, 1]; tiny epsilon avoids div-by-zero.
    signal = (signal - DC) / (MAX + 0.0000000001)
    N = len(signal)  # total number of samples
    curPos = steps // 2  # skip the very beginning
    nFFT = wins // 2  # only the first half of the FFT magnitude is kept
    # compute the triangular filter banks used in the mfcc calculation
    #[fbank, _] = mfcc_init_filter_banks(sr, nFFT)
    totalNumOfFeatures = 5 + 13
    stFeatures = []
    while curPos + wins - 1 < N:  # for each short-term window until the end of signal
        x = signal[curPos:curPos+wins]  # get current window
        curPos = curPos + steps  # update window position
        X = abs(fft(x))  # get fft magnitude
        X = X[0:nFFT]  # normalize fft
        X = X / len(X)
        curFV = np.zeros(totalNumOfFeatures)
        curFV[0] = zcr(x)  # zero crossing rate
        curFV[1] = energy(x)  # short-term energy
        curFV[2] = energy_entropy(x)  # short-term entropy of energy
        [curFV[3], curFV[4]] = spectral_centroid_and_spread(X, sr)  # spectral centroid and spread
        # curFV[5] = stSpectralEntropy(X)  # spectral entropy
        # curFV[6] = stSpectralFlux(X, Xprev)  # spectral flux
        # curFV[7] = stSpectralRollOff(X, 0.90, sr)  # spectral rolloff
        # curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures+nceps, 0] = stMFCC(X, fbank, nceps).copy()    # MFCCs
        #
        # chromaNames, chromaF = stChromaFeatures(X, sr, nChroma, nFreqsPerChroma)
        # curFV[numOfTimeSpectralFeatures + nceps: numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF
        # curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF.std()
        #curFV[5:18] = mfcc(X, fbank, 13)
        #curFV[0:13] = mfcc(X, fbank, 13)
        # NOTE(review): winlen == the whole window, so this presumably yields a
        # single (1, 13) MFCC row that broadcasts into the 13 slots -- verify.
        curFV[5:18] = python_speech_features.mfcc(x, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
        # TEMP
        #curFV = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins, winstep=steps).T
        stFeatures.append(curFV)
    # stFeatures = np.array(stFeatures)
    # Flatten all per-window 18-vectors into one long 1-D feature vector.
    stFeatures = np.concatenate(stFeatures, 0).flatten()
    #stFeatures = np.mean(stFeatures, axis=0)
    # stFeatures = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
    # stFeatures = np.mean(stFeatures, axis=0)
    return stFeatures
    # sound_obj2 = sound_obj.copy()
    # sound_obj2["sound"] = signal
    #
    # # fl = file_length(sound_obj["params"])
    # test_mfcc_avg = avg_mfcc(sound_obj2)
    # # return np.concatenate(([fl], test_mfcc_avg))
    # return test_mfcc_avg
def features_labels():
    """Give a name to each feature: 5 time/spectral features then 13 MFCCs."""
    names = ["zrc", "energy", "en_ent", "centr", "spread"]
    names.extend("mfcc%d" % i for i in range(13))
    return names
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import collections
import inspect
import importlib.util
import sys
import traceback
import types
import discord
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .cog import Cog
__all__ = (
'when_mentioned',
'when_mentioned_or',
'Bot',
'AutoShardedBot',
)
def when_mentioned(bot, msg):
    """A callable that implements a command prefix equivalent to being mentioned.
    These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
    """
    # Discord renders both <@id> and <@!id> (nickname form) as a mention.
    uid = bot.user.id
    return [f'<@{uid}> ', f'<@!{uid}> ']
def when_mentioned_or(*prefixes):
    """A callable that implements when mentioned or other prefixes provided.
    These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
    Example
    --------
    .. code-block:: python3
        bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
    .. note::
        This callable returns another callable, so if this is done inside a custom
        callable, you must call the returned callable, for example:
    .. code-block:: python3
        async def get_prefix(bot, message):
            extras = await prefixes_for(message.guild) # returns a list
            return commands.when_mentioned_or(*extras)(bot, message)
    See Also
    ----------
    :func:`.when_mentioned`
    """
    def inner(bot, msg):
        # Mention prefixes come first, followed by the user-supplied ones.
        return when_mentioned(bot, msg) + list(prefixes)
    return inner
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
    # Sentinel type for BotBase's ``help_command`` default, so that an explicit
    # ``None`` (disable the help command) is distinguishable from "not given".
    def __repr__(self):
        return '<default-help-command>'
_default = _DefaultRepr()
class BotBase(GroupMixin):
    def __init__(self, command_prefix, help_command=_default, description=None, **options):
        super().__init__(**options)
        self.command_prefix = command_prefix
        self.extra_events = {}      # event name -> list of extra listeners
        self.__cogs = {}            # cog name -> Cog instance
        self.__extensions = {}      # extension name -> loaded module
        self._checks = []           # global checks, run for every command
        self._check_once = []       # global checks run once per invoke()
        self._before_invoke = None
        self._after_invoke = None
        self._help_command = None
        self.description = inspect.cleandoc(description) if description else ''
        self.owner_id = options.get('owner_id')
        self.owner_ids = options.get('owner_ids', set())
        self.strip_after_prefix = options.get('strip_after_prefix', False)
        # owner_id and owner_ids are mutually exclusive.
        if self.owner_id and self.owner_ids:
            raise TypeError('Both owner_id and owner_ids are set.')
        if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):
            raise TypeError(f'owner_ids must be a collection not {self.owner_ids.__class__!r}')
        # _default is a sentinel distinguishing "not given" from an explicit None.
        if help_command is _default:
            self.help_command = DefaultHelpCommand()
        else:
            self.help_command = help_command
    # internal helpers
    def dispatch(self, event_name, *args, **kwargs):
        # Dispatch to the normal client handlers first, then to any extra
        # listeners registered via add_listener()/listen().
        super().dispatch(event_name, *args, **kwargs)
        ev = 'on_' + event_name
        for event in self.extra_events.get(ev, []):
            self._schedule_event(event, ev, *args, **kwargs)
    async def close(self):
        """|coro|

        Unloads every extension and removes every cog, then closes the client.
        Errors from individual teardown hooks are deliberately swallowed so a
        misbehaving extension or cog cannot prevent shutdown.
        """
        for extension in tuple(self.__extensions):
            try:
                self.unload_extension(extension)
            except Exception:
                pass
        for cog in tuple(self.__cogs):
            try:
                self.remove_cog(cog)
            except Exception:
                pass
        await super().close()
    async def on_command_error(self, context, exception):
        """|coro|
        The default command error handler provided by the bot.
        By default this prints to :data:`sys.stderr` however it could be
        overridden to have a different implementation.
        This only fires if you do not specify any listeners for command error.
        """
        # A user-registered on_command_error listener takes precedence.
        if self.extra_events.get('on_command_error', None):
            return
        # Likewise a command-local or cog-local error handler.
        command = context.command
        if command and command.has_error_handler():
            return
        cog = context.cog
        if cog and cog.has_error_handler():
            return
        print(f'Ignoring exception in command {context.command}:', file=sys.stderr)
        traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
    # global check registration
    def check(self, func):
        r"""A decorator that adds a global check to the bot.
        A global check is similar to a :func:`.check` that is applied
        on a per command basis except it is run before any command checks
        have been verified and applies to every command the bot has.
        .. note::
            This function can either be a regular function or a coroutine.
        Similar to a command :func:`.check`\, this takes a single parameter
        of type :class:`.Context` and can only raise exceptions inherited from
        :exc:`.CommandError`.
        Example
        ---------
        .. code-block:: python3
            @bot.check
            def check_commands(ctx):
                return ctx.command.qualified_name in allowed_commands
        """
        # Thin decorator wrapper over add_check(); returns func unchanged.
        self.add_check(func)
        return func
def add_check(self, func, *, call_once=False):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
Parameters
-----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(self, func, *, call_once=False):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
    def check_once(self, func):
        r"""A decorator that adds a "call once" global check to the bot.
        Unlike regular global checks, this one is called only once
        per :meth:`.invoke` call.
        Regular global checks are called whenever a command is called
        or :meth:`.Command.can_run` is called. This type of check
        bypasses that and ensures that it's called only once, even inside
        the default help command.
        .. note::
            When using this function the :class:`.Context` sent to a group subcommand
            may only parse the parent command and not the subcommands due to it
            being invoked once per :meth:`.Bot.invoke` call.
        .. note::
            This function can either be a regular function or a coroutine.
        Similar to a command :func:`.check`\, this takes a single parameter
        of type :class:`.Context` and can only raise exceptions inherited from
        :exc:`.CommandError`.
        Example
        ---------
        .. code-block:: python3
            @bot.check_once
            def whitelist(ctx):
                return ctx.message.author.id in my_whitelist
        """
        # Same as check(), but registered into the once-per-invoke bucket.
        self.add_check(func, call_once=True)
        return func
async def can_run(self, ctx, *, call_once=False):
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
return await discord.utils.async_all(f(ctx) for f in data)
    async def is_owner(self, user):
        """|coro|
        Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
        this bot.
        If an :attr:`owner_id` is not set, it is fetched automatically
        through the use of :meth:`~.Bot.application_info`.
        .. versionchanged:: 1.3
            The function also checks if the application is team-owned if
            :attr:`owner_ids` is not set.
        Parameters
        -----------
        user: :class:`.abc.User`
            The user to check for.
        Returns
        --------
        :class:`bool`
            Whether the user is the owner.
        """
        if self.owner_id:
            return user.id == self.owner_id
        elif self.owner_ids:
            return user.id in self.owner_ids
        else:
            # Neither is configured: fetch the application info once and cache
            # the result on the instance for subsequent calls.
            app = await self.application_info()
            if app.team:
                self.owner_ids = ids = {m.id for m in app.team.members}
                return user.id in ids
            else:
                self.owner_id = owner_id = app.owner.id
                return user.id == owner_id
    def before_invoke(self, coro):
        """A decorator that registers a coroutine as a pre-invoke hook.
        A pre-invoke hook is called directly before the command is
        called. This makes it a useful function to set up database
        connections or any type of set up required.
        This pre-invoke hook takes a sole parameter, a :class:`.Context`.
        .. note::
            The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
            only called if all checks and argument parsing procedures pass
            without error. If any check or argument parsing procedures fail
            then the hooks are not called.
        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the pre-invoke hook.
        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The pre-invoke hook must be a coroutine.')
        # Only one pre-invoke hook is kept; registering replaces any previous one.
        self._before_invoke = coro
        return coro
    def after_invoke(self, coro):
        r"""A decorator that registers a coroutine as a post-invoke hook.
        A post-invoke hook is called directly after the command is
        called. This makes it a useful function to clean-up database
        connections or any type of clean up required.
        This post-invoke hook takes a sole parameter, a :class:`.Context`.
        .. note::
            Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
            checks and argument parsing procedures succeed. This hook is,
            however, **always** called regardless of the internal command
            callback raising an error (i.e. :exc:`.CommandInvokeError`\).
            This makes it ideal for clean-up scenarios.
        Parameters
        -----------
        coro: :ref:`coroutine <coroutine>`
            The coroutine to register as the post-invoke hook.
        Raises
        -------
        TypeError
            The coroutine passed is not actually a coroutine.
        """
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError('The post-invoke hook must be a coroutine.')
        # Only one post-invoke hook is kept; registering replaces any previous one.
        self._after_invoke = coro
        return coro
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`.listen`.
Parameters
-----------
func: :ref:`coroutine <coroutine>`
The function to call.
name: Optional[:class:`str`]
The name of the event to listen for. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python3
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise TypeError('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name: :class:`str`
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
    def listen(self, name=None):
        """A decorator that registers another function as an external
        event listener. Basically this allows you to listen to multiple
        events from different places e.g. such as :func:`.on_ready`
        The functions being listened to must be a :ref:`coroutine <coroutine>`.
        Example
        --------
        .. code-block:: python3
            @bot.listen()
            async def on_message(message):
                print('one')
            # in some other file...
            @bot.listen('on_message')
            async def my_message(message):
                print('two')
        Would print one and two in an unspecified order.
        Raises
        -------
        TypeError
            The function being listened to is not a coroutine.
        """
        def decorator(func):
            # Decorator form of add_listener(); returns func unchanged.
            self.add_listener(func, name)
            return func
        return decorator
    # cogs
    def add_cog(self, cog: Cog, *, override: bool = False) -> None:
        """Adds a "cog" to the bot.
        A cog is a class that has its own event listeners and commands.
        .. versionchanged:: 2.0
            :exc:`.ClientException` is raised when a cog with the same name
            is already loaded.
        Parameters
        -----------
        cog: :class:`.Cog`
            The cog to register to the bot.
        override: :class:`bool`
            If a previously loaded cog with the same name should be ejected
            instead of raising an error.
            .. versionadded:: 2.0
        Raises
        -------
        TypeError
            The cog does not inherit from :class:`.Cog`.
        CommandError
            An error happened during loading.
        .ClientException
            A cog with the same name is already loaded.
        """
        if not isinstance(cog, Cog):
            raise TypeError('cogs must derive from Cog')
        cog_name = cog.__cog_name__
        existing = self.__cogs.get(cog_name)
        if existing is not None:
            if not override:
                raise discord.ClientException(f'Cog named {cog_name!r} already loaded')
            # override=True: eject the old cog before installing the new one.
            self.remove_cog(cog_name)
        # _inject registers the cog's commands/listeners and may return a copy.
        cog = cog._inject(self)
        self.__cogs[cog_name] = cog
    def get_cog(self, name):
        """Gets the cog instance requested.
        If the cog is not found, ``None`` is returned instead.
        Parameters
        -----------
        name: :class:`str`
            The name of the cog you are requesting.
            This is equivalent to the name passed via keyword
            argument in class creation or the class name if unspecified.
        Returns
        --------
        Optional[:class:`Cog`]
            The cog that was requested. If not found, returns ``None``.
        """
        # Plain dict lookup; the name is matched exactly (case-sensitive).
        return self.__cogs.get(name)
    def remove_cog(self, name):
        """Removes a cog from the bot.
        All registered commands and event listeners that the
        cog has registered will be removed as well.
        If no cog is found then this method has no effect.
        Parameters
        -----------
        name: :class:`str`
            The name of the cog to remove.
        """
        cog = self.__cogs.pop(name, None)
        if cog is None:
            return
        # If the help command was bound to this cog, detach it first.
        help_command = self._help_command
        if help_command and help_command.cog is cog:
            help_command.cog = None
        # _eject unregisters the cog's commands and listeners from the bot.
        cog._eject(self)
    @property
    def cogs(self):
        """Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog name to cog."""
        # MappingProxyType prevents callers from mutating the registry directly.
        return types.MappingProxyType(self.__cogs)
    # extensions
    def _remove_module_references(self, name):
        """Detach every cog, command and listener that originated from module ``name``."""
        # find all references to the module
        # remove the cogs registered from the module
        for cogname, cog in self.__cogs.copy().items():
            if _is_submodule(name, cog.__module__):
                self.remove_cog(cogname)
        # remove all the commands from the module
        for cmd in self.all_commands.copy().values():
            if cmd.module is not None and _is_submodule(name, cmd.module):
                if isinstance(cmd, GroupMixin):
                    # Groups also drop their subcommands recursively.
                    cmd.recursively_remove_all_commands()
                self.remove_command(cmd.name)
        # remove all the listeners from the module
        for event_list in self.extra_events.copy().values():
            remove = []
            for index, event in enumerate(event_list):
                if event.__module__ is not None and _is_submodule(name, event.__module__):
                    remove.append(index)
            # Delete back-to-front so earlier indices stay valid.
            for index in reversed(remove):
                del event_list[index]
    def _call_module_finalizers(self, lib, key):
        """Run the extension's optional ``teardown`` hook and purge its modules."""
        try:
            func = getattr(lib, 'teardown')
        except AttributeError:
            pass
        else:
            try:
                func(self)
            except Exception:
                # teardown is best-effort; a failing hook must not block unloading.
                pass
        finally:
            self.__extensions.pop(key, None)
            sys.modules.pop(key, None)
            # Also evict submodules so a future load re-imports fresh code.
            name = lib.__name__
            for module in list(sys.modules.keys()):
                if _is_submodule(name, module):
                    del sys.modules[module]
    def _load_from_module_spec(self, spec, key):
        """Execute the extension module, call its ``setup`` entry point and register it."""
        # precondition: key not in self.__extensions
        lib = importlib.util.module_from_spec(spec)
        sys.modules[key] = lib
        try:
            spec.loader.exec_module(lib)
        except Exception as e:
            # Execution failed: roll back the sys.modules registration.
            del sys.modules[key]
            raise errors.ExtensionFailed(key, e) from e
        try:
            setup = getattr(lib, 'setup')
        except AttributeError:
            del sys.modules[key]
            raise errors.NoEntryPointError(key)
        try:
            setup(self)
        except Exception as e:
            # setup() failed: undo everything it may have partially registered.
            del sys.modules[key]
            self._remove_module_references(lib.__name__)
            self._call_module_finalizers(lib, key)
            raise errors.ExtensionFailed(key, e) from e
        else:
            self.__extensions[key] = lib
    def _resolve_name(self, name, package):
        # Resolve a possibly-relative extension name (e.g. ".foo") against package.
        try:
            return importlib.util.resolve_name(name, package)
        except ImportError:
            raise errors.ExtensionNotFound(name)
    def load_extension(self, name, *, package=None):
        """Loads an extension.
        An extension is a python module that contains commands, cogs, or
        listeners.
        An extension must have a global function, ``setup`` defined as
        the entry point on what to do when the extension is loaded. This entry
        point must have a single argument, the ``bot``.
        Parameters
        ------------
        name: :class:`str`
            The extension name to load. It must be dot separated like
            regular Python imports if accessing a sub-module. e.g.
            ``foo.test`` if you want to import ``foo/test.py``.
        package: Optional[:class:`str`]
            The package name to resolve relative imports with.
            This is required when loading an extension using a relative path, e.g ``.foo.test``.
            Defaults to ``None``.
            .. versionadded:: 1.7
        Raises
        --------
        ExtensionNotFound
            The extension could not be imported.
            This is also raised if the name of the extension could not
            be resolved using the provided ``package`` parameter.
        ExtensionAlreadyLoaded
            The extension is already loaded.
        NoEntryPointError
            The extension does not have a setup function.
        ExtensionFailed
            The extension or its setup function had an execution error.
        """
        name = self._resolve_name(name, package)
        if name in self.__extensions:
            raise errors.ExtensionAlreadyLoaded(name)
        # Locate the module without importing it; importing happens in
        # _load_from_module_spec so failures can be rolled back cleanly.
        spec = importlib.util.find_spec(name)
        if spec is None:
            raise errors.ExtensionNotFound(name)
        self._load_from_module_spec(spec, name)
    def unload_extension(self, name, *, package=None):
        """Unloads an extension.
        When the extension is unloaded, all commands, listeners, and cogs are
        removed from the bot and the module is un-imported.
        The extension can provide an optional global function, ``teardown``,
        to do miscellaneous clean-up if necessary. This function takes a single
        parameter, the ``bot``, similar to ``setup`` from
        :meth:`~.Bot.load_extension`.
        Parameters
        ------------
        name: :class:`str`
            The extension name to unload. It must be dot separated like
            regular Python imports if accessing a sub-module. e.g.
            ``foo.test`` if you want to import ``foo/test.py``.
        package: Optional[:class:`str`]
            The package name to resolve relative imports with.
            This is required when unloading an extension using a relative path, e.g ``.foo.test``.
            Defaults to ``None``.
            .. versionadded:: 1.7
        Raises
        -------
        ExtensionNotFound
            The name of the extension could not
            be resolved using the provided ``package`` parameter.
        ExtensionNotLoaded
            The extension was not loaded.
        """
        name = self._resolve_name(name, package)
        lib = self.__extensions.get(name)
        if lib is None:
            raise errors.ExtensionNotLoaded(name)
        # Strip the extension's cogs/commands/listeners, then run teardown
        # and purge its modules from sys.modules.
        self._remove_module_references(lib.__name__)
        self._call_module_finalizers(lib, name)
    def reload_extension(self, name, *, package=None):
        """Atomically reloads an extension.
        This replaces the extension with the same extension, only refreshed. This is
        equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
        except done in an atomic way. That is, if an operation fails mid-reload then
        the bot will roll-back to the prior working state.
        Parameters
        ------------
        name: :class:`str`
            The extension name to reload. It must be dot separated like
            regular Python imports if accessing a sub-module. e.g.
            ``foo.test`` if you want to import ``foo/test.py``.
        package: Optional[:class:`str`]
            The package name to resolve relative imports with.
            This is required when reloading an extension using a relative path, e.g ``.foo.test``.
            Defaults to ``None``.
            .. versionadded:: 1.7
        Raises
        -------
        ExtensionNotLoaded
            The extension was not loaded.
        ExtensionNotFound
            The extension could not be imported.
            This is also raised if the name of the extension could not
            be resolved using the provided ``package`` parameter.
        NoEntryPointError
            The extension does not have a setup function.
        ExtensionFailed
            The extension setup function had an execution error.
        """
        name = self._resolve_name(name, package)
        lib = self.__extensions.get(name)
        if lib is None:
            raise errors.ExtensionNotLoaded(name)
        # get the previous module states from sys modules
        # (snapshot taken so a failed reload can restore them verbatim)
        modules = {
            name: module
            for name, module in sys.modules.items()
            if _is_submodule(lib.__name__, name)
        }
        try:
            # Unload and then load the module...
            self._remove_module_references(lib.__name__)
            self._call_module_finalizers(lib, name)
            self.load_extension(name)
        except Exception:
            # if the load failed, the remnants should have been
            # cleaned from the load_extension function call
            # so let's load it from our old compiled library.
            lib.setup(self)
            self.__extensions[name] = lib
            # revert sys.modules back to normal and raise back to caller
            sys.modules.update(modules)
            raise
    @property
    def extensions(self):
        """Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only mapping of extension name to extension."""
        # Read-only view; extensions are managed via load/unload/reload.
        return types.MappingProxyType(self.__extensions)
    # help command stuff
    @property
    def help_command(self):
        # The currently installed HelpCommand, or None when disabled.
        return self._help_command
    @help_command.setter
    def help_command(self, value):
        if value is not None:
            if not isinstance(value, HelpCommand):
                raise TypeError('help_command must be a subclass of HelpCommand')
            # Swap: unbind the old help command before installing the new one.
            if self._help_command is not None:
                self._help_command._remove_from_bot(self)
            self._help_command = value
            value._add_to_bot(self)
        elif self._help_command is not None:
            # Assigning None removes the existing help command entirely.
            self._help_command._remove_from_bot(self)
            self._help_command = None
        else:
            self._help_command = None
    # command processing
    async def get_prefix(self, message):
        """|coro|
        Retrieves the prefix the bot is listening to
        with the message as a context.
        Parameters
        -----------
        message: :class:`discord.Message`
            The message context to get the prefix of.
        Returns
        --------
        Union[List[:class:`str`], :class:`str`]
            A list of prefixes or a single prefix that the bot is
            listening for.
        """
        prefix = ret = self.command_prefix
        # command_prefix may be a callable (sync or async) producing the prefix.
        if callable(prefix):
            ret = await discord.utils.maybe_coroutine(prefix, self, message)
        if not isinstance(ret, str):
            try:
                ret = list(ret)
            except TypeError:
                # It's possible that a generator raised this exception. Don't
                # replace it with our own error if that's the case.
                if isinstance(ret, collections.abc.Iterable):
                    raise
                raise TypeError("command_prefix must be plain string, iterable of strings, or callable "
                                f"returning either of these, not {ret.__class__.__name__}")
            if not ret:
                raise ValueError("Iterable command_prefix must contain at least one prefix")
        return ret
    async def get_context(self, message, *, cls=Context):
        r"""|coro|
        Returns the invocation context from the message.
        This is a more low-level counter-part for :meth:`.process_commands`
        to allow users more fine grained control over the processing.
        The returned context is not guaranteed to be a valid invocation
        context, :attr:`.Context.valid` must be checked to make sure it is.
        If the context is not valid then it is not a valid candidate to be
        invoked under :meth:`~.Bot.invoke`.
        Parameters
        -----------
        message: :class:`discord.Message`
            The message to get the invocation context from.
        cls
            The factory class that will be used to create the context.
            By default, this is :class:`.Context`. Should a custom
            class be provided, it must be similar enough to :class:`.Context`\'s
            interface.
        Returns
        --------
        :class:`.Context`
            The invocation context. The type of this can change via the
            ``cls`` parameter.
        """
        view = StringView(message.content)
        ctx = cls(prefix=None, view=view, bot=self, message=message)
        # The bot never invokes its own messages; return an invalid context.
        if message.author.id == self.user.id:
            return ctx
        prefix = await self.get_prefix(message)
        invoked_prefix = prefix
        if isinstance(prefix, str):
            if not view.skip_string(prefix):
                return ctx
        else:
            try:
                # if the context class' __init__ consumes something from the view this
                # will be wrong. That seems unreasonable though.
                if message.content.startswith(tuple(prefix)):
                    # Record which of the candidate prefixes actually matched.
                    invoked_prefix = discord.utils.find(view.skip_string, prefix)
                else:
                    return ctx
            except TypeError:
                if not isinstance(prefix, list):
                    raise TypeError("get_prefix must return either a string or a list of string, "
                                    f"not {prefix.__class__.__name__}")
                # It's possible a bad command_prefix got us here.
                for value in prefix:
                    if not isinstance(value, str):
                        raise TypeError("Iterable command_prefix or list returned from get_prefix must "
                                        f"contain only strings, not {value.__class__.__name__}")
                # Getting here shouldn't happen
                raise
        if self.strip_after_prefix:
            # Allow whitespace between the prefix and the command name.
            view.skip_ws()
        invoker = view.get_word()
        ctx.invoked_with = invoker
        ctx.prefix = invoked_prefix
        ctx.command = self.all_commands.get(invoker)
        return ctx
async def invoke(self, ctx):
    """|coro|

    Invokes the command given under the invocation context and
    handles all the internal event dispatch mechanisms.

    Parameters
    -----------
    ctx: :class:`.Context`
        The invocation context to invoke.
    """
    if ctx.command is not None:
        # Announce the command first so 'command' listeners always fire,
        # even if the global checks below fail.
        self.dispatch('command', ctx)
        try:
            if await self.can_run(ctx, call_once=True):
                await ctx.command.invoke(ctx)
            else:
                raise errors.CheckFailure('The global check once functions failed.')
        except errors.CommandError as exc:
            # Route failures through the command's own error handler.
            await ctx.command.dispatch_error(ctx, exc)
        else:
            # Only reached when no CommandError was raised.
            self.dispatch('command_completion', ctx)
    elif ctx.invoked_with:
        # A prefix matched but the following word is not a known command.
        exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
        self.dispatch('command_error', ctx, exc)
async def process_commands(self, message):
    """|coro|

    Processes the commands that have been registered to the bot and
    other groups for the given message. Without this coroutine, none of
    the commands will be triggered.

    By default, this coroutine is called inside the :func:`.on_message`
    event. If you choose to override the :func:`.on_message` event, then
    you should invoke this coroutine as well.

    This is built using other low level tools, and is equivalent to a
    call to :meth:`~.Bot.get_context` followed by a call to
    :meth:`~.Bot.invoke`.

    Messages authored by bot accounts are ignored entirely: neither
    :meth:`~.Bot.get_context` nor :meth:`~.Bot.invoke` is called for them.

    Parameters
    -----------
    message: :class:`discord.Message`
        The message to process commands for.
    """
    if message.author.bot:
        return
    await self.invoke(await self.get_context(message))
async def on_message(self, message):
    # Default message listener: feed every incoming message through the
    # command processing pipeline.
    await self.process_commands(message)
class Bot(BotBase, discord.Client):
    """Represents a discord bot.

    This class is a subclass of :class:`discord.Client` and as a result
    anything that you can do with a :class:`discord.Client` you can do with
    this bot.

    This class also subclasses :class:`.GroupMixin` to provide the functionality
    to manage commands.

    Attributes
    -----------
    command_prefix
        The command prefix is what the message content must contain initially
        to have a command invoked. This prefix could either be a string to
        indicate what the prefix should be, or a callable that takes in the bot
        as its first parameter and :class:`discord.Message` as its second
        parameter and returns the prefix. This is to facilitate "dynamic"
        command prefixes. This callable can be either a regular function or
        a coroutine.

        An empty string as the prefix always matches, enabling prefix-less
        command invocation. While this may be useful in DMs it should be avoided
        in servers, as it's likely to cause performance issues and unintended
        command invocations.

        The command prefix could also be an iterable of strings indicating that
        multiple checks for the prefix should be used and the first one to
        match will be the invocation prefix. You can get this prefix via
        :attr:`.Context.prefix`. To avoid confusion empty iterables are not
        allowed.

        .. note::

            When passing multiple prefixes be careful to not pass a prefix
            that matches a longer prefix occurring later in the sequence. For
            example, if the command prefix is ``('!', '!?')`` the ``'!?'``
            prefix will never be matched to any message as the previous one
            matches messages starting with ``!?``. This is especially important
            when passing an empty string, it should always be last as no prefix
            after it will be matched.
    case_insensitive: :class:`bool`
        Whether the commands should be case insensitive. Defaults to ``False``. This
        attribute does not carry over to groups. You must set it to every group if
        you require group commands to be case insensitive as well.
    description: :class:`str`
        The content prefixed into the default help message.
    help_command: Optional[:class:`.HelpCommand`]
        The help command implementation to use. This can be dynamically
        set at runtime. To remove the help command pass ``None``. For more
        information on implementing a help command, see :ref:`ext_commands_help_command`.
    owner_id: Optional[:class:`int`]
        The user ID that owns the bot. If this is not set and is then queried via
        :meth:`.is_owner` then it is fetched automatically using
        :meth:`~.Bot.application_info`.
    owner_ids: Optional[Collection[:class:`int`]]
        The user IDs that owns the bot. This is similar to :attr:`owner_id`.
        If this is not set and the application is team based, then it is
        fetched automatically using :meth:`~.Bot.application_info`.
        For performance reasons it is recommended to use a :class:`set`
        for the collection. You cannot set both ``owner_id`` and ``owner_ids``.

        .. versionadded:: 1.3
    strip_after_prefix: :class:`bool`
        Whether to strip whitespace characters after encountering the command
        prefix. This allows for ``! hello`` and ``!hello`` to both work if
        the ``command_prefix`` is set to ``!``. Defaults to ``False``.

        .. versionadded:: 1.7
    """

    # All behaviour is inherited: command handling from BotBase, gateway /
    # connection handling from discord.Client.
    pass
class AutoShardedBot(BotBase, discord.AutoShardedClient):
    """This is similar to :class:`.Bot` except that it is inherited from
    :class:`discord.AutoShardedClient` instead.
    """

    # Command handling comes from BotBase; automatic sharding comes from
    # discord.AutoShardedClient.
    pass
|
|
# coding: utf-8
from __future__ import unicode_literals
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
import warnings
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
class StandardTransmuter(object):
    """
    An example of a Transmuter object, which performs a sequence of
    transformations on many structures to generate TransformedStructures.

    .. attribute: transformed_structures

        List of all transformed structures.
    """

    def __init__(self, transformed_structures, transformations=None,
                 extend_collection=0, ncores=None):
        """
        Initializes a transmuter from an initial list of
        :class:`pymatgen.alchemy.materials.TransformedStructure`.

        Args:
            transformed_structures ([TransformedStructure]): Input transformed
                structures
            transformations ([Transformations]): New transformations to be
                applied to all structures.
            extend_collection (int): Whether to use more than one output
                structure from one-to-many transformations. extend_collection
                can be an int, which determines the maximum branching for each
                transformation.
            ncores (int): Number of cores to use for applying transformations.
                Uses multiprocessing.Pool. Default is None, which implies
                serial.
        """
        self.transformed_structures = transformed_structures
        self.ncores = ncores
        if transformations is not None:
            for trans in transformations:
                self.append_transformation(trans,
                                           extend_collection=extend_collection)

    def get_transformed_structures(self):
        """
        Returns all TransformedStructures.

        .. deprecated:: v2.1.0

            Use transformed_structures attribute instead. Will be removed in
            next version.
        """
        warnings.warn("Use transformed_structures attribute instead.",
                      DeprecationWarning)
        return self.transformed_structures

    def __getitem__(self, index):
        return self.transformed_structures[index]

    def __getattr__(self, name):
        # Delegate unknown attribute access to the underlying structures,
        # returning the attribute collected from each one.
        return [getattr(x, name) for x in self.transformed_structures]

    def undo_last_change(self):
        """
        Undo the last transformation in the TransformedStructure.

        Raises:
            IndexError if already at the oldest change.
        """
        for x in self.transformed_structures:
            x.undo_last_change()

    def redo_next_change(self):
        """
        Redo the last undone transformation in the TransformedStructure.

        Raises:
            IndexError if already at the latest change.
        """
        for x in self.transformed_structures:
            x.redo_next_change()

    def __len__(self):
        return len(self.transformed_structures)

    def append_transformation(self, transformation, extend_collection=False,
                              clear_redo=True):
        """
        Appends a transformation to all TransformedStructures.

        Args:
            transformation: Transformation to append
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
            clear_redo (bool): Whether to clear the redo list. By default,
                this is True, meaning any appends clears the history of
                undoing. However, when using append_transformation to do a
                redo, the redo list should not be cleared to allow multiple
                redos.

        Returns:
            List of booleans corresponding to initial transformed structures
            each boolean describes whether the transformation altered the
            structure
        """
        if self.ncores and transformation.use_multiprocessing:
            # Condense arguments into single tuples because Pool.map only
            # supports single-argument callables.
            z = map(
                lambda x: (x, transformation, extend_collection, clear_redo),
                self.transformed_structures)
            p = Pool(self.ncores)
            try:
                new_tstructs = p.map(_apply_transformation, z, 1)
            finally:
                # Always release the worker processes, even if a
                # transformation raises (the original leaked the Pool).
                p.close()
                p.join()
            self.transformed_structures = []
            for ts in new_tstructs:
                self.transformed_structures.extend(ts)
        else:
            new_structures = []
            for x in self.transformed_structures:
                new = x.append_transformation(transformation,
                                              extend_collection,
                                              clear_redo=clear_redo)
                if new is not None:
                    new_structures.extend(new)
            self.transformed_structures.extend(new_structures)

    def extend_transformations(self, transformations):
        """
        Extends a sequence of transformations to the TransformedStructure.

        Args:
            transformations: Sequence of Transformations
        """
        for t in transformations:
            self.append_transformation(t)

    def apply_filter(self, structure_filter):
        """
        Applies a structure_filter to the list of TransformedStructures
        in the transmuter.

        Args:
            structure_filter: StructureFilter to apply.
        """
        def test_transformed_structure(ts):
            return structure_filter.test(ts.final_structure)

        self.transformed_structures = list(filter(test_transformed_structure,
                                                  self.transformed_structures))
        # Record the filter in each surviving structure's history.
        for ts in self.transformed_structures:
            ts.append_filter(structure_filter)

    def write_vasp_input(self, vasp_input_set, output_dir,
                         create_directory=True, subfolder=None,
                         include_cif=False):
        """
        Batch write vasp input for a sequence of transformed structures to
        output_dir, following the format output_dir/{formula}_{number}.

        Args:
            vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to create
                vasp input files from structures
            output_dir: Directory to output files
            create_directory (bool): Create the directory if not present.
                Defaults to True.
            subfolder: Callable to create subdirectory name from
                transformed_structure. e.g.,
                lambda x: x.other_parameters["tags"][0] to use the first tag.
            include_cif (bool): Whether to output a CIF as well. CIF files
                are generally better supported in visualization programs.
        """
        batch_write_vasp_input(self.transformed_structures, vasp_input_set,
                               output_dir, create_directory, subfolder,
                               include_cif)

    def set_parameter(self, key, value):
        """
        Add parameters to the transmuter. Additional parameters are stored in
        the as_dict() output.

        Args:
            key: The key for the parameter.
            value: The value for the parameter.
        """
        for x in self.transformed_structures:
            x.other_parameters[key] = value

    def add_tags(self, tags):
        """
        Add tags for the structures generated by the transmuter.

        Args:
            tags: A sequence of tags. Note that this should be a sequence of
                strings, e.g., ["My awesome structures", "Project X"].
        """
        self.set_parameter("tags", tags)

    def __str__(self):
        output = ["Current structures", "------------"]
        for x in self.transformed_structures:
            output.append(str(x.final_structure))
        return "\n".join(output)

    def append_transformed_structures(self, tstructs_or_transmuter):
        """
        Method is overloaded to accept either a list of transformed structures
        or a transmuter, in which case it appends the second transmuter's
        structures.

        Args:
            tstructs_or_transmuter: A list of transformed structures or a
                transmuter.
        """
        if isinstance(tstructs_or_transmuter, self.__class__):
            self.transformed_structures.extend(tstructs_or_transmuter
                                               .transformed_structures)
        else:
            for ts in tstructs_or_transmuter:
                assert isinstance(ts, TransformedStructure)
            self.transformed_structures.extend(tstructs_or_transmuter)

    @staticmethod
    def from_structures(structures, transformations=None, extend_collection=0):
        """
        Alternative constructor from structures rather than
        TransformedStructures.

        Args:
            structures: Sequence of structures
            transformations: New transformations to be applied to all
                structures
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.

        Returns:
            StandardTransmuter
        """
        tstruct = [TransformedStructure(s, []) for s in structures]
        return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """
    Generates a Transmuter from a cif string, possibly containing multiple
    structures.
    """

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """
        Generates a Transmuter from a cif string, possibly
        containing multiple structures.

        Args:
            cif_string: A string containing a cif or a series of cifs
            transformations: New transformations to be applied to all
                structures
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output structure
                from one-to-many transformations. extend_collection can be a
                number, which determines the maximum branching for each
                transformation.
        """
        transformed_structures = []
        lines = cif_string.split("\n")
        structure_data = []
        read_data = False
        for line in lines:
            # A "data_..." line starts a new structure section in a cif.
            # Raw string fixes the invalid "\s" escape in the original
            # non-raw pattern literal.
            if re.match(r"^\s*data", line):
                structure_data.append([])
                read_data = True
            if read_data:
                structure_data[-1].append(line)
        for data in structure_data:
            tstruct = TransformedStructure.from_cif_string("\n".join(data), [],
                                                           primitive)
            transformed_structures.append(tstruct)
        super(CifTransmuter, self).__init__(transformed_structures,
                                            transformations, extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """
        Generates a TransformedStructureCollection from a cif, possibly
        containing multiple structures.

        Args:
            filenames: List of strings of the cif files
            transformations: New transformations to be applied to all
                structures
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        allcifs = []
        for fname in filenames:
            with open(fname, "r") as f:
                allcifs.append(f.read())
        return CifTransmuter("\n".join(allcifs), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """
    Generates a transmuter from a sequence of POSCARs.

    Args:
        poscar_string: List of POSCAR strings
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        # Wrap the POSCAR string in a TransformedStructure and hand it to
        # the generic transmuter machinery.
        seed = TransformedStructure.from_poscar_string(poscar_string, [])
        super(PoscarTransmuter, self).__init__(
            [seed], transformations, extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """
        Convenient constructor to generates a POSCAR transmuter from a list of
        POSCAR filenames.

        Args:
            poscar_filenames: List of POSCAR filenames
            transformations: New transformations to be applied to all
                structures.
            extend_collection:
                Same meaning as in __init__.
        """
        contents = []
        for path in poscar_filenames:
            with open(path, "r") as handle:
                contents.append(handle.read())
        tstructs = [TransformedStructure.from_poscar_string(text, [])
                    for text in contents]
        return StandardTransmuter(tstructs, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set, output_dir,
                           create_directory=True, subfolder=None,
                           include_cif=False):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
            vasp input files from structures.
        output_dir: Directory to output files
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Boolean indication whether to output a CIF as
            well. CIF files are generally better supported in visualization
            programs.
    """
    if include_cif:
        # Hoisted out of the loop: one import instead of one per structure.
        from pymatgen.io.cif import CifWriter
    for i, s in enumerate(transformed_structures):
        # Strip whitespace from the formula to build a filesystem-safe name.
        # Raw string fixes the invalid "\s" escape in a plain literal.
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory)
        if include_cif:
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
"""
Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
List of output structures (the modified initial structure, plus
any new structures created by a one-to-many transformation)
"""
ts, transformation, extend_collection, clear_redo = inputs
new = ts.append_transformation(transformation, extend_collection,
clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o
|
|
#Graham Rockwell
#7.6.2009
from core.model.LinearModel import LinearModel
from core.model.ModelFactory import CreateLinearModel
from core.reader.FluxModelParser import FluxModelFlatFileParser
from util.Report import Report
import re, os, pickle
class MatrixTools:
    """Helpers for sparse vector/matrix arithmetic.

    Vectors are dicts mapping name -> value; matrices expose
    ``getRowNames()`` and ``getRow(name)`` returning a sparse row vector.
    """

    def __init__(self):
        pass

    def vectorMultiply(self, v1, v2, verbose=False):
        """Return the dot product of two sparse vectors.

        Keys missing from either vector contribute zero.
        """
        result = 0
        s = ''
        for key in v1.keys():
            if key in v2.keys():
                value1 = v1[key]
                value2 = v2[key]
                result += value1 * value2
                s += " + %s[%s]" % (key, value1 * value2)
        if verbose:
            # Single-argument print(...) works under both Python 2 and 3.
            print("%s = %s" % (result, s[3:]))
        return result

    def vectorMScalar(self, v1, s):
        """Return a new sparse vector equal to v1 scaled by scalar s."""
        result = {}
        for key in v1.keys():
            result[key] = v1[key] * s
        return result

    def matrixVectorM(self, m, v):
        """Multiply matrix m by sparse vector v.

        Rows for which getRow() returns None are skipped.

        Returns:
            dict mapping row name -> inner product of that row with v.
        """
        result = {}
        for rName in m.getRowNames():
            v1 = m.getRow(rName)
            if v1 is not None:
                # Fixed: the original called a bare vectorMultiply(...),
                # which raised NameError because it is an instance method.
                result[rName] = self.vectorMultiply(v, v1)
        return result
class ProcessGeneticObjective:
    """Expands gene-level optimization objectives into annotated Report rows.

    Gene group keys are encoded strings such as ``"(b0001_and_b0002)"``,
    where gene numbers are joined with ``_and_`` / ``_or_``; the helpers
    below split them back into individual gene numbers.
    """

    # NOTE(review): named ``init``, not ``__init__``, so it is NOT called
    # automatically on construction — confirm this is intentional.
    def init(self):
        pass

    def translateGeneNumber(self, term, annotation):
        """Return ``term`` with gene numbers replaced by annotated gene names."""
        # Human-readable form: underscores become spaces.
        group = term.replace("_", " ")
        # Machine form: strip the boolean connectives to recover the raw
        # gene numbers, e.g. "(b1_and_b2)" -> ["b1", "b2"].
        geneGroup = term.replace("_", "").replace("and", "_").replace("or", "_")
        geneGroup = geneGroup.replace("(", "").replace(")", "").split("_")
        for geneNumber in geneGroup:
            if geneNumber in annotation.keys():
                iName = annotation[geneNumber]["gene"]
                group = group.replace(geneNumber, iName)
        return group

    def processObjective(self, geneticObjective, annotation, nb, iter, report=None, clusterData=None):
        """Expand one genetic objective map into per-gene report columns.

        Args:
            geneticObjective: dict of encoded gene-group key -> adjustment value.
            annotation: dict of gene number -> {"gene": ..., "function": ...}.
            nb, iter: identifiers combined to name the adjustment column.
            report: optional Report to extend; a new one is made if None.
            clusterData: optional dict of gene-group key -> cluster set.
        """
        verbose = True
        if report == None:
            report = Report()
        names = {}
        function = {}
        adjustment = {}
        tag = {}
        clusterTags = {}
        for key in geneticObjective.keys():
            value = geneticObjective[key]
            # Debug hook: set checkName to a key of interest to break here.
            checkName = ''
            if key == checkName:
                pass
            group = key.replace("_", " ")
            geneGroup = key.replace("_", "").replace("and", "_").replace("or", "_")
            geneGroup = geneGroup.replace("(", "").replace(")", "").split("_")
            for geneNumber in geneGroup:
                adjustment[geneNumber] = value
                if geneNumber in annotation.keys():
                    iName = annotation[geneNumber]["gene"]
                    names[geneNumber] = iName
                    function[geneNumber] = annotation[geneNumber]["function"]
                    group = group.replace(geneNumber, iName)
            for geneNumber in geneGroup:
                tag[geneNumber] = group
            if clusterData != None:
                if key not in clusterData.keys():
                    continue
                cluster = clusterData[key]
                # Flatten the printed set representation ("set([...])") back
                # to a plain comma-separated string of member keys.
                clusterTag = str(cluster).replace("set([", "").replace("])", "").replace(", ", ",").replace(" ", "").replace("'", "")
                clusterTag = clusterTag.replace("_", " ")
                clusterGroup = clusterTag.replace("_", "").replace("and", "_").replace("or", "_")
                clusterGroup = clusterGroup.replace("(", "").replace(")", "").replace(" ", "").replace(",", "_")
                clusterGroup = clusterGroup.split("_")
                for geneNumber in clusterGroup:
                    if geneNumber in annotation.keys():
                        iName = annotation[geneNumber]["gene"]
                        clusterTag = clusterTag.replace(geneNumber, iName)
                # Debug prints below are disabled by this flag.
                verbose = False
                for geneNumber in geneGroup:
                    clusterTags[geneNumber] = clusterTag
                    if verbose:
                        print "--- cluster---"
                        print geneNumber
                        print names[geneNumber]
                        print tag[geneNumber]
                        print str(cluster)
                        print clusterGroup
                        print clusterTag
        report["gene name"] = names
        report["gene ID"] = tag
        if clusterData != None:
            report["gene cluster"] = clusterTags
        report["function"] = function
        adjName = "%s_%s" % (nb, iter)
        report[adjName] = adjustment
        return report

    def stringToMap(self, value):
        """Evaluate a string representation of a dict/map and return it."""
        # SECURITY: eval() on the raw string — only safe for trusted input.
        result = eval(value)
        return result

    def processAnalysis(self, analysisData, annotationData, nbKey, iterKey, objKey, clusterData=None):
        """Parse raw analysis rows into target maps and an annotated report.

        Args:
            analysisData: dict of row key -> row dict.
            annotationData: gene annotation map (may be None).
            nbKey, iterKey, objKey: column names for nb, iteration and the
                objective string within each row.
            clusterData: optional map used to spread values over clusters.

        Returns:
            (targets, report) where targets maps (nb, iter) -> objective map,
            or None when analysisData is None.
        """
        if analysisData == None:
            return None
        report = Report()
        targets = {}
        maxIter = 0
        maxNb = 0
        for key in analysisData.keys():
            data = analysisData[key]
            nb = int(data[nbKey])
            iter = int(data[iterKey])
            targetString = data[objKey].rstrip()
            # SECURITY: eval of a data field — assumes trusted analysis files.
            target = eval(targetString)
            targets[(nb, iter)] = target
        if clusterData != None:
            # Propagate each target value onto every member of its cluster.
            clusterTargets = {}
            for key in targets.keys():
                target = targets[key]
                newTarget = {}
                newTarget.update(target)
                for tk in target.keys():
                    if tk in clusterData.keys():
                        for clusterTarget in clusterData[tk]:
                            iValue = target[tk]
                            newTarget[clusterTarget] = iValue
                clusterTargets[key] = newTarget
            targets = clusterTargets
        if annotationData != None:
            # keys() returns a list under Python 2, so in-place sort works.
            k = targets.keys()
            k.sort()
            for (nb, iter) in k:
                target = targets[(nb, iter)]
                report = self.processObjective(target, annotationData, nb, iter, report, clusterData)
        return (targets, report)
class ProcessModel:
    """Loads flux models, optionally reduces them, and derives gene maps.

    Configuration flags (set after construction):
        configFileName: path of the multi-model config file.
        verbose: print progress information.
        preLoad: reuse a previously pickled reduction if present.
        useGeneMap: build reaction -> gene mappings.
        useReduced: apply model reduction.
        geneFill: synthesize gene names for reactions lacking one.
    """

    def __init__(self):
        self.configFileName = ''
        self.verbose = False
        self.preLoad = False
        self.useGeneMap = False
        self.useReduced = False
        self.model = None
        self.geneFill = False

    def load(self, fileName):
        """Unpickle and return previously saved data, or None when the file
        does not exist."""
        data = None
        if os.path.exists(fileName):
            # NOTE(review): this bare string is a no-op expression — it was
            # presumably meant to be printed when verbose is set.
            if self.verbose: "===loading pre-existing model data=="
            fileHandel = open(fileName,'r')
            data = pickle.load(fileHandel)
            fileHandel.close()
        return data

    def getFluxModel(self, configFileName, modelNames):
        """Parse the named models from the config file and merge them into a
        single flux model plus its linear-model matrix.

        Returns:
            (fluxModel, modelMatrix, nonTargets) where nonTargets lists
            reactions annotated "ValidTarget" == "FALSE".
        """
        # NOTE(review): GeneralMultiConfigParser is not among this module's
        # visible imports — confirm it is in scope at runtime.
        gconfigParser = GeneralMultiConfigParser()
        gconfigs = gconfigParser.parse(configFileName)
        modelParser = FluxModelFlatFileParser()
        modelParser.setWrapper("\'")
        fluxModel = None
        pName = ''
        iFluxModel = LinearModel()
        for mName in modelNames:
            if mName != '':
                gconfig = gconfigs[mName]
                iFluxModel = modelParser.parseGenConfig(gconfig, mName)
                if fluxModel == None:
                    # First model becomes the base; remember its primary
                    # objective name for subsequent merges.
                    fluxModel = iFluxModel
                    pName = fluxModel.objectives.keys()[0]
                else:
                    fluxModel.extend(pName, mName, iFluxModel)
        #---------------------------
        # Parse Linear Model
        #---------------------------
        modelName = modelNames[0]
        modelMaker = CreateLinearModel()
        modelMaker.setScale(1)
        modelMatrix = modelMaker.fromFluxModel(fluxModel, modelName)
        #-------------------------
        # Set Filter Target List
        #------------------------
        nonTargets = []
        targetList = fluxModel.getMetabolicNetwork().getAnnotation("ValidTarget")
        for name in targetList.keys():
            value = targetList[name]
            if value == "FALSE":
                nonTargets.append(name)
        return (fluxModel, modelMatrix, nonTargets)

    def refactorStringMap(self, data):
        """Return a copy of a str->str map with spaces replaced by
        underscores in both keys and values."""
        result = {}
        for (key, value) in data.items():
            ikey = key.replace(" ", "_")
            ivalue = value.replace(" ", "_")
            result[ikey] = ivalue
        return result

    def parseGeneMap(self, fluxModel, nonTargets):
        '''
        gene map is rxn key to gene name value
        currently only produces gene group -< reaction list
        #! update to remove insure removal of blank gene names
        '''
        reactionGeneMap = fluxModel.getMetabolicNetwork().getAnnotation("GeneAssociation")
        geneMap = {}
        for (key, value) in reactionGeneMap.items():
            #if key not in nonTargets and value != '':
            if key not in nonTargets:
                geneMap[key] = str(value)
            # NOTE(review): this elif repeats the condition above, so the
            # geneFill branch below is unreachable; the commented-out test
            # suggests the first branch was meant to also require
            # value != ''.
            elif key not in nonTargets:
                if self.geneFill:
                    geneMap[key] = str("gene_" + key)
                    if self.verbose: print "filling in gene for reaction %s" % (key)
            else:
                if self.verbose: print "removing reaction gene pair [%s:%s]" % (key, value)
        geneMap = self.refactorStringMap(geneMap)
        rGeneMap = {}
        for (key, value) in geneMap.items():
            #vs = set([value])
            rGeneMap[key] = set([value])
        return (geneMap, rGeneMap)

    def parseGenePairs(self, reducer, geneMap, rGeneMap):
        """Build a (reaction, gene) pair list plus a gene -> cluster map,
        adjusting for model reduction when enabled."""
        #-----------------------------
        # Adjust for model reduction
        # -gene pairs lists relation between reactions and genes many genes to many reduced reactions
        # -point gene conglomerate to geneNames of memebers
        #-----------------------------
        genePairs = []
        geneCluster = {}
        if self.useReduced:
            (genePairs, rGeneMap, geneCluster) = reducer.reduceGeneMap(geneMap, rGeneMap)
        #-------------------------------------
        # Transfer remainder of gene mapping
        # to gene pair list
        #-------------------------------------
        for (rxnName, geneNames) in rGeneMap.items():
            for geneName in geneNames:
                genePairs.append((rxnName, geneName))
                if geneName not in geneCluster.keys():
                    geneCluster[geneName] = set([geneName])
                geneCluster[geneName].add(geneName)
        return (genePairs, geneCluster)

    def reduce(self, modelMatrix, geneMap, rGeneMap, nonTargets, objective, syntheticObjectiveName, loadFileName):
        """Reduce the model matrix and pickle (reducer, matrix) to
        loadFileName for later preLoad reuse."""
        if self.verbose: print "==Preforming Reduction=="
        # NOTE(review): ReduceLp is not among this module's visible imports —
        # confirm it is in scope at runtime.
        reducer = ReduceLp()
        reducer.preferedTargets = nonTargets
        reducer.geneMap = geneMap
        reducer.rGeneMap = rGeneMap
        # Reduce over every column except the two objective columns.
        rTargets = modelMatrix.getColumnNames()
        rTargets.remove(objective)
        rTargets.remove(syntheticObjectiveName)
        modelMatrix = reducer.reduceColumns(modelMatrix)
        modelMatrix = reducer.reduce(modelMatrix, rTargets)
        modelMatrix = reducer.reduceRows(modelMatrix)
        loadFile = open(loadFileName, 'w')
        pickle.dump((reducer, modelMatrix), loadFile)
        loadFile.close()
        return (reducer, modelMatrix)

    def loadModel(self, modelNames, objective, syntheticObjectiveName):
        """Top-level loader: parse models, build gene maps, optionally reduce,
        and return everything downstream analysis needs.

        Returns:
            (fluxModel, modelMatrix, reducer, nonTargets, genePairs,
             geneMap, rGeneMap, geneCluster)
        """
        #------------------
        # Parse Flux Model
        # Set model matrix
        #------------------
        (fluxModel, modelMatrix, nonTargets) = self.getFluxModel(self.configFileName, modelNames)
        #update to allow for more complex objective
        modelName = str(modelNames)
        modelMatrix.setObjective({objective: -1})
        # Objectives are never valid engineering targets themselves.
        nonTargets.append(objective)
        nonTargets.append(syntheticObjectiveName)
        if self.verbose: print "Non Targets %s" % (len(nonTargets))
        #--------------------
        # Get Gene Mappings
        #--------------------
        geneMap = None
        rGeneMap = None
        if self.useGeneMap:
            (geneMap, rGeneMap) = self.parseGeneMap(fluxModel, nonTargets)
        #----------------------------
        # Reduce Model
        #----------------------------
        reducedFileName = "reducedFile_" + modelName + "_" + objective + "_" + syntheticObjectiveName
        reducer = None
        if self.preLoad and os.path.exists(reducedFileName):
            (reducer, modelMatrix) = self.load(reducedFileName)
        elif self.useReduced:
            (reducer, modelMatrix) = self.reduce(modelMatrix, geneMap, rGeneMap, nonTargets, objective, syntheticObjectiveName, reducedFileName)
        #---------------------------------
        # Populate gene reation pair list
        #---------------------------------
        genePairs = None
        geneCluster = None
        if geneMap != None:
            (genePairs, geneCluster) = self.parseGenePairs(reducer, geneMap, rGeneMap)
        return (fluxModel, modelMatrix, reducer, nonTargets, genePairs, geneMap, rGeneMap, geneCluster)

    def getReactionGeneMap(self, data):
        """Split boolean gene-association strings into lists of gene terms."""
        result = {}
        for key in data.keys():
            value = data[key]
            value = value.replace('(', '')
            value = value.replace(')', '')
            value = value.replace(' ', '')
            value = value.replace('and', ' ')
            value = value.replace('or', ' ')
            terms = value.split(' ')
            result[key] = terms
        return result

    def mapMultipleAnnotations(self, data, catigory=None, sep=' '):
        """Split each annotation value on ``sep``; when ``catigory`` is given,
        index into that sub-category of the value first."""
        result = {}
        for key in data.keys():
            value = data[key]
            if catigory != None:
                value = value[catigory]
            terms = re.split(sep, value)
            result[key] = terms
        return result

    def reverseAnnotation(self, data, list=False):
        """Invert a map. With list=True each value is iterable and the result
        maps every member to the set of keys that contained it."""
        # NOTE(review): the ``list`` parameter shadows the builtin of the
        # same name; kept as-is for interface compatibility.
        result = {}
        for (key, value) in data.items():
            if list:
                for v in value:
                    if v not in result.keys():
                        result[v] = set()
                    result[v].add(key)
            else:
                result[value] = key
        return result

    def mergeMAnnotations(self, a1, a2):
        """Compose two multi-maps: for each k1 -> {v1, ...} in a1 and each
        v1 -> set in a2, produce k1 -> union of the matching a2 sets."""
        result = {}
        for k1 in a1.keys():
            v1 = a1[k1]
            for vi in v1:
                if vi in a2.keys():
                    v2 = a2[vi]
                    if k1 not in result.keys():
                        result[k1] = set()
                    result[k1] = result[k1].union(v2)
        return result
class OptimizationPriceAnalysis:
    """Helpers that extract reduced-cost (shadow price) information from
    linear optimization runs."""

    # NOTE(review): named ``init``, not ``__init__``, so it is never called
    # automatically on construction; kept as-is for interface compatibility.
    def init(self):
        pass

    def findAllPrices(self, model, targets, dim=2):
        """Run a single simplex solve on ``model`` and collect reduced costs
        for each target.

        Returns:
            dict mapping (target, price-key) -> price value.
        """
        result = {}
        lo = LinearOptimization()
        lo.setModel(model)
        lo.runSimplex()
        for i in targets:
            prices = lo.getPointReducedCosts({i: -1.0}, dim)
            for j in prices.keys():
                term = (i, j)
                result[term] = prices[j]
        lo.clear()
        return result

    def findTargetPrices(self, model, objectives, targets, dim=2, coeff=-1.0):
        """Solve once per objective and report the reduced costs of every
        target under each objective.

        Returns:
            Report keyed by "<objective>_<target>".
        """
        result = Report()
        lo = LinearOptimization()
        lo.setModel(model)
        for obj in objectives:
            lo.clearObjective()
            lo.setObjectiveMap({obj: -1})
            lo.runSimplex()
            for i in targets:
                name = obj + "_" + i
                result[name] = lo.getPointReducedCosts({i: coeff}, dim)
        lo.clear()
        return result

    def findReducedPrices(self, model, objective, target, dim=2):
        """Solve ``model`` under ``objective`` and return the reduced costs
        for ``target``.

        Fixed: the original referenced the undefined names ``newModel`` and
        ``targets`` instead of the ``model``/``target`` parameters, which
        raised NameError on every call.
        """
        lo = LinearOptimization()
        lo.setModel(model)
        lo.clearObjective()
        lo.setObjective(objective)
        lo.runSimplex()
        prices = lo.getPointReducedCosts(target, dim)
        return prices
class ModelProperties:
    """Flux-variability style property calculations on linear models."""

    def __init__(self):
        pass

    def findMinMax(self, model, objectiveName, minValue, targets):
        """For each target flux, find its extreme values while the objective
        flux is constrained by ``minValue``.

        Args:
            model: LinearModel to analyze (not mutated).
            objectiveName: column whose flux is bounded.
            minValue: bound on the objective flux; a negative value imposes
                an upper bound, otherwise a lower bound.
            targets: iterable of column names to scan.

        Returns:
            (positive, negative): dicts of target -> flux obtained when
            optimizing with coefficient +1 / -1 respectively.
        """
        positive = {}
        negative = {}
        # Work on an extended copy so the caller's model is not mutated.
        imodel = LinearModel()
        imodel.extend(model)
        if minValue < 0:
            imodel.addColumnLimit(objectiveName, (None, minValue))
        else:
            imodel.addColumnLimit(objectiveName, (minValue, None))
        lp = LinearOptimization()
        # Fixed: the constrained copy ``imodel`` was built but the original
        # solved the unconstrained ``model``, silently ignoring the
        # objective bound added above.
        lp.setModel(imodel)
        for t in targets:
            lp.clearObjective()
            lp.setObjectiveMap({t: -1})
            lp.runSimplex()
            fluxes = lp.getPredictionMap()
            lowValue = fluxes[t]
            negative[t] = lowValue
            lp.clearObjective()
            lp.setObjectiveMap({t: 1})
            lp.runSimplex()
            fluxes = lp.getPredictionMap()
            highValue = fluxes[t]
            positive[t] = highValue
        return (positive, negative)
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import re
import string
import fnmatch
import IECore
import Gaffer
import GafferUI
import GafferScene
##########################################################################
# Metadata
##########################################################################

# Register user-visible documentation for the Shader node and its plugs,
# for display in the node editor UI.
Gaffer.Metadata.registerNodeDescription(

GafferScene.Shader,
"""The base type for all nodes which create shaders. Use the ShaderAssignment node to assign them to objects in the scene.""",

"name",
{
	"description" :
	"""The name of the shader being represented. This should be considered read-only. Use the Shader.loadShader() method to load a shader.""",
	"nodeUI:section" : "header",
},

"parameters",
"""Where the parameters for the shader are represented.""",

)
##########################################################################
# PlugValueWidgets
##########################################################################
class __ShaderNamePlugValueWidget( GafferUI.PlugValueWidget ) :
	"""Displays the shader name as a header label, with a button for
	reloading the shader definition from disk."""

	def __init__( self, plug, **kw ) :

		row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
		GafferUI.PlugValueWidget.__init__( self, row, plug, **kw )

		with row :
			self.__label = GafferUI.Label( "" )
			# Expanding spacer pushes the button to the right edge of the row.
			GafferUI.Spacer( IECore.V2i( 1 ), parenting = { "expand" : True } )
			self.__button = GafferUI.Button( "Reload" )
			# Keep a reference to the connection so it survives as long as the widget.
			self.__buttonClickedConnection = self.__button.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) )

		self._updateFromPlug()

	def hasLabel( self ) :

		# The widget draws its own header label, so no external label is needed.
		return True

	def _updateFromPlug( self ) :

		with self.getContext() :
			shaderName = self.getPlug().getValue()
			self.__label.setText( "<h3>Shader : " + shaderName + "</h3>" )

		## \todo Disable the type check once we've got all the shader types implementing reloading properly.
		nodeType = self.getPlug().node().typeName()
		self.__button.setEnabled( bool( shaderName ) and ( "RenderMan" in nodeType or "OSL" in nodeType ) )

	def __buttonClicked( self, button ) :

		node = self.getPlug().node()
		node.shaderLoader().clear()
		# Group the reload into a single undoable action.
		with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
			node.loadShader( node["name"].getValue(), keepExistingValues = True )
# Register the widgets used to display the Shader's plugs. The "out" and
# "type" plugs get no widget at all, and "parameters" is shown as an
# uncollapsed compound.
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader.staticTypeId(), "name", __ShaderNamePlugValueWidget )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader.staticTypeId(), "parameters", GafferUI.CompoundPlugValueWidget, collapsed=None )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader.staticTypeId(), "out", None )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader.staticTypeId(), "type", None )
GafferUI.Metadata.registerPlugValue( GafferScene.Shader.staticTypeId(), "enabled", "nodeUI:section", "Node" )
##########################################################################
# NodeGadgets and Nodules
##########################################################################

def __nodeGadgetCreator( node ) :

	# Shaders are laid out with a vertical (Y) orientation in the graph.
	return GafferUI.StandardNodeGadget( node, GafferUI.LinearContainer.Orientation.Y )

GafferUI.NodeGadget.registerNodeGadget( GafferScene.Shader.staticTypeId(), __nodeGadgetCreator )

def __parametersNoduleCreator( plug ) :

	return GafferUI.CompoundNodule( plug, GafferUI.LinearContainer.Orientation.Y, spacing = 0.2 )

GafferUI.Nodule.registerNodule( GafferScene.Shader.staticTypeId(), "parameters", __parametersNoduleCreator )
# The "name", "type" and "enabled" plugs are never connected in the graph,
# so they get no nodule at all.
GafferUI.Nodule.registerNodule( GafferScene.Shader.staticTypeId(), "name", lambda plug : None )
GafferUI.Nodule.registerNodule( GafferScene.Shader.staticTypeId(), "type", lambda plug : None )
GafferUI.Nodule.registerNodule( GafferScene.Shader.staticTypeId(), "enabled", lambda plug : None )

# We leave it to the derived class UIs to register creators for the parameters.* plugs, because only the derived classes know whether
# or not networkability makes sense in each case.
##########################################################################
# NodeFinderDialogue mode
##########################################################################

def __shaderNameExtractor( node ) :

	# Nodes which aren't shaders contribute an empty search string.
	if not isinstance( node, GafferScene.Shader ) :
		return ""

	return node["name"].getValue()

GafferUI.NodeFinderDialogue.registerMode( "Shader Names", __shaderNameExtractor )
##########################################################################
# Shader menu
##########################################################################

## Appends menu items for the creation of all shaders found on some searchpaths.
def appendShaders( menuDefinition, prefix, searchPaths, extensions, nodeCreator, matchExpression = "*" ) :

	# The submenu is generated lazily, so the searchpaths are only scanned
	# when the menu is actually shown.
	subMenu = IECore.curry( __shaderSubMenu, searchPaths, extensions, nodeCreator, matchExpression )
	menuDefinition.append( prefix, { "subMenu" : subMenu } )
def __nodeName( shaderName ) :

	# Derive a node name from the shader's filename : strip any directories,
	# and map "." and "-" (invalid in node names) to underscores.
	nodeName = os.path.basename( shaderName )
	return nodeName.replace( ".", "_" ).replace( "-", "_" )
def __loadFromFile( menu, extensions, nodeCreator ) :

	# Browse from the current working directory, showing only files with
	# the supported shader extensions.
	startPath = Gaffer.FileSystemPath( os.getcwd() )
	startPath.setFilter( Gaffer.FileSystemPath.createStandardFilter( extensions ) )

	dialogue = GafferUI.PathChooserDialogue( startPath, title="Load Shader", confirmLabel = "Load", valid=True, leaf=True )
	chosenPath = dialogue.waitForPath( parentWindow = menu.ancestor( GafferUI.ScriptWindow ) )

	# The user may have cancelled the dialogue.
	if not chosenPath :
		return None

	shaderName = os.path.splitext( str( chosenPath ) )[0]
	return nodeCreator( __nodeName( shaderName ), shaderName )
def __shaderSubMenu( searchPaths, extensions, nodeCreator, matchExpression ) :

	# Accept either a glob-style string or a precompiled regex.
	if isinstance( matchExpression, str ) :
		matchExpression = re.compile( fnmatch.translate( matchExpression ) )

	shaders = set()
	pathsVisited = set()
	for path in searchPaths :

		if path in pathsVisited :
			continue

		for root, dirs, files in os.walk( path ) :
			for file in files :
				if os.path.splitext( file )[1][1:] in extensions :
					# Store the shader name relative to the searchpath entry,
					# with the leading slash stripped.
					# NOTE(review): partition( path ) assumes the searchpath
					# string occurs only once, at the start of the full path -
					# confirm for paths with repeated components.
					shaderPath = os.path.join( root, file ).partition( path )[-1].lstrip( "/" )
					if shaderPath not in shaders and matchExpression.match( shaderPath ) :
						shaders.add( os.path.splitext( shaderPath )[0] )

		pathsVisited.add( path )

	shaders = sorted( list( shaders ) )

	# Shaders in subdirectories get their own submenus; when both kinds are
	# present, uncategorised shaders are grouped under "Other".
	categorisedShaders = [ x for x in shaders if "/" in x ]
	uncategorisedShaders = [ x for x in shaders if "/" not in x ]

	shadersAndMenuPaths = []
	for shader in categorisedShaders :
		shadersAndMenuPaths.append( ( shader, "/" + shader ) )

	for shader in uncategorisedShaders :
		if not categorisedShaders :
			shadersAndMenuPaths.append( ( shader, "/" + shader ) )
		else :
			shadersAndMenuPaths.append( ( shader, "/Other/" + shader ) )

	result = IECore.MenuDefinition()
	for shader, menuPath in shadersAndMenuPaths :

		menuPath = "/".join( [ IECore.CamelCase.toSpaced( x ) for x in menuPath.split( "/" ) ] )
		result.append(
			menuPath,
			{
				"command" : GafferUI.NodeMenu.nodeCreatorWrapper( IECore.curry( nodeCreator, __nodeName( shader ), shader ) ),
				# Search matches against the final menu entry with spaces removed.
				"searchText" : menuPath.rpartition( "/" )[-1].replace( " ", "" ),
			},
		)

	result.append( "/LoadDivider", { "divider" : True } )
	result.append( "/Load...", { "command" : GafferUI.NodeMenu.nodeCreatorWrapper( lambda menu : __loadFromFile( menu, extensions, nodeCreator ) ) } )

	return result
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base constants and handlers."""
import base64
import Cookie
import datetime
import hmac
import json
import logging
import os
import sys
import time
import traceback
import urlparse
import jinja2
import webapp2
from google.appengine.api import users
from core.domain import config_domain
from core.domain import config_services
from core.domain import rights_manager
from core.domain import user_services
from core.platform import models
import feconf
import jinja_utils
import utils
# Platform-specific service and model bindings, resolved via the registry so
# that the rest of this module stays platform-agnostic.
app_identity_services = models.Registry.import_app_identity_services()
current_user_services = models.Registry.import_current_user_services()
(user_models,) = models.Registry.import_models([models.NAMES.user])

# Negative offset used to expire cookies by dating them into the past.
ONE_DAY_AGO_IN_SECS = -24 * 60 * 60

# Placeholder default for the CSRF secret; a real random secret is generated
# on first use (see CsrfTokenManager.init_csrf_secret).
DEFAULT_CSRF_SECRET = 'oppia csrf secret'
CSRF_SECRET = config_domain.ConfigProperty(
    'oppia_csrf_secret', {'type': 'unicode'},
    'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET)

BEFORE_END_HEAD_TAG_HOOK = config_domain.ConfigProperty(
    'before_end_head_tag_hook', {
        'type': 'unicode',
        'ui_config': {
            'rows': 7,
        },
    },
    'Code to insert just before the closing </head> tag in all pages.', '')
def _clear_login_cookies(response_headers):
    """Clears login cookies from the given response headers.

    Args:
        response_headers: webapp2 response headers object, supporting
            add_header(name, value).
    """
    # App Engine sets the ACSID cookie for http:// and the SACSID cookie
    # for https:// . We just unset both below by emitting replacement
    # cookies with an expiry date in the past.
    for cookie_name in ['ACSID', 'SACSID']:
        cookie = Cookie.SimpleCookie()
        cookie[cookie_name] = ''
        cookie[cookie_name]['expires'] = (
            datetime.datetime.utcnow() +
            datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS)
        ).strftime('%a, %d %b %Y %H:%M:%S GMT')
        # cookie.output() is 'Set-Cookie: <value>'; split it into the
        # (name, value) pair that add_header expects.
        response_headers.add_header(*cookie.output().split(': ', 1))
class LogoutPage(webapp2.RequestHandler):
    """Class which handles the logout URL."""

    def get(self):
        """Logs the user out, and returns them to a specified follow-up
        page (or the home page if no follow-up page is specified).
        """
        _clear_login_cookies(self.response.headers)

        # The str conversion is needed, otherwise an InvalidResponseError
        # asking for the 'Location' header value to be str instead of
        # 'unicode' will result.
        redirect_target = str(self.request.get('return_url') or '/')

        if feconf.DEV_MODE:
            self.redirect(users.create_logout_url(redirect_target))
        else:
            self.redirect(redirect_target)
class UserFacingExceptions(object):
    """This class contains all the exception class definitions used."""

    class NotLoggedInException(Exception):
        """Error class for users that are not logged in (error code 401)."""

    class InvalidInputException(Exception):
        """Error class for invalid input on the user side (error code 400)."""

    class UnauthorizedUserException(Exception):
        """Error class for unauthorized access (error code 401)."""

    class PageNotFoundException(Exception):
        """Error class for a page not found error (error code 404)."""

    class InternalErrorException(Exception):
        """Error class for an internal server side error (error code 500)."""
class BaseHandler(webapp2.RequestHandler):
    """Base class for all Oppia handlers."""

    # Whether to check POST and PUT payloads for CSRF tokens prior to
    # processing them. Can be overridden by subclasses if this check is
    # not necessary.
    REQUIRE_PAYLOAD_CSRF_CHECK = True
    # Whether to redirect requests corresponding to a logged-in user who has
    # not completed signup to the signup page. This ensures that logged-in
    # users have agreed to the latest terms.
    REDIRECT_UNFINISHED_SIGNUPS = True
    # What format the get method returns when exception raised, json or html
    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_HTML

    @webapp2.cached_property
    def jinja2_env(self):
        """The Jinja2 environment used to render this handler's templates.

        Cached per handler instance by webapp2.cached_property.
        """
        return jinja_utils.get_jinja_env(feconf.FRONTEND_TEMPLATES_DIR)

    def __init__(self, request, response):  # pylint: disable=super-init-not-called
        """Initializes the handler and computes per-request user state.

        Args:
            request: The incoming webapp2 request.
            response: The webapp2 response object to populate.
        """
        # Set self.request, self.response and self.app.
        self.initialize(request, response)

        self.start_time = datetime.datetime.utcnow()

        # Initializes the return dict for the handlers.
        self.values = {}

        self.user_id = current_user_services.get_current_user_id()
        self.username = None
        self.has_seen_editor_tutorial = False
        self.partially_logged_in = False
        self.values['profile_picture_data_url'] = None
        self.preferred_site_language_code = None

        if self.user_id:
            user_settings = user_services.get_user_settings(
                self.user_id, strict=False)
            if user_settings is None:
                # First request from a user authenticated by GAE but unknown
                # to Oppia: create a settings record for them.
                email = current_user_services.get_current_user_email()
                user_settings = user_services.create_new_user(
                    self.user_id, email)
            self.values['user_email'] = user_settings.email

            if (self.REDIRECT_UNFINISHED_SIGNUPS and not
                    user_services.has_fully_registered(self.user_id)):
                # Treat the user as logged out until signup is completed.
                _clear_login_cookies(self.response.headers)
                self.partially_logged_in = True
                self.user_id = None
            else:
                self.username = user_settings.username
                self.preferred_site_language_code = (
                    user_settings.preferred_site_language_code)
                self.values['username'] = self.username
                self.values['profile_picture_data_url'] = (
                    user_settings.profile_picture_data_url)
                if user_settings.last_started_state_editor_tutorial:
                    self.has_seen_editor_tutorial = True
                # In order to avoid too many datastore writes, we do not bother
                # recording a log-in if the current time is sufficiently close
                # to the last log-in time.
                if (user_settings.last_logged_in is None or
                        not utils.are_datetimes_close(
                            datetime.datetime.utcnow(),
                            user_settings.last_logged_in)):
                    user_services.record_user_logged_in(self.user_id)

        # user_settings is only bound when self.user_id was set above, and
        # self.user_id is reset to None for partially-logged-in users, so the
        # conditional below never reads user_settings for a guest.
        self.role = (
            feconf.ROLE_ID_GUEST
            if self.user_id is None else user_settings.role)
        self.user = user_services.UserActionsInfo(self.user_id)

        self.is_super_admin = (
            current_user_services.is_current_user_super_admin())
        self.values['is_moderator'] = user_services.is_at_least_moderator(
            self.user_id)
        self.values['is_admin'] = user_services.is_admin(self.user_id)
        self.values['is_super_admin'] = self.is_super_admin

        # The JSON payload, if any, accompanying a POST/PUT request.
        if self.request.get('payload'):
            self.payload = json.loads(self.request.get('payload'))
        else:
            self.payload = None

    def dispatch(self):
        """Overrides dispatch method in webapp2 superclass.

        Raises:
            Exception: The CSRF token is missing.
            UnauthorizedUserException: The CSRF token is invalid.
        """
        # If the request is to the old demo server, redirect it permanently to
        # the new demo server.
        if self.request.uri.startswith('https://oppiaserver.appspot.com'):
            self.redirect('https://oppiatestserver.appspot.com', True)
            return

        # In DEV_MODE, clearing cookies does not log out the user, so we
        # force-clear them by redirecting to the logout URL.
        if feconf.DEV_MODE and self.partially_logged_in:
            self.redirect(users.create_logout_url(self.request.uri))
            return

        if self.payload is not None and self.REQUIRE_PAYLOAD_CSRF_CHECK:
            try:
                csrf_token = self.request.get('csrf_token')
                if not csrf_token:
                    raise Exception(
                        'Missing CSRF token. Changes were not saved. '
                        'Please report this bug.')

                is_csrf_token_valid = CsrfTokenManager.is_csrf_token_valid(
                    self.user_id, csrf_token)

                if not is_csrf_token_valid:
                    raise self.UnauthorizedUserException(
                        'Your session has expired, and unfortunately your '
                        'changes cannot be saved. Please refresh the page.')
            except Exception as e:
                logging.error(
                    '%s: payload %s',
                    e, self.payload)
                # Route the failure through the standard error-rendering path
                # instead of letting the exception propagate.
                return self.handle_exception(e, self.app.debug)

        super(BaseHandler, self).dispatch()

    def get(self, *args, **kwargs):  # pylint: disable=unused-argument
        """Base method to handle GET requests."""
        raise self.PageNotFoundException

    def post(self, *args):  # pylint: disable=unused-argument
        """Base method to handle POST requests."""
        raise self.PageNotFoundException

    def put(self, *args):  # pylint: disable=unused-argument
        """Base method to handle PUT requests."""
        raise self.PageNotFoundException

    def delete(self, *args):  # pylint: disable=unused-argument
        """Base method to handle DELETE requests."""
        raise self.PageNotFoundException

    def render_json(self, values):
        """Prepares JSON response to be sent to the client.

        Args:
            values: dict. The key-value pairs to encode in the JSON response.
        """
        # The attachment disposition and nosniff header prevent the response
        # from being interpreted as active content by browsers.
        self.response.content_type = 'application/javascript; charset=utf-8'
        self.response.headers['Content-Disposition'] = (
            'attachment; filename="oppia-attachment.txt"')
        self.response.headers['Strict-Transport-Security'] = (
            'max-age=31536000; includeSubDomains')
        self.response.headers['X-Content-Type-Options'] = 'nosniff'

        # The XSSI prefix must be stripped by the client before parsing.
        json_output = json.dumps(values, cls=utils.JSONEncoderForHTML)
        self.response.write('%s%s' % (feconf.XSSI_PREFIX, json_output))

    def render_template(
            self, filepath, iframe_restriction='DENY',
            redirect_url_on_logout=None):
        """Prepares an HTML response to be sent to the client.

        Args:
            filepath: str. The template filepath.
            iframe_restriction: str or None. Possible values are
                'DENY' and 'SAMEORIGIN':

                DENY: Strictly prevents the template to load in an iframe.
                SAMEORIGIN: The template can only be displayed in a frame
                    on the same origin as the page itself.
            redirect_url_on_logout: str or None. URL to redirect to on logout.
        """
        values = self.values

        scheme, netloc, path, _, _ = urlparse.urlsplit(self.request.uri)

        # Common template globals shared by all pages.
        values.update({
            'ASSET_DIR_PREFIX': utils.get_asset_dir_prefix(),
            'BEFORE_END_HEAD_TAG_HOOK': jinja2.utils.Markup(
                BEFORE_END_HEAD_TAG_HOOK.value),
            'DEV_MODE': feconf.DEV_MODE,
            'MINIFICATION': feconf.IS_MINIFIED,
            'DOMAIN_URL': '%s://%s' % (scheme, netloc),
            'ACTIVITY_STATUS_PRIVATE': (
                rights_manager.ACTIVITY_STATUS_PRIVATE),
            'ACTIVITY_STATUS_PUBLIC': (
                rights_manager.ACTIVITY_STATUS_PUBLIC),
            'GCS_RESOURCE_BUCKET_NAME': (
                app_identity_services.get_gcs_resource_bucket_name()),
            # The 'path' variable starts with a forward slash.
            'FULL_URL': '%s://%s%s' % (scheme, netloc, path),
            'INVALID_NAME_CHARS': feconf.INVALID_NAME_CHARS,
            'SITE_FEEDBACK_FORM_URL': feconf.SITE_FEEDBACK_FORM_URL,
            'SITE_NAME': feconf.SITE_NAME,
            'SYSTEM_USERNAMES': feconf.SYSTEM_USERNAMES,
            'TEMPLATE_DIR_PREFIX': utils.get_template_dir_prefix(),
            'can_create_collections': bool(
                self.role == feconf.ROLE_ID_COLLECTION_EDITOR),
            'username': self.username,
            'user_is_logged_in': user_services.has_fully_registered(
                self.user_id),
            'preferred_site_language_code': self.preferred_site_language_code,
            'allow_yaml_file_upload': feconf.ALLOW_YAML_FILE_UPLOAD
        })

        if feconf.ENABLE_PROMO_BAR:
            promo_bar_enabled = config_domain.PROMO_BAR_ENABLED.value
            promo_bar_message = config_domain.PROMO_BAR_MESSAGE.value
        else:
            promo_bar_enabled = False
            promo_bar_message = ''
        values.update({
            'promo_bar_enabled': promo_bar_enabled,
            'promo_bar_message': promo_bar_message,
        })

        # Handlers may pre-populate these; only fill in defaults when absent.
        if 'status_code' not in values:
            values['status_code'] = 200

        if 'meta_name' not in values:
            values['meta_name'] = 'Personalized Online Learning from Oppia'

        if 'meta_description' not in values:
            values['meta_description'] = (
                'Oppia is a free, open-source learning platform. Join the '
                'community to create or try an exploration today!')

        # nav_mode is used as part of the GLOBALS object in the frontend, but
        # not every backend handler declares a nav_mode. Thus, the following
        # code is a failsafe to ensure that the nav_mode key is added to all
        # page requests.
        if 'nav_mode' not in values:
            values['nav_mode'] = ''

        if redirect_url_on_logout is None:
            redirect_url_on_logout = self.request.uri
        if self.user_id:
            values['login_url'] = None
            values['logout_url'] = (
                current_user_services.create_logout_url(
                    redirect_url_on_logout))
        else:
            target_url = (
                '/' if self.request.uri.endswith(feconf.SPLASH_URL)
                else self.request.uri)
            values['login_url'] = (
                current_user_services.create_login_url(target_url))
            values['logout_url'] = None

        # Create a new csrf token for inclusion in HTML responses. This assumes
        # that tokens generated in one handler will be sent back to a handler
        # with the same page name.
        values['csrf_token'] = ''

        if self.REQUIRE_PAYLOAD_CSRF_CHECK:
            values['csrf_token'] = CsrfTokenManager.create_csrf_token(
                self.user_id)

        # Prevent caching of rendered pages.
        self.response.cache_control.no_cache = True
        self.response.cache_control.must_revalidate = True
        self.response.headers['Strict-Transport-Security'] = (
            'max-age=31536000; includeSubDomains')
        self.response.headers['X-Content-Type-Options'] = 'nosniff'

        if iframe_restriction is not None:
            if iframe_restriction in ['SAMEORIGIN', 'DENY']:
                self.response.headers['X-Frame-Options'] = iframe_restriction
            else:
                raise Exception(
                    'Invalid X-Frame-Options: %s' % iframe_restriction)

        self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT'
        self.response.pragma = 'no-cache'

        self.response.write(
            self.jinja2_env.get_template(filepath).render(**values))

    def _render_exception(self, error_code, values):
        """Renders an error page, or an error JSON response.

        Args:
            error_code: int. The HTTP status code (expected to be one of
                400, 401, 404 or 500).
            values: dict. The key-value pairs to include in the response.
        """
        assert error_code in [400, 401, 404, 500]
        values['status_code'] = error_code

        # This checks if the response should be JSON or HTML.
        # For GET requests, there is no payload, so we check against
        # GET_HANDLER_ERROR_RETURN_TYPE.
        # Otherwise, we check whether self.payload exists.
        if (self.payload is not None or
                self.GET_HANDLER_ERROR_RETURN_TYPE ==
                feconf.HANDLER_TYPE_JSON):
            self.render_json(values)
        else:
            self.values.update(values)
            if 'iframed' in self.values and self.values['iframed']:
                self.render_template(
                    'pages/error/error_iframed.html', iframe_restriction=None)
            else:
                self.render_template('pages/error/error.html')

    def handle_exception(self, exception, unused_debug_mode):
        """Overwrites the default exception handler.

        Args:
            exception: The exception that was thrown.
            unused_debug_mode: bool. True if the web application is running
                in debug mode.
        """
        if isinstance(exception, self.NotLoggedInException):
            # Send unauthenticated users to the login page rather than
            # showing an error.
            self.redirect(
                current_user_services.create_login_url(self.request.uri))
            return

        logging.info(''.join(traceback.format_exception(*sys.exc_info())))
        logging.error('Exception raised: %s', exception)

        if isinstance(exception, self.PageNotFoundException):
            logging.error('Invalid URL requested: %s', self.request.uri)
            self.error(404)
            self._render_exception(404, {
                'error': 'Could not find the page %s.' % self.request.uri})
            return

        if isinstance(exception, self.UnauthorizedUserException):
            self.error(401)
            self._render_exception(401, {'error': unicode(exception)})
            return

        if isinstance(exception, self.InvalidInputException):
            self.error(400)
            self._render_exception(400, {'error': unicode(exception)})
            return

        if isinstance(exception, self.InternalErrorException):
            self.error(500)
            self._render_exception(500, {'error': unicode(exception)})
            return

        # Catch-all: any other exception type is treated as an internal error.
        self.error(500)
        self._render_exception(500, {'error': unicode(exception)})

    # Convenience aliases so handlers can raise e.g.
    # self.PageNotFoundException without importing UserFacingExceptions.
    InternalErrorException = UserFacingExceptions.InternalErrorException
    InvalidInputException = UserFacingExceptions.InvalidInputException
    NotLoggedInException = UserFacingExceptions.NotLoggedInException
    PageNotFoundException = UserFacingExceptions.PageNotFoundException
    UnauthorizedUserException = UserFacingExceptions.UnauthorizedUserException
class Error404Handler(BaseHandler):
    """Handles 404 errors."""

    # 404 responses are served for arbitrary URLs, so no CSRF token can be
    # expected in the request.
    REQUIRE_PAYLOAD_CSRF_CHECK = False
class CsrfTokenManager(object):
    """Manages page/user tokens in memcache to protect against CSRF."""

    # Max age of the token (48 hours).
    _CSRF_TOKEN_AGE_SECS = 60 * 60 * 48
    # Default user id for non-logged-in users.
    _USER_ID_DEFAULT = 'non_logged_in_user'

    @classmethod
    def init_csrf_secret(cls):
        """Verify that non-default CSRF secret exists; creates one if not."""
        # Any non-default value is fine.
        if CSRF_SECRET.value and CSRF_SECRET.value != DEFAULT_CSRF_SECRET:
            return

        # Initialize to random value.
        config_services.set_property(
            feconf.SYSTEM_COMMITTER_ID, CSRF_SECRET.name,
            base64.urlsafe_b64encode(os.urandom(20)))

    @classmethod
    def _create_token(cls, user_id, issued_on):
        """Creates a new CSRF token.

        Args:
            user_id: str. The user_id for whom the token is generated.
            issued_on: float. The timestamp at which the token was issued.

        Returns:
            str. The generated CSRF token, of the form
            '<issued_on>/<url-safe base64 HMAC digest>'.
        """
        cls.init_csrf_secret()

        # The token has 2 parts: the plain-text time of issue, and an HMAC
        # digest that binds the time of issue to the user id and the server
        # secret.
        if user_id is None:
            user_id = cls._USER_ID_DEFAULT

        # Round time to seconds.
        issued_on = long(issued_on)

        # NOTE(review): hmac.new() with no explicit digestmod defaults to
        # MD5. Changing the digest would invalidate all outstanding tokens,
        # so confirm a migration plan before upgrading it.
        digester = hmac.new(str(CSRF_SECRET.value))
        digester.update(str(user_id))
        digester.update(':')
        digester.update(str(issued_on))

        digest = digester.digest()
        token = '%s/%s' % (issued_on, base64.urlsafe_b64encode(digest))

        return token

    @classmethod
    def _get_current_time(cls):
        """Returns the current time, in seconds since the epoch."""
        return time.time()

    @classmethod
    def create_csrf_token(cls, user_id):
        """Creates a CSRF token for the given user, issued at the current
        time."""
        return cls._create_token(user_id, cls._get_current_time())

    @classmethod
    def is_csrf_token_valid(cls, user_id, token):
        """Validates a given CSRF token.

        Args:
            user_id: str. The user_id to validate the CSRF token against.
            token: str. The CSRF token to validate.

        Returns:
            bool. Whether the token is well-formed, unexpired, and matches
            the token the server would have issued to this user.
        """
        try:
            parts = token.split('/')
            if len(parts) != 2:
                return False

            issued_on = long(parts[0])
            age = cls._get_current_time() - issued_on
            if age > cls._CSRF_TOKEN_AGE_SECS:
                return False

            authentic_token = cls._create_token(user_id, issued_on)
            # Compare in constant time so that timing differences do not
            # leak information about the expected token.
            return hmac.compare_digest(str(authentic_token), str(token))
        except Exception:
            return False
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, because more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
# pytype: skip-file
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from typing import Any
from typing import Iterable
from typing import List
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import DurationTypes # pylint: disable=unused-import
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.timestamp import TimestampTypes # pylint: disable=unused-import
from apache_beam.utils.windowed_value import WindowedValue
# Public API of this module; anything not listed here is an implementation
# detail.
__all__ = [
    'TimestampCombiner',
    'WindowFn',
    'BoundedWindow',
    'IntervalWindow',
    'TimestampedValue',
    'GlobalWindow',
    'NonMergingWindowFn',
    'GlobalWindows',
    'FixedWindows',
    'SlidingWindows',
    'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
  """Determines how output timestamps of grouping operations are assigned."""
  OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
  OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
  OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
  # TODO(robertwb): Add this to the runner API or remove it.
  OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'

  @staticmethod
  def get_impl(timestamp_combiner, window_fn):
    """Returns the TimestampCombinerImpl for the given combiner constant.

    Only OUTPUT_AT_EARLIEST_TRANSFORMED consults window_fn; it is accepted
    unconditionally so that all combiners share a single call signature.
    """
    factories = {
        TimestampCombiner.OUTPUT_AT_EOW:
            lambda unused_fn: timeutil.OutputAtEndOfWindowImpl(),
        TimestampCombiner.OUTPUT_AT_EARLIEST:
            lambda unused_fn: timeutil.OutputAtEarliestInputTimestampImpl(),
        TimestampCombiner.OUTPUT_AT_LATEST:
            lambda unused_fn: timeutil.OutputAtLatestInputTimestampImpl(),
        TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
            timeutil.OutputAtEarliestTransformedInputTimestampImpl,
    }
    factory = factories.get(timestamp_combiner)
    if factory is None:
      raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
    return factory(window_fn)
class WindowFn(with_metaclass(abc.ABCMeta,
                              urns.RunnerApiFn)):  # type: ignore[misc]
  """An abstract windowing function defining a basic assign and merge."""

  class AssignContext(object):
    """Context passed to WindowFn.assign()."""

    def __init__(
        self,
        timestamp,  # type: TimestampTypes
        element=None,
        window=None):
      self.timestamp = Timestamp.of(timestamp)
      self.element = element
      self.window = window

  @abc.abstractmethod
  def assign(self, assign_context):
    # type: (AssignContext) -> Iterable[BoundedWindow]
    """Associates windows to an element.

    Arguments:
      assign_context: Instance of AssignContext.

    Returns:
      An iterable of BoundedWindow.
    """
    raise NotImplementedError

  class MergeContext(object):
    """Context passed to WindowFn.merge() to perform merging, if any."""

    def __init__(self, windows):
      # type: (Iterable[BoundedWindow]) -> None
      self.windows = list(windows)

    def merge(self, to_be_merged, merge_result):
      # Callback used by WindowFn.merge() implementations to record that the
      # windows in to_be_merged collapse into merge_result; concrete
      # subclasses provide the actual side effects.
      raise NotImplementedError

  @abc.abstractmethod
  def merge(self, merge_context):
    # type: (WindowFn.MergeContext) -> None
    """Returns a window that is the result of merging a set of windows."""
    raise NotImplementedError

  def is_merging(self):
    # type: () -> bool
    """Returns whether this WindowFn merges windows."""
    return True

  @abc.abstractmethod
  def get_window_coder(self):
    # Concrete WindowFns must supply the coder used to serialize their
    # window type.
    raise NotImplementedError

  def get_transformed_output_time(self, window, input_timestamp):  # pylint: disable=unused-argument
    """Given input time and output window, returns output time for window.

    If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
    Windowing, the output timestamp for the given window will be the earliest
    of the timestamps returned by get_transformed_output_time() for elements
    of the window.

    Arguments:
      window: Output window of element.
      input_timestamp: Input timestamp of element as a timeutil.Timestamp
        object.

    Returns:
      Transformed timestamp.
    """
    # By default, just return the input timestamp.
    return input_timestamp

  # Register a default URN so any WindowFn subclass can be serialized by
  # pickling unless it registers something more specific.
  urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
  """A window for timestamps in range (-infinity, end).

  Attributes:
    end: End of window.
  """

  def __init__(self, end):
    # type: (TimestampTypes) -> None
    self._end = Timestamp.of(end)

  @property
  def start(self):
    # type: () -> Timestamp
    # Subclasses with a finite start (e.g. IntervalWindow) override this.
    raise NotImplementedError

  @property
  def end(self):
    # type: () -> Timestamp
    return self._end

  def max_timestamp(self):
    # The window is half-open, so the largest timestamp it contains is the
    # one immediately preceding end.
    return self.end.predecessor()

  def __eq__(self, other):
    # Equality is subclass-specific; concrete windows must define it
    # together with __hash__.
    raise NotImplementedError

  def __ne__(self, other):
    # Order first by endpoint, then arbitrarily
    return self.end != other.end or hash(self) != hash(other)

  def __lt__(self, other):
    # Hashes are only consulted to break ties between equal endpoints, so
    # subclasses lacking __hash__ can still be ordered by end alone.
    if self.end != other.end:
      return self.end < other.end
    return hash(self) < hash(other)

  def __le__(self, other):
    if self.end != other.end:
      return self.end <= other.end
    return hash(self) <= hash(other)

  def __gt__(self, other):
    if self.end != other.end:
      return self.end > other.end
    return hash(self) > hash(other)

  def __ge__(self, other):
    if self.end != other.end:
      return self.end >= other.end
    return hash(self) >= hash(other)

  def __hash__(self):
    raise NotImplementedError

  def __repr__(self):
    return '[?, %s)' % float(self.end)
@total_ordering
class IntervalWindow(windowed_value._IntervalWindowBase, BoundedWindow):
    """A window for timestamps in range [start, end).

    Attributes:
      start: Start of window as seconds since Unix epoch.
      end: End of window as seconds since Unix epoch.
    """
    def __lt__(self, other):
        # Order by end timestamp, breaking ties arbitrarily via hash so
        # @total_ordering can derive the remaining comparisons.
        if self.end != other.end:
            return self.end < other.end
        return hash(self) < hash(other)

    def intersects(self, other):
        """Return True if this window overlaps *other*.

        Two half-open intervals [a, b) and [c, d) overlap iff c < b AND
        a < d.  The previous implementation joined the conditions with
        `or`, which is satisfiable by every pair of non-empty windows and
        therefore always returned True; `and` matches the Java SDK's
        IntervalWindow.intersects().
        """
        return other.start < self.end and self.start < other.end

    def union(self, other):
        """Return the smallest window spanning both self and *other*."""
        return IntervalWindow(
            min(self.start, other.start), max(self.end, other.end))
@total_ordering
class TimestampedValue(object):
    """A value paired with an event-time timestamp.

    Attributes:
      value: The underlying value.
      timestamp: Timestamp associated with the value as seconds since Unix
        epoch.
    """
    def __init__(self, value, timestamp):
        # type: (Any, TimestampTypes) -> None
        self.value = value
        self.timestamp = Timestamp.of(timestamp)

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.value == other.value and self.timestamp == other.timestamp

    def __hash__(self):
        return hash((self.value, self.timestamp))

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        # Different concrete types order by type name; same types order
        # lexicographically by (value, timestamp).
        if type(self) != type(other):
            return type(self).__name__ < type(other).__name__
        return (self.value, self.timestamp) < (other.value, other.timestamp)
class GlobalWindow(BoundedWindow):
    """The default window into which all data is placed (via GlobalWindows)."""
    _instance = None

    def __new__(cls):
        # Singleton: every construction yields the one shared instance.
        if cls._instance is None:
            cls._instance = super(GlobalWindow, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        super(GlobalWindow, self).__init__(GlobalWindow._getTimestampFromProto())

    def __repr__(self):
        return 'GlobalWindow'

    def __hash__(self):
        return hash(type(self))

    def __eq__(self, other):
        # Global windows are always and only equal to each other.
        return self is other or type(self) is type(other)

    def __ne__(self, other):
        return not self == other

    @property
    def start(self):
        # type: () -> Timestamp
        return MIN_TIMESTAMP

    @staticmethod
    def _getTimestampFromProto():
        # The window's end is the runner-API constant for the global
        # window's maximum timestamp (expressed in milliseconds).
        millis = int(
            common_urns.constants.GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS.constant)
        return Timestamp(micros=millis * 1000)
class NonMergingWindowFn(WindowFn):
    """Base class for window functions that never merge windows."""

    def is_merging(self):
        return False

    def merge(self, merge_context):
        # type: (WindowFn.MergeContext) -> None
        pass  # No merging.
class GlobalWindows(NonMergingWindowFn):
    """A windowing function that assigns everything to one global window."""

    @classmethod
    def windowed_value(
        cls,
        value,
        timestamp=MIN_TIMESTAMP,
        pane_info=windowed_value.PANE_INFO_UNKNOWN):
        """Wrap *value* in a WindowedValue belonging to the global window."""
        return WindowedValue(value, timestamp, (GlobalWindow(), ), pane_info)

    def assign(self, assign_context):
        # Every element lands in the single global window.
        return [GlobalWindow()]

    def get_window_coder(self):
        return coders.GlobalWindowCoder()

    def __hash__(self):
        return hash(type(self))

    def __eq__(self, other):
        # All GlobalWindows instances are interchangeable and equal.
        return self is other or type(self) is type(other)

    def __ne__(self, other):
        return not self == other

    def to_runner_api_parameter(self, context):
        return common_urns.global_windows.urn, None

    @staticmethod
    @urns.RunnerApiFn.register_urn(common_urns.global_windows.urn, None)
    def from_runner_api_parameter(unused_fn_parameter, unused_context):
        return GlobalWindows()
class FixedWindows(NonMergingWindowFn):
    """A windowing function that assigns each element to one time interval.

    The attributes size and offset determine in what time interval a timestamp
    will be slotted. The time intervals have the following formula:
    [N * size + offset, (N + 1) * size + offset)

    Attributes:
      size: Size of the window as seconds.
      offset: Offset of this window as seconds. Windows start at
        t=N * size + offset where t=0 is the UNIX epoch. The offset must be a
        value in range [0, size). If it is not it will be normalized to this
        range.
    """
    def __init__(
        self,
        size,  # type: DurationTypes
        offset=0  # type: TimestampTypes
    ):
        """Initialize a ``FixedWindows`` function for a given size and offset.

        Args:
          size (int): Size of the window in seconds.
          offset(int): Offset of this window as seconds. Windows start at
            t=N * size + offset where t=0 is the UNIX epoch. The offset must
            be a value in range [0, size). If it is not it will be
            normalized to this range.

        Raises:
          ValueError: If size is not strictly positive.
        """
        if size <= 0:
            raise ValueError('The size parameter must be strictly positive.')
        self.size = Duration.of(size)
        # The modulo normalizes any offset into [0, size).
        self.offset = Timestamp.of(offset) % self.size

    def assign(self, context):
        # Snap the element's timestamp down to the start of its window.
        timestamp = context.timestamp
        start = timestamp - (timestamp - self.offset) % self.size
        return [IntervalWindow(start, start + self.size)]

    def get_window_coder(self):
        return coders.IntervalWindowCoder()

    def __eq__(self, other):
        if type(self) == type(other) == FixedWindows:
            return self.size == other.size and self.offset == other.offset
        # Previously this fell off the end and returned None for foreign
        # types; NotImplemented is the conventional signal and lets Python
        # try the reflected comparison (still falsy overall).
        return NotImplemented

    def __hash__(self):
        return hash((self.size, self.offset))

    def __ne__(self, other):
        return not self == other

    def to_runner_api_parameter(self, context):
        return (
            common_urns.fixed_windows.urn,
            standard_window_fns_pb2.FixedWindowsPayload(
                size=proto_utils.from_micros(
                    duration_pb2.Duration, self.size.micros),
                offset=proto_utils.from_micros(
                    timestamp_pb2.Timestamp, self.offset.micros)))

    @staticmethod
    @urns.RunnerApiFn.register_urn(
        common_urns.fixed_windows.urn,
        standard_window_fns_pb2.FixedWindowsPayload)
    def from_runner_api_parameter(fn_parameter, unused_context):
        return FixedWindows(
            size=Duration(micros=fn_parameter.size.ToMicroseconds()),
            offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()))
class SlidingWindows(NonMergingWindowFn):
    """A windowing function that assigns each element to a set of sliding windows.

    The attributes size and offset determine in what time interval a timestamp
    will be slotted. The time intervals have the following formula:
    [N * period + offset, N * period + offset + size)

    Attributes:
      size: Size of the window as seconds.
      period: Period of the windows as seconds.
      offset: Offset of this window as seconds since Unix epoch. Windows start
        at t=N * period + offset where t=0 is the epoch. The offset must be a
        value in range [0, period). If it is not it will be normalized to
        this range.
    """
    def __init__(self,
                 size,  # type: DurationTypes
                 period,  # type: DurationTypes
                 offset=0,  # type: TimestampTypes
                 ):
        if size <= 0:
            raise ValueError('The size parameter must be strictly positive.')
        self.size = Duration.of(size)
        self.period = Duration.of(period)
        # The modulo normalizes any offset into [0, period).
        self.offset = Timestamp.of(offset) % period

    def assign(self, context):
        # Enumerate every window containing the timestamp, walking
        # backwards one period at a time from the latest window start.
        timestamp = context.timestamp
        start = timestamp - ((timestamp - self.offset) % self.period)
        return [
            IntervalWindow(Timestamp(micros=s), Timestamp(micros=s) + self.size)
            for s in range(
                start.micros,
                timestamp.micros - self.size.micros,
                -self.period.micros)
        ]

    def get_window_coder(self):
        return coders.IntervalWindowCoder()

    def __eq__(self, other):
        if type(self) == type(other) == SlidingWindows:
            return (
                self.size == other.size and self.offset == other.offset and
                self.period == other.period)
        # Previously this fell off the end and returned None for foreign
        # types; NotImplemented is the conventional signal.
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Include size so the hash covers every field __eq__ compares
        # (previously only offset and period were hashed; equal objects
        # still hashed equal, but this is tighter and consistent with
        # FixedWindows).
        return hash((self.size, self.offset, self.period))

    def to_runner_api_parameter(self, context):
        return (
            common_urns.sliding_windows.urn,
            standard_window_fns_pb2.SlidingWindowsPayload(
                size=proto_utils.from_micros(
                    duration_pb2.Duration, self.size.micros),
                offset=proto_utils.from_micros(
                    timestamp_pb2.Timestamp, self.offset.micros),
                period=proto_utils.from_micros(
                    duration_pb2.Duration, self.period.micros)))

    @staticmethod
    @urns.RunnerApiFn.register_urn(
        common_urns.sliding_windows.urn,
        standard_window_fns_pb2.SlidingWindowsPayload)
    def from_runner_api_parameter(fn_parameter, unused_context):
        return SlidingWindows(
            size=Duration(micros=fn_parameter.size.ToMicroseconds()),
            offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()),
            period=Duration(micros=fn_parameter.period.ToMicroseconds()))
class Sessions(WindowFn):
    """A windowing function that groups elements into sessions.

    A session is defined as a series of consecutive events
    separated by a specified gap size.

    Attributes:
      gap_size: Size of the gap between windows as floating-point seconds.
    """
    def __init__(self, gap_size):
        # type: (DurationTypes) -> None
        if gap_size <= 0:
            raise ValueError('The size parameter must be strictly positive.')
        self.gap_size = Duration.of(gap_size)

    def assign(self, context):
        # Each element starts in its own proto-session; merge() later
        # coalesces overlapping proto-sessions into real sessions.
        timestamp = context.timestamp
        return [IntervalWindow(timestamp, timestamp + self.gap_size)]

    def get_window_coder(self):
        return coders.IntervalWindowCoder()

    def merge(self, merge_context):
        # type: (WindowFn.MergeContext) -> None
        """Coalesce overlapping windows into maximal session windows."""
        to_merge = []  # type: List[BoundedWindow]
        end = MIN_TIMESTAMP
        # Sweep windows in start order; 'to_merge' accumulates the current
        # run of overlapping windows and 'end' is the furthest end seen.
        for w in sorted(merge_context.windows, key=lambda w: w.start):
            if to_merge:
                if end > w.start:
                    # w overlaps the current run: extend the run.
                    to_merge.append(w)
                    if w.end > end:
                        end = w.end
                else:
                    # Gap reached: flush the finished run (only if it
                    # actually combined windows) and start a new run at w.
                    if len(to_merge) > 1:
                        merge_context.merge(
                            to_merge, IntervalWindow(to_merge[0].start, end))
                    to_merge = [w]
                    end = w.end
            else:
                to_merge = [w]
                end = w.end
        # Flush the trailing run.
        if len(to_merge) > 1:
            merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))

    def __eq__(self, other):
        if type(self) == type(other) == Sessions:
            return self.gap_size == other.gap_size
        # Previously this fell off the end and returned None for foreign
        # types; NotImplemented is the conventional signal.
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.gap_size)

    def to_runner_api_parameter(self, context):
        return (
            common_urns.session_windows.urn,
            standard_window_fns_pb2.SessionWindowsPayload(
                gap_size=proto_utils.from_micros(
                    duration_pb2.Duration, self.gap_size.micros)))

    @staticmethod
    @urns.RunnerApiFn.register_urn(
        common_urns.session_windows.urn,
        standard_window_fns_pb2.SessionWindowsPayload)
    def from_runner_api_parameter(fn_parameter, unused_context):
        return Sessions(
            gap_size=Duration(micros=fn_parameter.gap_size.ToMicroseconds()))
|
|
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.db.models import Q
from datetime import datetime
import re
from youbama.main.models import Video, Upload, Contact
from youbama.settings import MEDIA_ROOT
from youbama.main.helpers import *
def staticpage(request):
    """Redirect legacy static-page URLs to the home page."""
    return HttpResponseRedirect('/home')
# TODO: Look at http://blog.awarelabs.com/?p=29 for pagination
def home(request, page='1'):
    """Render the landing page: per-day popular videos plus featured picks."""
    popular_by_day_video_list = Video.objects.extra(
        select={'d': "date(add_datetime)"}).filter(
            visible=True, in_reply_to=0).order_by('-d', '-votecount')
    # Featured videos appear only on the first page.
    # NOTE(review): the featured video IDs are hard-coded here.
    if page == '1':
        featured_video_list = Video.objects.filter(
            Q(id=663) | Q(id=1279)).order_by('-id')
    else:
        featured_video_list = []
    return render_to_response('main/home.html', {
        'popular_by_day_video_list': popular_by_day_video_list,
        'featured_video_list': featured_video_list,
        'page': page,
        'type': 'home',  # used by pagination.html
    })
def popular(request, page='1'):
    """Render all visible top-level videos ordered by vote count."""
    videos = Video.objects.filter(
        visible=True, in_reply_to=0).order_by('-votecount')
    return render_to_response('main/popular.html', {
        'popular_video_list': videos,
        'page': page,
        'type': 'popular',  # used by pagination.html
    })
def newest(request, page='1'):
    """Render all visible top-level videos, most recently added first."""
    videos = Video.objects.filter(
        visible=True, in_reply_to=0).order_by('-add_datetime')
    return render_to_response('main/newest.html', {
        'newest_video_list': videos,
        'page': page,
        'type': 'newest',  # used by pagination.html
    })
def search(request, query='', page='1'):
    """Render videos whose title or description matches *query*.

    The query may arrive in the URL (pagination links) or as POST data from
    the search form; POST data takes precedence.
    """
    if request.method == 'POST':
        # .get() avoids a MultiValueDictKeyError (HTTP 500) when the form
        # is submitted without a 'query' field.
        query = request.POST.get('query', '')
    if query == '':
        search_video_list = []
    else:
        search_video_list = Video.objects.filter(
            Q(title__icontains=query) | Q(description__icontains=query),
            visible=True).order_by('-votecount')
    return render_to_response('main/search.html', {
        'search_video_list': search_video_list,
        'query': query,
        'page': page,
        'type': 'search/' + query,  # used by pagination.html
    })
def replies(request, id):
    """Render the visible replies to video *id*, oldest first."""
    videos = Video.objects.filter(
        visible=True, in_reply_to=id).order_by('add_datetime')
    return render_to_response('main/replies.html', {
        'replies_list': videos
    })
def about(request):
    """Render the static 'about' page."""
    return render_to_response('main/about.html', {})
def upload(request, in_reply_to=0):
    """Show the upload form (GET) or accept a new video submission (POST).

    Args:
        request: The Django request.
        in_reply_to: ID of the video being replied to, or 0 for a new
            top-level video. May arrive as a string captured from the URL.
    """
    # The URL pattern captures this as a string; under Python 2 the original
    # 'str > int' comparison happened to be truthy for any captured string.
    # Normalizing to int makes the comparison explicit and portable.
    in_reply_to = int(in_reply_to)
    video = ''
    if in_reply_to > 0:
        video = get_object_or_404(Video, id=in_reply_to)
    if request.method == 'GET':
        return render_to_response('main/upload.html', {
            'video': video,
        })
    elif request.method == 'POST':
        email = sanitize(request.POST['email'])
        # Reject malformed e-mail addresses before doing any work.
        if is_valid_email(email) == False:
            return render_to_response('main/upload.html', {
                'video': video,
                'error_message': "Invalid e-mail address",
            })
        # A direct file upload takes precedence over a YouTube URL.
        if 'file' in request.FILES:
            return handle_file_upload(request, email, in_reply_to, video)
        return handle_youtube_upload(request, email, in_reply_to, video)
    # Any other HTTP method: bounce back to the front page.
    return HttpResponseRedirect('/')
def handle_file_upload(request, email, in_reply_to, video):
    """Persist an uploaded video file and create its pending Video record.

    Returns either the upload form rendered with an error message, or the
    'uploaded' confirmation page. The video stays invisible until the
    owner validates it by e-mail.
    """
    # TODO: sanitize filename
    # Renamed from 'file' -- it shadowed the builtin.
    # NOTE(review): this uses the pre-1.0 Django upload dict API
    # (file['filename'] / file['content']) -- confirm against the Django
    # version actually in use.
    upload_file = request.FILES['file']
    # Random prefix avoids collisions between identically named uploads.
    filename = random_string(10) + "_" + upload_file['filename']
    if len(filename) > 255:
        return render_to_response('main/upload.html', {
            'video': video,
            'error_message': "Filename is too long",
        })
    title = sanitize(request.POST['title'])
    desc = sanitize(request.POST['description'])
    if desc == "" or title == "":
        return render_to_response('main/upload.html', {
            'video': video,
            'error_message': "Title and description cannot be empty",
        })
    # 'with' guarantees the handle is closed even if the write raises
    # (the original leaked the descriptor on error).
    with open('%s/videos/%s' % (MEDIA_ROOT, filename), 'wb') as fd:
        fd.write(upload_file['content'])
    c, created = Contact.objects.get_or_create(
        email=email, defaults={'validated': False})
    # TODO: verify uniqueness
    validation_hash = random_string(26)  # renamed: 'hash' shadows a builtin
    v = Video.objects.create(
        user_id='1',
        contact=c,
        type='1',
        add_datetime=datetime.today(),
        title=title,
        short_description=desc[0:130],
        description=desc,
        duration_seconds=0,
        duration_string="00:00:00",
        viewcount=1,
        votecount=0,
        reportcount=0,
        in_reply_to=in_reply_to,
        replycount=0,
        validation_hash=validation_hash,
        visible=False,
        youtube_id=0)
    Upload.objects.create(
        video_id=v.id,
        file=filename,
        uploaded=False,
        processed=False,
        validated=False)
    sendmail(title, email, validation_hash)
    return render_to_response('main/uploaded.html', {
        'video': video,
    })
def handle_youtube_upload(request, email, in_reply_to, video):
    """Register an existing YouTube video and create its pending Video record.

    Extracts the YouTube ID from the submitted URL, rejects duplicates and
    unknown videos, then stores the video hidden until e-mail validation.
    """
    youtubeurl = request.POST['youtubeurl']
    # Extract the YouTube video ID (the v= query parameter) from the URL.
    idrule = re.compile('v=([A-Za-z0-9_-]+)')
    r = idrule.search(youtubeurl)
    if r is None:  # was '== None'
        return render_to_response('main/upload.html', {
            'video': video,
            'error_message': "Invalid YouTube URL",
        })
    youtube_id = r.group(1)
    # Reject videos that were already submitted.
    if len(Video.objects.filter(youtube_id=youtube_id)) > 0:
        return render_to_response('main/upload.html', {
            'video': video,
            'error_message': "This video already exists on the site",
        })
    data = get_video_info(youtube_id)
    if data is None:  # was '== None'
        return render_to_response('main/upload.html', {
            'video': video,
            'error_message': "Video not found",
        })
    c, created = Contact.objects.get_or_create(
        email=email, defaults={'validated': False})
    desc = data['description']
    # TODO: verify uniqueness
    validation_hash = random_string(26)  # renamed: 'hash' shadows a builtin
    Video.objects.create(
        user_id='1',
        contact=c,
        type='2',
        add_datetime=datetime.today(),
        title=data['title'],
        short_description=desc[0:130],
        description=desc,
        duration_seconds=data['duration_seconds'],
        duration_string=data['duration_string'],
        viewcount=data['viewcount'],
        votecount=0,
        reportcount=0,
        in_reply_to=in_reply_to,
        replycount=0,
        validation_hash=validation_hash,
        visible=False,
        youtube_id=youtube_id)
    sendmail(data['title'], email, validation_hash)
    return render_to_response('main/uploaded.html', {
        'video': video,
    })
def voteup(request, id):
    """Increment a video's vote count and return the new total as HTML.

    NOTE(review): read-modify-write is racy under concurrent votes; an
    F()-expression update would be atomic -- confirm before changing.
    """
    video = get_object_or_404(Video, id=id)
    video.votecount += 1
    video.save()
    return HttpResponse("<em>Votes: </em>" + str(video.votecount))
def report(request, id):
    """Increment a video's abuse-report counter and acknowledge as HTML.

    NOTE(review): read-modify-write is racy under concurrent reports; an
    F()-expression update would be atomic -- confirm before changing.
    """
    video = get_object_or_404(Video, id=id)
    video.reportcount += 1
    video.save()
    return HttpResponse("<em>Reported!</em>")
# TODO: Create index on hash
def validate(request, hash):
    """Confirm a video via its e-mailed validation hash.

    Marks the owning contact as validated, then makes the video visible:
    immediately for YouTube videos; for file uploads only once the file has
    also been uploaded and processed.
    """
    video_list = Video.objects.filter(validation_hash=hash)
    if len(video_list) > 0:
        v = video_list[0]
        # Validate contact
        c = Contact.objects.get(id=v.contact_id)
        c.validated = True
        c.save()
        # If this is an uploaded video:
        # NOTE(review): handle_file_upload stores type='1' (a string) but
        # this compares against int 1 -- works only if the model field
        # coerces to int; confirm against the Video model definition.
        if v.type == 1:
            upload_list = Upload.objects.filter(video=v)
            if len(upload_list) > 0:
                u = upload_list[0]
                u.validated = True
                u.save()
                # Visibility requires upload AND processing to be complete.
                if (u.uploaded == True and u.processed == True):
                    v.visible = True
                    v.save()
                    if (v.in_reply_to > 0):
                        update_num_replies(v.in_reply_to)
        # If this is a youtube video:
        elif v.type == 2:
            v.visible = True
            v.save()
            if (v.in_reply_to > 0):
                update_num_replies(v.in_reply_to)
        # NOTE(review): error_message is assigned but never used.
        error_message = ''
        return render_to_response('main/validated.html', {
            'video' : v,
        })
    else:
        return render_to_response('main/validation_failed.html', {
        })
|
|
# -*- coding: utf-8 -*-
import networkx as nx
import matplotlib.pyplot as plt
from networkx.utils import powerlaw_sequence
import operator
import random
import csv
import copy
import subprocess, os
import time
import UtilityFunctions as UF
import NetworkModels as NM
import math
import sys
import numpy as np
import strutral_controllability as LBSC # short for Liu & Barabasi Structural Controllability
def RandomNumber01():
    """Return a uniform random number in the open interval (0, 1).

    random.random() returns values in [0, 1); resample until the value is
    bounded away from zero so the open interval is respected.
    """
    while True:
        r = random.random()
        # random.random() is never negative, so the original
        # math.fabs(r) wrapper was redundant.
        if r > 1E-8:
            return r
def set_random_weight(G):
    """Attach a random 'EdgeWeight' attribute in (0, 1) to every edge of G.

    NOTE(review): uses the networkx 1.x API (edges_iter and the
    set_edge_attributes(G, name, values) argument order).
    """
    weights = {}
    for edge in G.edges_iter():
        weights[edge] = RandomNumber01()
    nx.set_edge_attributes(G, 'EdgeWeight', weights)
    return G
def low_middle_high_degree_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high-degree nodes.

    Nodes are sorted by degree and split into three roughly equal tiers;
    for each tier the fraction of its members that are driver nodes is
    returned as a (low, middle, high) tuple.
    """
    if not nx.is_directed(G):
        raise nx.NetworkXError("control_nodes() is not defined for undirected graphs.")
    drivers = LBSC.control_nodes(G)
    degree = nx.degree(G)
    temp_degree = sorted(degree.items(), key=operator.itemgetter(1))
    my_degree = [x for (x, deg) in temp_degree]
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = len(my_degree) // 3
    two_third = len(my_degree) * 2 // 3
    last_remain = len(my_degree) - two_third
    low_degree = set(my_degree[0:third])
    middle_degree = set(my_degree[third:two_third])
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    # A driver in neither the low nor the middle tier is in the high tier.
    for node in drivers:
        if node in low_degree:
            low_cnt += 1
        elif node in middle_degree:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_degree)
    middle_ratio = float(middle_cnt) / len(middle_degree)
    high_ratio = float(high_cnt) / (last_remain)
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_in_degree_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high- in-degree nodes.

    Returns a (low, middle, high) tuple of the fraction of driver nodes in
    each in-degree tier.
    """
    all_in_degrees = nx.in_degree_centrality(G)
    sorted_all_in_degrees = sorted(all_in_degrees.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_in_degrees]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_out_degree_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high- out-degree nodes.

    Returns a (low, middle, high) tuple of the fraction of driver nodes in
    each out-degree tier.
    """
    all_out_degrees = nx.out_degree_centrality(G)
    sorted_all_out_degrees = sorted(all_out_degrees.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_out_degrees]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_betweenness_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high-betweenness nodes.

    Uses edge weights when the graph carries an 'EdgeWeight' attribute.
    Returns a (low, middle, high) tuple of the fraction of driver nodes in
    each betweenness tier.
    """
    if not nx.get_edge_attributes(G, 'EdgeWeight'):
        all_bets = nx.betweenness_centrality(G)
    else:
        all_bets = nx.betweenness_centrality(G, weight='EdgeWeight')
    sorted_all_bets = sorted(all_bets.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_bets]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_closeness_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high-closeness nodes.

    Uses edge weights as distances when the graph carries an 'EdgeWeight'
    attribute. Returns a (low, middle, high) tuple of the fraction of
    driver nodes in each closeness tier.
    """
    if not nx.get_edge_attributes(G, 'EdgeWeight'):
        all_clos = nx.closeness_centrality(G)
    else:
        all_clos = nx.closeness_centrality(G, distance='EdgeWeight')
    sorted_all_clos = sorted(all_clos.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_clos]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_eigenvector_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high-eigenvector nodes.

    Eigenvector centrality is defined as: x_i = 1/\\lambda \\sum_{j}(A_{ij}x_j)
    Uses edge weights when the graph carries an 'EdgeWeight' attribute.
    Returns a (low, middle, high) tuple of the fraction of driver nodes in
    each tier.
    """
    if not nx.get_edge_attributes(G, 'EdgeWeight'):
        all_eigvs = nx.eigenvector_centrality(G, max_iter=1000)
    else:
        all_eigvs = nx.eigenvector_centrality(G, max_iter=1000, weight='EdgeWeight')
    sorted_all_eigvs = sorted(all_eigvs.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_eigvs]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_kats_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high-Katz nodes.

    Note: Katz centrality is a generalization of eigenvector centrality,
    defined as: x_i = \\alpha \\sum_{j}(A_{ij}x_j) + \\beta
    Uses edge weights when the graph carries an 'EdgeWeight' attribute.
    Returns a (low, middle, high) tuple of the fraction of driver nodes in
    each tier.
    """
    if not nx.get_edge_attributes(G, 'EdgeWeight'):
        all_kats = nx.katz.katz_centrality(G)
    else:
        all_kats = nx.katz.katz_centrality(G, weight='EdgeWeight')
    sorted_all_kats = sorted(all_kats.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_kats]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
def low_middle_high_pagerank_ratio(G):
    """Return the ratio of driver nodes in low-, middle-, high-pagerank nodes.

    Uses edge weights when the graph carries an 'EdgeWeight' attribute.
    Returns a (low, middle, high) tuple of the fraction of driver nodes in
    each PageRank tier.
    """
    if not nx.get_edge_attributes(G, 'EdgeWeight'):
        all_pageranks = nx.pagerank_alg.pagerank_numpy(G)
    else:
        all_pageranks = nx.pagerank_alg.pagerank_numpy(G, weight='EdgeWeight')
    sorted_all_pageranks = sorted(all_pageranks.items(), key=operator.itemgetter(1))
    sorted_nodes = [k for (k, v) in sorted_all_pageranks]
    n = nx.number_of_nodes(G)
    # Floor division: plain '/' yields a float under Python 3, which would
    # break the slice bounds below (result identical under Python 2).
    third = n // 3
    two_third = n * 2 // 3
    last = n - two_third
    low_nodes = set(sorted_nodes[0:third])
    middle_nodes = set(sorted_nodes[third:two_third])
    drivers = LBSC.control_nodes(G)
    low_cnt = 0
    middle_cnt = 0
    high_cnt = 0
    for node in drivers:
        if node in low_nodes:
            low_cnt += 1
        elif node in middle_nodes:
            middle_cnt += 1
        else:
            high_cnt += 1
    low_ratio = float(low_cnt) / len(low_nodes)
    middle_ratio = float(middle_cnt) / len(middle_nodes)
    high_ratio = float(high_cnt) / last
    return (low_ratio, middle_ratio, high_ratio)
if __name__ == "__main__":
    # Experiment driver (Python 2: note the print statements).  Averages the
    # (low, middle, high) driver-node ratios over RunCnt randomly generated
    # directed networks; exactly one network model and one ratio variant are
    # left uncommented per experiment.
    n = 1000       # number of nodes
    p = 0.2        # rewiring/attachment probability
    k = 6          # nearest neighbours in the ring lattice
    RunCnt = 50    # independent runs to average over
    tot1 = 0.0
    tot2 = 0.0
    tot3 = 0.0
    for i in range(RunCnt):
        print "current run count:\t",i+1
        #G = NM.directed_erdos_renyi_network(n, p, seed=i+1)
        #G = NM.directed_watts_strogatz_graph(n,k,p, seed=i+1)
        G = NM.directed_newman_watts_strogatz_graph(n,k,p,seed=i+1)
        #G = NM.directed_barabasi_albert_graph(n, 6, seed=i+1)
        print 'average degree:\t', UF.average_degree(G)
        set_random_weight(G)
        # Alternative ratio variants kept for reference:
        #(temp1, temp2, temp3) = low_middle_high_degree_ratio(G)
        #(temp1, temp2, temp3) = low_middle_high_betweenness_ratio(G)
        #(temp1, temp2, temp3) = low_middle_high_closeness_ratio(G)
        #(temp1, temp2, temp3) = low_middle_high_eigenvector_ratio(G)
        #(temp1, temp2, temp3) = low_middle_high_kats_ratio(G)
        #(temp1, temp2, temp3) = low_middle_high_pagerank_ratio(G)
        #(temp1, temp2, temp3) = low_middle_high_in_degree_ratio(G)
        (temp1, temp2, temp3) = low_middle_high_out_degree_ratio(G)
        tot1 += temp1
        tot2 += temp2
        tot3 += temp3
    # Report the tier averages over all runs.
    tot1 = tot1 / RunCnt
    tot2 = tot2 / RunCnt
    tot3 = tot3 / RunCnt
    print 'low ratio:\t', tot1
    print 'middle ratio:\t', tot2
    print 'high ratio:\t', tot3
#
#drivers = LBSC.control_nodes(G)
#total_avg_degree = sum(nx.degree(G).values()) / float(n);
#driver_avg_degree = sum(nx.degree(G, nbunch=drivers).values()) / float(n)
#print 'total average degree: ', total_avg_degree
#print 'driver average degree: ', driver_avg_degree
#total_avg_neighbor_degree = sum(nx.average_neighbor_degree(G).values()) / float(n)
#drivers_avg_neighbor_degree = sum(nx.average_neighbor_degree(G, nodes=drivers).values())/float(n)
#print 'total average neighbor degree: ', total_avg_neighbor_degree
#print 'drivers average neighbor degree:', drivers_avg_neighbor_degree
#
#X = []
#Y1 = []
#Y2 = []
#cnt = 0
#nbr_degree = nx.average_neighbor_degree(G, nodes=drivers)
#print 'Number of driver nodes: ', len(drivers)
#for node in drivers:
# X.append(node)
# Y1.append(G.degree(node))
# Y2.append(nbr_degree[node])
# if nbr_degree[node] > G.degree(node):
# cnt += 1
#print 'cnt = ', cnt
#print 'percent: ', float(cnt) * 100 / len(drivers), '%'
#Line1, = plt.plot(X, Y1, 'bo', label='degree (driver node)')
#Line2, = plt.plot(X, Y2, 'rs', label='neighbor degree (driver node)')
#plt.legend(handles=[Line1, Line2])
#plt.xlabel('driver node ID')
#plt.ylabel('degree')
#plt.show()
#degree = nx.degree(G)
#betweenness = nx.betweenness.betweenness_centrality(G, weight='EdgeWeight')
#closeness = nx.closeness.closeness_centrality(G, distance='EdgeWeight')
#
#Y_driver_deg = []
#Y_driver_bet = []
#Y_driver_clo = []
#for node in drivers:
# Y_driver_deg.append(degree[node])
# Y_driver_bet.append(betweenness[node])
# Y_driver_clo.append(closeness[node])
#plt.plot(Y_driver_deg, Y_driver_bet, 'ro')
#plt.title('betweenness-degree correlationship (driver nodes)')
#plt.xlabel('degree')
#plt.ylabel('betweenness')
#plt.show()
#plt.plot(Y_driver_deg, Y_driver_clo, 'ro')
#plt.title('closeness-degree correlationship (driver nodes)')
#plt.xlabel('degree')
#plt.ylabel('closeness')
#plt.show()
#X2 = []
#driver_bet = []
#driver_clo = []
#for node in drivers:
# X2.append(node)
# driver_bet.append(betweenness[node])
# driver_clo.append(closeness[node])
#plt.figure(100)
#plt.plot(X2, 'ro')
#plt.title('driver nodes')
#plt.show()
#driver_bet.sort()
#plt.figure(2)
#plt.plot(driver_bet, 'bo')
#plt.xlabel('driver nodes')
#plt.ylabel('betweenness')
#plt.title('driver nodes betweenness')
#plt.show()
#allNodes_bet = []
#allNodes_bet = betweenness.values()
#allNodes_bet.sort()
#plt.plot(allNodes_bet, 'ks')
#plt.title('all nodes betweenness')
#plt.show()
#driver_clo.sort()
#plt.figure(3)
#plt.plot(driver_clo, 'bo')
#plt.xlabel('driver nodes')
#plt.ylabel('closeness')
#plt.show()
#allNodes_clo = []
#allNodes_clo = closeness.values()
#allNodes_clo.sort()
#plt.plot(allNodes_clo, 'ks')
#plt.title('all nodes closeness')
#plt.show()
|
|
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
from robot.errors import DataError
from robot.variables import is_var
from robot.output import LOGGER
from robot import utils
from robot.writer import DataFileWriter
from .comments import Comment
from .populators import FromFilePopulator, FromDirectoryPopulator
from .settings import (Documentation, Fixture, Timeout, Tags, Metadata, Library,
Resource, Variables, Arguments, Return, Template, MetadataList, ImportList)
def TestData(parent=None, source=None, include_suites=None,
             warn_on_skipped=False):
    """Parses a file or directory to a corresponding model object.

    :param parent: (optional) parent to be used in creation of the model object.
    :param source: path where test data is read from.
    :param include_suites: (optional) names of suites to include when parsing
        a directory.
    :param warn_on_skipped: whether to warn about files that are skipped.
    :returns: :class:`~.model.TestDataDirectory` if `source` is a directory,
        :class:`~.model.TestCaseFile` otherwise.
    """
    if os.path.isdir(source):
        directory = TestDataDirectory(parent, source)
        return directory.populate(include_suites, warn_on_skipped)
    return TestCaseFile(parent, source).populate()
class _TestData(object):
    """Common functionality for parsed test case and resource file models.

    Concrete subclasses must create the ``setting_table``, ``variable_table``,
    ``testcase_table`` and ``keyword_table`` attributes *before* calling this
    class's ``__init__``, since the table-name registry is built from them.
    """
    # Accepted table header aliases; matched case- and space-insensitively
    # via utils.NormalizedDict in __init__.
    _setting_table_names = 'Setting', 'Settings', 'Metadata'
    _variable_table_names = 'Variable', 'Variables'
    _testcase_table_names = 'Test Case', 'Test Cases'
    _keyword_table_names = 'Keyword', 'Keywords', 'User Keyword', 'User Keywords'

    def __init__(self, parent=None, source=None):
        self.parent = parent
        self.source = utils.abspath(source) if source else None
        self.children = []
        self._tables = utils.NormalizedDict(self._get_tables())

    def _get_tables(self):
        # Yield an (alias, table) pair for every recognized header name.
        for names, table in [(self._setting_table_names, self.setting_table),
                             (self._variable_table_names, self.variable_table),
                             (self._testcase_table_names, self.testcase_table),
                             (self._keyword_table_names, self.keyword_table)]:
            for name in names:
                yield name, table

    def start_table(self, header_row):
        """Return the table matching *header_row*, or None if unrecognized
        or not allowed in this data file type."""
        try:
            table = self._tables[header_row[0]]
        except (KeyError, IndexError):
            return None
        if not self._table_is_allowed(table):
            return None
        table.set_header(header_row)
        return table

    @property
    def name(self):
        # Suite name derived from the source file/directory basename.
        return self._format_name(self._get_basename()) if self.source else None

    def _get_basename(self):
        return os.path.splitext(os.path.basename(self.source))[0]

    def _format_name(self, name):
        name = self._strip_possible_prefix_from_name(name)
        name = name.replace('_', ' ').strip()
        # Title-case only all-lower-case names to preserve explicit casing.
        return name.title() if name.islower() else name

    def _strip_possible_prefix_from_name(self, name):
        # Drop an ordering prefix such as '01__' from the name.
        return name.split('__', 1)[-1]

    @property
    def keywords(self):
        return self.keyword_table.keywords

    @property
    def imports(self):
        return self.setting_table.imports

    def report_invalid_syntax(self, message, level='ERROR'):
        # Directories report against their init file when one exists.
        initfile = getattr(self, 'initfile', None)
        path = os.path.join(self.source, initfile) if initfile else self.source
        LOGGER.write("Error in file '%s': %s" % (path, message), level)

    def save(self, **options):
        """Writes this datafile to disk.

        :param options: Configuration for writing. These are passed to
            :py:class:`~robot.writer.datafilewriter.WritingContext` as
            keyword arguments.

        See also :py:class:`robot.writer.datafilewriter.DataFileWriter`
        """
        return DataFileWriter(**options).write(self)
class TestCaseFile(_TestData):
    """The parsed test case file object.

    :param parent: parent object to be used in creation of the model object.
    :param source: path where test data is read from.
    """

    def __init__(self, parent=None, source=None):
        self.directory = os.path.dirname(source) if source else None
        self.setting_table = TestCaseFileSettingTable(self)
        self.variable_table = VariableTable(self)
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        # The base initializer needs the tables above to build its lookup.
        _TestData.__init__(self, parent, source)

    def populate(self):
        """Parses ``self.source`` into this model object and returns self."""
        FromFilePopulator(self).populate(self.source)
        self._validate()
        return self

    def _validate(self):
        # A test case file without a test case table is invalid data.
        if not self.testcase_table.is_started():
            raise DataError('File has no test case table.')

    def _table_is_allowed(self, table):
        # Test case files may contain all four table types.
        return True

    def has_tests(self):
        return True

    def __iter__(self):
        for table in [self.setting_table, self.variable_table,
                      self.testcase_table, self.keyword_table]:
            yield table
class ResourceFile(_TestData):
    """The parsed resource file object.

    :param source: path where resource file is read from.
    """

    def __init__(self, source=None):
        self.directory = os.path.dirname(source) if source else None
        self.setting_table = ResourceFileSettingTable(self)
        self.variable_table = VariableTable(self)
        # The test case table exists only so its (illegal) use can be
        # detected and reported in _table_is_allowed.
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, source=source)

    def populate(self):
        """Parses ``self.source`` into this model object and returns self."""
        FromFilePopulator(self).populate(self.source)
        self._report_status()
        return self

    def _report_status(self):
        # A completely empty resource file is worth a warning.
        if self.setting_table or self.variable_table or self.keyword_table:
            LOGGER.info("Imported resource file '%s' (%d keywords)."
                        % (self.source, len(self.keyword_table.keywords)))
        else:
            LOGGER.warn("Imported resource file '%s' is empty." % self.source)

    def _table_is_allowed(self, table):
        if table is self.testcase_table:
            raise DataError("Resource file '%s' contains a test case table "
                            "which is not allowed." % self.source)
        return True

    def __iter__(self):
        for table in [self.setting_table, self.variable_table, self.keyword_table]:
            yield table
class TestDataDirectory(_TestData):
    """The parsed test data directory object. Contains hierarchical structure
    of other :py:class:`.TestDataDirectory` and :py:class:`.TestCaseFile`
    objects.

    :param parent: parent object to be used in creation of the model object.
    :param source: path where test data is read from.
    """

    def __init__(self, parent=None, source=None):
        self.directory = source
        self.initfile = None
        self.setting_table = InitFileSettingTable(self)
        self.variable_table = VariableTable(self)
        # Exists only so a test case table inside an init file can be
        # detected and reported in _table_is_allowed.
        self.testcase_table = TestCaseTable(self)
        self.keyword_table = KeywordTable(self)
        _TestData.__init__(self, parent, source)

    def populate(self, include_suites=None, warn_on_skipped=False, recurse=True):
        """Parses the directory (optionally recursively) and returns self."""
        FromDirectoryPopulator().populate(self.source, self, include_suites,
                                          warn_on_skipped, recurse)
        # Drop children that contain no tests at all.
        self.children = [ch for ch in self.children if ch.has_tests()]
        return self

    def _get_basename(self):
        # Directories have no extension to strip.
        return os.path.basename(self.source)

    def _table_is_allowed(self, table):
        if table is self.testcase_table:
            LOGGER.error("Test suite init file in '%s' contains a test case "
                         "table which is not allowed." % self.source)
            return False
        return True

    def add_child(self, path, include_suites):
        self.children.append(TestData(parent=self, source=path,
                                      include_suites=include_suites))

    def has_tests(self):
        return any(ch.has_tests() for ch in self.children)

    def __iter__(self):
        for table in [self.setting_table, self.variable_table, self.keyword_table]:
            yield table
class _Table(object):
    """Base class for the tables of a parsed data file."""

    def __init__(self, parent):
        self.parent = parent
        self._header = None

    def set_header(self, header):
        self._header = self._prune_old_style_headers(header)

    def _prune_old_style_headers(self, header):
        # Old-style headers repeat column names ('Value'/'Action'/'Arg');
        # for those, keep only the table name itself.
        if len(header) < 3:
            return header
        if self._old_header_matcher.match(header):
            return [header[0]]
        return header

    @property
    def header(self):
        # Synthesize a default header, e.g. 'Settings', when none was parsed.
        return self._header or [self.type.title() + 's']

    @property
    def name(self):
        return self.header[0]

    @property
    def source(self):
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def report_invalid_syntax(self, message, level='ERROR'):
        self.parent.report_invalid_syntax(message, level)

    def __nonzero__(self):
        # A table is truthy once it has a header or any content.
        return bool(self._header or len(self))

    def __len__(self):
        return sum(1 for item in self)
class _WithSettings(object):
    """Mixin providing lookup of setting populators via a ``_setters`` map
    that concrete subclasses define."""

    def get_setter(self, setting_name):
        """Return the populator for ``setting_name``, or report an error."""
        key = self.normalize(setting_name)
        try:
            factory = self._setters[key]
        except KeyError:
            self.report_invalid_syntax("Non-existing setting '%s'." % setting_name)
            return None
        return factory(self)

    def is_setting(self, setting_name):
        """True if ``setting_name`` normalizes to a known setting."""
        return self.normalize(setting_name) in self._setters

    def normalize(self, setting):
        """Normalize a setting name and drop a single trailing colon."""
        normalized = utils.normalize(setting)
        if normalized and normalized.endswith(':'):
            normalized = normalized[:-1]
        return normalized
class _SettingTable(_Table, _WithSettings):
    """Base class for setting tables; holds the file level settings."""
    type = 'setting'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.doc = Documentation('Documentation', self)
        self.suite_setup = Fixture('Suite Setup', self)
        self.suite_teardown = Fixture('Suite Teardown', self)
        self.test_setup = Fixture('Test Setup', self)
        self.test_teardown = Fixture('Test Teardown', self)
        self.force_tags = Tags('Force Tags', self)
        self.default_tags = Tags('Default Tags', self)
        self.test_template = Template('Test Template', self)
        self.test_timeout = Timeout('Test Timeout', self)
        self.metadata = MetadataList(self)
        self.imports = ImportList(self)

    @property
    def _old_header_matcher(self):
        return OldStyleSettingAndVariableTableHeaderMatcher()

    def add_metadata(self, name, value='', comment=None):
        """Adds a metadata entry and returns it."""
        self.metadata.add(Metadata(self, name, value, comment))
        return self.metadata[-1]

    def add_library(self, name, args=None, comment=None):
        """Adds a library import and returns it."""
        self.imports.add(Library(self, name, args, comment=comment))
        return self.imports[-1]

    def add_resource(self, name, invalid_args=None, comment=None):
        """Adds a resource import and returns it."""
        self.imports.add(Resource(self, name, invalid_args, comment=comment))
        return self.imports[-1]

    def add_variables(self, name, args=None, comment=None):
        """Adds a variable file import and returns it."""
        self.imports.add(Variables(self, name, args, comment=comment))
        return self.imports[-1]

    def __len__(self):
        # Only settings that have actually been set are counted.
        return sum(1 for setting in self if setting.is_set())
class TestCaseFileSettingTable(_SettingTable):
    """Setting table of a test case file; supports all suite level settings."""

    # Maps normalized setting names (including aliases such as 'document'
    # and '*precondition'/'*postcondition') to populator factories.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'suitesetup': lambda s: s.suite_setup.populate,
                'suiteprecondition': lambda s: s.suite_setup.populate,
                'suiteteardown': lambda s: s.suite_teardown.populate,
                'suitepostcondition': lambda s: s.suite_teardown.populate,
                'testsetup': lambda s: s.test_setup.populate,
                'testprecondition': lambda s: s.test_setup.populate,
                'testteardown': lambda s: s.test_teardown.populate,
                'testpostcondition': lambda s: s.test_teardown.populate,
                'forcetags': lambda s: s.force_tags.populate,
                'defaulttags': lambda s: s.default_tags.populate,
                'testtemplate': lambda s: s.test_template.populate,
                'testtimeout': lambda s: s.test_timeout.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables,
                'metadata': lambda s: s.metadata.populate}

    def __iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.default_tags, self.test_template, self.test_timeout] \
                        + self.metadata.data + self.imports.data:
            yield setting
class ResourceFileSettingTable(_SettingTable):
    """Setting table of a resource file: documentation and imports only."""

    # Resource files support only documentation and import settings.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables}

    def __iter__(self):
        return iter([self.doc] + self.imports.data)
class InitFileSettingTable(_SettingTable):
    """Setting table of a test suite directory init file."""

    # Init files support suite level settings but not default tags or a
    # test template.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'suitesetup': lambda s: s.suite_setup.populate,
                'suiteprecondition': lambda s: s.suite_setup.populate,
                'suiteteardown': lambda s: s.suite_teardown.populate,
                'suitepostcondition': lambda s: s.suite_teardown.populate,
                'testsetup': lambda s: s.test_setup.populate,
                'testprecondition': lambda s: s.test_setup.populate,
                'testteardown': lambda s: s.test_teardown.populate,
                'testpostcondition': lambda s: s.test_teardown.populate,
                'testtimeout': lambda s: s.test_timeout.populate,
                'forcetags': lambda s: s.force_tags.populate,
                'library': lambda s: s.imports.populate_library,
                'resource': lambda s: s.imports.populate_resource,
                'variables': lambda s: s.imports.populate_variables,
                'metadata': lambda s: s.metadata.populate}

    def __iter__(self):
        for setting in [self.doc, self.suite_setup, self.suite_teardown,
                        self.test_setup, self.test_teardown, self.force_tags,
                        self.test_timeout] + self.metadata.data + self.imports.data:
            yield setting
class VariableTable(_Table):
    """Table holding the variables defined in one data file."""
    type = 'variable'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.variables = []

    @property
    def _old_header_matcher(self):
        return OldStyleSettingAndVariableTableHeaderMatcher()

    def add(self, name, value, comment=None):
        """Appends a new :class:`Variable` built from the given data."""
        variable = Variable(self, name, value, comment)
        self.variables.append(variable)

    def __iter__(self):
        return iter(self.variables)
class TestCaseTable(_Table):
    """Table holding the test cases of one data file."""
    type = 'test case'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.tests = []

    @property
    def _old_header_matcher(self):
        return OldStyleTestAndKeywordTableHeaderMatcher()

    def add(self, name):
        """Creates, stores and returns a new :class:`TestCase`."""
        test = TestCase(self, name)
        self.tests.append(test)
        return test

    def __iter__(self):
        return iter(self.tests)

    def is_started(self):
        # Started once a header row has been seen.
        return bool(self._header)

    def __nonzero__(self):
        # A test case table always counts as present, even when empty.
        return True
class KeywordTable(_Table):
    """Table holding the user keywords of one data file."""
    type = 'keyword'

    def __init__(self, parent):
        _Table.__init__(self, parent)
        self.keywords = []

    @property
    def _old_header_matcher(self):
        return OldStyleTestAndKeywordTableHeaderMatcher()

    def add(self, name):
        """Creates, stores and returns a new :class:`UserKeyword`."""
        keyword = UserKeyword(self, name)
        self.keywords.append(keyword)
        return keyword

    def __iter__(self):
        return iter(self.keywords)
class Variable(object):
    """A single variable definition in a variable table."""

    def __init__(self, parent, name, value, comment=None):
        self.parent = parent
        self.name = name.rstrip('= ')
        if name.startswith('$') and value == []:
            # A scalar variable with no value items gets an empty string.
            value = ''
        if isinstance(value, basestring):
            value = [value]  # Must support scalar lists until RF 2.8 (issue 939)
        self.value = value
        self.comment = Comment(comment)

    def as_list(self):
        if not self.has_data():
            return self.comment.as_list()
        return [self.name] + self.value + self.comment.as_list()

    def is_set(self):
        return True

    def is_for_loop(self):
        return False

    def has_data(self):
        # Data exists when there is a name or any non-empty value item.
        return bool(self.name or ''.join(self.value))

    def __nonzero__(self):
        return self.has_data()

    def report_invalid_syntax(self, message, level='ERROR'):
        self.parent.report_invalid_syntax(
            "Setting variable '%s' failed: %s" % (self.name, message), level)
class _WithSteps(object):
    """Mixin for model items that contain a list of steps."""

    def add_step(self, content, comment=None):
        """Creates a :class:`Step`, appends it to ``self.steps``, returns it."""
        step = Step(content, comment)
        self.steps.append(step)
        return step

    def copy(self, name):
        """Returns a deep copy of this item registered under a new name."""
        duplicate = copy.deepcopy(self)
        duplicate.name = name
        self._add_to_parent(duplicate)
        return duplicate
class TestCase(_WithSteps, _WithSettings):
    """A single test case with its settings and steps."""

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.template = Template('[Template]', self)
        self.tags = Tags('[Tags]', self)
        self.setup = Fixture('[Setup]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.steps = []

    # Maps normalized setting names (and their aliases) to populators.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'template': lambda s: s.template.populate,
                'setup': lambda s: s.setup.populate,
                'precondition': lambda s: s.setup.populate,
                'teardown': lambda s: s.teardown.populate,
                'postcondition': lambda s: s.teardown.populate,
                'tags': lambda s: s.tags.populate,
                'timeout': lambda s: s.timeout.populate}

    @property
    def source(self):
        return self.parent.source

    @property
    def directory(self):
        return self.parent.directory

    def add_for_loop(self, declaration, comment=None):
        """Adds a :class:`ForLoop` step and returns it."""
        self.steps.append(ForLoop(declaration, comment))
        return self.steps[-1]

    def report_invalid_syntax(self, message, level='ERROR'):
        # Shared with UserKeyword; report using the correct item type.
        type_ = 'test case' if type(self) is TestCase else 'keyword'
        message = "Invalid syntax in %s '%s': %s" % (type_, self.name, message)
        self.parent.report_invalid_syntax(message, level)

    def _add_to_parent(self, test):
        self.parent.tests.append(test)

    @property
    def settings(self):
        return [self.doc, self.tags, self.setup, self.template, self.timeout,
                self.teardown]

    def __iter__(self):
        # Teardown is intentionally yielded after the steps.
        for element in [self.doc, self.tags, self.setup,
                        self.template, self.timeout] \
                        + self.steps + [self.teardown]:
            yield element
class UserKeyword(TestCase):
    """A user keyword; shares step and setting handling with TestCase."""

    def __init__(self, parent, name):
        # Deliberately does not call TestCase.__init__: keywords have a
        # different set of settings (arguments/return instead of tags etc.).
        self.parent = parent
        self.name = name
        self.doc = Documentation('[Documentation]', self)
        self.args = Arguments('[Arguments]', self)
        self.return_ = Return('[Return]', self)
        self.timeout = Timeout('[Timeout]', self)
        self.teardown = Fixture('[Teardown]', self)
        self.steps = []

    # Maps normalized setting names (and aliases) to populators.
    _setters = {'documentation': lambda s: s.doc.populate,
                'document': lambda s: s.doc.populate,
                'arguments': lambda s: s.args.populate,
                'return': lambda s: s.return_.populate,
                'timeout': lambda s: s.timeout.populate,
                'teardown': lambda s: s.teardown.populate}

    def _add_to_parent(self, test):
        self.parent.keywords.append(test)

    @property
    def settings(self):
        return [self.args, self.doc, self.timeout, self.teardown, self.return_]

    def __iter__(self):
        # Teardown and return are yielded after the steps.
        for element in [self.args, self.doc, self.timeout] \
                        + self.steps + [self.teardown, self.return_]:
            yield element
class ForLoop(_WithSteps):
    """A for-loop construct in a test case or user keyword.

    ``range`` is True for ``IN RANGE`` loops, ``vars`` holds the loop
    variables and ``items`` the iterated values (or range arguments).
    """

    def __init__(self, declaration, comment=None):
        self.range, index = self._get_range_and_index(declaration)
        self.vars = declaration[:index]
        self.items = declaration[index+1:]
        self.comment = Comment(comment)
        self.steps = []

    def _get_range_and_index(self, declaration):
        # Locate the IN / IN RANGE separator; everything before it is the
        # loop variables, everything after it the iterated values.
        for position, token in enumerate(declaration):
            marker = token.upper().replace(' ', '')
            if marker in ('IN', 'INRANGE'):
                return marker == 'INRANGE', position
        return False, len(declaration)

    def is_comment(self):
        return False

    def is_for_loop(self):
        return True

    def as_list(self, indent=False, include_comment=True):
        separator = ['IN RANGE'] if self.range else ['IN']
        trailing = self.comment.as_list() if include_comment else []
        return [': FOR'] + self.vars + separator + self.items + trailing

    def __iter__(self):
        return iter(self.steps)

    def is_set(self):
        return True
class Step(object):
    """A single keyword call, possibly with assigned return variables."""

    def __init__(self, content, comment=None):
        self.assign = list(self._get_assigned_vars(content))
        remainder = content[len(self.assign):]
        self.keyword = remainder[0] if remainder else None
        self.args = remainder[1:]
        self.comment = Comment(comment)

    def _get_assigned_vars(self, content):
        # Leading items that look like variables (optionally ending '=')
        # are assignment targets; stop at the first non-variable item.
        for item in content:
            if not is_var(item.rstrip('= ')):
                return
            yield item

    def is_comment(self):
        # A step with no assignment, keyword or arguments is comment-only.
        return not (self.assign or self.keyword or self.args)

    def is_for_loop(self):
        return False

    def is_set(self):
        return True

    def as_list(self, indent=False, include_comment=True):
        keyword_part = [] if self.keyword is None else [self.keyword]
        comment_part = self.comment.as_list() if include_comment else []
        result = self.assign + keyword_part + self.args + comment_part
        if indent:
            result.insert(0, '')
        return result
class OldStyleSettingAndVariableTableHeaderMatcher(object):
    """Matches old-style setting/variable table headers.

    An old-style header has 'Value' (case-insensitively) in every column
    after the first, e.g. ``['Settings', 'Value', 'Value']``.
    """

    def match(self, header):
        """Return True if every column after the first equals 'Value'."""
        # The comparison already yields a bool; the original wrapped it in a
        # redundant `True if ... else False` conditional.
        return all(e.lower() == 'value' for e in header[1:])
class OldStyleTestAndKeywordTableHeaderMatcher(object):
    """Matches old-style test case / keyword table headers.

    An old-style header has 'Action' (case-insensitively) as the second
    column and only columns starting with 'Arg' after it, e.g.
    ``['Test Case', 'Action', 'Arg', 'Argument']``.
    """

    def match(self, header):
        """Return True if the header follows the old 'Action'/'Arg*' style.

        Callers (see ``_prune_old_style_headers``) only invoke this with
        headers of at least three columns, so ``header[1]`` is safe.
        """
        if header[1].lower() != 'action':
            return False
        # Replaces the original manual loop with the equivalent all().
        return all(h.lower().startswith('arg') for h in header[2:])
|
|
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
from DistributedNPCToonBase import *
from toontown.chat.ChatGlobals import *
from toontown.hood import ZoneUtil
from toontown.nametag.NametagGlobals import *
from toontown.quest import QuestChoiceGui
from toontown.quest import QuestParser
from toontown.quest import TrackChoiceGui
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TeaserPanel
# Seconds the quest/track choice GUI stays up before timing out.
ChoiceTimeout = 20
class DistributedNPCSpecialQuestGiver(DistributedNPCToonBase):
    """Client-side NPC that plays quest assign/complete/choice movies.

    NOTE(review): this module is Python 2 code; announceGenerate uses the
    deprecated backquote repr on self.posIndex while the warning below uses
    str() -- equivalent for ints, but worth normalizing when migrating.
    """

    def __init__(self, cr):
        DistributedNPCToonBase.__init__(self, cr)
        # Movie/GUI state; all torn down in cleanupMovie().
        self.curQuestMovie = None
        self.questChoiceGui = None
        self.trackChoiceGui = None
        self.cr = cr

    def announceGenerate(self):
        """Parents the NPC to its origin node once generated on the client."""
        self.setAnimState('neutral', 0.9, None, None)
        npcOrigin = self.cr.playGame.hood.loader.geom.find('**/npc_origin_' + `(self.posIndex)`)
        if not npcOrigin.isEmpty():
            self.reparentTo(npcOrigin)
            self.clearMat()
        else:
            self.notify.warning('announceGenerate: Could not find npc_origin_' + str(self.posIndex))
        DistributedNPCToonBase.announceGenerate(self)
        messenger.send('doneTutorialSetup')

    def delayDelete(self):
        DistributedNPCToonBase.delayDelete(self)
        # Finish and dispose of any running quest movie before deletion.
        if self.curQuestMovie:
            curQuestMovie = self.curQuestMovie
            self.curQuestMovie = None
            curQuestMovie.timeout(fFinish=1)
            curQuestMovie.cleanup()

    def disable(self):
        self.cleanupMovie()
        DistributedNPCToonBase.disable(self)

    def cleanupMovie(self):
        """Tears down chat, choice GUIs and any running quest movie."""
        self.clearChat()
        self.ignore('chooseQuest')
        if self.questChoiceGui:
            self.questChoiceGui.destroy()
            self.questChoiceGui = None
        self.ignore(self.uniqueName('doneChatPage'))
        if self.curQuestMovie:
            self.curQuestMovie.timeout(fFinish=1)
            self.curQuestMovie.cleanup()
            self.curQuestMovie = None
        if self.trackChoiceGui:
            self.trackChoiceGui.destroy()
            self.trackChoiceGui = None

    def allowedToTalk(self):
        return True

    def handleCollisionSphereEnter(self, collEntry):
        """An avatar walked into the NPC: enter quest state, notify the AI."""
        base.cr.playGame.getPlace().fsm.request('quest', [self])
        self.sendUpdate('avatarEnter', [])
        # Draw the nametag on top while interacting.
        self.nametag3d.setDepthTest(0)
        self.nametag3d.setBin('fixed', 0)

    def handleOkTeaser(self):
        self.dialog.destroy()
        del self.dialog
        place = base.cr.playGame.getPlace()
        if place:
            place.fsm.request('walk')

    def finishMovie(self, av, isLocalToon, elapsedTime):
        """Restores avatar, camera and nametag state after a movie ends."""
        self.cleanupMovie()
        av.startLookAround()
        self.startLookAround()
        self.detectAvatars()
        self.clearMat()
        if isLocalToon:
            self.showNametag2d()
            taskMgr.remove(self.uniqueName('lerpCamera'))
            base.localAvatar.posCamera(0, 0)
            base.cr.playGame.getPlace().setState('walk')
            self.sendUpdate('setMovieDone', [])
        self.nametag3d.clearDepthTest()
        self.nametag3d.clearBin()

    def setupCamera(self, mode):
        """Frames the NPC; choice movies use the mirrored camera angle."""
        base.camera.wrtReparentTo(render)
        if mode == NPCToons.QUEST_MOVIE_QUEST_CHOICE or mode == NPCToons.QUEST_MOVIE_TRACK_CHOICE:
            base.camera.posQuatInterval(1, (5, 9, self.getHeight() - 0.5), (155, -2, 0), other=self, blendType='easeOut').start()
        else:
            base.camera.posQuatInterval(1, (-5, 9, self.getHeight() - 0.5), (-150, -2, 0), other=self, blendType='easeOut').start()

    def setMovie(self, mode, npcId, avId, quests, timestamp):
        """Distributed update driving the quest interaction for ``mode``.

        ``mode`` is one of the NPCToons.QUEST_MOVIE_* constants; ``quests``
        carries the mode-specific payload (quest/reward ids or tracks).
        """
        isLocalToon = avId == base.localAvatar.doId
        if mode == NPCToons.QUEST_MOVIE_CLEAR:
            self.cleanupMovie()
            return
        if mode == NPCToons.QUEST_MOVIE_TIMEOUT:
            self.cleanupMovie()
            if isLocalToon:
                self.freeAvatar()
            self.setPageNumber(0, -1)
            self.clearChat()
            self.startLookAround()
            self.detectAvatars()
            return
        av = base.cr.doId2do.get(avId)
        if av is None:
            self.notify.warning('Avatar %d not found in doId' % avId)
            return
        if mode == NPCToons.QUEST_MOVIE_REJECT:
            rejectString = Quests.chooseQuestDialogReject()
            rejectString = Quests.fillInQuestNames(rejectString, avName=av.name)
            self.setChatAbsolute(rejectString, CFSpeech | CFTimeout)
            if isLocalToon:
                base.localAvatar.posCamera(0, 0)
                base.cr.playGame.getPlace().setState('walk')
            return
        if mode == NPCToons.QUEST_MOVIE_TIER_NOT_DONE:
            rejectString = Quests.chooseQuestDialogTierNotDone()
            rejectString = Quests.fillInQuestNames(rejectString, avName=av.name)
            self.setChatAbsolute(rejectString, CFSpeech | CFTimeout)
            if isLocalToon:
                base.localAvatar.posCamera(0, 0)
                base.cr.playGame.getPlace().setState('walk')
            return
        self.setupAvatars(av)
        fullString = ''
        toNpcId = None
        if isLocalToon:
            self.hideNametag2d()
        if mode == NPCToons.QUEST_MOVIE_COMPLETE:
            questId, rewardId, toNpcId = quests
            scriptId = 'quest_complete_' + str(questId)
            # A scripted movie, when defined, replaces the generic dialog.
            if QuestParser.questDefined(scriptId):
                self.curQuestMovie = QuestParser.NPCMoviePlayer(scriptId, av, self)
                self.curQuestMovie.play()
                return
            if isLocalToon:
                self.setupCamera(mode)
            greetingString = Quests.chooseQuestDialog(questId, Quests.GREETING)
            if greetingString:
                fullString += greetingString + '\x07'
            fullString += Quests.chooseQuestDialog(questId, Quests.COMPLETE) + '\x07'
            if rewardId:
                fullString += Quests.getReward(rewardId).getString()
            leavingString = Quests.chooseQuestDialog(questId, Quests.LEAVING)
            if leavingString:
                fullString += '\x07' + leavingString
        elif mode == NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL:
            fullString = TTLocalizer.QuestMovieQuestChoiceCancel
        elif mode == NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL:
            fullString = TTLocalizer.QuestMovieTrackChoiceCancel
        elif mode == NPCToons.QUEST_MOVIE_INCOMPLETE:
            questId, completeStatus, toNpcId = quests
            scriptId = 'quest_incomplete_' + str(questId)
            if QuestParser.questDefined(scriptId):
                # Replace any movie already running before starting this one.
                if self.curQuestMovie:
                    self.curQuestMovie.timeout()
                    self.curQuestMovie.cleanup()
                    self.curQuestMovie = None
                self.curQuestMovie = QuestParser.NPCMoviePlayer(scriptId, av, self)
                self.curQuestMovie.play()
                return
            if isLocalToon:
                self.setupCamera(mode)
            greetingString = Quests.chooseQuestDialog(questId, Quests.GREETING)
            if greetingString:
                fullString += greetingString + '\x07'
            fullString += Quests.chooseQuestDialog(questId, completeStatus)
            leavingString = Quests.chooseQuestDialog(questId, Quests.LEAVING)
            if leavingString:
                fullString += '\x07' + leavingString
        elif mode == NPCToons.QUEST_MOVIE_ASSIGN:
            questId, rewardId, toNpcId = quests
            scriptId = 'quest_assign_' + str(questId)
            if QuestParser.questDefined(scriptId):
                if self.curQuestMovie:
                    self.curQuestMovie.timeout()
                    self.curQuestMovie.cleanup()
                    self.curQuestMovie = None
                self.curQuestMovie = QuestParser.NPCMoviePlayer(scriptId, av, self)
                self.curQuestMovie.play()
                return
            if isLocalToon:
                self.setupCamera(mode)
            fullString += Quests.chooseQuestDialog(questId, Quests.QUEST)
            leavingString = Quests.chooseQuestDialog(questId, Quests.LEAVING)
            if leavingString:
                fullString += '\x07' + leavingString
        elif mode == NPCToons.QUEST_MOVIE_QUEST_CHOICE:
            if isLocalToon:
                self.setupCamera(mode)
            self.setChatAbsolute(TTLocalizer.QuestMovieQuestChoice, CFSpeech)
            if isLocalToon:
                # Only the local toon gets the interactive choice GUI.
                self.acceptOnce('chooseQuest', self.sendChooseQuest)
                self.questChoiceGui = QuestChoiceGui.QuestChoiceGui()
                self.questChoiceGui.setQuests(quests, npcId, ChoiceTimeout)
            return
        elif mode == NPCToons.QUEST_MOVIE_TRACK_CHOICE:
            if isLocalToon:
                self.setupCamera(mode)
            tracks = quests
            self.setChatAbsolute(TTLocalizer.QuestMovieTrackChoice, CFSpeech)
            if isLocalToon:
                self.acceptOnce('chooseTrack', self.sendChooseTrack)
                self.trackChoiceGui = TrackChoiceGui.TrackChoiceGui(tracks, ChoiceTimeout)
            return
        # Fill in avatar/NPC names and page the assembled dialog to the user.
        fullString = Quests.fillInQuestNames(fullString, avName=av.name, fromNpcId=npcId, toNpcId=toNpcId)
        self.acceptOnce(self.uniqueName('doneChatPage'), self.finishMovie, extraArgs=[av, isLocalToon])
        self.clearChat()
        self.setPageChat(avId, 0, fullString, 1)

    def sendChooseQuest(self, questId):
        """Forwards the local toon's quest choice to the AI."""
        if self.questChoiceGui:
            self.questChoiceGui.destroy()
            self.questChoiceGui = None
        self.sendUpdate('chooseQuest', [questId])

    def sendChooseTrack(self, trackId):
        """Forwards the local toon's track choice to the AI."""
        if self.trackChoiceGui:
            self.trackChoiceGui.destroy()
            self.trackChoiceGui = None
        self.sendUpdate('chooseTrack', [trackId])
|
|
from django.core.urlresolvers import resolve, reverse
import furl
from rest_framework import serializers as ser
import pytz
from modularodm import Q
from framework.auth.core import Auth, User
from website import settings
from website.files.models import FileNode
from website.project.model import Comment
from website.util import api_v2_url
from api.base.serializers import (
FileCommentRelationshipField,
format_relationship_links,
IDField,
JSONAPIListField,
JSONAPISerializer,
Link,
LinksField,
NodeFileHyperLinkField,
RelationshipField,
TypeField,
WaterbutlerLink,
)
from api.base.exceptions import Conflict
from api.base.utils import absolute_reverse
from api.base.utils import get_user_auth
class CheckoutField(ser.HyperlinkedRelatedField):
    """Relationship field for a file's ``checkout`` user.

    Accepts either the current user's id or null; anything else fails with
    the ``invalid_data`` error message.
    """
    default_error_messages = {'invalid_data': 'Checkout must be either the current user or null'}
    json_api_link = True  # serializes to a links object

    def __init__(self, **kwargs):
        kwargs['queryset'] = True
        kwargs['read_only'] = False
        kwargs['allow_null'] = True
        kwargs['lookup_field'] = 'pk'
        kwargs['lookup_url_kwarg'] = 'user_id'
        self.meta = {'id': 'user_id'}
        self.link_type = 'related'
        self.always_embed = kwargs.pop('always_embed', False)
        super(CheckoutField, self).__init__('users:user-detail', **kwargs)

    def resolve(self, resource):
        """
        Resolves the view when embedding.
        """
        embed_value = resource.stored_object.checkout.pk
        kwargs = {self.lookup_url_kwarg: embed_value}
        return resolve(
            reverse(
                self.view_name,
                kwargs=kwargs
            )
        )

    def get_queryset(self):
        # The only valid checkout target is the requesting user.
        return User.find(Q('_id', 'eq', self.context['request'].user._id))

    def get_url(self, obj, view_name, request, format):
        if obj is None:
            return {}
        return super(CheckoutField, self).get_url(obj, view_name, request, format)

    def to_internal_value(self, data):
        if data is None:
            return None
        try:
            # Valid only if the id matches a user in the (single-user) queryset.
            return next(
                user for user in
                self.get_queryset()
                if user._id == data
            )
        except StopIteration:
            self.fail('invalid_data')

    def to_representation(self, value):
        url = super(CheckoutField, self).to_representation(value)
        rel_meta = None
        if value:
            rel_meta = {'id': value._id}
        ret = format_relationship_links(related_link=url, rel_meta=rel_meta)
        return ret
class FileTagField(ser.Field):
    """Serializer field exposing a tag by its ``_id``."""

    def to_representation(self, obj):
        # Tags serialize to their _id; a missing tag serializes to None.
        return None if obj is None else obj._id

    def to_internal_value(self, data):
        # Incoming tag values are accepted verbatim.
        return data
class FileSerializer(JSONAPISerializer):
    """JSON-API serializer for files and folders stored through the OSF."""
    filterable_fields = frozenset([
        'id',
        'name',
        'node',
        'kind',
        'path',
        'materialized_path',
        'size',
        'provider',
        'last_touched',
        'tags',
    ])
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    guid = ser.SerializerMethodField(read_only=True,
                                     method_name='get_file_guid',
                                     help_text='OSF GUID for this file (if one has been assigned)')
    checkout = CheckoutField()
    name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
    kind = ser.CharField(read_only=True, help_text='Either folder or file')
    path = ser.CharField(read_only=True, help_text='The unique path used to reference this object')
    size = ser.SerializerMethodField(read_only=True, help_text='The size of this file at this version')
    provider = ser.CharField(read_only=True, help_text='The Add-on service this file originates from')
    materialized_path = ser.CharField(
        read_only=True, help_text='The Unix-style path of this object relative to the provider root')
    last_touched = ser.DateTimeField(read_only=True, help_text='The last time this file had information fetched about it via the OSF')
    date_modified = ser.SerializerMethodField(read_only=True, help_text='Timestamp when the file was last modified')
    date_created = ser.SerializerMethodField(read_only=True, help_text='Timestamp when the file was created')
    extra = ser.SerializerMethodField(read_only=True, help_text='Additional metadata about this file')
    tags = JSONAPIListField(child=FileTagField(), required=False)
    current_user_can_comment = ser.SerializerMethodField(help_text='Whether the current user is allowed to post comments')
    files = NodeFileHyperLinkField(
        related_view='nodes:node-files',
        related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
        kind='folder'
    )
    versions = NodeFileHyperLinkField(
        related_view='files:file-versions',
        related_view_kwargs={'file_id': '<_id>'},
        kind='file'
    )
    comments = FileCommentRelationshipField(related_view='nodes:node-comments',
                                            related_view_kwargs={'node_id': '<node._id>'},
                                            related_meta={'unread': 'get_unread_comments_count'},
                                            filter={'target': 'get_file_guid'}
                                            )
    node = RelationshipField(related_view='nodes:node-detail',
                             related_view_kwargs={'node_id': '<node._id>'},
                             help_text='The project that this file belongs to'
                             )
    links = LinksField({
        'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
        'move': WaterbutlerLink(),
        'upload': WaterbutlerLink(),
        'delete': WaterbutlerLink(),
        'download': WaterbutlerLink(must_be_file=True),
        'new_folder': WaterbutlerLink(must_be_folder=True, kind='folder'),
    })

    class Meta:
        type_ = 'files'

    def get_size(self, obj):
        """Size in bytes of the latest version, or None when versionless."""
        if obj.versions:
            return obj.versions[-1].size
        return None

    def get_date_modified(self, obj):
        """UTC-aware modification timestamp, or None when unknown."""
        mod_dt = None
        if obj.provider == 'osfstorage' and obj.versions:
            # Each time an osfstorage file is added or uploaded, a new version object is created with its
            # date_created equal to the time of the update. The date_modified is the modified date
            # from the backend the file is stored on. This field refers to the modified date on osfstorage,
            # so prefer to use the date_created of the latest version.
            mod_dt = obj.versions[-1].date_created
        elif obj.provider != 'osfstorage' and obj.history:
            mod_dt = obj.history[-1].get('modified', None)
        return mod_dt and mod_dt.replace(tzinfo=pytz.utc)

    def get_date_created(self, obj):
        """UTC-aware creation timestamp, or None when unknown."""
        creat_dt = None
        if obj.provider == 'osfstorage' and obj.versions:
            creat_dt = obj.versions[0].date_created
        elif obj.provider != 'osfstorage' and obj.history:
            # Non-osfstorage files don't store a created date, so instead get the modified date of the
            # earliest entry in the file history.
            creat_dt = obj.history[0].get('modified', None)
        return creat_dt and creat_dt.replace(tzinfo=pytz.utc)

    def get_extra(self, obj):
        """Waterbutler-style extra metadata (currently just content hashes)."""
        metadata = {}
        if obj.provider == 'osfstorage' and obj.versions:
            metadata = obj.versions[-1].metadata
        elif obj.provider != 'osfstorage' and obj.history:
            metadata = obj.history[-1].get('extra', {})
        extras = {}
        extras['hashes'] = {  # mimic waterbutler response
            'md5': metadata.get('md5', None),
            'sha256': metadata.get('sha256', None),
        }
        return extras

    def get_current_user_can_comment(self, obj):
        """Whether the requesting user may comment on the file's node."""
        user = self.context['request'].user
        auth = Auth(user if not user.is_anonymous() else None)
        return obj.node.can_comment(auth)

    def get_unread_comments_count(self, obj):
        """Number of file comments unread by the requesting user."""
        user = self.context['request'].user
        if user.is_anonymous():
            return 0
        return Comment.find_n_unread(user=user, node=obj.node, page='files', root_id=obj.get_guid()._id)

    def user_id(self, obj):
        # NOTE: obj is the user here, the meta field for
        # Hyperlinks is weird
        if obj:
            return obj._id
        return None

    def update(self, instance, validated_data):
        """Applies tag changes and other writable attributes, then saves."""
        assert isinstance(instance, FileNode), 'Instance must be a FileNode'
        if instance.provider != 'osfstorage' and 'tags' in validated_data:
            raise Conflict('File service provider {} does not support tags on the OSF.'.format(instance.provider))
        auth = get_user_auth(self.context['request'])
        old_tags = set([tag._id for tag in instance.tags])
        if 'tags' in validated_data:
            current_tags = set(validated_data.pop('tags', []))
        else:
            current_tags = set(old_tags)
        # Diff the tag sets instead of replacing them wholesale.
        for new_tag in (current_tags - old_tags):
            instance.add_tag(new_tag, auth=auth)
        for deleted_tag in (old_tags - current_tags):
            instance.remove_tag(deleted_tag, auth=auth)
        for attr, value in validated_data.items():
            if attr == 'checkout':
                user = self.context['request'].user
                instance.check_in_or_out(user, value)
            else:
                setattr(instance, attr, value)
        instance.save()
        return instance

    def is_valid(self, **kwargs):
        # File names may legitimately contain markup-like characters.
        return super(FileSerializer, self).is_valid(clean_html=False, **kwargs)

    def get_file_guid(self, obj):
        """The file's OSF GUID, or None when none has been assigned."""
        if obj:
            guid = obj.get_guid()
            if guid:
                return guid._id
        return None

    def get_absolute_url(self, obj):
        return api_v2_url('files/{}/'.format(obj._id))
class FileDetailSerializer(FileSerializer):
    """
    Overrides FileSerializer to make id required.
    """
    # Detail endpoints address an existing file, so the id is mandatory.
    id = IDField(source='_id', required=True)
class FileVersionSerializer(JSONAPISerializer):
    """JSON-API serializer for a single stored version of a file."""
    filterable_fields = frozenset([
        'id',
        'size',
        'identifier',
        'content_type',
    ])
    id = ser.CharField(read_only=True, source='identifier')
    size = ser.IntegerField(read_only=True, help_text='The size of this file at this version')
    # Fixed typo in the user-facing help text ('verison' -> 'version').
    content_type = ser.CharField(read_only=True, help_text='The mime type of this file at this version')
    links = LinksField({
        'self': 'self_url',
        'html': 'absolute_url'
    })

    class Meta:
        type_ = 'file_versions'

    def self_url(self, obj):
        """API URL of this version's detail endpoint."""
        return absolute_reverse('files:version-detail', kwargs={
            'version_id': obj.identifier,
            'file_id': self.context['view'].kwargs['file_id']
        })

    def absolute_url(self, obj):
        """Web (HTML) URL for viewing this version on the OSF."""
        fobj = self.context['view'].get_file()
        return furl.furl(settings.DOMAIN).set(
            path=(fobj.node._id, 'files', fobj.provider, fobj.path.lstrip('/')),
            query={fobj.version_identifier: obj.identifier}  # TODO this can probably just be changed to revision or version
        ).url

    def get_absolute_url(self, obj):
        return self.self_url(obj)
|
|
# Copyright 2021 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
import inspect
import math
from absl import logging
import tensorflow.compat.v1 as tf
from tensorflow_addons import image as image_ops
import hparams
# pylint: disable=g-long-lambda
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def policy_v0():
  """Autoaugment policy that was used in AutoAugment Paper."""
  # Each inner list is one sub-policy: a sequence of
  # (operation, probability, magnitude) tuples applied in order to an image.
  sub_policies = [
      [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
      [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
      [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
      [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
      [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
      [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
      [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
      [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
      [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
      [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
      [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
      [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
      [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
      [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
      [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
      [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)],
      [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
      [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
      [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
      [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
      [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
      [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
      [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],
      [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
      [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
  ]
  return sub_policies
def policy_vtest():
  """Autoaugment test policy for debugging."""
  # A single sub-policy of (operation, probability, magnitude) tuples,
  # applied sequentially to the image.
  return [
      [('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)],
  ]
def blend(image1, image2, factor):
  """Blend image1 and image2 using 'factor'.

  Factor can be above 0.0. A value of 0.0 means only image1 is used.
  A value of 1.0 means only image2 is used. A value between 0.0 and
  1.0 linearly interpolates the pixel values between the two images.
  A value greater than 1.0 "extrapolates" the difference between the
  two pixel values, and the result is clipped to [0, 255].

  Args:
    image1: An image Tensor of type uint8.
    image2: An image Tensor of type uint8.
    factor: A floating point value above 0.0.

  Returns:
    A blended image Tensor of type uint8.
  """
  # Degenerate endpoints: return one input unchanged.
  if factor == 0.0:
    return tf.convert_to_tensor(image1)
  if factor == 1.0:
    return tf.convert_to_tensor(image2)
  base = tf.cast(image1, tf.float32)
  target = tf.cast(image2, tf.float32)
  # Linear interpolation / extrapolation in float space.
  blended = base + factor * (target - base)
  if 0.0 < factor < 1.0:
    # Pure interpolation always stays within [0, 255]; no clip needed.
    return tf.cast(blended, tf.uint8)
  # Extrapolation can leave the valid range, so clip before casting back.
  return tf.cast(tf.clip_by_value(blended, 0.0, 255.0), tf.uint8)
def cutout(image, pad_size, replace=0):
  """Apply cutout (https://arxiv.org/abs/1708.04552) to image.

  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `img`. The pixel values filled in will be of the
  value `replace`. The location where the mask will be applied is randomly
  chosen uniformly over the whole image.

  Args:
    image: An image Tensor of type uint8.
    pad_size: Specifies how big the zero mask that will be generated is that
      is applied to the image. The mask will be of size
      (2*pad_size x 2*pad_size).
    replace: What pixel value to fill in the image in the area that has
      the cutout mask applied to it.

  Returns:
    An image Tensor that is of type uint8.
  """
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  # Sample the center location in the image where the zero mask will be applied.
  cutout_center_height = tf.random_uniform(
      shape=[], minval=0, maxval=image_height,
      dtype=tf.int32)
  cutout_center_width = tf.random_uniform(
      shape=[], minval=0, maxval=image_width,
      dtype=tf.int32)
  # Clamp the mask region to the image borders: the pads are the number of
  # rows/columns kept on each side of the cutout rectangle.
  lower_pad = tf.maximum(0, cutout_center_height - pad_size)
  upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
  left_pad = tf.maximum(0, cutout_center_width - pad_size)
  right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
  cutout_shape = [image_height - (lower_pad + upper_pad),
                  image_width - (left_pad + right_pad)]
  padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
  # Build a full-size mask that is 0 inside the cutout region, 1 elsewhere.
  mask = tf.pad(
      tf.zeros(cutout_shape, dtype=image.dtype),
      padding_dims, constant_values=1)
  mask = tf.expand_dims(mask, -1)
  # Broadcast the mask over 3 channels; assumes an RGB image.
  mask = tf.tile(mask, [1, 1, 3])
  image = tf.where(
      tf.equal(mask, 0),
      tf.ones_like(image, dtype=image.dtype) * replace,
      image)
  return image
def solarize(image, threshold=128):
  """Equivalent of PIL Solarize: invert pixels at or above `threshold`."""
  # Pixels below the threshold pass through untouched; the rest are
  # inverted (255 - value).
  inverted = 255 - image
  return tf.where(image < threshold, image, inverted)
def solarize_add(image, addition=0, threshold=128):
  """Add `addition` to every pixel below `threshold`, clipping to [0, 255].

  `addition` is expected to be in [-128, 128].
  """
  # Do the arithmetic in int64 so uint8 values cannot wrap around, then
  # clip back into byte range before casting.
  shifted = tf.cast(image, tf.int64) + addition
  shifted = tf.cast(tf.clip_by_value(shifted, 0, 255), tf.uint8)
  return tf.where(image < threshold, shifted, image)
def color(image, factor):
  """Equivalent of PIL Color."""
  # Blend between a grayscale copy (factor 0) and the original (factor 1).
  grayscale = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
  return blend(grayscale, image, factor)
def contrast(image, factor):
  """Equivalent of PIL Contrast."""
  degenerate = tf.image.rgb_to_grayscale(image)
  # Cast before calling tf.histogram.
  degenerate = tf.cast(degenerate, tf.int32)
  # Compute the grayscale histogram, then compute the mean pixel value,
  # and create a constant image size of that value. Use that as the
  # blending degenerate target of the original image.
  hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
  # NOTE(review): this is the total pixel count divided by 256, not an
  # intensity-weighted mean — it appears to be inherited from the reference
  # AutoAugment implementation; confirm before changing.
  mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
  degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
  degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
  degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
  return blend(degenerate, image, factor)
def brightness(image, factor):
  """Equivalent of PIL Brightness."""
  # Blend against an all-black image: factor 0 -> black, 1 -> original.
  black = tf.zeros_like(image)
  return blend(black, image, factor)
def posterize(image, bits):
  """Equivalent of PIL Posterize: keep only the top `bits` bits per channel."""
  num_low_bits = 8 - bits
  # Shifting right then left zeroes out the low-order bits.
  quantized = tf.bitwise.right_shift(image, num_low_bits)
  return tf.bitwise.left_shift(quantized, num_low_bits)
def rotate(image, degrees, replace):
  """Rotates the image by degrees either clockwise or counterclockwise.

  Args:
    image: An image Tensor of type uint8.
    degrees: Float, a scalar angle in degrees to rotate all images by. If
      degrees is positive the image will be rotated clockwise otherwise it will
      be rotated counterclockwise.
    replace: A one or three value 1D tensor to fill empty pixels caused by
      the rotate operation.

  Returns:
    The rotated version of image.
  """
  # Convert from degrees to radians.
  degrees_to_radians = math.pi / 180.0
  radians = degrees * degrees_to_radians
  # In practice, we should randomize the rotation degrees by flipping
  # it negatively half the time, but that's done on 'degrees' outside
  # of the function.
  # wrap() adds an extra all-ones channel so unwrap() can find the pixels
  # the rotation left empty and fill them with `replace`.
  image = image_ops.rotate(wrap(image), radians)
  return unwrap(image, replace)
def translate_x(image, pixels, replace):
  """Equivalent of PIL Translate in X dimension."""
  # Negate so that a positive `pixels` matches PIL's direction convention.
  translated = image_ops.translate(wrap(image), [-pixels, 0])
  return unwrap(translated, replace)
def translate_y(image, pixels, replace):
  """Equivalent of PIL Translate in Y dimension."""
  # Negate so that a positive `pixels` matches PIL's direction convention.
  translated = image_ops.translate(wrap(image), [0, -pixels])
  return unwrap(translated, replace)
def shear_x(image, level, replace):
  """Equivalent of PIL Shearing in X dimension."""
  # Shear parallel to the x axis as a projective transform with matrix:
  #   [1  level]
  #   [0  1   ]
  transform = [1., level, 0., 0., 1., 0., 0., 0.]
  sheared = image_ops.transform(wrap(image), transform)
  return unwrap(sheared, replace)
def shear_y(image, level, replace):
  """Equivalent of PIL Shearing in Y dimension."""
  # Shear parallel to the y axis as a projective transform with matrix:
  #   [1      0]
  #   [level  1]
  transform = [1., 0., 0., level, 1., 0., 0., 0.]
  sheared = image_ops.transform(wrap(image), transform)
  return unwrap(sheared, replace)
def autocontrast(image):
  """Implements Autocontrast function from PIL using TF ops.

  Args:
    image: A 3D uint8 tensor.

  Returns:
    The image after it has had autocontrast applied to it and will be of type
    uint8.
  """
  def scale_channel(image):
    """Scale the 2D image using the autocontrast rule."""
    # A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image.
    # to compute mins and maxes.
    lo = tf.cast(tf.reduce_min(image), tf.float32)
    hi = tf.cast(tf.reduce_max(image), tf.float32)
    # Scale the image, making the lowest value 0 and the highest value 255.
    def scale_values(im):
      scale = 255.0 / (hi - lo)
      offset = -lo * scale
      im = tf.cast(im, tf.float32) * scale + offset
      im = tf.clip_by_value(im, 0.0, 255.0)
      return tf.cast(im, tf.uint8)
    # Guard against division by zero for a constant channel (hi == lo).
    result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
    return result
  # Assumes RGB for now. Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image[:, :, 0])
  s2 = scale_channel(image[:, :, 1])
  s3 = scale_channel(image[:, :, 2])
  image = tf.stack([s1, s2, s3], 2)
  return image
def sharpness(image, factor):
  """Implements Sharpness function from PIL using TF ops.

  Blends between a smoothed (blurred) version of the image and the
  original: factor 0 is the smoothed image, factor 1 is the original,
  factor > 1 sharpens.
  """
  orig_image = image
  image = tf.cast(image, tf.float32)
  # Make image 4D for conv operation.
  image = tf.expand_dims(image, 0)
  # SMOOTH PIL Kernel.
  kernel = tf.constant(
      [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
      shape=[3, 3, 1, 1]) / 13.
  # Tile across channel dimension.
  kernel = tf.tile(kernel, [1, 1, 3, 1])
  strides = [1, 1, 1, 1]
  # 'VALID' padding shrinks the result by one pixel on each border.
  degenerate = tf.nn.depthwise_conv2d(
      image, kernel, strides, padding='VALID', rate=[1, 1])
  degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
  degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
  # For the borders of the resulting image, fill in the values of the
  # original image.
  mask = tf.ones_like(degenerate)
  padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
  padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
  result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
  # Blend the final result.
  return blend(result, orig_image, factor)
def equalize(image):
  """Implements Equalize function from PIL using TF ops."""
  def scale_channel(im, c):
    """Scale the data in the channel to implement equalize."""
    im = tf.cast(im[:, :, c], tf.int32)
    # Compute the histogram of the image channel.
    histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, filter out the nonzeros.
    nonzero = tf.where(tf.not_equal(histo, 0))
    nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
    step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
    def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalization by step.
      lut = (tf.cumsum(histo) + (step // 2)) // step
      # Shift lut, prepending with 0.
      lut = tf.concat([[0], lut[:-1]], 0)
      # Clip the counts to be in range. This is done
      # in the C code for image.point.
      return tf.clip_by_value(lut, 0, 255)
    # If step is zero, return the original image. Otherwise, build
    # lut from the full histogram and step and then index from it.
    result = tf.cond(tf.equal(step, 0),
                     lambda: im,
                     lambda: tf.gather(build_lut(histo, step), im))
    return tf.cast(result, tf.uint8)
  # Assumes RGB for now. Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image, 0)
  s2 = scale_channel(image, 1)
  s3 = scale_channel(image, 2)
  image = tf.stack([s1, s2, s3], 2)
  return image
def invert(image):
  """Inverts the image pixels (255 - value)."""
  return 255 - tf.convert_to_tensor(image)
def wrap(image):
  """Returns 'image' with an extra channel set to all 1s."""
  # The ones channel acts like an alpha mask: transforms fill empty areas
  # with zeros there, which unwrap() later detects and replaces.
  shape = tf.shape(image)
  ones_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
  return tf.concat([image, ones_channel], 2)
def unwrap(image, replace):
  """Unwraps an image produced by wrap.

  Where there is a 0 in the last channel for every spatial position,
  the rest of the three channels in that spatial dimension are grayed
  (set to 128). Operations like translate and shear on a wrapped
  Tensor will leave 0s in empty locations. Some transformations look
  at the intensity of values to do preprocessing, and we want these
  empty pixels to assume the 'average' value, rather than pure black.

  Args:
    image: A 3D Image Tensor with 4 channels.
    replace: A one or three value 1D tensor to fill empty pixels.

  Returns:
    image: A 3D image Tensor with 3 channels.
  """
  image_shape = tf.shape(image)
  # Flatten the spatial dimensions.
  flattened_image = tf.reshape(image, [-1, image_shape[2]])
  # Find all pixels where the last channel is zero.
  alpha_channel = flattened_image[:, 3]
  # Append a 1 to `replace` so the extra wrap channel stays non-zero.
  replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
  # Where they are zero, fill them in with 'replace'.
  flattened_image = tf.where(
      tf.equal(alpha_channel, 0),
      tf.ones_like(flattened_image, dtype=image.dtype) * replace,
      flattened_image)
  image = tf.reshape(flattened_image, image_shape)
  # Drop the extra wrap channel, leaving the 3 color channels.
  image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
  return image
# Registry mapping policy operation names to the TF implementations above;
# used by _parse_policy_info() to resolve ops named in policies.
NAME_TO_FUNC = {
    'AutoContrast': autocontrast,
    'Equalize': equalize,
    'Invert': invert,
    'Rotate': rotate,
    'Posterize': posterize,
    'Solarize': solarize,
    'SolarizeAdd': solarize_add,
    'Color': color,
    'Contrast': contrast,
    'Brightness': brightness,
    'Sharpness': sharpness,
    'ShearX': shear_x,
    'ShearY': shear_y,
    'TranslateX': translate_x,
    'TranslateY': translate_y,
    'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
  """With 50% prob turn the tensor negative."""
  # floor(U[0,1) + 0.5) is 0 or 1 with equal probability.
  keep_sign = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
  return tf.cond(keep_sign, lambda: tensor, lambda: -tensor)
def _rotate_level_to_arg(level):
  """Map level in [0, _MAX_LEVEL] to a rotation angle in [-30, 30] degrees."""
  degrees = (level / _MAX_LEVEL) * 30.
  return (_randomly_negate_tensor(degrees),)
def _shrink_level_to_arg(level):
  """Converts level to ratio by which we shrink the image content."""
  if level == 0:
    # Level zero means no shrinking at all.
    return (1.0,)
  # Ratio grows with level; maximum shrinking ratio is 2.9.
  ratio = 2. / (_MAX_LEVEL / level) + 0.9
  return (ratio,)
def _enhance_level_to_arg(level):
  """Map level to a PIL enhancement factor in [0.1, 1.9]."""
  factor = (level / _MAX_LEVEL) * 1.8 + 0.1
  return (factor,)
def _shear_level_to_arg(level):
  """Map level to a shear amount in [-0.3, 0.3], sign chosen at random."""
  shear = (level / _MAX_LEVEL) * 0.3
  # Flip level to negative with 50% chance.
  return (_randomly_negate_tensor(shear),)
def _translate_level_to_arg(level, translate_const):
  """Map level to a translation in [-translate_const, translate_const] px."""
  pixels = (level / _MAX_LEVEL) * float(translate_const)
  # Flip level to negative with 50% chance.
  return (_randomly_negate_tensor(pixels),)
def level_to_arg(params):
  """Build the op-name -> (level -> extra-args tuple) conversion table.

  Args:
    params: Config exposing `cutout_const` and `translate_const`, used to
      scale the Cutout and Translate magnitudes.

  Returns:
    Dict mapping each op name to a callable that converts a `level` into
    the op's positional arguments (beyond the image itself).
  """
  return {
      'AutoContrast': lambda level: (),
      'Equalize': lambda level: (),
      'Invert': lambda level: (),
      'Rotate': _rotate_level_to_arg,
      'Posterize': lambda level: (int((level/_MAX_LEVEL) * 4),),
      'Solarize': lambda level: (int((level/_MAX_LEVEL) * 256),),
      'SolarizeAdd': lambda level: (int((level/_MAX_LEVEL) * 110),),
      'Color': _enhance_level_to_arg,
      'Contrast': _enhance_level_to_arg,
      'Brightness': _enhance_level_to_arg,
      'Sharpness': _enhance_level_to_arg,
      'ShearX': _shear_level_to_arg,
      'ShearY': _shear_level_to_arg,
      'Cutout': lambda level: (int((level/_MAX_LEVEL) * params.cutout_const),),
      'TranslateX': lambda level: _translate_level_to_arg(
          level, params.translate_const),
      'TranslateY': lambda level: _translate_level_to_arg(
          level, params.translate_const),
  }
def _parse_policy_info(name, prob, level, replace_value, augmentation_params):
  """Return the function that corresponds to `name` and update `level` param.

  Args:
    name: op name, a key of NAME_TO_FUNC.
    prob: probability of applying the op.
    level: magnitude, converted to op-specific args via level_to_arg().
    replace_value: fill value passed to ops that take a `replace` argument.
    augmentation_params: Config forwarded to level_to_arg().

  Returns:
    Tuple of (function, prob, args) ready for _apply_func_with_prob().
  """
  func = NAME_TO_FUNC[name]
  args = level_to_arg(augmentation_params)[name](level)
  # Check to see if prob is passed into function. This is used for operations
  # where we alter bboxes independently.
  # pytype:disable=wrong-arg-types
  if 'prob' in inspect.getfullargspec(func)[0]:
    args = tuple([prob] + list(args))
  # pytype:enable=wrong-arg-types
  # Add in replace arg if it is required for the function that is being called.
  # pytype:disable=wrong-arg-types
  if 'replace' in inspect.getfullargspec(func)[0]:
    # Make sure replace is the final argument
    assert 'replace' == inspect.getfullargspec(func)[0][-1]
    args = tuple(list(args) + [replace_value])
  # pytype:enable=wrong-arg-types
  return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob):
  """Apply `func` to image w/ `args` as input with probability `prob`."""
  assert isinstance(args, tuple)
  # If prob is a function argument, then this randomness is being handled
  # inside the function, so make sure it is always called.
  # pytype:disable=wrong-arg-types
  if 'prob' in inspect.getfullargspec(func)[0]:
    prob = 1.0
  # pytype:enable=wrong-arg-types
  # Apply the function with probability `prob`.
  # floor(U[0,1) + prob) is 1 with probability `prob`, 0 otherwise.
  should_apply_op = tf.cast(
      tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
  augmented_image = tf.cond(
      should_apply_op,
      lambda: func(image, *args),
      lambda: image)
  return augmented_image
def select_and_apply_random_policy(policies, image):
  """Select a random policy from `policies` and apply it to `image`."""
  policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
  # Note that using tf.case instead of tf.conds would result in significantly
  # larger graphs and would even break export for some larger policies.
  # Exactly one branch matches `policy_to_select`; the rest are identity.
  for (i, policy) in enumerate(policies):
    image = tf.cond(
        tf.equal(i, policy_to_select),
        # Default-arg binding captures the current `policy` (avoids the
        # late-binding closure pitfall inside the loop).
        lambda selected_policy=policy: selected_policy(image),
        lambda: image)
  return image
def build_and_apply_nas_policy(policies, image,
                               augmentation_params):
  """Build a policy from the given policies passed in and apply to image.

  Args:
    policies: list of lists of tuples in the form `(func, prob, level)`, `func`
      is a string name of the augmentation function, `prob` is the probability
      of applying the `func` operation, `level` is the input argument for
      `func`.
    image: tf.Tensor that the resulting policy will be applied to.
    augmentation_params: Params associated with the NAS learned policy.

  Returns:
    A version of image that now has data augmentation applied to it based on
    the `policies` pass into the function.
  """
  replace_value = [128, 128, 128]
  # func is the string name of the augmentation function, prob is the
  # probability of applying the operation and level is the parameter associated
  # with the tf op.
  # tf_policies are functions that take in an image and return an augmented
  # image.
  tf_policies = []
  for policy in policies:
    tf_policy = []
    # Link string name to the correct python function and make sure the correct
    # argument is passed into that function.
    for policy_info in policy:
      policy_info = list(policy_info) + [replace_value, augmentation_params]
      tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedue
    # on image.
    def make_final_policy(tf_policy_):
      # The factory captures tf_policy_ by value, avoiding the late-binding
      # closure pitfall inside the enclosing loop.
      def final_policy(image_):
        for func, prob, args in tf_policy_:
          image_ = _apply_func_with_prob(
              func, image_, args, prob)
        return image_
      return final_policy
    tf_policies.append(make_final_policy(tf_policy))
  augmented_image = select_and_apply_random_policy(
      tf_policies, image)
  return augmented_image
def distort_image_with_autoaugment(image, augmentation_name):
  """Applies the AutoAugment policy to `image`.

  AutoAugment is from the paper: https://arxiv.org/abs/1805.09501.

  Args:
    image: `Tensor` of shape [height, width, 3] representing an image.
    augmentation_name: The name of the AutoAugment policy to use. The
      available options here are `v0` (the policy used for all of the
      results in the paper) and `test` (a single-sub-policy debug policy).

  Returns:
    The augmented version of `image`.

  Raises:
    ValueError: if `augmentation_name` is not a known policy name.
  """
  available_policies = {'v0': policy_v0,
                        'test': policy_vtest}
  if augmentation_name not in available_policies:
    raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
  policy = available_policies[augmentation_name]()
  # Params that will be used for AutoAugment.
  augmentation_params = hparams.Config(cutout_const=100, translate_const=250)
  return build_and_apply_nas_policy(policy, image, augmentation_params)
def distort_image_with_randaugment(image, num_layers, magnitude):
  """Applies the RandAugment policy to `image`.

  RandAugment is from the paper https://arxiv.org/abs/1909.13719,

  Args:
    image: `Tensor` of shape [height, width, 3] representing an image.
    num_layers: Integer, the number of augmentation transformations to apply
      sequentially to an image. Represented as (N) in the paper. Usually best
      values will be in the range [1, 3].
    magnitude: Integer, shared magnitude across all augmentation operations.
      Represented as (M) in the paper. Usually best values are in the range
      [5, 30].

  Returns:
    The augmented version of `image`.
  """
  replace_value = [128] * 3
  logging.info('Using RandAug.')
  augmentation_params = hparams.Config(cutout_const=40, translate_const=100)
  available_ops = [
      'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize',
      'Solarize', 'Color', 'Contrast', 'Brightness', 'Sharpness',
      'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd']
  # Each layer picks one op uniformly at random and applies it with a
  # per-op probability drawn from U[0.2, 0.8]; magnitude is fixed.
  for layer_num in range(num_layers):
    op_to_select = tf.random_uniform(
        [], maxval=len(available_ops), dtype=tf.int32)
    random_magnitude = float(magnitude)
    with tf.name_scope('randaug_layer_{}'.format(layer_num)):
      for (i, op_name) in enumerate(available_ops):
        prob = tf.random_uniform([], minval=0.2, maxval=0.8, dtype=tf.float32)
        func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
                                           replace_value, augmentation_params)
        # Default-arg binding captures this iteration's func/args (avoids
        # the late-binding closure pitfall).
        image = tf.cond(
            tf.equal(i, op_to_select),
            lambda selected_func=func, selected_args=args: selected_func(
                image, *selected_args),
            lambda: image)
  return image
def distort_image(image, aug_name, ra_num_layers, ra_magnitude):
  """Distort image with aa/ra policies.

  Args:
    image: `Tensor` of shape [height, width, 3] representing an image.
    aug_name: one of 'autoaug', 'randaug', or 'ra_aa' (randomly picks one
      of the two per image).
    ra_num_layers: RandAugment N parameter.
    ra_magnitude: RandAugment M parameter.

  Returns:
    The augmented image.

  Raises:
    ValueError: on an unknown `aug_name`.
  """
  logging.info('Apply AutoAugment policy %s', aug_name)
  if aug_name == 'autoaug':
    image = distort_image_with_autoaugment(image, 'v0')
  elif aug_name == 'randaug':
    image = distort_image_with_randaugment(image, ra_num_layers, ra_magnitude)
  elif aug_name == 'ra_aa':
    # Flip a coin per image: branch 0 applies AutoAugment, branch 1 RandAugment.
    i = tf.random_uniform([], maxval=2, dtype=tf.int32)
    image = tf.cond(
        tf.equal(i, 0), lambda: distort_image_with_autoaugment(image, 'v0'),
        lambda: image)
    image = tf.cond(
        tf.equal(i, 1), lambda: distort_image_with_randaugment(
            image, ra_num_layers, ra_magnitude), lambda: image)
  else:
    raise ValueError('Invalid value for aug_name: %s' % (aug_name))
  return image
|
|
import syslog
from datetime import datetime
import time
import re
import sys
class APIKeys(object):
    def __init__ (self, dbh, debug):
        """Wrap the 'apikeys' table of database handle `dbh`.

        Args:
            dbh: database connection object exposing .table(name).
            debug: when not None, log output goes to stdout instead of syslog.
        """
        self.dbh = dbh
        self.debug = debug
        self.table = self.dbh.table('apikeys')
        # Raw row of the most recently fetched key; consulted by
        # exists()/is_revoked()/is_expired().
        self.currow = None
        # Column names that add_key()/update_key() are allowed to write.
        self.updateable_row_names = ['alias', 'restrictedAccess', 'writeAccess', 'description', 'expires', 'revoked', 'parent']
    def L(self, msg):
        """Log `msg`, prefixed with the calling function's qualified name.

        Writes to stdout when debugging is enabled, to syslog otherwise.
        """
        # sys._getframe(1) is the caller's frame; cheap way to tag log lines.
        caller = ".".join([str(__name__), sys._getframe(1).f_code.co_name])
        if self.debug != None:
            print caller + ": " + msg
        else:
            syslog.syslog(caller + ": " + msg)
def totimestamp(dt, epoch=datetime(1970,1,1)):
td = dt - epoch
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 1e6
    def is_valid (self, apikey):
        """Return True if `apikey` exists and is neither revoked nor expired.

        A key is valid when b:revoked is unset/'False' and b:expires is
        either 'never' or still in the future.
        """
        if apikey != None:
            row = self.table.row(apikey, columns = ['b:expires', 'b:revoked'])
            if row != {}:
                if row['b:revoked'] == None or row['b:revoked'] == "False":
                    # NOTE(review): b:expires is a string; comparing it to
                    # time.time() (a float) only works under Python 2's
                    # mixed-type ordering — confirm the stored format.
                    if row['b:expires'] == "never" or row['b:expires'] > time.time():
                        return True
                    else:
                        self.L(apikey + " is expired")
                else:
                    self.L(apikey + " is revoked")
            else:
                self.L(apikey + " not found ")
        else:
            self.L("no key given")
        return False
def is_revoked(self):
if self.exists() == True:
if 'b:revoked' in self.currow:
if self.currow['b:revoked'] == 'False':
return False
return False
def is_expired(self):
if self.exists() == True:
if 'b:expires' in self.currow:
if self.currow['b:expires'] == 'never':
return True
if int(self.currow['b:expires']) > time.time():
return True
return False
def exists(self):
if currow == None or currow == {}:
return False
return True
    def get_by_alias(self, alias):
        """
        lookup where rowkey = alias, returns a string column b:apikey (the key itself) or None
        use get_by_key() to retrieve full key details
        """
        if alias != None:
            # Alias rows live in the same table, keyed by the alias string,
            # holding only a pointer column b:apikey.
            row = self.table.row(alias, columns = ['b:apikey'])
            if row != {}:
                return row['b:apikey']
            else:
                self.L("no key for alias " + alias)
        return None
    def get_by_key(self, apikey):
        """
        lookup where rowkey = key, return a dictionary of all columns:values
        or an empty dict
        """
        self.L(apikey)
        rv = {}
        if apikey != None:
            row = self.table.row(apikey)
            # Translate the raw row into typed values for callers.
            rv = self.row_to_rv(row)
            if rv != {}:
                # Cache the raw row for exists()/is_revoked()/is_expired().
                self.currow = row
            else:
                self.L("no key " + apikey)
                self.currow = {}
        return rv
    def get_groups_by_key(self, apikey):
        """
        lookup the record matching apikey, return a list of groups the key is a member of
        to find the default group, use get_by_key()
        returns an empty list on fail
        """
        self.L(apikey)
        rv = []
        if apikey != None:
            row = self.table.row(apikey)
            if row != {}:
                self.currow = row
                # Group memberships are stored as 'grp:<groupid>' columns;
                # collect the group ids. (iteritems is Python 2 only.)
                for key, value in row.iteritems():
                    m = re.match('grp:(.*)', key)
                    if m != None:
                        rv.append(m.group(1))
        return rv
def row_to_rv(self, row):
"""
given a row from the db, translate its values into something standardized we can
stuff into a protocol buffer. eg hbase values are typeless, we want to type (some of) them.
"""
rv = {}
if row != {}:
if 'b:alias' in row:
rv['alias'] = row['b:alias']
else:
rv['alias'] = ''
if 'b:description' in row:
rv['description'] = row['b:description']
else:
rv['description'] = ''
if 'b:defaultGroup' in row:
rv['defaultGroup'] = row['b:defaultGroup']
else:
rv['defaultGroup'] = ''
if 'b:expires' not in row or row['b:expires'] == "never":
rv['expires'] = 0
else:
rv['expires'] = int(row['b:expires'])
if 'b:writeAccess' not in row or row['b:writeAccess'] == 'False':
rv['writeAccess'] = False
else:
rv['writeAccess'] = True
if 'b:restrictedAccess' not in row or row['b:restrictedAccess'] == 'False':
rv['restrictedAccess'] = False
else:
rv['restrictedAccess'] = True
if 'b:revoked' not in row or row['b:revoked'] == 'False':
rv['revoked'] = False
else:
rv['revoked'] = True
if 'b:parent' in row:
rv['parent'] = row['b:parent']
else:
rv['parent'] = ''
if 'b:created' in row:
rv['created'] = int(row['b:created'])
else:
rv['created'] = 0
rv['groups'] = {}
rv['restrictions'] = {}
for k in row:
if k.startswith("grp:"):
rv['groups'][k.replace("grp:", "")] = row[k]
return rv
    def list_by_key(self, apikey_pattern):
        """
        lookup where rowkey =~ apikey_pattern, return a dict of all matches
        will NOT return aliases (even tho we store them in the rowkey)
        returns an empty dict on fail/no matches
        """
        self.L(apikey_pattern)
        rv = {}
        if apikey_pattern != None:
            # Full-table scan; alias rows are excluded by skipping any row
            # that carries a b:apikey pointer column.
            for key, data in self.table.scan(): #filter="FirstKeyOnlyFilter"):
                match = re.search(apikey_pattern, key)
                if match != None and 'b:apikey' not in data:
                    rv[key] = self.row_to_rv(data)
        return rv
    def add_key(self, apikey_params):
        """
        Given a key, add it (and its alias pointer row) to the database.

        Raises on failure: if the key or alias already exists, or if either
        write to the database fails (the main record is rolled back when the
        alias write fails).

        add_key({
            apikey: ...,
            alias: ...,
            restrictedAccess: t|f,
            writeAccess: t|f,
            description: ...,
            expires: int...,
            revoked: t|f,
            groupsList: {groupname, groupid, isdefault=t|f},
            restrictionsList: {restrname, restrid},
            parent: ...
        })
        """
        apikey = apikey_params.apikey
        self.L(apikey)
        kr = self.get_by_key(apikey)
        if kr != {}:
            raise Exception("Key already exists")
        ka = self.get_by_alias(apikey_params.alias)
        if ka != None:
            raise Exception("Alias already exists")
        self.L("key/alias dont exist, looks ok to add")
        # kr is {} at this point; repurpose it as the column dict to insert.
        for fn in self.updateable_row_names:
            dbcol = "b:" + fn
            # NOTE(review): `val` is computed but unused; kr is assigned the
            # same expression directly below.
            val = str(getattr(apikey_params, fn))
            kr[dbcol] = str(getattr(apikey_params, fn))
        try:
            self.table.put(apikey, kr)
        except TypeError as e:
            print e
            self.L("add of main record failed for " + apikey)
            raise Exception("Failed to add key to database")
        except:
            # Unknown failure: remove any partial write before re-raising.
            self.table.delete(apikey)
            self.L("add failed, unknown error: " + str(sys.exc_info()[0]))
            raise Exception("Failed to add key to database: " + str(sys.exc_info()[0]))
        try:
            if apikey_params.alias != "":
                # Alias rows point back at the key via b:apikey.
                self.table.put(apikey_params.alias, {'b:apikey': apikey})
        except TypeError as e:
            self.table.delete(apikey) # rollback
            self.L("add of alias record failed for " + apikey + " (rolled back main)")
            raise Exception("Add of alias to database failed")
        except:
            self.table.delete(apikey)
            self.L("add failed, unknown error: " + str(sys.exc_info()[0]))
            raise Exception("Add of alias failed: " + str(sys.exc_info()[0]))
def update_key(self, apikey_params):
"""
Given a key, update it in the database.
All of the fields, except the 'apikey' field, are optional. The specified fields will be merged
into the existing database record.
add_key({
apikey: ...,
alias: ...,
restrictedAccess: t|f,
writeAccess: t|f,
description: ...,
expires: int...,
revoked: t|f,
groupsList: [ {groupname: name, groupid: id, default: t|f} ],
restrictionsList: [ {restrname: name, restrid: id} ],
parent: ...
})
"""
apikey = apikey_params.apikey
self.L(apikey)
kr = self.table.row(apikey)
if kr != {}:
try:
if apikey_params.HasField("alias"):
prev_alias = kr['b:alias']
if apikey_params.alias != "" and prev_alias != apikey_params.alias: # if you want to change the alias
ka = self.get_by_alias(alias)
if ka != None:
raise Exception("Alias already taken")
self.table.put(apikey_params.alias, {'b:apikey': apikey})
self.table.delete(prev_alias)
for fn in self.updateable_row_names:
if apikey_params.HasField(fn):
dbcol = "b:" + fn
colval = str(getattr(apikey_params, fn))
kr[dbcol] = colval
self.table.put(apikey, kr)
if apikey_params.HasField('groupsList'):
self.update_groups(apikey, apikey_params.groupsList)
if apikey_params.HasField('restrictionsList'):
self.update_restrictions(apikey, apikey_params.restrictionsList)
except:
self.L("update failed, unknown error: " + str(sys.exc_info()[0]))
raise Exception("Unknown error: " + str(sys.exc_info()[0]))
raise Exception("Key doesn't exist: " + apikey)
def update_groups(self, apikey, groupsList):
    """
    Given a groups list like this: groupsList = [ {groupname: name, groupid: id, default: t|f} ]
    add the group name/id to the specified apikey. If the groupid is empty (or unspecified) but
    the name is given, we will remove the group from the given apikey.
    The group must exist in the groups table before we take any action, otherwise we return
    false.
    The groupid that the client passes is ignored, and instead we lookup the groupid in the
    groups table using the groupname they give us.
    """
    # NOTE(review): despite the docstring, groupsList is treated here as a
    # single mapping ('groupname' in groupsList), not a list of dicts —
    # confirm the caller's payload shape.
    if apikey != None and 'groupname' in groupsList:
        # Resolve the authoritative groupid from the groups table; the
        # client-supplied id is only used as a presence flag below.
        groupid = self.get_group_by_name(groupsList['groupname'])
        if groupid != None:
            # NOTE(review): this accessor takes a table name first
            # ('apikeys', ...), unlike self.table.row/put usage elsewhere in
            # this class — verify which client API self.table exposes.
            akr = self.table.get('apikeys', apikey)
            if akr != {}:
                if 'groupid' in groupsList:
                    # Attach: store the group name under a 'grp:<uuid>' column.
                    self.table.put('apikeys', apikey, 'grp:' + groupid, groupsList['groupname'])
                else:
                    # No groupid supplied: detach the group from this key.
                    self.table.delete('apikeys', apikey, 'grp:' + groupid)
            else:
                raise Exception("Key doesn't exist")
def update_restrictions(self, apikey, restrictionsList):
    # Placeholder: restriction updates are not implemented yet; always
    # reports success so callers (update_key) are unaffected.
    return True
def create_group(self, groupname, groupid):
    """
    Create a bidirectional group mapping (name -> uuid and uuid -> name).
    Raises an Exception when either parameter is missing.
    """
    # BUG FIX: the "Invalid parameters" exception used to be raised
    # unconditionally, even after both puts succeeded.
    if groupname == None or groupid == None:
        raise Exception("Invalid parameters")
    self.table.put('groups', groupname, 'b:uuid', groupid)
    self.table.put('groups', groupid, 'b:name', groupname)
def remove_group(self, groupname, groupid):
    """
    Delete both directions of a group mapping (name row and uuid row).
    Raises an Exception when either parameter is missing.
    """
    # BUG FIX: the "Invalid parameters" exception used to be raised
    # unconditionally, even after both deletes succeeded.
    if groupname == None or groupid == None:
        raise Exception("Invalid parameters")
    self.table.delete('groups', groupname)
    self.table.delete('groups', groupid)
def get_group_by_name(self, groupName):
    """Return the uuid recorded for groupName, or None when absent."""
    if groupName == None:
        return None
    row = self.table.get('groups', groupName, 'b:uuid')
    if row != {} and 'b:uuid' in row:
        return row['b:uuid']
    return None
def get_group_by_id(self, groupid):
    """Return the group name recorded for groupid, or None when absent."""
    if groupid == None:
        return None
    row = self.table.get('groups', groupid, 'b:name')
    if row != {} and 'b:name' in row:
        return row['b:name']
    return None
def remove_by_key(self, apikey):
    """
    Remove the given key from the database.
    Also removes the alias record when the key has one. Missing keys are
    silently ignored. Raises an Exception when the store delete fails.
    """
    self.L(apikey)
    kr = self.get_by_key(apikey)
    if kr != {}:
        try:
            if 'alias' in kr and kr['alias'] != '':
                # CONSISTENCY FIX: debug output used bare print statements;
                # route it through self.L like the rest of this class.
                self.L("delete the alias record")
                # delete the alias record
                self.table.delete(kr['alias'])
            else:
                self.L("no alias rec to delete " + str(kr))
            self.table.delete(apikey)
        except:
            self.L("remove failed, unknown error: " + str(sys.exc_info()[0]))
            raise Exception("Remove failed: " + str(sys.exc_info()[0]))
def remove_by_alias(self, apikey_alias):
    """
    Remove the key that the given alias points at (and the alias record).
    """
    self.L(apikey_alias)
    key = self.get_by_alias(apikey_alias)
    if key is not None:
        self.remove_by_key(key)
|
|
import copy
import urllib3
import jwt
from CommonServerPython import *
# Disable insecure warnings
urllib3.disable_warnings()

# Integration parameters as configured by the user.
PARAMS = demisto.params()
# Remove trailing slash to prevent wrong URL path to service
SERVER = PARAMS['url'][:-1] \
    if (PARAMS['url'] and PARAMS['url'].endswith('/')) else PARAMS['url']
# Should we use SSL
USE_SSL = not PARAMS.get('insecure', False)
# Service base URL
BASE_URL = SERVER + '/xapi/v1/'
# Credentials used to sign the JWT auth token (see generate_auth_token).
APPLICATION_ID = PARAMS.get('application_id')
PRIVATE_KEY = PARAMS.get('private_key')
# Headers to be sent in requests
REQUEST_HEADERS = {
    'Content-Type': 'application/json'
}
# Per-command mapping of context output keys -> raw API response field
# names, consumed by parse_data_from_response.
OUTPUTS = {
    'get_endpoint_by_id': {
        'ID': 'guid',
        'Name': 'name',
        'Domain': 'domain',
        'Platform': 'platform',
        'ScanStatus': 'scanStatus',
        'Status': 'status',
        'IP': 'ip',
        'ComputerSid': 'computerSid',
        'IsCompromised': 'compromised',
        'OsVersion': 'osVersion',
        'OsProductType': 'osProductType',
        'OsProductName': 'osProductName',
        'Is64': 'is64',
        'LastSeen': 'lastSeen',
        'LastUser': 'lastUser'
    },
    'endpoint_files_retrieve': {
        'OperationID': 'operationId'
    },
    'endpoint_isolate': {
        'OperationID': 'operationId'
    },
    'endpoint_scan': {
        'OperationID': 'operationId'
    },
    'event_bulk_update_status': {
        'EventID': 'eventGuid'
    },
    'hashes_blacklist_status': {
        'SHA256': 'hash',
        'BlacklistStatus': 'status'
    },
    'event_quarantine_result': {
        'SHA256': 'fileHash',
        'FilePath': 'filePath'
    },
    'endpoint_scan_result': {
        'FileScanned': 'filesScanned',
        'FilesFailed': 'filesFailed',
        'MalwareFound': 'malwareFound'
    }
}
def create_headers(with_auth):
    """Build the HTTP headers for a request.

    Args:
        with_auth: when True, attach a JWT Bearer Authorization header.

    Returns:
        Headers dict.
    """
    headers = dict(REQUEST_HEADERS)
    if with_auth:
        headers['Authorization'] = 'Bearer {}'.format(generate_auth_token().decode('utf-8'))
    return headers
def extract_and_validate_http_response(response, operation_err_message, test=False):
    """Validate a raw HTTP response and extract its payload.

    Args:
        response: raw response.
        operation_err_message: error message to present in case of error.
        test: boolean value, true if test (returns raw body instead of JSON).

    Returns:
        Parsed JSON (or raw content when test=True); reports an error to the
        user when the response carries an HTTP error status.
    """
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        err_message = 'Could not parse error'
        try:
            err_message = response.json().get('message')
        except Exception:
            try:
                err_obj = json.loads(xml2json(response.text))
                err_message = demisto.get(err_obj, 'Error.Message')
            except Exception:
                pass
        return_error(f'{operation_err_message}: \n{err_message}')
    else:
        return response.content if test else response.json()
def http_request(method, url_suffix, plain_url=False, params=None, data=None, operation_err=None, parse_response=True,
                 with_auth=True):
    """Generic http call to Traps.

    Args:
        method: request method.
        url_suffix: URL suffix.
        plain_url: when True, url_suffix is a full URL (and the response is
            returned as raw content, not JSON).
        params: request params.
        data: request data.
        operation_err: operation error to log the user.
        parse_response: boolean value, if parsing the response is needed.
        with_auth: boolean value, do we need to authenticate the request.

    Returns:
        Result from the API.
    """
    url = url_suffix if plain_url else BASE_URL + url_suffix
    body = json.dumps(data) if data else data
    try:
        result = requests.request(
            method,
            url,
            verify=USE_SSL,
            params=params,
            data=body,
            headers=create_headers(with_auth),
        )
    except requests.exceptions.ConnectionError:
        return_error('Error connecting to Traps server. Please check your connection and you server address')
    if not parse_response:
        return result
    # plain_url doubles as the 'test' flag: plain-URL calls get raw content.
    return extract_and_validate_http_response(result, operation_err, plain_url)
def health_check():
    """Perform a basic health check against the server.

    Raises:
        Exception: when the server does not report "Ok".
    """
    url = f'{SERVER}/xapi/health-check'
    server_status = http_request('GET', url, plain_url=True).decode('utf-8')
    if server_status != '"Ok"':
        raise Exception(f'Server health-check failed. Status returned was: {server_status}')
def generate_auth_token():
    """Generate a signed JWT for authenticating against the Traps server.

    Returns:
        Encoded token.
    """
    claims = {'appId': APPLICATION_ID}
    return jwt.encode(claims, PRIVATE_KEY, algorithm='RS256')
def parse_data_from_response(resp_obj, operation_name=None):
    """Translate a raw API object into the context keys for a command.

    Args:
        resp_obj: raw response object.
        operation_name: key into OUTPUTS selecting the field mapping.

    Returns:
        dict keyed by context output names.
    """
    mapping = OUTPUTS[operation_name]
    return {out_key: resp_obj[api_key] for out_key, api_key in mapping.items() if api_key in resp_obj}
def get_endpoint_by_id(endpoint_id):
    """Fetch a single endpoint's data via GET.

    Args:
        endpoint_id: endpoint ID.

    Returns:
        Tuple of (parsed endpoint data, raw API response).
    """
    raw = http_request('GET', f'agents/{endpoint_id}', operation_err=f'Get endpoint {endpoint_id} failed')
    return parse_data_from_response(raw, 'get_endpoint_by_id'), raw
def endpoint_files_retrieve(endpoint_id, file_name, event_id):
    """Ask an endpoint to upload one of its files.

    Args:
        endpoint_id: endpoint ID.
        file_name: File name.
        event_id: Event ID.

    Returns:
        Operation data (operation ID, endpoint ID and operation type).
    """
    payload = {'incidentId': event_id, 'files': [{'path': file_name}]}
    resp = http_request('POST', f'agents/{endpoint_id}/files-retrieve', data=payload,
                        operation_err=f'Files retrieve command on endpoint {endpoint_id} failed')
    operation = parse_data_from_response(resp, 'endpoint_files_retrieve')
    operation['EndpointID'] = endpoint_id
    operation['Type'] = 'files-retrieve'
    return operation
def endpoint_scan(endpoint_id):
    """Start a scan on an endpoint via POST.

    Args:
        endpoint_id: endpoint ID.

    Returns:
        Tuple of (operation data, raw API response).
    """
    response = http_request('POST', f'agents/{endpoint_id}/scan',
                            operation_err=f'Scanning endpoint: {endpoint_id} failed')
    operation = parse_data_from_response(response, 'endpoint_scan')
    operation['EndpointID'] = endpoint_id
    operation['Type'] = 'endpoint-scan'
    return operation, response
def endpoint_scan_result(operation_id):
    """Query the SAM operation for a scan's results.

    Args:
        operation_id: operation ID.

    Returns:
        Scan data including status and operation ID.
    """
    status, extra = sam_operation(operation_id, 'Could not get scan results')
    result = parse_data_from_response(extra.get('scanData'), 'endpoint_scan_result') if extra else {}
    result.update({'Status': status, 'OperationID': operation_id})
    return result
def update_event_status(event_ids, status):
    """Set the status of one or more events via PATCH.

    Args:
        event_ids: event IDs.
        status: status.

    Returns:
        API response.
    """
    payload = {'guids': event_ids, 'status': status}
    return http_request('PATCH', 'events/status', data=payload,
                        operation_err=f'Update events {event_ids} status failed')
def update_event_comment(event_id, comment):
    """Attach a comment to an event via POST.

    Args:
        event_id: event ID.
        comment: comment.
    """
    http_request('POST', f'events/{event_id}/comment', data={'comment': comment},
                 operation_err=f'Update event: {event_id} comment failed')
def event_update(event_id, status, comment):
    """Update an event's status and/or comment.

    Args:
        event_id: event ID.
        status: status.
        comment: comment.

    Raises:
        Exception: when neither status nor comment was given, or when the
            status update failed.
    """
    if not (status or comment):
        raise Exception('Please add a status or a comment. Neither was given.')
    if status:
        resp = update_event_status([event_id], status)
        if resp.get('failed'):
            raise Exception(f'Update status for event: {event_id} has failed.')
    if comment:
        update_event_comment(event_id, comment)
def event_bulk_update_status(event_ids, status):
    """Update the status of several events at once.

    Args:
        event_ids: event IDs.
        status: status.

    Returns:
        dict with 'UpdateSuccess', 'UpdateFail' and 'UpdateIgnored' lists.
    """
    def _parse_all(id_objs):
        # ROBUSTNESS FIX: the API may omit a bucket entirely; mapping over
        # None used to raise TypeError.
        return [parse_data_from_response(id_obj, 'event_bulk_update_status') for id_obj in (id_objs or [])]

    ids_obj = update_event_status(event_ids, status)
    return {
        'UpdateSuccess': _parse_all(ids_obj.get('succeeded')),
        'UpdateFail': _parse_all(ids_obj.get('failed')),
        'UpdateIgnored': _parse_all(ids_obj.get('ignored')),
    }
def hash_blacklist(hash_id):
    """Add a hash to the blacklist via POST.

    Args:
        hash_id: Hash.

    Returns:
        Operation status string.
    """
    resp = http_request('POST', f'hashes/{hash_id}/blacklist',
                        operation_err=f'Failed to blacklist {hash_id}')
    return resp.get('status')
def remove_hash_from_blacklist(hash_id):
    """Remove a hash from the blacklist via POST.

    Args:
        hash_id: Hash.

    Returns:
        Operation status string.
    """
    resp = http_request('POST', f'hashes/{hash_id}/blacklist-remove',
                        operation_err=f'Failed to remove {hash_id} from blacklist')
    return resp.get('status')
def hashes_blacklist_status(hash_ids):
    """Get blacklist status for several hashes via POST.

    Args:
        hash_ids: Hashes.

    Returns:
        List of per-hash status objects.
    """
    ids_obj = http_request('POST', 'hashes/blacklist-status', data={'hashes': hash_ids},
                           operation_err='Failed to get hashes status')
    return [parse_data_from_response(id_obj, 'hashes_blacklist_status') for id_obj in ids_obj]
def event_quarantine(event_id):
    """Quarantine an event via POST.

    Args:
        event_id: event ID.

    Returns:
        One operation descriptor per spawned SAM message.
    """
    resp = http_request('POST', f'events/{event_id}/quarantine',
                        operation_err=f'Quarantine event {event_id} failed')
    message_ids = resp.get('operationId').get('samMessageIds')
    return [{'EventID': event_id, 'Type': 'event-quarantine', 'OperationID': op_id}
            for op_id in message_ids]
def endpoint_isolate(endpoint_id):
    """Isolate an endpoint via POST.

    Args:
        endpoint_id: endpoint ID.

    Returns:
        Operation data (operation ID, endpoint ID and operation type).
    """
    resp = http_request('POST', f'agents/{endpoint_id}/isolate',
                        operation_err=f'Isolation of endpoint: {endpoint_id} failed')
    operation = parse_data_from_response(resp, 'endpoint_isolate')
    operation['EndpointID'] = endpoint_id
    operation['Type'] = 'endpoint-isolate'
    return operation
def sam_operation(operation_id, operation_err):
    """
    This function invokes an API call to the sam operation endpoint on Traps server to get the operation status and/or
    results.

    :param operation_id: the operation on which to get the status/results
    :param operation_err: The error to return in case of a failure (changes according to the command fired.)
    :return:
        status: the status of the operation.
        additional_data: additional data regarding the operation (like scan results)
    """
    result = http_request('GET', f'sam/operations/{operation_id}', operation_err=operation_err)
    summary_data = result.get('summaryData')
    if summary_data and (summary_data.get('incompatible') or summary_data.get('samExists')):
        if operation_err == 'Could not get scan results':  # Get scan result
            requested_scans = int(summary_data.get('requested', 0))
            incompatible_scans = int(summary_data.get('incompatible', 0))
            sam_exists_scans = int(summary_data.get('samExists', 0))
            if requested_scans <= incompatible_scans + sam_exists_scans:
                # BUG FIX: the incompatible count used to be reported with
                # the requested count's value.
                raise Exception(f'{operation_err}.\nRequested scans number: {requested_scans}.\n'
                                f'Incompatible scans number: {incompatible_scans}.\n'
                                f'Sam exists scans number: {sam_exists_scans}.')
        raise Exception(f'{operation_err}')
    # BUG FIX: guard against a missing summaryData before reading samExists
    # (previously raised AttributeError when summaryData was absent).
    if summary_data and summary_data.get('samExists'):
        return 'ignored', None
    for status_obj in result.get('statuses'):
        if status_obj.get('count') > 0:
            return status_obj.get('status'), result.get('additionalData')
    raise Exception(f'{operation_err}: Could not retrieve status')
def endpoint_isolate_status(operation_id):
    """Query the SAM operation for an isolation's status.

    Args:
        operation_id: operation ID.

    Returns:
        dict with the status and the operation ID.
    """
    isolate_status, _ = sam_operation(operation_id, 'Could not get endpoint isolate status')
    return {'Status': isolate_status, 'OperationID': operation_id}
def event_quarantine_result(operation_id):
    """Query the SAM operation for a quarantine's result.

    Args:
        operation_id: operation ID.

    Returns:
        Quarantine data including status and operation ID.
    """
    status, extra = sam_operation(operation_id, 'Could not get event quarantine status')
    result = parse_data_from_response(extra.get('quarantineData'), 'event_quarantine_result') if extra else {}
    result.update({'Status': status, 'OperationID': operation_id})
    return result
def endpoint_files_retrieve_result(operation_id):
    """Query the SAM operation for a files-retrieve result.

    When the operation has finished, downloads the file and pushes it to the
    war room as a file entry.

    Args:
        operation_id: operation ID.

    Returns:
        dict with the status and the operation ID.
    """
    status, extra = sam_operation(operation_id, 'Failed to get file retrieve results')
    if status == 'finished':
        upload_info = extra.get('uploadData')
        file_content = http_request('GET', upload_info.get('downloadUrl'), plain_url=True,
                                    operation_err='Unable to download file.', with_auth=False)
        demisto.results(fileResult(filename=upload_info.get('fileName'), data=file_content))
    return {'Status': status, 'OperationID': operation_id}
def test_module_command():
    """Verify connectivity (health check) and credentials (authenticated GET).

    Returns:
        ok if successful.
    """
    health_check()
    response = http_request('GET', 'agents/1', parse_response=False)
    if response.status_code == 403:
        raise Exception('Error connecting to server. Check your Application ID and Private key')
    demisto.results('ok')
def get_endpoint_by_id_command():
    """Fetch endpoint data and write it to the war room and context."""
    endpoint_id = demisto.args().get('endpoint_id')
    endpoint_data, raw_data = get_endpoint_by_id(endpoint_id)
    markdown = tableToMarkdown(f'Endpoint {endpoint_id} data:', endpoint_data, headerTransform=pascalToSpace)
    outputs = {'Traps.Endpoint(val.ID == obj.ID)': createContext(endpoint_data)}
    return_outputs(markdown, outputs, raw_response=raw_data)
def endpoint_files_retrieve_command():
    """Start a files-retrieve operation on an endpoint and report it."""
    args = demisto.args()
    endpoint_id = args.get('endpoint_id')
    operation_obj = endpoint_files_retrieve(endpoint_id, args.get('file_name'), args.get('event_id'))
    markdown = tableToMarkdown(f'Files retrieve command on endpoint: {endpoint_id} received', operation_obj,
                               headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.FileRetrieve(val.OperationID == obj.OperationID)': operation_obj}, operation_obj)
def endpoint_files_retrieve_result_command():
    """Report the status of a files-retrieve operation."""
    operation_id = demisto.args().get('operation_id')
    status_obj = endpoint_files_retrieve_result(operation_id)
    markdown = f'### File retrieval status is: {status_obj.get("Status")}'
    return_outputs(markdown, {'Traps.FileRetrieveResult(val.OperationID == obj.OperationID)': status_obj})
def endpoint_scan_command():
    """Start a scan on an endpoint, refusing when one is already running."""
    endpoint_id = demisto.args().get('endpoint_id')
    # A new scan may only start when no scan is pending or in progress.
    _, raw_data = get_endpoint_by_id(endpoint_id)
    scan_status = raw_data.get('scanStatus')
    if scan_status and scan_status in ['pending', 'in_progress']:
        raise Exception(f'Could not initiate a scan on the endpoint {endpoint_id}'
                        f' because endpoint scan status is {scan_status}.')
    operation_obj, raw_data = endpoint_scan(endpoint_id)
    markdown = tableToMarkdown(f'Scan command on endpoint: {endpoint_id} received', operation_obj,
                               headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.Scan(val.OperationID == obj.OperationID)': operation_obj}, raw_data)
def endpoint_scan_result_command():
    """Report the result of a previously started endpoint scan."""
    operation_id = demisto.args().get('operation_id')
    status_obj = endpoint_scan_result(operation_id)
    markdown = tableToMarkdown(f'Status of scan operation: {operation_id}', status_obj,
                               headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.ScanResult(val.OperationID == obj.OperationID)': status_obj}, status_obj)
def event_update_command():
    """Update a single event's status and/or comment."""
    args = demisto.args()
    event_id = args.get('event_id')
    status = args.get('status')
    comment = args.get('comment')
    event_update(event_id, status, comment)
    lines = [f'### Event: {event_id} was updated']
    if status:
        lines.append(f'##### New status: {status}')
    if comment:
        lines.append(f'##### New comment: {comment}')
    return_outputs('\n'.join(lines), None, {})
def event_bulk_update_status_command():
    """Update the status of several events and summarize the outcome."""
    args = demisto.args()
    results = event_bulk_update_status(argToList(args.get('event_ids')), args.get('status'))
    sections = [
        ('Successfully updated', results.get('UpdateSuccess')),
        ('Failed to update', results.get('UpdateFail')),
        ('Ignored', results.get('UpdateIgnored')),
    ]
    markdown = ''.join(tableToMarkdown(title, rows, headerTransform=pascalToSpace) for title, rows in sections)
    return_outputs(markdown, {}, {})
def event_quarantine_command():
    """Start quarantine for an event and report the spawned operations."""
    event_id = demisto.args().get('event_id')
    operations = event_quarantine(event_id)
    markdown = tableToMarkdown(f'Quarantine command on event: {event_id} received', operations,
                               headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.Quarantine(val.OperationID == obj.OperationID)': operations}, operations)
def event_quarantine_result_command():
    """Report the result of a quarantine operation."""
    operation_id = demisto.args().get('operation_id')
    status_obj = event_quarantine_result(operation_id)
    markdown = tableToMarkdown(f'Status of quarantine operation: {operation_id}',
                               status_obj, headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.QuarantineResult(val.OperationID == obj.OperationID)': status_obj}, status_obj)
def hash_blacklist_command():
    """Add a SHA256 hash to the blacklist and report the outcome."""
    hash_id = demisto.args().get('hash_id')
    status = hash_blacklist(hash_id)
    context = {}  # type: dict
    if status == 'success':
        markdown = f'#### Successfully blacklisted: {hash_id}'
        context = {'Traps.File(val.SHA256 == obj.SHA256)': {
            'SHA256': hash_id,
            'BlacklistStatus': 'blacklisted'
        }}
    elif status == 'ignore':
        markdown = f'#### Hash: {hash_id} already appears in blacklist'
    else:
        markdown = f'#### Failed to blacklist: {hash_id}'
    return_outputs(markdown, context, status)
def hash_blacklist_remove_command():
    """Remove a SHA256 hash from the blacklist and report the outcome."""
    hash_id = demisto.args().get('hash_id')
    status = remove_hash_from_blacklist(hash_id)
    context = {}  # type: dict
    if status == 'success':
        markdown = f'#### Successfully removed {hash_id} from blacklist'
        context = {'Traps.File(val.SHA256 == obj.SHA256)': {
            'SHA256': hash_id,
            'BlacklistStatus': 'none'
        }}
    else:
        markdown = f'#### Failed to remove {hash_id} from blacklist:'
    return_outputs(markdown, context, status)
def hashes_blacklist_status_command():
    """Report the blacklist status for a comma-separated list of hashes."""
    hash_ids = demisto.args().get('hash_ids').split(',')
    ids_obj = hashes_blacklist_status(hash_ids)
    markdown = tableToMarkdown('Hashes status:', ids_obj, headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.File(val.SHA256 == obj.SHA256)': ids_obj}, ids_obj)
def endpoint_isolate_command():
    """Start isolation of an endpoint and report the operation."""
    endpoint_id = demisto.args().get('endpoint_id')
    operation_obj = endpoint_isolate(endpoint_id)
    markdown = tableToMarkdown(f'Isolate command on endpoint {endpoint_id} received', operation_obj,
                               headerTransform=pascalToSpace)
    return_outputs(markdown, {'Traps.Isolate(val.OperationID == obj.OperationID)': operation_obj}, operation_obj)
def endpoint_isolate_status_command():
    """Report the status of an endpoint isolation operation."""
    operation_id = demisto.args().get('operation_id')
    isolate_status = endpoint_isolate_status(operation_id)
    markdown = f'### Isolate status is: {isolate_status.get("Status")}'
    return_outputs(markdown, {'Traps.IsolateResult(val.OperationID == obj.OperationID)': isolate_status},
                   isolate_status)
def main():
    """
    Route the invoked integration command to its implementation.
    """
    # Remove proxy if not set to true in params
    handle_proxy()
    command = demisto.command()
    LOG(f'Command being called is {command}.')
    # Dispatch table: command name -> implementation.
    commands = {
        'test-module': test_module_command,
        'traps-get-endpoint-by-id': get_endpoint_by_id_command,
        'traps-endpoint-files-retrieve': endpoint_files_retrieve_command,
        'traps-endpoint-files-retrieve-result': endpoint_files_retrieve_result_command,
        'traps-endpoint-scan': endpoint_scan_command,
        'traps-endpoint-scan-result': endpoint_scan_result_command,
        'traps-event-update': event_update_command,
        'traps-event-bulk-update-status': event_bulk_update_status_command,
        'traps-hash-blacklist': hash_blacklist_command,
        'traps-hash-blacklist-remove': hash_blacklist_remove_command,
        'traps-hashes-blacklist-status': hashes_blacklist_status_command,
        'traps-event-quarantine': event_quarantine_command,
        'traps-event-quarantine-result': event_quarantine_result_command,
        'traps-endpoint-isolate': endpoint_isolate_command,
        'traps-endpoint-isolate-status': endpoint_isolate_status_command,
    }
    try:
        if command not in commands:
            raise NotImplementedError(f'Command {command} was not implemented.')
        commands[command]()
    except Exception as err:
        return_error(err)


if __name__ in ["__builtin__", "builtins", "__main__"]:
    main()
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=invalid-overridden-method
import functools
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Iterable, Dict, List,
TYPE_CHECKING
)
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator import distributed_trace
from azure.core.pipeline import AsyncPipeline
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.async_paging import AsyncItemPaged
from .._shared.models import LocationMode
from .._shared.policies_async import ExponentialRetry
from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
from .._shared.response_handlers import return_response_headers, process_storage_error
from .._shared.parser import _to_utc_datetime
from .._shared.response_handlers import parse_to_internal_user_delegation_key
from .._generated.aio import AzureBlobStorage
from .._generated.models import StorageServiceProperties, KeyInfo
from .._blob_service_client import BlobServiceClient as BlobServiceClientBase
from ._container_client_async import ContainerClient
from ._blob_client_async import BlobClient
from .._models import ContainerProperties
from .._deserialize import service_stats_deserialize, service_properties_deserialize
from .._serialize import get_api_version
from ._models import ContainerPropertiesPaged, FilteredBlobPaged
if TYPE_CHECKING:
from datetime import datetime
from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey
from ._lease_async import BlobLeaseClient
from .._models import (
BlobProperties,
PublicAccess,
BlobAnalyticsLogging,
Metrics,
CorsRule,
RetentionPolicy,
StaticWebsite,
)
class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase):
"""A client to interact with the Blob Service at the account level.
This client provides operations to retrieve and configure the account properties
as well as list, create and delete containers within the account.
For operations relating to a specific container or blob, clients for those entities
can also be retrieved using the `get_client` functions.
:param str account_url:
The URL to the blob storage account. Any other entities included
in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
authenticated with a SAS token.
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
an instance of a AzureSasCredential from azure.core.credentials, an account
shared access key, or an instance of a TokenCredentials class from azure.identity.
If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
- except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
:keyword str api_version:
The Storage API version to use for requests. Default value is '2019-07-07'.
Setting to an older version may result in reduced feature compatibility.
.. versionadded:: 12.2.0
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
Defaults to 4*1024*1024, or 4MB.
:keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
:keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
:keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
:keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
:keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
:keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
or 4MB.
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_authentication_async.py
:start-after: [START create_blob_service_client]
:end-before: [END create_blob_service_client]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient with account url and credential.
.. literalinclude:: ../samples/blob_samples_authentication_async.py
:start-after: [START create_blob_service_client_oauth]
:end-before: [END create_blob_service_client_oauth]
:language: python
:dedent: 8
:caption: Creating the BlobServiceClient with Azure Identity credentials.
"""
def __init__(
        self, account_url,  # type: str
        credential=None,  # type: Optional[Any]
        **kwargs  # type: Any
    ):
    # type: (...) -> None
    # Default to an exponential retry policy unless the caller supplied one.
    kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
    super(BlobServiceClient, self).__init__(
        account_url,
        credential=credential,
        **kwargs)
    # Generated (autorest) client that performs the actual REST calls,
    # sharing this client's pipeline.
    self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
    # Honor an explicit api_version kwarg, falling back to the generated
    # client's default service version.
    default_api_version = self._client._config.version  # pylint: disable=protected-access
    self._client._config.version = get_api_version(kwargs, default_api_version)  # pylint: disable=protected-access
    self._loop = kwargs.get('loop', None)
@distributed_trace_async
async def get_user_delegation_key(self, key_start_time,  # type: datetime
                                  key_expiry_time,  # type: datetime
                                  **kwargs  # type: Any
                                  ):
    # type: (...) -> UserDelegationKey
    """
    Obtain a user delegation key for the purpose of signing SAS tokens.
    A token credential must be present on the service object for this request to succeed.

    :param ~datetime.datetime key_start_time:
        A DateTime value. Indicates when the key becomes valid.
    :param ~datetime.datetime key_expiry_time:
        A DateTime value. Indicates when the key stops being valid.
    :keyword int timeout:
        The timeout parameter is expressed in seconds.
    :return: The user delegation key.
    :rtype: ~azure.storage.blob.UserDelegationKey
    """
    timeout = kwargs.pop('timeout', None)
    key_info = KeyInfo(
        start=_to_utc_datetime(key_start_time),
        expiry=_to_utc_datetime(key_expiry_time))
    try:
        delegation_key = await self._client.service.get_user_delegation_key(
            key_info=key_info,
            timeout=timeout,
            **kwargs)  # type: ignore
    except HttpResponseError as error:
        process_storage_error(error)
    return parse_to_internal_user_delegation_key(delegation_key)  # type: ignore
@distributed_trace_async
async def get_account_information(self, **kwargs):
    # type: (Any) -> Dict[str, str]
    """Gets information related to the storage account.
    The information can also be retrieved if the user has a SAS to a container or blob.
    The keys in the returned dictionary include 'sku_name' and 'account_kind'.

    :returns: A dict of account information (SKU and account type).
    :rtype: dict(str, str)

    .. admonition:: Example:

        .. literalinclude:: ../samples/blob_samples_service_async.py
            :start-after: [START get_blob_service_account_info]
            :end-before: [END get_blob_service_account_info]
            :language: python
            :dedent: 12
            :caption: Getting account information for the blob service.
    """
    try:
        # cls=return_response_headers surfaces the response headers (which
        # carry the account info) instead of a deserialized body.
        return await self._client.service.get_account_info(cls=return_response_headers, **kwargs)  # type: ignore
    except HttpResponseError as error:
        # Translate the generic HTTP error into the storage-specific one.
        process_storage_error(error)
@distributed_trace_async
async def get_service_stats(self, **kwargs):
# type: (Any) -> Dict[str, Any]
"""Retrieves statistics related to replication for the Blob service.
It is only available when read-access geo-redundant replication is enabled for
the storage account.
With geo-redundant replication, Azure Storage maintains your data durable
in two locations. In both locations, Azure Storage constantly maintains
multiple healthy replicas of your data. The location where you read,
create, update, or delete data is the primary storage account location.
The primary location exists in the region you choose at the time you
create an account via the Azure Management Azure classic portal, for
example, North Central US. The location to which your data is replicated
is the secondary location. The secondary location is automatically
determined based on the location of the primary; it is in a second data
center that resides in the same region as the primary location. Read-only
access is available from the secondary location, if read-access geo-redundant
replication is enabled for your storage account.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:return: The blob service stats.
:rtype: Dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START get_blob_service_stats]
:end-before: [END get_blob_service_stats]
:language: python
:dedent: 12
:caption: Getting service stats for the blob service.
"""
timeout = kwargs.pop('timeout', None)
try:
stats = await self._client.service.get_statistics( # type: ignore
timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
return service_stats_deserialize(stats)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_service_properties(self, **kwargs):
# type: (Any) -> Dict[str, Any]
"""Gets the properties of a storage account's Blob service, including
Azure Storage Analytics.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An object containing blob service properties such as
analytics logging, hour/minute metrics, cors rules, etc.
:rtype: Dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START get_blob_service_properties]
:end-before: [END get_blob_service_properties]
:language: python
:dedent: 12
:caption: Getting service properties for the blob service.
"""
timeout = kwargs.pop('timeout', None)
try:
service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
return service_properties_deserialize(service_props)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def set_service_properties(
self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging]
hour_metrics=None, # type: Optional[Metrics]
minute_metrics=None, # type: Optional[Metrics]
cors=None, # type: Optional[List[CorsRule]]
target_version=None, # type: Optional[str]
delete_retention_policy=None, # type: Optional[RetentionPolicy]
static_website=None, # type: Optional[StaticWebsite]
**kwargs
):
# type: (...) -> None
"""Sets the properties of a storage account's Blob service, including
Azure Storage Analytics.
If an element (e.g. analytics_logging) is left as None, the
existing settings on the service for that functionality are preserved.
:param analytics_logging:
Groups the Azure Analytics Logging settings.
:type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
:param hour_metrics:
The hour metrics settings provide a summary of request
statistics grouped by API in hourly aggregates for blobs.
:type hour_metrics: ~azure.storage.blob.Metrics
:param minute_metrics:
The minute metrics settings provide request statistics
for each minute for blobs.
:type minute_metrics: ~azure.storage.blob.Metrics
:param cors:
You can include up to five CorsRule elements in the
list. If an empty list is specified, all CORS rules will be deleted,
and CORS will be disabled for the service.
:type cors: list[~azure.storage.blob.CorsRule]
:param str target_version:
Indicates the default version to use for requests if an incoming
request's version is not specified.
:param delete_retention_policy:
The delete retention policy specifies whether to retain deleted blobs.
It also specifies the number of days and versions of blob to keep.
:type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
:param static_website:
Specifies whether the static website feature is enabled,
and if yes, indicates the index document and 404 error document to use.
:type static_website: ~azure.storage.blob.StaticWebsite
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START set_blob_service_properties]
:end-before: [END set_blob_service_properties]
:language: python
:dedent: 12
:caption: Setting service properties for the blob service.
"""
if all(parameter is None for parameter in [
analytics_logging, hour_metrics, minute_metrics, cors,
target_version, delete_retention_policy, static_website]):
raise ValueError("set_service_properties should be called with at least one parameter")
props = StorageServiceProperties(
logging=analytics_logging,
hour_metrics=hour_metrics,
minute_metrics=minute_metrics,
cors=cors,
default_service_version=target_version,
delete_retention_policy=delete_retention_policy,
static_website=static_website
)
timeout = kwargs.pop('timeout', None)
try:
await self._client.service.set_properties(props, timeout=timeout, **kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace
def list_containers(
self, name_starts_with=None, # type: Optional[str]
include_metadata=False, # type: Optional[bool]
**kwargs
):
# type: (...) -> AsyncItemPaged[ContainerProperties]
"""Returns a generator to list the containers under the specified account.
The generator will lazily follow the continuation tokens returned by
the service and stop when all containers have been returned.
:param str name_starts_with:
Filters the results to return only containers whose names
begin with the specified prefix.
:param bool include_metadata:
Specifies that container metadata to be returned in the response.
The default value is `False`.
:keyword bool include_deleted:
Specifies that deleted containers to be returned in the response. This is for container restore enabled
account. The default value is `False`.
.. versionadded:: 12.4.0
:keyword int results_per_page:
The maximum number of container names to retrieve per API
call. If the request does not specify the server will return up to 5,000 items.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) of ContainerProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START bsc_list_containers]
:end-before: [END bsc_list_containers]
:language: python
:dedent: 16
:caption: Listing the containers in the blob service.
"""
include = ['metadata'] if include_metadata else []
include_deleted = kwargs.pop('include_deleted', None)
if include_deleted:
include.append("deleted")
timeout = kwargs.pop('timeout', None)
results_per_page = kwargs.pop('results_per_page', None)
command = functools.partial(
self._client.service.list_containers_segment,
prefix=name_starts_with,
include=include,
timeout=timeout,
**kwargs)
return AsyncItemPaged(
command,
prefix=name_starts_with,
results_per_page=results_per_page,
page_iterator_class=ContainerPropertiesPaged
)
@distributed_trace
def find_blobs_by_tags(self, filter_expression, **kwargs):
# type: (str, **Any) -> AsyncItemPaged[FilteredBlob]
"""The Filter Blobs operation enables callers to list blobs across all
containers whose tags match a given search expression. Filter blobs
searches across all containers within a storage account but can be
scoped within the expression to a single container.
:param str filter_expression:
The expression to find blobs whose tags matches the specified condition.
eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
To specify a container, eg. "@container='containerName' and \"Name\"='C'"
:keyword int results_per_page:
The max result per page when paginating.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) response of BlobProperties.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
"""
results_per_page = kwargs.pop('results_per_page', None)
timeout = kwargs.pop('timeout', None)
command = functools.partial(
self._client.service.filter_blobs,
where=filter_expression,
timeout=timeout,
**kwargs)
return AsyncItemPaged(
command, results_per_page=results_per_page,
page_iterator_class=FilteredBlobPaged)
@distributed_trace_async
async def create_container(
self, name, # type: str
metadata=None, # type: Optional[Dict[str, str]]
public_access=None, # type: Optional[Union[PublicAccess, str]]
**kwargs
):
# type: (...) -> ContainerClient
"""Creates a new container under the specified account.
If the container with the same name already exists, a ResourceExistsError will
be raised. This method returns a client with which to interact with the newly
created container.
:param str name: The name of the container to create.
:param metadata:
A dict with name-value pairs to associate with the
container as metadata. Example: `{'Category':'test'}`
:type metadata: dict(str, str)
:param public_access:
Possible values include: 'container', 'blob'.
:type public_access: str or ~azure.storage.blob.PublicAccess
:keyword container_encryption_scope:
Specifies the default encryption scope to set on the container and use for
all future writes.
.. versionadded:: 12.2.0
:paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: ~azure.storage.blob.aio.ContainerClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START bsc_create_container]
:end-before: [END bsc_create_container]
:language: python
:dedent: 16
:caption: Creating a container in the blob service.
"""
container = self.get_container_client(name)
timeout = kwargs.pop('timeout', None)
kwargs.setdefault('merge_span', True)
await container.create_container(
metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
return container
@distributed_trace_async
async def delete_container(
self, container, # type: Union[ContainerProperties, str]
lease=None, # type: Optional[Union[BlobLeaseClient, str]]
**kwargs
):
# type: (...) -> None
"""Marks the specified container for deletion.
The container and any blobs contained within it are later deleted during garbage collection.
If the container is not found, a ResourceNotFoundError will be raised.
:param container:
The container to delete. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.ContainerProperties
:param lease:
If specified, delete_container only succeeds if the
container's lease is active and matches this ID.
Required if the container has an active lease.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START bsc_delete_container]
:end-before: [END bsc_delete_container]
:language: python
:dedent: 16
:caption: Deleting a container in the blob service.
"""
container = self.get_container_client(container) # type: ignore
kwargs.setdefault('merge_span', True)
timeout = kwargs.pop('timeout', None)
await container.delete_container( # type: ignore
lease=lease,
timeout=timeout,
**kwargs)
    @distributed_trace_async
    async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs):
        # type: (str, str, **Any) -> ContainerClient
        """Restores soft-deleted container.
        Operation will only be successful if used within the specified number of days
        set in the delete retention policy.
        .. versionadded:: 12.4.0
            This operation was introduced in API version '2019-12-12'.
        :param str deleted_container_name:
            Specifies the name of the deleted container to restore.
        :param str deleted_container_version:
            Specifies the version of the deleted container to restore.
        :keyword str new_name:
            The new name for the deleted container to be restored to.
            If not specified deleted_container_name will be used as the restored container name.
        :keyword int timeout:
            The timeout parameter is expressed in seconds.
        :rtype: ~azure.storage.blob.aio.ContainerClient
        """
        new_name = kwargs.pop('new_name', None)
        # The returned client points at the restored (possibly renamed) container.
        container = self.get_container_client(new_name or deleted_container_name)
        try:
            await container._client.container.restore(deleted_container_name=deleted_container_name,  # pylint: disable = protected-access
                                                      deleted_container_version=deleted_container_version,
                                                      timeout=kwargs.pop('timeout', None), **kwargs)
            return container
        except HttpResponseError as error:
            process_storage_error(error)
def get_container_client(self, container):
# type: (Union[ContainerProperties, str]) -> ContainerClient
"""Get a client to interact with the specified container.
The container need not already exist.
:param container:
The container. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.ContainerProperties
:returns: A ContainerClient.
:rtype: ~azure.storage.blob.aio.ContainerClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START bsc_get_container_client]
:end-before: [END bsc_get_container_client]
:language: python
:dedent: 12
:caption: Getting the container client to interact with a specific container.
"""
try:
container_name = container.name
except AttributeError:
container_name = container
_pipeline = AsyncPipeline(
transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
policies=self._pipeline._impl_policies # pylint: disable = protected-access
)
return ContainerClient(
self.url, container_name=container_name,
credential=self.credential, api_version=self.api_version, _configuration=self._config,
_pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function, loop=self._loop)
def get_blob_client(
self, container, # type: Union[ContainerProperties, str]
blob, # type: Union[BlobProperties, str]
snapshot=None # type: Optional[Union[Dict[str, Any], str]]
):
# type: (...) -> BlobClient
"""Get a client to interact with the specified blob.
The blob need not already exist.
:param container:
The container that the blob is in. This can either be the name of the container,
or an instance of ContainerProperties.
:type container: str or ~azure.storage.blob.ContainerProperties
:param blob:
The blob with which to interact. This can either be the name of the blob,
or an instance of BlobProperties.
:type blob: str or ~azure.storage.blob.BlobProperties
:param snapshot:
The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
or a dictionary output returned by
:func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`.
:type snapshot: str or dict(str, Any)
:returns: A BlobClient.
:rtype: ~azure.storage.blob.aio.BlobClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_service_async.py
:start-after: [START bsc_get_blob_client]
:end-before: [END bsc_get_blob_client]
:language: python
:dedent: 16
:caption: Getting the blob client to interact with a specific blob.
"""
try:
container_name = container.name
except AttributeError:
container_name = container
try:
blob_name = blob.name
except AttributeError:
blob_name = blob
_pipeline = AsyncPipeline(
transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
policies=self._pipeline._impl_policies # pylint: disable = protected-access
)
return BlobClient( # type: ignore
self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
credential=self.credential, api_version=self.api_version, _configuration=self._config,
_pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
key_resolver_function=self.key_resolver_function, loop=self._loop)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import shutil
import tempfile
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests.image import fake
from nova import volume
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def get_fake_cache():
    """Build a canned network-info cache for tests.

    One interface on bridge 'br0' with a fixed IPv4 subnet; the first fixed
    address carries two floating IPs. An IPv6 subnet is appended when
    FLAGS.use_ipv6 is set.
    """
    def _addr(ip, fixed=True, floats=None):
        entry = {'address': ip, 'type': 'fixed' if fixed else 'floating'}
        if fixed and floats:
            # Floating IPs hang off the fixed address they map to.
            entry['floating_ips'] = [_addr(f, fixed=False) for f in floats]
        return entry

    v4_subnet = {'cidr': '192.168.0.0/24',
                 'ips': [_addr('192.168.0.3',
                               floats=['1.2.3.4', '5.6.7.8']),
                         _addr('192.168.0.4')]}
    info = [{'address': 'aa:bb:cc:dd:ee:ff',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'private',
                         'subnets': [v4_subnet]}}]
    if FLAGS.use_ipv6:
        info[0]['network']['subnets'].append(
            {'cidr': 'fe80:b33f::/64',
             'ips': [_addr('fe80:b33f::a8bb:ccff:fedd:eeff')]})
    return info
def get_instances_with_cached_ips(orig_func, *args, **kwargs):
    """Kludge the fake network cache into instance(s) without having to
    create DB entries.
    """
    instances = orig_func(*args, **kwargs)
    # orig_func may return either a single instance dict or a list of them.
    targets = instances if isinstance(instances, list) else [instances]
    for instance in targets:
        instance['info_cache'] = {'network_info': get_fake_cache()}
    return instances
class CinderCloudTestCase(test.TestCase):
    def setUp(self):
        """Wire up fake compute/volume/image/network services for the
        EC2 cloud controller under test.
        """
        super(CinderCloudTestCase, self).setUp()
        # Volumes live in a throwaway temp dir (removed again in tearDown).
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API',
                   volumes_dir=vol_tmpdir,
                   stub_network=True)

        def fake_show(meh, context, id):
            # Minimal image record; kernel/ramdisk ids point at the uuid
            # registered with the S3 image service below.
            return {'id': id,
                    'name': 'fake_name',
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(_self, context, **kwargs):
            # detail() reuses fake_show() and only honours the 'name' filter.
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass
        # Usage notifications are irrelevant to these tests; silence them.
        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(compute_scheduler_driver='nova.scheduler.'
                   'chance.ChanceScheduler')
        # set up services
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.volume = self.start_service('volume')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)
        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
try:
shutil.rmtree(FLAGS.volumes_dir)
except OSError, e:
pass
self.volume_api.reset_fake_api(self.context)
super(CinderCloudTestCase, self).tearDown()
fake.FakeImageService_reset()
def _stub_instance_get_with_fixed_ips(self, func_name):
orig_func = getattr(self.cloud.compute_api, func_name)
def fake_get(*args, **kwargs):
return get_instances_with_cached_ips(orig_func, *args, **kwargs)
self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
keypair_api = compute_api.KeypairAPI()
return keypair_api.create_key_pair(self.context, self.context.user_id,
name)
    def test_describe_volumes(self):
        """Makes sure describe_volumes works and filters results."""
        # Two volumes so both the unfiltered listing and the by-id filter
        # can be verified.
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        name='test-1',
                                        description='test volume 1')
        vol2 = self.cloud.create_volume(self.context,
                                        size=1,
                                        name='test-2',
                                        description='test volume 2')
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 2)
        # Filtering by id must return exactly the requested volume.
        result = self.cloud.describe_volumes(self.context,
                                             [vol1['volumeId']])
        self.assertEqual(len(result['volumeSet']), 1)
        self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])
        self.cloud.delete_volume(self.context, vol2['volumeId'])
def test_create_volume_in_availability_zone(self):
"""Makes sure create_volume works when we specify an availability
zone
"""
availability_zone = 'zone1:host1'
result = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
volume_id = result['volumeId']
availabilityZone = result['availabilityZone']
self.assertEqual(availabilityZone, availability_zone)
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
self.assertEqual(result['volumeSet'][0]['availabilityZone'],
availabilityZone)
self.cloud.delete_volume(self.context, volume_id)
    def test_create_volume_from_snapshot(self):
        """Makes sure create_volume works when we specify a snapshot."""
        availability_zone = 'zone1:host1'
        # Source volume to snapshot.
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        snap = self.cloud.create_snapshot(self.context,
                                          vol1['volumeId'],
                                          name='snap-1',
                                          description='test snap of vol %s'
                                          % vol1['volumeId'])
        # Second volume created from the snapshot (no explicit size given).
        vol2 = self.cloud.create_volume(self.context,
                                        snapshot_id=snap['snapshotId'])
        volume1_id = vol1['volumeId']
        volume2_id = vol2['volumeId']
        result = self.cloud.describe_volumes(self.context)
        self.assertEqual(len(result['volumeSet']), 2)
        self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
        # Delete in dependency order: derived volume, snapshot, source volume.
        self.cloud.delete_volume(self.context, volume2_id)
        self.cloud.delete_snapshot(self.context, snap['snapshotId'])
        self.cloud.delete_volume(self.context, volume1_id)
    def test_describe_snapshots(self):
        """Makes sure describe_snapshots works and filters results."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        # Two snapshots of the same volume. NOTE(review): both use the
        # display name 'snap-1'; filtering below is by id so this appears
        # harmless, but confirm the second was not meant to be 'snap-2'.
        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap1 of vol %s' %
                                           vol1['volumeId'])
        snap2 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap2 of vol %s' %
                                           vol1['volumeId'])
        result = self.cloud.describe_snapshots(self.context)
        self.assertEqual(len(result['snapshotSet']), 2)
        # Filtering by snapshot id must return exactly one entry.
        result = self.cloud.describe_snapshots(
            self.context,
            snapshot_id=[snap2['snapshotId']])
        self.assertEqual(len(result['snapshotSet']), 1)
        self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
        self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])
    def test_create_snapshot(self):
        """Makes sure create_snapshot works."""
        availability_zone = 'zone1:host1'
        # NOTE(review): this initial listing's result is unused — the name
        # is rebound below before first use; looks like leftover scaffolding.
        result = self.cloud.describe_snapshots(self.context)
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap1 of vol %s' %
                                           vol1['volumeId'])
        snapshot_id = snap1['snapshotId']
        # The new snapshot must be the only one reported.
        result = self.cloud.describe_snapshots(self.context)
        self.assertEqual(len(result['snapshotSet']), 1)
        self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
        self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
        self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_delete_snapshot(self):
"""Makes sure delete_snapshot works."""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snapshot_id = snap1['snapshotId']
result = self.cloud.delete_snapshot(self.context,
snapshot_id=snapshot_id)
self.assertTrue(result)
self.cloud.delete_volume(self.context, vol1['volumeId'])
    def _block_device_mapping_create(self, instance_uuid, mappings):
        """Insert BDM rows for instance_uuid and create a backing fake
        volume for every mapping that names a volume_id.

        Returns the list of created volumes so callers can clean them up.
        """
        volumes = []
        for bdm in mappings:
            db.block_device_mapping_create(self.context, bdm)
            if 'volume_id' in bdm:
                values = {'id': bdm['volume_id']}
                # Carry optional per-mapping attributes over to the volume.
                for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
                                         ('snapshot_size', 'volume_size'),
                                         ('delete_on_termination',
                                          'delete_on_termination')]:
                    if bdm_key in bdm:
                        values[vol_key] = bdm[bdm_key]
                kwargs = {'name': 'bdmtest-volume',
                          'description': 'bdm test volume description',
                          'status': 'available',
                          'host': self.volume.host,
                          'size': 1,
                          'attach_status': 'detached',
                          'volume_id': values['id']}
                vol = self.volume_api.create_with_kwargs(self.context,
                                                         **kwargs)
                if 'snapshot_id' in values:
                    self.volume_api.create_snapshot(self.context,
                                                    vol,
                                                    'snapshot-bdm',
                                                    'fake snap for bdm tests',
                                                    values['snapshot_id'])
                # Attach the volume at the mapping's device name.
                self.volume_api.attach(self.context, vol,
                                       instance_uuid, bdm['device_name'])
                volumes.append(vol)
        return volumes
    def _setUpBlockDeviceMapping(self):
        """Create two instances plus a spread of BDM rows for inst1.

        inst1 gets snapshot-, volume-, no_device-, swap- and ephemeral-style
        mappings; inst2 gets none. Returns (inst1, inst2, volumes) where
        volumes are the fake volumes backing the volume_id mappings.
        """
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        inst1 = db.instance_create(self.context,
                                   {'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'root_device_name': '/dev/sdb1'})
        inst2 = db.instance_create(self.context,
                                   {'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'root_device_name': '/dev/sdc1'})
        instance_uuid = inst1['uuid']
        # sdb1-sdb6: EBS-style mappings (snapshot/volume backed);
        # sdb7: no_device; sdb8/sdb9: swap/ephemeral virtual devices.
        mappings0 = [
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb1',
             'snapshot_id': '1',
             'volume_id': '2'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb2',
             'volume_id': '3',
             'volume_size': 1},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb3',
             'delete_on_termination': True,
             'snapshot_id': '4',
             'volume_id': '5'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb4',
             'delete_on_termination': False,
             'snapshot_id': '6',
             'volume_id': '7'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb5',
             'snapshot_id': '8',
             'volume_id': '9',
             'volume_size': 0},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb6',
             'snapshot_id': '10',
             'volume_id': '11',
             'volume_size': 1},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb7',
             'no_device': True},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb8',
             'virtual_name': 'swap'},
            {'instance_uuid': instance_uuid,
             'device_name': '/dev/sdb9',
             'virtual_name': 'ephemeral3'}]
        volumes = self._block_device_mapping_create(instance_uuid, mappings0)
        return (inst1, inst2, volumes)
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
for vol in volumes:
self.volume_api.delete(self.context, vol)
for uuid in (inst1['uuid'], inst2['uuid']):
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, uuid):
db.block_device_mapping_destroy(self.context, bdm['id'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst1['uuid'])
_expected_instance_bdm1 = {
'instanceId': 'i-00000001',
'rootDeviceName': '/dev/sdb1',
'rootDeviceType': 'ebs'}
_expected_block_device_mapping0 = [
{'deviceName': '/dev/sdb1',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '2',
}},
{'deviceName': '/dev/sdb2',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '3',
}},
{'deviceName': '/dev/sdb3',
'ebs': {'status': 'in-use',
'deleteOnTermination': True,
'volumeId': '5',
}},
{'deviceName': '/dev/sdb4',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '7',
}},
{'deviceName': '/dev/sdb5',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '9',
}},
{'deviceName': '/dev/sdb6',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '11', }}]
# NOTE(yamahata): swap/ephemeral device case isn't supported yet.
_expected_instance_bdm2 = {
'instanceId': 'i-00000002',
'rootDeviceName': '/dev/sdc1',
'rootDeviceType': 'instance-store'}
    def test_format_instance_bdm(self):
        """_format_instance_bdm fills in root device type and mappings."""
        (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
        # inst1: EBS-backed root, so the full mapping list is expected.
        result = {}
        self.cloud._format_instance_bdm(self.context, inst1['uuid'],
                                        '/dev/sdb1', result)
        self.assertSubDictMatch(
            {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
            result)
        self._assertEqualBlockDeviceMapping(
            self._expected_block_device_mapping0, result['blockDeviceMapping'])
        # inst2: no BDM rows, so only the root device type is checked.
        result = {}
        self.cloud._format_instance_bdm(self.context, inst2['uuid'],
                                        '/dev/sdc1', result)
        self.assertSubDictMatch(
            {'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
            result)
        self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def _assertInstance(self, instance_id):
ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
result = self.cloud.describe_instances(self.context,
instance_id=[ec2_instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
result = result['instancesSet'][0]
self.assertEqual(result['instanceId'], ec2_instance_id)
return result
def _assertEqualBlockDeviceMapping(self, expected, result):
self.assertEqual(len(expected), len(result))
for x in expected:
found = False
for y in result:
if x['deviceName'] == y['deviceName']:
self.assertSubDictMatch(x, y)
found = True
break
self.assertTrue(found)
    def test_describe_instances_bdm(self):
        """Make sure describe_instances works with root_device_name and
        block device mappings.
        """
        (inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
        # EBS-root instance reports its full block device mapping.
        result = self._assertInstance(inst1['id'])
        self.assertSubDictMatch(self._expected_instance_bdm1, result)
        self._assertEqualBlockDeviceMapping(
            self._expected_block_device_mapping0, result['blockDeviceMapping'])
        # Instance-store-root instance reports only root device info.
        result = self._assertInstance(inst2['id'])
        self.assertSubDictMatch(self._expected_instance_bdm2, result)
        self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertTrue(key in d1)
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
self.assertDictMatch(d1, d2)
    def _setUpImageSet(self, create_volumes_and_snapshots=False):
        """Stub the image service with two fake machine images.

        image1 defines both 'mappings' (ephemeral/swap virtual devices)
        and 'block_device_mapping' entries; image2 only overrides its
        root device name.  When create_volumes_and_snapshots is True,
        also create the volumes/snapshots referenced by image1's block
        device mapping and return their ids as (volumes, snapshots).
        """
        mappings1 = [
            {'device': '/dev/sda1', 'virtual': 'root'},
            {'device': 'sdb0', 'virtual': 'ephemeral0'},
            {'device': 'sdb1', 'virtual': 'ephemeral1'},
            {'device': 'sdb2', 'virtual': 'ephemeral2'},
            {'device': 'sdb3', 'virtual': 'ephemeral3'},
            {'device': 'sdb4', 'virtual': 'ephemeral4'},
            {'device': 'sdc0', 'virtual': 'swap'},
            {'device': 'sdc1', 'virtual': 'swap'},
            {'device': 'sdc2', 'virtual': 'swap'},
            {'device': 'sdc3', 'virtual': 'swap'},
            {'device': 'sdc4', 'virtual': 'swap'}]
        # NOTE: 01234567 is a Python 2 octal literal (== 342391 == 0x53977);
        # 12345678 is decimal (== 0xbc614e).
        block_device_mapping1 = [
            {'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
            {'device_name': '/dev/sdb2', 'volume_id': 01234567},
            {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
            {'device_name': '/dev/sdb4', 'no_device': True},
            {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
            {'device_name': '/dev/sdc2', 'volume_id': 12345678},
            {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
            {'device_name': '/dev/sdc4', 'no_device': True}]
        image1 = {
            'id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'name': 'fake_name',
            'properties': {
                'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'type': 'machine',
                'image_state': 'available',
                'mappings': mappings1,
                'block_device_mapping': block_device_mapping1,
                }
            }
        mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
        block_device_mapping2 = [{'device_name': '/dev/sdb1',
                                  'snapshot_id': 01234567}]
        image2 = {
            'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            'name': 'fake_name',
            'properties': {
                'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'type': 'machine',
                'root_device_name': '/dev/sdb1',
                'mappings': mappings2,
                'block_device_mapping': block_device_mapping2}}

        def fake_show(meh, context, image_id):
            # Deep-copy so callers cannot mutate the shared fixtures.
            _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
            for i in _images:
                if str(i['id']) == str(image_id):
                    return i
            raise exception.ImageNotFound(image_id=image_id)

        def fake_detail(meh, context):
            return [copy.deepcopy(image1), copy.deepcopy(image2)]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        volumes = []
        snapshots = []
        if create_volumes_and_snapshots:
            for bdm in block_device_mapping1:
                if 'volume_id' in bdm:
                    vol = self._volume_create(bdm['volume_id'])
                    volumes.append(vol['id'])
                if 'snapshot_id' in bdm:
                    kwargs = {'volume_id': 76543210,
                              'volume_size': 1,
                              'name': 'test-snap',
                              'description': 'test snap desc',
                              'snap_id': bdm['snapshot_id'],
                              'status': 'available'}
                    snap = self.volume_api.create_snapshot_with_kwargs(
                        self.context, **kwargs)
                    snapshots.append(snap['id'])
        return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertTrue('rootDeviceType' in result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertTrue('rootDeviceName' in result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertTrue('blockDeviceMapping' in result)
return result
_expected_root_device_name1 = '/dev/sda1'
# NOTE(yamahata): noDevice doesn't make sense when returning mapping
# It makes sense only when user overriding existing
# mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00053977'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00053977'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00bc614e'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00bc614e'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00053977'}}]
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def _restart_compute_service(self, periodic_interval=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval:
self.compute = self.start_service(
'compute', periodic_interval=periodic_interval)
else:
self.compute = self.start_service('compute')
def _volume_create(self, volume_id=None):
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
#return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertEqual(vol['instance_uuid'], None)
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol2 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1_uuid,
'delete_on_termination': False},
{'device_name': '/dev/vdc',
'volume_id': vol2_uuid,
'delete_on_termination': True},
]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(str(vol['id']) == str(vol1_uuid) or
str(vol['id']) == str(vol2_uuid))
if(str(vol['id']) == str(vol1_uuid)):
self.volume_api.attach(self.context, vol,
instance_uuid, '/dev/vdb')
elif(str(vol['id']) == str(vol2_uuid)):
self.volume_api.attach(self.context, vol,
instance_uuid, '/dev/vdc')
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')
self.cloud.start_instances(self.context, [ec2_instance_id])
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(str(vol['id']) == str(vol1_uuid) or
str(vol['id']) == str(vol2_uuid))
self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
vol['mountpoint'] == '/dev/vdc')
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
#Here we puke...
self.cloud.terminate_instances(self.context, [ec2_instance_id])
admin_ctxt = context.get_admin_context(read_deleted="no")
vol = self.volume_api.get(admin_ctxt, vol2_uuid)
self.assertFalse(vol['deleted'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
self._restart_compute_service()
    def test_stop_with_attached_volume(self):
        """Make sure attach info is reflected to block device mapping."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol2 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
        vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'volume_id': vol1_uuid,
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                         ec2_instance_id)
        # Only vol1 was mapped at boot; vol2 must still be detached.
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 1)
        for vol in vols:
            self.assertEqual(vol['id'], vol1_uuid)
            self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
        vol = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_detached(vol)
        # Attach vol2 and detach vol1 while the instance is running.
        instance = db.instance_get(self.context, instance_id)
        self.cloud.compute_api.attach_volume(self.context,
                                             instance,
                                             volume_id=vol2_uuid,
                                             device='/dev/vdc')
        vol1 = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_attached(vol1, instance_uuid, '/dev/vdb')
        vol2 = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')
        self.cloud.compute_api.detach_volume(self.context,
                                             volume_id=vol1_uuid)
        vol1 = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_detached(vol1)
        result = self.cloud.stop_instances(self.context, [ec2_instance_id])
        self.assertTrue(result)
        vol2 = self.volume_api.get(self.context, vol2_uuid)
        self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')
        self.cloud.start_instances(self.context, [ec2_instance_id])
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 1)
        # NOTE(review): this first check re-uses the vol1 object fetched
        # before the stop/start cycle (stale data); the refreshed copy is
        # checked immediately below.
        self._assert_volume_detached(vol1)
        vol1 = self.volume_api.get(self.context, vol1_uuid)
        self._assert_volume_detached(vol1)
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
    def test_run_with_snapshot(self):
        """Makes sure run/stop/start instance with snapshot works."""
        availability_zone = 'zone1:host1'
        vol1 = self.cloud.create_volume(self.context,
                                        size=1,
                                        availability_zone=availability_zone)
        # Two snapshots of the same source volume, mapped to two devices.
        snap1 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-1',
                                           description='test snap of vol %s' %
                                           vol1['volumeId'])
        snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
        snap2 = self.cloud.create_snapshot(self.context,
                                           vol1['volumeId'],
                                           name='snap-2',
                                           description='test snap of vol %s' %
                                           vol1['volumeId'])
        snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': [{'device_name': '/dev/vdb',
                                            'snapshot_id': snap1_uuid,
                                            'delete_on_termination': False, },
                                           {'device_name': '/dev/vdc',
                                            'snapshot_id': snap2_uuid,
                                            'delete_on_termination': True}]}
        ec2_instance_id = self._run_instance(**kwargs)
        instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                         ec2_instance_id)
        # Booting must have created one attached volume per snapshot.
        vols = self.volume_api.get_all(self.context)
        vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
        self.assertEqual(len(vols), 2)
        vol1_id = None
        vol2_id = None
        for vol in vols:
            snapshot_uuid = vol['snapshot_id']
            if snapshot_uuid == snap1_uuid:
                vol1_id = vol['id']
                mountpoint = '/dev/vdb'
            elif snapshot_uuid == snap2_uuid:
                vol2_id = vol['id']
                mountpoint = '/dev/vdc'
            else:
                self.fail()
            self._assert_volume_attached(vol, instance_uuid, mountpoint)
        # Just make sure we found them
        self.assertTrue(vol1_id)
        self.assertTrue(vol2_id)
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        # vol1 had delete_on_termination=False, so it must survive.
        admin_ctxt = context.get_admin_context(read_deleted="no")
        vol = self.volume_api.get(admin_ctxt, vol1_id)
        self._assert_volume_detached(vol)
        self.assertFalse(vol['deleted'])
        #db.volume_destroy(self.context, vol1_id)
        ##admin_ctxt = context.get_admin_context(read_deleted="only")
        ##vol = db.volume_get(admin_ctxt, vol2_id)
        ##self.assertTrue(vol['deleted'])
        #for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
        #    self.cloud.delete_snapshot(self.context, snapshot_id)
    def test_create_image(self):
        """Make sure that CreateImage works."""
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)
        # The fixtures only need to exist; the returned ids are unused.
        (volumes, snapshots) = self._setUpImageSet(
            create_volumes_and_snapshots=True)
        kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1}
        ec2_instance_id = self._run_instance(**kwargs)
        self.cloud.terminate_instances(self.context, [ec2_instance_id])
        self._restart_compute_service()
    @staticmethod
    def _fake_bdm_get(ctxt, id):
        """Fake block-device-mapping lookup returning one entry of each
        flavor: EBS volume, snapshot, no-device marker, ephemerals and
        swap."""
        return [{'volume_id': 87654321,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': None,
                 'delete_on_termination': True,
                 'device_name': '/dev/sdh'},
                {'volume_id': None,
                 'snapshot_id': 98765432,
                 'no_device': None,
                 'virtual_name': None,
                 'delete_on_termination': True,
                 'device_name': '/dev/sdi'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': True,
                 'virtual_name': None,
                 'delete_on_termination': None,
                 'device_name': None},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'ephemeral0',
                 'delete_on_termination': None,
                 'device_name': '/dev/sdb'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'swap',
                 'delete_on_termination': None,
                 'device_name': '/dev/sdc'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'ephemeral1',
                 'delete_on_termination': None,
                 'device_name': '/dev/sdd'},
                {'volume_id': None,
                 'snapshot_id': None,
                 'no_device': None,
                 'virtual_name': 'ephemeral2',
                 'delete_on_termination': None,
                 'device_name': '/dev/sd3'},
                ]
|
|
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
#   --export-all-symbols because it didn't work reliably in some
#   tested configurations. And because other windows compilers also
#   need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.

    Returns a list with the runtime library name, or None for non-MSVC
    builds.  Raises ValueError for an unrecognised MSC version token.
    """
    msc_pos = sys.version.find('MSC v.')
    if msc_pos == -1:
        # Not an MSVC-built interpreter: nothing to link against.
        return None
    msc_ver = sys.version[msc_pos + 6:msc_pos + 10]
    # Compiler version token -> MSVC runtime DLL name.
    runtimes = {
        '1300': 'msvcr70',   # MSVC 7.0
        '1310': 'msvcr71',   # MSVC 7.1
        '1400': 'msvcr80',   # VS2005 / MSVC 8.0
        '1500': 'msvcr90',   # VS2008 / MSVC 9.0
        '1600': 'msvcr100',  # VS2010 / MSVC 10.0
    }
    if msc_ver in runtimes:
        return [runtimes[msc_ver]]
    if int(msc_ver) >= 1900:
        # VS2015 / MSVC 14.0
        return ['msvcr140']
    raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler(UnixCCompiler):
    """ Handles the Cygwin port of the GNU C compiler to Windows.
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Probe gcc/ld/dllwrap versions and configure the executables
        used for compiling and linking."""
        UnixCCompiler.__init__(self, verbose, dry_run, force)

        status, details = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)

        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )

        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))

        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compiles the source by spawning GCC and windres if needed."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError as msg:
                raise CompileError(msg)
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError as msg:
                raise CompileError(msg)

    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link the objects."""
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])

        # Additional libraries
        libraries.extend(self.dll_libraries)

        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)

            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))

            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")

            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)

            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)

        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):

        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KiB < stripped_file < ??100KiB
        #   unstripped_file = stripped_file + XXX KiB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")

        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)

    # -- Miscellaneous methods -----------------------------------------

    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Adds supports for rc and res files."""
        if output_dir is None:
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            base, ext = os.path.splitext(os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError("unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext in ('.res', '.rc'):
                # these need to be compiled to object files
                obj_names.append (os.path.join(output_dir,
                                               base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join(output_dir,
                                               base + self.obj_extension))
        return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
    """ Handles the Mingw32 port of the GNU C compiler to Windows.
    """
    compiler_type = 'mingw32'

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Configure executables for mingw32 (no -mcygwin flags)."""
        CygwinCCompiler.__init__ (self, verbose, dry_run, force)

        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if is_cygwingcc():
            raise CCompilerError(
                'Cygwin gcc cannot be used with --compiler=mingw32')

        # Pick the word-size macro from the interpreter's pointer size.
        if sys.maxsize == 2**31 - 1:
            ms_win=' -DMS_WIN32'
        else:
            ms_win=' -DMS_WIN64'
        self.set_executables(compiler='gcc -O -Wall'+ms_win,
                             compiler_so='gcc -mdll -O -Wall'+ms_win,
                             compiler_cxx='g++ -O -Wall'+ms_win,
                             linker_exe='gcc',
                             linker_so='%s %s %s'
                                       % (self.linker_dll, shared_option,
                                          entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        self.dll_libraries=[]

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.
        self.dll_libraries = get_msvcr()
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.

# Status codes returned by check_config_h().
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation appears amenable to building
    extensions with GCC.

    Returns a tuple (status, details) where 'status' is one of
    CONFIG_H_OK, CONFIG_H_NOTOK or CONFIG_H_UNCERTAIN and 'details' is a
    human-readable string explaining the situation.

    There are two ways to conclude "OK": either 'sys.version' contains the
    string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig

    # A GCC-built interpreter advertises itself in sys.version, in which
    # case pyconfig.h is assumed to be fine.
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"

    fn = sysconfig.get_config_h_filename()
    try:
        with open(fn) as config_h:
            mentions_gnuc = "__GNUC__" in config_h.read()
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    if mentions_gnuc:
        return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
    return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
# Matches a dotted version number (e.g. b"2.13.90") in tool output.
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
def _find_exe_version(cmd):
    """Find the version of an executable by running `cmd` in the shell.

    If the command is not found, or the output does not match
    `RE_VERSION`, returns None.
    """
    executable = cmd.split()[0]
    if find_executable(executable) is None:
        return None
    pipe = Popen(cmd, shell=True, stdout=PIPE).stdout
    try:
        output = pipe.read()
    finally:
        pipe.close()
    match = RE_VERSION.search(output)
    if match is None:
        return None
    # LooseVersion works with strings, so decode the matched bytes first.
    return LooseVersion(match.group(1).decode())
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.

    If not possible it returns None for it.
    """
    return tuple(_find_exe_version(cmd)
                 for cmd in ('gcc -dumpversion', 'ld -v', 'dllwrap --version'))
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    # 'gcc -dumpmachine' prints the target triplet, e.g. b'i686-pc-cygwin'.
    triplet = check_output(['gcc', '-dumpmachine']).strip()
    return triplet.endswith(b'cygwin')
|
|
"""
This module contains tests for combinator.branchmgr.
"""
import os, sys, StringIO
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from combinator import branchmgr
from combinator.branchmgr import DuplicateBranch, NonExistentBranch
from combinator.branchmgr import InvalidBranch, UncleanTrunkWorkingCopy
from combinator.branchmgr import MissingTrunkLocation
from combinator.branchmgr import BranchManager, subversionURLExists
from combinator.branchmgr import chbranchMain, mkbranchMain, whbranchMain
from combinator.branchmgr import unbranchMain
from combinator.subversion import createSubversionRepository, commit
class SubversionUtilitiesTests(TestCase):
    """
    Tests for more or less general subversion-related functionality.
    """
    def setUp(self):
        """
        Compute the path and URL to a subversion repository which can be
        tested against and set up standard out to be recorded and hidden.
        """
        self.repository = FilePath(self.mktemp())
        createSubversionRepository(self.repository, {'foo': {}})
        self.url = 'file://' + self.repository.path
        # Capture stdout so svn chatter doesn't pollute test output.
        self.stdout = sys.stdout
        sys.stdout = StringIO.StringIO()

    def tearDown(self):
        """
        Restore the normal standard out behavior.
        """
        sys.stdout = self.stdout

    def test_subversionURLExists(self):
        """
        L{subversionURLExists} should return True if given an URL which does
        exist.
        """
        self.assertTrue(subversionURLExists(self.url))

    def test_subversionURLDoesNotExist(self):
        """
        L{subversionURLExists} should return False if given an URL which
        does not exist.
        """
        self.assertFalse(subversionURLExists(self.url + '/bar'))
class BranchManagerTests(TestCase):
    """
    Tests for the BranchManager object.
    """
    def setUp(self):
        """
        Start keeping a record of all changed environment variables.
        """
        self.changedEnv = {}

    def changeEnvironment(self, key, value):
        """
        Change an environment variable such that it will be set back to its
        previous value at the end of the test.
        """
        # NOTE(review): assumes *key* is already present in os.environ;
        # raises KeyError otherwise.
        self.changedEnv[key] = os.environ[key]
        os.environ[key] = value

    def tearDown(self):
        """
        Change back all environment variables altered during the course of this
        test.
        """
        for k, v in self.changedEnv.items():
            os.environ[k] = v

    def test_creation(self):
        """
        Verify that a newly-created branch manager can locate the paths it
        needs to do things.
        """
        b = BranchManager()
        self.assertNotEqual(b.svnProjectsDir, None)
        self.assertNotEqual(b.sitePathsPath, None)
        self.assertNotEqual(b.binCachePath, None)

    def test_projectsEnvironment(self):
        """
        Verify that BranchManager draws from the environment for the projects
        path.
        """
        self.changeEnvironment("COMBINATOR_PROJECTS", "somedir")
        b = BranchManager()
        self.assertEqual(b.svnProjectsDir, os.path.abspath("somedir"))

    def test_pathsEnvironment(self):
        """
        Verify that BranchManager draws from the environment for the paths
        path.
        """
        self.changeEnvironment("COMBINATOR_PATHS", "pathdir")
        b = BranchManager()
        self.assertEqual(b.sitePathsPath, os.path.abspath("pathdir"))
        self.assertEqual(b.binCachePath, "pathdir/bincache")

    def _perUserSitePackages(self, home):
        """
        Construct the path to the user-specific site-packages path.
        """
        return os.path.abspath(os.path.join(
            home, '.local', 'lib', 'python%d.%d' % tuple(sys.version_info[:2]),
            'site-packages'))

    def test_userSitePackages(self):
        """
        L{BranchManager.getPaths} should return an iterable which has as an
        element the user-specific site-packages directory, if that directory
        exists.
        """
        home = self.mktemp()
        sitePackages = self._perUserSitePackages(home)
        os.makedirs(sitePackages)
        self.changeEnvironment('HOME', home)
        b = BranchManager()
        self.assertIn(sitePackages, list(b.getPaths()))

    def test_missingUserSitePackages(self):
        """
        L{BranchManager.getPaths} should return an iterable which does not
        have as an element the user-specific site-packages directory, if
        that directory does not exist.
        """
        home = self.mktemp()
        self.changeEnvironment('HOME', home)
        b = BranchManager()
        self.assertNotIn(self._perUserSitePackages(home), list(b.getPaths()))
class FakeBranchManager(object):
    """
    Purely in-memory implementation of the branch manager API.
    @ivar activeBranches: A mapping from C{str} project names to C{str} branch
        names.  The branch name corresponding to each project name is the
        branch which is currently active for that project.
    @ivar repositories: A mapping from C{str} project names to C{dict}s
        representing the contents of the repository for that project.  Keys are
        C{str} giving path segments and values are either C{str} giving file
        contents or C{dict} with similar structure.
    @ivar workingCopies: A mapping from C{str} project names to C{list} of
        C{str} branch names.  Each branch name in a list is a branch which has
        been checked out for the corresponding project.
    @ivar trunkClean: C{bool} indicating whether the trunk working copy is
        free of uncommitted changes; consulted by L{mergeProjectBranch}.
    """
    def __init__(self, repositories=None):
        self.activeBranches = {}
        if repositories is None:
            repositories = {}
        self.repositories = repositories
        self.workingCopies = {}
        self.trunkClean = True
    def _exists(self, path):
        """
        Return True if the sequence of path segments in C{path} can be
        resolved against the nested C{repositories} dictionaries.
        """
        place = self.repositories
        while path:
            try:
                place = place[path[0]]
            except KeyError:
                return False
            path = path[1:]
        return True
    def changeProjectBranch(self, projectName, branchName, branchURI=None):
        """
        Change the in-memory record of the active branch for the indicated
        project.
        """
        # Only consult the repository when no working copy of this branch
        # exists yet; an existing checkout is reused without validation.
        if (projectName not in self.workingCopies or
            branchName not in self.workingCopies[projectName]):
            if branchURI is None:
                # Without an explicit URI we can only derive a branch path
                # for projects whose trunk location is already known.
                if projectName in self.activeBranches:
                    path = (projectName, 'branches', branchName)
                else:
                    raise MissingTrunkLocation(projectName)
            else:
                path = branchURI
            # Make sure the branch URI is valid.
            if not self._exists(path):
                raise NonExistentBranch(branchURI or branchName)
            self.workingCopies.setdefault(projectName, []).append(branchName)
        self.activeBranches[projectName] = branchName
    def currentBranchFor(self, projectName):
        """
        Retrieve the currently active branch for the given project name.
        """
        return self.activeBranches[projectName]
    def getCurrentBranches(self):
        """
        Retrieve all of the currently active branches.
        """
        # Python 2 idiom; returns an iterator of (project, branch) pairs.
        return self.activeBranches.iteritems()
    def newProjectBranch(self, projectName, branchName):
        """
        Change the given project's active branch.
        """
        if (projectName not in self.workingCopies or
            "trunk" not in self.workingCopies[projectName]):
            raise MissingTrunkLocation(projectName)
        if self._exists((projectName,) + ('branches', branchName)):
            raise DuplicateBranch(branchName)
        self.activeBranches[projectName] = branchName
    def mergeProjectBranch(self, projectName, force=False):
        """
        Change the given project's active branch to trunk, unless it is trunk
        already.
        """
        if self.activeBranches.get(projectName, None) == "trunk":
            raise InvalidBranch()
        if not self.trunkClean and not force:
            raise UncleanTrunkWorkingCopy()
        self.changeProjectBranch(projectName, 'trunk')
def _uri(repository, *branch):
    """
    Build a I{file} URI for the given branch path segments beneath the given
    repository path object.
    """
    location = repository
    for segment in branch:
        location = location.child(segment)
    return 'file://' + location.path
class ChangeBranchTestsMixin:
    """
    Mixin defining behavior tests for a branch-manager implementation.

    The host TestCase must supply C{self.manager} plus the hook methods
    C{createRepository}, C{uri}, C{checkTrunkCheckout},
    C{makeRepositoryInaccessible}, and C{modifyTrunk}, each exercised by the
    tests below.
    """
    def test_getCurrentBranches(self):
        """
        L{BranchManager.getCurrentBranches} returns an iterable of two-tuples
        of project names and current active branches for all known branches.
        """
        self.createRepository("Quux", {"trunk": {}})
        self.manager.changeProjectBranch(
            "Quux", "trunk", self.uri("Quux", "trunk"))
        self.createRepository("Quarj", {"trunk": {},
                                        "branches":
                                        {"foo": {}}})
        self.manager.changeProjectBranch(
            "Quarj", "trunk", self.uri("Quarj", "trunk"))
        self.manager.changeProjectBranch("Quarj", "foo")
        self.assertEqual(
            set(self.manager.getCurrentBranches()),
            set([("Quux", "trunk"), ("Quarj", "foo")]))
    def test_trunkCheckout(self):
        """
        L{BranchManager.changeProjectBranch} creates in the projects directory
        a checkout of trunk of the given project.
        """
        projectName = 'Quux'
        self.createRepository(projectName, {'trunk': {}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.checkTrunkCheckout(projectName)
    def test_trunkCheckoutWritesBranchFile(self):
        """
        L{BranchManager.changeProjectBranch} should write a new I{.bch} file
        for the given project when switching to trunk for the first time.
        """
        projectName = 'Quux'
        self.createRepository(projectName, {'trunk': {}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.assertEqual(self.manager.currentBranchFor(projectName), 'trunk')
    def test_branchCheckoutChangesBranchFile(self):
        """
        L{BranchManager.changeProjectBranch} should rewrite an existing
        project's I{.bch} file when changing to a different branch.  The
        repository URI should not be required for this case.
        """
        projectName = 'Quux'
        branchName = 'foo'
        self.createRepository(
            projectName, {'trunk': {},
                          'branches':
                          {branchName: {}}})
        # First get trunk
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        # Then switch to the branch
        self.manager.changeProjectBranch(projectName, branchName)
        self.assertEqual(
            self.manager.currentBranchFor(projectName), branchName)
    def test_changeToTrunkMissingTrunkLocation(self):
        """
        L{BranchManager.changeProjectBranch} raises L{MissingTrunkLocation}
        when asked to change the active branch of a project to trunk when the
        trunk URI has not been specified.
        """
        err = self.assertRaises(
            MissingTrunkLocation,
            self.manager.changeProjectBranch, 'Quux', 'trunk')
        self.assertEqual(err.args, ("Quux",))
    def test_changeToBranchMissingTrunkLocation(self):
        """
        L{BranchManager.changeProjectBranch} raises L{MissingTrunkLocation}
        when asked to change the active branch of a project to non-trunk when
        the trunk URI has not been specified.
        """
        err = self.assertRaises(
            MissingTrunkLocation,
            self.manager.changeProjectBranch, 'Quux', 'foo')
        self.assertEqual(err.args, ("Quux",))
    def test_changeBranchRejectsInvalid(self):
        """
        L{BranchManager.changeProjectBranch} raises L{NonExistentBranch} when
        passed the name of a branch which does not already exist.
        """
        projectName = 'Quux'
        branchName = 'fantastical'
        self.createRepository(projectName, {'trunk': {}})
        # First get trunk
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        # Then try to change to a branch which isn't real.
        err = self.assertRaises(
            NonExistentBranch,
            self.manager.changeProjectBranch,
            projectName, branchName)
        self.assertEqual(err.args, (branchName,))
    def test_changeBranchRejectsExplicitInvalid(self):
        """
        L{BranchManager.changeProjectBranch} raises L{IOError} when passed a
        branch URI which is invalid.
        """
        projectName = 'Quux'
        branchName = 'foo'
        self.createRepository(projectName,
                              {'trunk': {},
                               'branches':
                               {branchName: {}}})
        # First get trunk
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        # Then try to change to a branch using a URI which is invalid.
        # NOTE(review): the docstring says L{IOError} but the assertion below
        # expects L{NonExistentBranch} -- the docstring looks stale.
        uri = self.uri(projectName, 'not a real thing')
        err = self.assertRaises(
            NonExistentBranch,
            self.manager.changeProjectBranch,
            projectName, branchName, uri)
        self.assertEqual(err.args, (uri,))
    def test_changeToCheckedOutBranch(self):
        """
        L{BranchManager.changeProjectBranch} succeeds if the repository is
        inaccessible but there is already a checkout of the specified
        branch.
        """
        projectName = 'Quux'
        branchName = 'foo'
        self.createRepository(projectName,
                              {'trunk': {},
                               'branches':
                               {branchName: {}}})
        # First get trunk
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        # Then switch to the branch
        self.manager.changeProjectBranch(projectName, branchName)
        # Go offline
        self.makeRepositoryInaccessible(projectName)
        # Switch back to trunk and (since trunk is slightly different than
        # other branches) then back to the branch
        self.manager.changeProjectBranch(projectName, 'trunk')
        self.assertEqual(self.manager.currentBranchFor(projectName), 'trunk')
        self.manager.changeProjectBranch(projectName, branchName)
        self.assertEqual(
            self.manager.currentBranchFor(projectName), branchName)
    def test_newBranchForUnknownProject(self):
        """
        L{BranchManager.newProjectBranch} raises L{MissingTrunkLocation} if
        passed the name of an unrecognized project.
        """
        err = self.assertRaises(
            MissingTrunkLocation,
            self.manager.newProjectBranch, "Quux", "foo")
        self.assertEqual(err.args, ("Quux",))
    def test_changeCurrentBranch(self):
        """
        L{BranchManager.newProjectBranch} should change the current branch of
        the given project to the newly created branch.
        """
        projectName = 'Quux'
        branchName = 'bar'
        self.createRepository(projectName, {'trunk': {}, 'branches': {}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.manager.newProjectBranch(projectName, branchName)
        self.assertEqual(
            self.manager.currentBranchFor(projectName), branchName)
    def test_rejectDuplicateBranch(self):
        """
        L{BranchManager.newProjectBranch} should refuse to copy trunk into an
        existing branch.
        """
        projectName = 'Quux'
        branchName = 'baz'
        self.createRepository(projectName, {'trunk': {},
                                            'branches':
                                            {branchName: {}}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        err = self.assertRaises(
            DuplicateBranch,
            self.manager.newProjectBranch, projectName, branchName)
        self.assertEqual(err.args, (branchName,))
    def test_merge(self):
        """
        Merging a branch does not produce any errors under normal conditions.
        """
        projectName = "Quux"
        branchName = 'baz'
        self.createRepository(projectName, {"trunk": {},
                                            'branches': {branchName: {}}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.manager.changeProjectBranch(projectName, branchName)
        self.manager.mergeProjectBranch(projectName)
        self.assertEqual(
            self.manager.currentBranchFor(projectName), 'trunk')
    def test_mergeUnknownProject(self):
        """
        L{BranchManager.mergeProjectBranch} raises L{MissingTrunkLocation} if
        passed the name of a project for which the trunk URI has not been
        specified.
        """
        projectName = "Quux"
        err = self.assertRaises(
            MissingTrunkLocation,
            self.manager.mergeProjectBranch, projectName)
        self.assertEqual(err.args, (projectName,))
    def test_mergeTrunk(self):
        """
        L{BranchManager.mergeProjectBranch} raises L{InvalidBranch} if passed
        the name of a project for which the current active branch is trunk.
        """
        projectName = "Quux"
        self.createRepository(projectName, {"trunk": {}})
        self.manager.changeProjectBranch(
            projectName, "trunk", self.uri(projectName, "trunk"))
        err = self.assertRaises(
            InvalidBranch,
            self.manager.mergeProjectBranch, projectName)
        self.assertEqual(err.args, ())
    def test_mergeUnclean(self):
        """
        L{BranchManager.mergeProjectBranch} raises L{UncleanTrunkWorkingCopy}
        if there are uncommitted changes in trunk.
        """
        projectName = "Quux"
        branchName = 'baz'
        fname = 'foo.txt'
        contents = {fname: 'some data'}
        self.createRepository(projectName, {"trunk": contents,
                                            'branches': {branchName: contents}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.manager.changeProjectBranch(projectName, branchName)
        self.modifyTrunk(projectName, fname, 'some new data')
        # NOTE(review): err is captured but nothing is asserted about its
        # args; only the raise itself and the unchanged branch are verified.
        err = self.assertRaises(
            UncleanTrunkWorkingCopy,
            self.manager.mergeProjectBranch, projectName)
        self.assertEqual(
            self.manager.currentBranchFor(projectName), branchName)
    def test_forceMergeUnclean(self):
        """
        L{BranchManager.mergeProjectBranch} does not raise
        L{UncleanTrunkWorkingCopy} if the 'force' flag is specified.
        """
        projectName = "Quux"
        branchName = 'baz'
        fname = 'foo.txt'
        contents = {fname: 'some data'}
        self.createRepository(projectName, {"trunk": contents,
                                            'branches': {branchName: contents}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.manager.changeProjectBranch(projectName, branchName)
        self.modifyTrunk(projectName, fname, 'some new data')
        self.manager.mergeProjectBranch(projectName, force=True)
        self.assertEqual(
            self.manager.currentBranchFor(projectName), 'trunk')
class FakeBranchManagerChangeBranchTests(TestCase, ChangeBranchTestsMixin):
    """
    Tests for L{FakeBranchManager.changeProjectBranch}.
    """
    def setUp(self):
        """
        Create an in-memory branch manager which knows about the repository
        for a project with some branches.
        """
        self.manager = FakeBranchManager()

    def uri(self, project, *branch):
        """
        Create an identifier, interpretable by L{FakeBranchManager}, for the
        given project's branch.
        """
        return (project,) + branch

    def createRepository(self, projectName, contents):
        """
        Register an in-memory repository with the given contents for the
        given project.
        """
        self.manager.repositories[projectName] = contents

    def checkTrunkCheckout(self, projectName):
        """
        Assert that a trunk working copy exists for the given project.
        """
        copies = self.manager.workingCopies
        self.assertIn(projectName, copies)
        self.assertIn("trunk", copies[projectName])

    def makeRepositoryInaccessible(self, projectName):
        """
        Discard the repository for the given project.
        """
        del self.manager.repositories[projectName]

    def modifyTrunk(self, projectName, fname, newData):
        """
        Record that a change has been made to a file in trunk.
        """
        self.manager.trunkClean = False
class BranchManagerChangeBranchTests(TestCase, ChangeBranchTestsMixin):
    """
    Tests for L{BranchManager.changeProjectBranch}.
    """
    # Default project name; individual tests mostly shadow it locally.
    projectName = 'Quux'
    def setUp(self):
        """
        Create a branch manager with temporary directories for all its working
        filesystem paths.
        """
        self.paths = self.mktemp()
        self.projects = self.mktemp()
        os.makedirs(self.paths)
        os.makedirs(self.projects)
        self.manager = BranchManager(self.paths, self.projects)
        self.cwd = os.getcwd()
        # Parent directory for per-project SVN repositories; created lazily
        # by createRepository below.
        self.repositories = FilePath(self.mktemp())
    def tearDown(self):
        """
        Assert that the working directory has been restored to its original
        value if it was changed.
        """
        try:
            self.assertEqual(self.cwd, os.getcwd())
        finally:
            os.chdir(self.cwd)
    def createRepository(self, projectName, contents):
        """
        Create a new SVN repository with the given contents and associate it
        with given project.
        """
        path = self.repositories.child(projectName)
        path.makedirs()
        createSubversionRepository(path, contents)
    def uri(self, projectName, *branch):
        """
        Return a I{file} URI for the given branch of the given project.
        """
        return _uri(self.repositories.child(projectName), *branch)
    def checkTrunkCheckout(self, project):
        """
        Assert that a trunk checkout of the given project exists.
        """
        trunkWorkingCopy = FilePath(self.paths).child(project).child('trunk')
        self.assertTrue(
            trunkWorkingCopy.exists(),
            "%r did not exist." % (trunkWorkingCopy.path,))
    def makeRepositoryInaccessible(self, projectName):
        """
        Make the repository inaccessible so checks for the existence of
        branches can't possibly succeed.
        """
        self.repositories.child(projectName).remove()
    def modifyTrunk(self, projectName, fname, newData):
        """
        Make a change to a file in trunk.
        """
        trunkpath = FilePath(self.paths).child(projectName).child('trunk')
        f = trunkpath.child(fname).open('w')
        f.write(newData)
        f.close()
    def commitTrunk(self, projectName):
        """
        Commit the trunk working copy for the given project.
        """
        commit(
            FilePath(self.paths).child(projectName).child('trunk'),
            'Commit some changes')
    def modifyBranch(self, projectName, branchName, fname, newData):
        """
        Make a change to a file in a branch.
        """
        fObj = FilePath(self.paths).child(projectName).child(
            'branches').child(branchName).child(fname).open('w')
        fObj.write(newData)
        fObj.close()
    def commitBranch(self, projectName, branchName):
        """
        Commit a branch working for the given project.
        """
        commit(
            FilePath(self.paths).child(projectName).child(
                'branches').child(branchName),
            'Commit some changes')
    def test_mergeConflict(self):
        """
        L{BranchManager.mergeProjectBranch} performs merges
        non-interactively so that they complete even if there is a merge
        conflict.
        """
        projectName = "Quux"
        branchName = 'baz'
        fname = 'foo.txt'
        contents = {fname: 'some data'}
        self.createRepository(projectName, {"trunk": contents,
                                            "branches": {}})
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        self.manager.newProjectBranch(projectName, branchName)
        # Diverge trunk and the branch on the same file to force a conflict.
        self.modifyTrunk(projectName, fname, 'changed data')
        self.commitTrunk(projectName)
        self.modifyBranch(
            projectName, branchName, fname, 'differently changed data')
        self.commitBranch(projectName, branchName)
        # Success here is simply that the merge completes without hanging or
        # raising; no further assertion is made.
        self.manager.mergeProjectBranch(projectName)
    def test_changeCurrentBranchDeletesUnknown(self):
        """
        If L{BranchManager.changeProjectBranch} creates a new working copy, it
        doesn't contain extra unversioned files from the I{trunk} working copy.
        """
        projectName = 'Quux'
        branchName = 'foo'
        self.createRepository(
            projectName, {'trunk': {},
                          'branches':
                          {branchName: {}}})
        # Get a trunk checkout
        self.manager.changeProjectBranch(
            projectName, 'trunk', self.uri(projectName, 'trunk'))
        # Here is some unversioned junk in the trunk working copy
        self.modifyTrunk(projectName, "junk", "garbage")
        self.manager.changeProjectBranch(projectName, branchName)
        junk = FilePath(self.paths).descendant([
            projectName, "branches", branchName, "junk"])
        self.assertFalse(junk.exists())
class MainFunctionTests(TestCase):
    """
    Tests for the main functions for the Combinator command line tools.
    """
    def setUp(self):
        """
        Replace the global branch manager instance with a fake branch manager
        that is easier to manipulate to test different situations.  Replace
        stdout with a StringIO.
        """
        self.originalBranchManager = branchmgr.theBranchManager
        self.manager = branchmgr.theBranchManager = FakeBranchManager()
        self.originalStandardOutput = sys.stdout
        sys.stdout = StringIO.StringIO()

    def tearDown(self):
        """
        Restore the real branch manager and stdout.
        """
        branchmgr.theBranchManager = self.originalBranchManager
        sys.stdout = self.originalStandardOutput

    def test_chbranchTrunkCheckoutWithoutURI(self):
        """
        C{chbranchMain} raises L{SystemExit} with a string explaining that the
        branchURI parameter is required if it is called to check out trunk
        without a branchURI.
        """
        err = self.assertRaises(
            SystemExit,
            chbranchMain, ["/bin/chbranch", "Quux", "trunk"])
        self.assertEqual(
            err.args,
            ("The location of %r trunk is not known.  Specify a URI as the "
             "3rd argument to check out a branch (check out trunk to make "
             "this unnecessary)." % ("Quux",),))

    def test_chbranchUnknownProject(self):
        """
        L{chbranchMain} raises L{SystemExit} with a string explaining that
        there is no such project if it is called with the name of a project
        which is unknown and a branch other than trunk.
        """
        err = self.assertRaises(
            SystemExit,
            chbranchMain, ["/bin/chbranch", "Quux", "baz"])
        self.assertEqual(
            err.args,
            ("The location of %r trunk is not known.  Specify a URI as the "
             "3rd argument to check out a branch (check out trunk to make "
             "this unnecessary)." % ("Quux",),))

    def test_chbranchInvalidBranchName(self):
        """
        C{chbranchMain} raises L{SystemExit} with a string explaining that
        there is no such branch if it is called to check out a branch which
        does not exist.
        """
        self.manager.repositories["Quux"] = {"trunk": {}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        err = self.assertRaises(
            SystemExit,
            chbranchMain, ["/bin/chbranch", "Quux", "baz"])
        self.assertEqual(
            err.args,
            ("No such branch: %r" % ("baz",),))

    def test_chbranchInvalidURI(self):
        """
        L{chbranchMain} raises L{SystemExit} with a string explaining that
        there is no such URI if it is called to checkout a branch with an
        explicit URI which does not exist.
        """
        self.manager.repositories["Quux"] = {"trunk": {}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        err = self.assertRaises(
            SystemExit,
            chbranchMain, ["/bin/chbranch", "Quux", "baz", "foobar"])
        self.assertEqual(
            err.args,
            ("No such branch: %r" % ("foobar",),))

    def test_chbranchWrongNumberOfArguments(self):
        """
        L{chbranchMain} raises L{SystemExit} with usage information if it is
        called with fewer than three arguments or more than four arguments.
        """
        for args in [
            ["/bin/chbranch", "Quux"],
            ["/bin/chbranch", "Quux", "foo", "bar", "baz"]]:
            err = self.assertRaises(
                SystemExit, chbranchMain, args)
            self.assertEqual(
                err.args, ("Usage: chbranch <project> <branch name> [trunk url]",))

    def test_chbranch(self):
        """
        L{chbranchMain} returns without exception when called to checkout a
        branch which exists.  It changes the active branch for the specified
        project.
        """
        branchName = "foo"
        self.manager.repositories["Quux"] = {"trunk": {},
                                             "branches":
                                             {branchName: {}}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        chbranchMain(["/bin/chbranch", "Quux", branchName])
        self.assertEqual(self.manager.currentBranchFor("Quux"), branchName)

    def test_mkbranchWrongNumberOfArguments(self):
        """
        L{mkbranchMain} raises L{SystemExit} with usage information when called
        with a number of arguments other than three.
        """
        for args in [
            ["/bin/mkbranch", "Foo"],
            ["/bin/mkbranch", "Foo", "bar", "baz"]]:
            err = self.assertRaises(SystemExit, mkbranchMain, args)
            self.assertEqual(
                err.args,
                ("Usage: mkbranch <project> <branch name>",))

    def test_mkbranchUnknownProject(self):
        """
        L{mkbranchMain} raises L{SystemExit} with a string explaining there is
        no such project if it is called with the name of a project which is
        unknown.
        """
        err = self.assertRaises(
            SystemExit,
            mkbranchMain, ["/bin/mkbranch", "Quux", "foo"])
        self.assertEqual(
            err.args,
            ("The location of %r trunk is not known.  Specify a URI as the "
             "3rd argument to check out a branch (check out trunk to make "
             "this unnecessary)." % ("Quux",),))

    def test_mkbranchDuplicateBranch(self):
        """
        L{mkbranchMain} raises L{SystemExit} with a string explaining there is
        already a branch by the given name when called with the name of a
        branch which already exists.
        """
        self.manager.repositories["Quux"] = {"trunk": {},
                                             "branches":
                                             {"foo": {}}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        # The SystemExit's message is deliberately left unasserted here (the
        # previous ``err = ...`` binding was never used); only the raise
        # itself is verified.
        # TODO: assert on err.args once the exact wording of the duplicate
        # branch message is pinned down.
        self.assertRaises(
            SystemExit,
            mkbranchMain, ["/bin/mkbranch", "Quux", "foo"])

    def test_mkbranch(self):
        """
        L{mkbranchMain} returns without exception when called to create a new
        branch for a project which exists.  It changes the active branch for
        the specified project.
        """
        self.manager.repositories["Quux"] = {"trunk": {},
                                             "branches":
                                             {"foo": {}}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        mkbranchMain(["/bin/mkbranch", "Quux", "bar"])
        self.assertEqual(self.manager.currentBranchFor("Quux"), "bar")

    def test_whbranchWrongNumberOfArguments(self):
        """
        L{whbranchMain} raises L{SystemExit} with a string explaining usage
        information if called with more than two arguments.
        """
        err = self.assertRaises(
            SystemExit,
            whbranchMain, ["/bin/whbranch", "foo", "bar"])
        self.assertEqual(
            err.args,
            ("Usage: whbranch [project]",))

    def test_whbranchOneArgument(self):
        """
        L{whbranchMain} prints the current branch for each known project if
        called with one argument.
        """
        self.manager.repositories["Quux"] = {"trunk": {}}
        self.manager.repositories["Quarj"] = {"trunk": {},
                                              "branches":
                                              {"foo": {}}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        self.manager.changeProjectBranch("Quarj", "trunk", ("Quarj", "trunk"))
        self.manager.changeProjectBranch("Quarj", "foo")
        whbranchMain(["/bin/whbranch"])
        self.assertEqual(
            set(sys.stdout.getvalue().splitlines()),
            set(["Quux: trunk", "Quarj: foo"]))

    def test_whbranchTwoArguments(self):
        """
        L{whbranchMain} prints the current branch for the named project if
        called with two arguments.
        """
        self.manager.repositories["Quux"] = {"trunk": {}}
        self.manager.repositories["Quarj"] = {"trunk": {},
                                              "branches":
                                              {"foo": {}}}
        self.manager.changeProjectBranch("Quux", "trunk", ("Quux", "trunk"))
        self.manager.changeProjectBranch("Quarj", "trunk", ("Quarj", "trunk"))
        self.manager.changeProjectBranch("Quarj", "foo")
        # NOTE(review): argv[0] below says mkbranch rather than whbranch;
        # apparently argv[0] is ignored by these mains -- confirm.
        whbranchMain(["/bin/mkbranch", "Quux"])
        self.assertEqual(sys.stdout.getvalue(), "trunk\n")
        sys.stdout.truncate(0)
        whbranchMain(["/bin/mkbranch", "Quarj"])
        self.assertEqual(sys.stdout.getvalue(), "foo\n")

    def test_unbranchWrongNumberOfArgments(self):
        """
        L{unbranchMain} raises L{SystemExit} with usage information if it is
        called with a number of arguments other than two.
        """
        for args in [
            ["/bin/unbranch"],
            ["/bin/unbranch", "Foo", "bar"]]:
            err = self.assertRaises(
                SystemExit,
                unbranchMain, args)
            self.assertEqual(
                err.args,
                ("Usage: unbranch [--force] <project>",))

    def test_unbranchUnknownProject(self):
        """
        L{unbranchMain} raises L{SystemExit} with a string explaining that
        there is no such project if it is called with the name of a project
        which is unknown.
        """
        err = self.assertRaises(
            SystemExit,
            unbranchMain, ["/bin/unbranch", "Quux"])
        self.assertEqual(
            err.args,
            ("The location of %r trunk is not known.  Specify a URI as the "
             "3rd argument to check out a branch (check out trunk to make "
             "this unnecessary)." % ("Quux",),))

    def test_unbranchTrunk(self):
        """
        L{unbranchMain} raises L{SystemExit} with a string explaining that
        trunk cannot be unbranch if called with the name of a project for which
        the current active branch is trunk.
        """
        projectName = "Quux"
        self.manager.repositories[projectName] = {"trunk": {}}
        self.manager.changeProjectBranch(
            projectName, "trunk", (projectName, "trunk"))
        err = self.assertRaises(
            SystemExit,
            unbranchMain, ["/bin/unbranch", projectName])
        self.assertEqual(err.args, ("Cannot merge trunk.",))

    def test_unbranch(self):
        """
        L{unbranchMain} returns without exception when called with the name of
        a project which is known and for which the current active branch is not
        trunk.  The active branch afterwards is trunk.
        """
        projectName = "Quux"
        self.manager.repositories[projectName] = {"trunk": {},
                                                  "branches":
                                                  {"foo": {}}}
        self.manager.changeProjectBranch(
            projectName, "trunk", (projectName, "trunk"))
        self.manager.changeProjectBranch(projectName, "foo")
        unbranchMain(["/bin/unbranch", projectName])
        self.assertEqual(self.manager.currentBranchFor(projectName), "trunk")
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# Compatibility shim: make the Python 2 names ``__builtin__`` and ``long``
# available on Python 3 so the generated bindings below can reference them
# unconditionally (e.g. ``__builtin__.property``).
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
from . import state
class interface_ref(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/vlans/vlan/members/member/interface-ref. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Reference to an interface or subinterface

    NOTE: this module redefines ``interface_ref`` further down (a second
    auto-generated copy); the later definition shadows this one at import
    time.
    """
    # "__state" is name-mangled to _interface_ref__state; __slots__ keeps
    # instances dict-free.
    __slots__ = ("_path_helper", "_extmethods", "__state")

    _yang_name = "interface-ref"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False

        self._extmethods = False
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument is treated
        # as another instance whose changed elements are copied across.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements that were never modified on the source object.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Prefer the parent's path when attached; fall back to the static
        # schema path for a detached instance.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "vlans",
                "vlan",
                "members",
                "member",
                "interface-ref",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/vlans/vlan/members/member/interface_ref/state (container)

        YANG Description: Operational state for interface-ref
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/vlans/vlan/members/member/interface_ref/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Operational state for interface-ref
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property (config: false container has no public setter).
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
from . import state
class interface_ref(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/vlans/vlan/members/member/interface-ref. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Reference to an interface or subinterface
    """

    # Single child container ("state"); stored name-mangled as
    # _interface_ref__state because of the double underscore.
    __slots__ = ("_path_helper", "_extmethods", "__state")
    _yang_name = "interface-ref"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Path helper / extension methods are off until a parent wires them in.
        self._path_helper = False
        self._extmethods = False
        # Default (empty) read-only "state" container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must carry
        # every element named in _pyangbind_elements; changed elements are
        # copied across via their _set_* methods.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached; otherwise return this node's
        # absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "vlans",
                "vlan",
                "members",
                "member",
                "interface-ref",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/vlans/vlan/members/member/interface_ref/state (container)
        YANG Description: Operational state for interface-ref
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/vlans/vlan/members/member/interface_ref/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: Operational state for interface-ref
        """
        # Normalize user-typed values before wrapping.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise with the structured error payload pyangbind callers expect.
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Rebuild the default (empty) container, discarding any set value.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only public accessor (config: false => no public setter).
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
|
|
import asyncio
import warnings
from inspect import isawaitable
from typing import (
Any,
Awaitable,
Callable,
Dict,
List,
MutableMapping,
Optional,
Tuple,
Union,
)
from urllib.parse import quote
import sanic.app # noqa
from sanic.compat import Header
from sanic.exceptions import InvalidUsage, ServerError
from sanic.log import logger
from sanic.request import Request
from sanic.response import HTTPResponse, StreamingHTTPResponse
from sanic.server import StreamBuffer
from sanic.websocket import WebSocketConnection
# Type aliases for the ASGI callable interface: scopes and messages are
# mutable str-keyed mappings, send takes a message and returns an awaitable,
# and receive is a no-arg callable awaiting the next message.
ASGIScope = MutableMapping[str, Any]
ASGIMessage = MutableMapping[str, Any]
ASGISend = Callable[[ASGIMessage], Awaitable[None]]
ASGIReceive = Callable[[], Awaitable[ASGIMessage]]
class MockProtocol:
    """Protocol stand-in used by StreamingHTTPResponse when running under
    ASGI: exposes flow-control hooks and forwards body chunks to the
    transport as ``http.response.body`` messages.
    """

    def __init__(self, transport: "MockTransport", loop):
        self.transport = transport
        # ``loop`` is accepted for interface compatibility but no longer
        # forwarded to asyncio.Event: the ``loop`` parameter was deprecated
        # in Python 3.8 and removed in 3.10.
        self._not_paused = asyncio.Event()
        self._not_paused.set()
        self._complete = asyncio.Event()

    def pause_writing(self) -> None:
        self._not_paused.clear()

    def resume_writing(self) -> None:
        self._not_paused.set()

    async def complete(self) -> None:
        # Bug fix: mark the response as complete. Previously this set
        # ``_not_paused`` instead, so ``is_complete`` never became True and
        # ``push_data`` kept emitting chunks after the terminating message.
        self._complete.set()
        await self.transport.send(
            {"type": "http.response.body", "body": b"", "more_body": False}
        )

    @property
    def is_complete(self) -> bool:
        return self._complete.is_set()

    async def push_data(self, data: bytes) -> None:
        # Silently drop chunks written after completion.
        if not self.is_complete:
            await self.transport.send(
                {"type": "http.response.body", "body": data, "more_body": True}
            )

    async def drain(self) -> None:
        # Block until the writer is resumed (back-pressure).
        await self._not_paused.wait()
class MockTransport:
    """Transport stand-in that exposes the slice of Sanic's transport API the
    ASGI adapter needs, backed by the ASGI receive/send callables."""

    _protocol: Optional[MockProtocol]

    def __init__(
        self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
    ) -> None:
        self.scope = scope
        self._receive = receive
        self._send = send
        self._protocol = None
        self.loop = None

    def get_protocol(self) -> MockProtocol:
        # Lazily create a single protocol bound to this transport.
        if self._protocol is None:
            self._protocol = MockProtocol(self, self.loop)
        return self._protocol

    def get_extra_info(self, info: str) -> Union[str, bool, None]:
        if info == "peername":
            return self.scope.get("server")
        if info == "sslcontext":
            return self.scope.get("scheme") in ("https", "wss")
        return None

    def get_websocket_connection(self) -> WebSocketConnection:
        connection = getattr(self, "_websocket_connection", None)
        if connection is None:
            raise InvalidUsage("Improper websocket connection.")
        return connection

    def create_websocket_connection(
        self, send: ASGISend, receive: ASGIReceive
    ) -> WebSocketConnection:
        self._websocket_connection = WebSocketConnection(send, receive)
        return self._websocket_connection

    def add_task(self) -> None:
        # ASGIApp.create replaces this with loop.create_task via setattr.
        raise NotImplementedError

    async def send(self, data) -> None:
        # TODO:
        # - Validation on data and that it is formatted properly and is valid
        await self._send(data)

    async def receive(self) -> ASGIMessage:
        return await self._receive()
class Lifespan:
    """Implements the ASGI "lifespan" protocol on top of Sanic's
    server start/stop listeners."""

    def __init__(self, asgi_app: "ASGIApp") -> None:
        self.asgi_app = asgi_app
        registered = self.asgi_app.sanic_app.listeners
        if "before_server_start" in registered:
            warnings.warn(
                'You have set a listener for "before_server_start" '
                "in ASGI mode. "
                "It will be executed as early as possible, but not before "
                "the ASGI server is started."
            )
        if "after_server_stop" in registered:
            warnings.warn(
                'You have set a listener for "after_server_stop" '
                "in ASGI mode. "
                "It will be executed as late as possible, but not after "
                "the ASGI server is stopped."
            )

    async def _run_listeners(self, first_event: str, second_event: str) -> None:
        # The ASGI lifespan protocol only has one startup and one shutdown
        # event, so the "before"/"after" listener pairs are fired in a
        # single ordered pass.
        app = self.asgi_app.sanic_app
        handlers = app.listeners.get(first_event, []) + app.listeners.get(
            second_event, []
        )
        for handler in handlers:
            outcome = handler(app, app.loop)
            if isawaitable(outcome):
                await outcome

    async def startup(self) -> None:
        """
        Gather the listeners to fire on server start.
        Because we are using a third-party server and not Sanic server, we do
        not have access to fire anything BEFORE the server starts.
        Therefore, we fire before_server_start and after_server_start
        in sequence since the ASGI lifespan protocol only supports a single
        startup event.
        """
        await self._run_listeners("before_server_start", "after_server_start")

    async def shutdown(self) -> None:
        """
        Gather the listeners to fire on server stop.
        Because we are using a third-party server and not Sanic server, we do
        not have access to fire anything AFTER the server stops.
        Therefore, we fire before_server_stop and after_server_stop
        in sequence since the ASGI lifespan protocol only supports a single
        shutdown event.
        """
        await self._run_listeners("before_server_stop", "after_server_stop")

    async def __call__(
        self, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
    ) -> None:
        message = await receive()
        if message["type"] == "lifespan.startup":
            await self.startup()
            await send({"type": "lifespan.startup.complete"})
        message = await receive()
        if message["type"] == "lifespan.shutdown":
            await self.shutdown()
            await send({"type": "lifespan.shutdown.complete"})
class ASGIApp:
    """Adapts one ASGI invocation (scope, receive, send) onto Sanic's
    request/response machinery."""

    sanic_app: "sanic.app.Sanic"
    request: Request
    transport: MockTransport
    do_stream: bool
    lifespan: Lifespan
    ws: Optional[WebSocketConnection]

    def __init__(self) -> None:
        self.ws = None

    @classmethod
    async def create(
        cls, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
    ) -> "ASGIApp":
        # Factory: wires up the mock transport and builds the Sanic Request
        # from the ASGI scope. "lifespan" scopes run the lifespan protocol
        # instead of producing a request.
        instance = cls()
        instance.sanic_app = sanic_app
        instance.transport = MockTransport(scope, receive, send)
        instance.transport.loop = sanic_app.loop
        setattr(instance.transport, "add_task", sanic_app.loop.create_task)
        # ASGI delivers headers as (bytes, bytes) pairs; Sanic wants str.
        headers = Header(
            [
                (key.decode("latin-1"), value.decode("latin-1"))
                for key, value in scope.get("headers", [])
            ]
        )
        # "Expect: 100-continue" switches the request body to streaming mode.
        instance.do_stream = (
            True if headers.get("expect") == "100-continue" else False
        )
        instance.lifespan = Lifespan(instance)
        if scope["type"] == "lifespan":
            await instance.lifespan(scope, receive, send)
        else:
            path = (
                scope["path"][1:]
                if scope["path"].startswith("/")
                else scope["path"]
            )
            url = "/".join([scope.get("root_path", ""), quote(path)])
            url_bytes = url.encode("latin-1")
            url_bytes += b"?" + scope["query_string"]
            if scope["type"] == "http":
                version = scope["http_version"]
                method = scope["method"]
            elif scope["type"] == "websocket":
                version = "1.1"
                method = "GET"
                instance.ws = instance.transport.create_websocket_connection(
                    send, receive
                )
                await instance.ws.accept()
            else:
                # NOTE(review): any other scope type leaves ``version`` and
                # ``method`` unbound, so the request_class call below would
                # raise NameError — confirm whether this path can occur.
                pass
                # TODO:
                # - close connection
            request_class = sanic_app.request_class or Request
            instance.request = request_class(
                url_bytes,
                headers,
                version,
                method,
                instance.transport,
                sanic_app,
            )
            if sanic_app.is_request_stream:
                is_stream_handler = sanic_app.router.is_stream_handler(
                    instance.request
                )
                if is_stream_handler:
                    instance.request.stream = StreamBuffer(
                        sanic_app.config.REQUEST_BUFFER_QUEUE_SIZE
                    )
                    instance.do_stream = True
        return instance

    async def read_body(self) -> bytes:
        """
        Read and return the entire body from an incoming ASGI message.
        """
        body = b""
        more_body = True
        while more_body:
            message = await self.transport.receive()
            body += message.get("body", b"")
            more_body = message.get("more_body", False)
        return body

    async def stream_body(self) -> None:
        """
        Read and stream the body in chunks from an incoming ASGI message.
        """
        more_body = True
        while more_body:
            message = await self.transport.receive()
            chunk = message.get("body", b"")
            await self.request.stream.put(chunk)
            more_body = message.get("more_body", False)
        # ``None`` is the end-of-stream sentinel for StreamBuffer consumers.
        await self.request.stream.put(None)

    async def __call__(self) -> None:
        """
        Handle the incoming request.
        """
        if not self.do_stream:
            self.request.body = await self.read_body()
        else:
            # Feed the stream buffer in the background while the handler runs.
            self.sanic_app.loop.create_task(self.stream_body())
        handler = self.sanic_app.handle_request
        # Websocket responses are written by the connection itself, so no
        # write callback is passed in that case.
        callback = None if self.ws else self.stream_callback
        await handler(self.request, None, callback)

    async def stream_callback(self, response: HTTPResponse) -> None:
        """
        Write the response.
        """
        headers: List[Tuple[bytes, bytes]] = []
        cookies: Dict[str, str] = {}
        try:
            # Set-Cookie headers are emitted separately below, so pull them
            # out of the regular header list, keyed by cookie name.
            cookies = {
                v.key: v
                for _, v in list(
                    filter(
                        lambda item: item[0].lower() == "set-cookie",
                        response.headers.items(),
                    )
                )
            }
            headers += [
                (str(name).encode("latin-1"), str(value).encode("latin-1"))
                for name, value in response.headers.items()
                if name.lower() not in ["set-cookie"]
            ]
        except AttributeError:
            # The handler returned something that is not an HTTPResponse;
            # substitute the app's error response.
            logger.error(
                "Invalid response object for url %s, "
                "Expected Type: HTTPResponse, Actual Type: %s",
                self.request.url,
                type(response),
            )
            exception = ServerError("Invalid response type")
            response = self.sanic_app.error_handler.response(
                self.request, exception
            )
            headers = [
                (str(name).encode("latin-1"), str(value).encode("latin-1"))
                for name, value in response.headers.items()
                if name not in (b"Set-Cookie",)
            ]
        if "content-length" not in response.headers and not isinstance(
            response, StreamingHTTPResponse
        ):
            headers += [
                (b"content-length", str(len(response.body)).encode("latin-1"))
            ]
        if "content-type" not in response.headers:
            headers += [
                (b"content-type", str(response.content_type).encode("latin-1"))
            ]
        if response.cookies:
            # Cookies already present as Set-Cookie headers win over the
            # response object's cookie jar for the same key.
            cookies.update(
                {
                    v.key: v
                    for _, v in response.cookies.items()
                    if v.key not in cookies.keys()
                }
            )
        headers += [
            (b"set-cookie", cookie.encode("utf-8"))
            for k, cookie in cookies.items()
        ]
        await self.transport.send(
            {
                "type": "http.response.start",
                "status": response.status,
                "headers": headers,
            }
        )
        if isinstance(response, StreamingHTTPResponse):
            response.protocol = self.transport.get_protocol()
            await response.stream()
            await response.protocol.complete()
        else:
            await self.transport.send(
                {
                    "type": "http.response.body",
                    "body": response.body,
                    "more_body": False,
                }
            )
|
|
import json
import logging
import time
from datetime import datetime, timedelta
from itertools import chain
from django.conf import settings
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseRedirect)
from django.shortcuts import render
from django.utils.html import escape
from django.utils.http import urlquote
from django.views.decorators.cache import cache_page
import bleach
import jinja2
from elasticutils.utils import format_explanation
from elasticutils.contrib.django import ES_EXCEPTIONS
from mobility.decorators import mobile_template
from tower import ugettext as _, ugettext_lazy as _lazy
from kitsune import search as constants
from kitsune.forums.models import Forum, ThreadMappingType
from kitsune.products.models import Product
from kitsune.questions.models import QuestionMappingType
from kitsune.search.utils import locale_or_default, clean_excerpt
from kitsune.search import es_utils
from kitsune.search.forms import SimpleSearchForm, AdvancedSearchForm
from kitsune.search.es_utils import F, AnalyzerS, handle_es_errors
from kitsune.search.search_utils import apply_boosts, generate_simple_search
from kitsune.sumo.helpers import Paginator
from kitsune.sumo.json_utils import markup_json
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import paginate
from kitsune.wiki.facets import documents_for
from kitsune.wiki.models import DocumentMappingType
log = logging.getLogger('k.search')
# Separator inserted between highlight fragments when several excerpts are
# joined into one summary (lazily translated).
EXCERPT_JOINER = _lazy(u'...', 'between search excerpts')
def cache_control(resp, cache_period):
    """Set Cache-Control and Expires headers on a response.

    :arg resp: response object supporting item assignment
    :arg cache_period: cache lifetime in minutes
    :returns: the same response with both headers set
    """
    seconds = cache_period * 60
    resp['Cache-Control'] = 'max-age=%s' % seconds
    expires_at = datetime.utcnow() + timedelta(minutes=cache_period)
    resp['Expires'] = expires_at.strftime('%A, %d %B %Y %H:%M:%S GMT')
    return resp
def _es_down_template(request, *args, **kwargs):
"""Returns the appropriate "Elasticsearch is down!" template"""
return 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
class UnknownDocType(Exception):
    """Raised when search results contain a doctype we cannot render."""
def build_results_list(pages, is_json):
    """Convert one page of search hits into a list of result dicts.

    Wiki documents, support questions and contributor forum threads each
    get their own title/type/summary shape; common fields (url, rank,
    score, explanation, id) are filled in uniformly afterwards.

    :arg pages: paginated S
    :arg is_json: whether or not this is generated results for json output
    :returns: list of dicts
    """
    results = []
    for rank, doc in enumerate(pages, pages.start_index()):
        model = doc['model']
        if model == 'wiki_document':
            summary = _build_es_excerpt(doc) or doc['document_summary']
            result = {
                'title': doc['document_title'],
                'type': 'document'}
        elif model == 'questions_question':
            summary = _build_es_excerpt(doc)
            if not summary:
                # Only question_content is excerpted, so a match on
                # question_title or question_answer_content produces no
                # excerpt; fall back to the first 500 characters of the
                # question body.
                summary = bleach.clean(doc['question_content'], strip=True)[:500]
            result = {
                'title': doc['question_title'],
                'type': 'question',
                'is_solved': doc['question_is_solved'],
                'num_answers': doc['question_num_answers'],
                'num_votes': doc['question_num_votes'],
                'num_votes_past_week': doc['question_num_votes_past_week']}
        elif model == 'forums_thread':
            summary = _build_es_excerpt(doc, first_only=True)
            result = {
                'title': doc['post_title'],
                'type': 'thread'}
        else:
            raise UnknownDocType('%s is an unknown doctype' % model)
        result['url'] = doc['url']
        if not is_json:
            result['object'] = doc
        result['search_summary'] = summary
        result['rank'] = rank
        result['score'] = doc.es_meta.score
        result['explanation'] = escape(format_explanation(doc.es_meta.explanation))
        result['id'] = doc['id']
        results.append(result)
    return results
@markup_json
@handle_es_errors(_es_down_template)
@mobile_template('search/{mobile/}results.html')
def simple_search(request, template=None):
    """Elasticsearch-specific simple search view.
    This view is for end user searching of the Knowledge Base and
    Support Forum. Filtering options are limited to:
    * product (`product=firefox`, for example, for only Firefox results)
    * document type (`w=2`, for example, for Support Forum questions only)
    """
    # 1. Prep request.
    # Redirect to old Advanced Search URLs (?a={1,2}) to the new URL.
    if request.GET.get('a') in ['1', '2']:
        new_url = reverse('search.advanced') + '?' + request.GET.urlencode()
        return HttpResponseRedirect(new_url)
    # 2. Build form.
    search_form = SimpleSearchForm(request.GET, auto_id=False)
    # 3. Validate request.
    if not search_form.is_valid():
        if request.IS_JSON:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=request.CONTENT_TYPE,
                status=400)
        t = template if request.MOBILE else 'search/form.html'
        return cache_control(
            render(request, t, {
                'advanced': False,
                'request': request,
                'search_form': search_form}),
            settings.SEARCH_CACHE_PERIOD)
    # 4. Generate search.
    cleaned = search_form.cleaned_data
    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI
    language = locale_or_default(cleaned['language'] or request.LANGUAGE_CODE)
    lang_name = settings.LANGUAGES_DICT.get(language.lower()) or ''
    searcher = generate_simple_search(search_form, language, with_highlights=True)
    # Cap the result window before pagination slices it further.
    searcher = searcher[:settings.SEARCH_MAX_RESULTS]
    # 5. Generate output.
    pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)
    if pages.paginator.count == 0:
        # No hits: offer top articles for the selected product(s) instead.
        fallback_results = _fallback_results(language, cleaned['product'])
        results = []
    else:
        fallback_results = None
        results = build_results_list(pages, request.IS_JSON)
    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [_(p.title, 'DB: products.Product.title') for p in product]
    else:
        product_titles = [_('All Products')]
    # FIXME: This is probably bad l10n.
    product_titles = ', '.join(product_titles)
    data = {
        'num_results': pages.paginator.count,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name,
        'products': Product.objects.filter(visible=True)}
    if request.IS_JSON:
        # JSON/JSONP response path: serialize the data dict plus pagination.
        data['total'] = len(data['results'])
        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in data['products']]
        if product:
            data['product'] = product[0].slug
        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if request.JSON_CALLBACK:
            # Wrap as JSONP when a callback was supplied.
            json_data = request.JSON_CALLBACK + '(' + json_data + ');'
        return HttpResponse(json_data, content_type=request.CONTENT_TYPE)
    # HTML response path: render the results template and remember the
    # query in a cookie so the advanced form can pre-fill it.
    data.update({
        'product': product,
        'pages': pages,
        'search_form': search_form,
        'advanced': False,
    })
    resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD)
    resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                    max_age=3600, secure=False, httponly=False)
    return resp
@markup_json
@handle_es_errors(_es_down_template)
@mobile_template('search/{mobile/}results.html')
def advanced_search(request, template=None):
    """Elasticsearch-specific Advanced search view"""
    # 1. Prep request.
    r = request.GET.copy()
    # TODO: Figure out how to get rid of 'a' and do it.
    # It basically is used to switch between showing the form or results.
    a = request.GET.get('a', '2')
    # TODO: This is so the 'a=1' stays in the URL for pagination.
    r['a'] = 1
    language = locale_or_default(request.GET.get('language', request.LANGUAGE_CODE))
    r['language'] = language
    lang = language.lower()
    lang_name = settings.LANGUAGES_DICT.get(lang) or ''
    # 2. Build form.
    search_form = AdvancedSearchForm(r, auto_id=False)
    search_form.set_allowed_forums(request.user)
    # get value for search input from last search term.
    last_search = request.COOKIES.get(settings.LAST_SEARCH_COOKIE)
    if last_search and 'q' not in r:
        r['q'] = urlquote(last_search)
    # 3. Validate request.
    # Note: a == 2 means "show the form"--that's all we use it for now.
    if a == '2' or not search_form.is_valid():
        if request.IS_JSON:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=request.CONTENT_TYPE,
                status=400)
        t = template if request.MOBILE else 'search/form.html'
        return cache_control(
            render(request, t, {
                'advanced': True,
                'request': request,
                'search_form': search_form}),
            settings.SEARCH_CACHE_PERIOD)
    # 4. Generate search.
    cleaned = search_form.cleaned_data
    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI
    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.read_index('default')))
    doctypes = []
    final_filter = F()
    unix_now = int(time.time())
    # (field, dropdown choice, date value) triples shared by the question
    # and forum sections below.
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date'])
    )
    # Start - wiki search configuration
    if cleaned['w'] & constants.WHERE_WIKI:
        wiki_f = F(model='wiki_document')
        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])
        # Locale filter
        wiki_f &= F(document_locale=language)
        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)
        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)
        # Archived bit
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)
        # Apply sortby
        sortby = cleaned['sortby_documents']
        try:
            searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby])
        except IndexError:
            # Skip index errors because they imply the user is sending us sortby values
            # that aren't valid.
            pass
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f
    # End - wiki search configuration
    # Start - support questions configuration
    if cleaned['w'] & constants.WHERE_SUPPORT:
        question_f = F(model='questions_question')
        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful', 'is_archived')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_f &= F(**d)
        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])
        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])
        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)
        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)
        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)
        # Note: num_voted (with a d) is a different field than num_votes
        # (with an s). The former is a dropdown and the latter is an
        # integer value.
        if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
            question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
        elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
            question_f &= F(question_num_votes__gte=cleaned['num_votes'])
        # Apply sortby
        sortby = cleaned['sortby']
        try:
            searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby])
        except IndexError:
            # Skip index errors because they imply the user is sending us sortby values
            # that aren't valid.
            pass
        # Apply created and updated filters
        for filter_name, filter_option, filter_date in interval_filters:
            if filter_option == constants.INTERVAL_BEFORE:
                before = {filter_name + '__gte': 0,
                          filter_name + '__lte': max(filter_date, 0)}
                question_f &= F(**before)
            elif filter_option == constants.INTERVAL_AFTER:
                after = {filter_name + '__gte': min(filter_date, unix_now),
                         filter_name + '__lte': unix_now}
                question_f &= F(**after)
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f
    # End - support questions configuration
    # Start - discussion forum configuration
    if cleaned['w'] & constants.WHERE_DISCUSSION:
        discussion_f = F(model='forums_thread')
        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])
        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)
            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)
        valid_forum_ids = [f.id for f in Forum.authorized_forums_for_user(request.user)]
        forum_ids = None
        if cleaned['forum']:
            forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]
        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids
        discussion_f &= F(post_forum_id__in=forum_ids)
        # Apply created and updated filters
        for filter_name, filter_option, filter_date in interval_filters:
            if filter_option == constants.INTERVAL_BEFORE:
                before = {filter_name + '__gte': 0,
                          filter_name + '__lte': max(filter_date, 0)}
                discussion_f &= F(**before)
            elif filter_option == constants.INTERVAL_AFTER:
                after = {filter_name + '__gte': min(filter_date, unix_now),
                         filter_name + '__lte': unix_now}
                discussion_f &= F(**after)
        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f
    # End - discussion forum configuration
    # Done with all the filtery stuff--time to generate results
    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)
    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()
    cleaned_q = cleaned['q']
    # Set up the highlights. Show the entire field highlighted.
    searcher = searcher.highlight(
        'question_content',  # support forum
        'document_summary',  # kb
        'post_content',  # contributor forum
        pre_tags=['<b>'],
        post_tags=['</b>'],
        number_of_fragments=0)
    searcher = apply_boosts(searcher)
    # Build the query
    if cleaned_q:
        query_fields = chain(*[
            cls.get_query_fields() for cls in [
                DocumentMappingType,
                ThreadMappingType,
                QuestionMappingType
            ]
        ])
        query = {}
        # Create a simple_query_search query for every field we want to search.
        for field in query_fields:
            query['%s__sqs' % field] = cleaned_q
        # Transform the query to use locale aware analyzers.
        query = es_utils.es_query_with_analyzer(query, language)
        searcher = searcher.query(should=True, **query)
    searcher = searcher[:settings.SEARCH_MAX_RESULTS]
    # 5. Generate output
    pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)
    if pages.paginator.count == 0:
        # If we know there aren't any results, show fallback_results.
        fallback_results = _fallback_results(language, cleaned['product'])
        results = []
    else:
        fallback_results = None
        results = build_results_list(pages, request.IS_JSON)
    # NOTE(review): ``items`` is built but never referenced below — confirm
    # whether anything still depends on it before removing.
    items = [(k, v) for k in search_form.fields for
             v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))
    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [_(p.title, 'DB: products.Product.title') for p in product]
    else:
        product_titles = [_('All Products')]
    # FIXME: This is probably bad l10n.
    product_titles = ', '.join(product_titles)
    data = {
        'num_results': pages.paginator.count,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name,
        'advanced': True,
        'products': Product.objects.filter(visible=True)
    }
    if request.IS_JSON:
        # JSON/JSONP response path: serialize the data dict plus pagination.
        data['total'] = len(data['results'])
        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in data['products']]
        if product:
            data['product'] = product[0].slug
        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if request.JSON_CALLBACK:
            # Wrap as JSONP when a callback was supplied.
            json_data = request.JSON_CALLBACK + '(' + json_data + ');'
        return HttpResponse(json_data, content_type=request.CONTENT_TYPE)
    # HTML response path: render the results template and remember the
    # query in a cookie so the form can pre-fill it next time.
    data.update({
        'product': product,
        'pages': pages,
        'search_form': search_form
    })
    resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD)
    resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                    max_age=3600, secure=False, httponly=False)
    return resp
@cache_page(60 * 15)  # 15 minutes.
def opensearch_suggestions(request):
    """A simple search view that returns OpenSearch suggestions."""
    content_type = 'application/x-suggestions+json'
    search_form = SimpleSearchForm(request.GET, auto_id=False)
    if not search_form.is_valid():
        return HttpResponseBadRequest(content_type=content_type)
    cleaned = search_form.cleaned_data
    language = locale_or_default(cleaned['language'] or request.LANGUAGE_CODE)
    searcher = generate_simple_search(search_form, language, with_highlights=False)
    results = searcher.values_dict('document_title', 'question_title', 'url')[:10]

    def urlize(r):
        scheme = 'https' if request.is_secure() else 'http'
        return u'{0}://{1}{2}'.format(scheme, request.get_host(), r['url'][0])

    def titleize(r):
        # NB: Elasticsearch returns an array of strings as the value, so we
        # mimic that and then pull out the first (and only) string.
        fallback = r.get('question_title', [_('No title')])
        return r.get('document_title', fallback)[0]

    try:
        titles = [titleize(r) for r in results]
        urls = [urlize(r) for r in results]
        data = [cleaned['q'], titles, [], urls]
    except ES_EXCEPTIONS:
        # If we have Elasticsearch problems, we just send back an empty
        # set of results.
        data = []
    return HttpResponse(json.dumps(data), content_type=content_type)
@cache_page(60 * 60 * 168)  # 1 week.
def opensearch_plugin(request):
    """Render an OpenSearch Plugin."""
    scheme = 'https' if request.is_secure() else 'http'
    host = u'%s://%s' % (scheme, request.get_host())
    context = {
        'host': host,
        'locale': request.LANGUAGE_CODE,
    }
    return render(
        request, 'search/plugin.html', context,
        content_type='application/opensearchdescription+xml')
def _ternary_filter(ternary_value):
    """Map a TERNARY_YES/TERNARY_NO dropdown value to a boolean filter value.

    Behavior for TERNARY_OFF is undefined.
    """
    return constants.TERNARY_YES == ternary_value
def _build_es_excerpt(result, first_only=False):
    """Return concatenated, cleaned search excerpts for one hit.

    :arg result: the result object from the queryset results
    :arg first_only: show only the first highlight fragment when True,
        otherwise join all fragments with EXCERPT_JOINER
    """
    fragments = [fragment.strip()
                 for fragment in chain(*result.es_meta.highlight.values())]
    if first_only and fragments:
        excerpt = fragments[0]
    else:
        excerpt = EXCERPT_JOINER.join(fragments)
    return jinja2.Markup(clean_excerpt(excerpt))
def _fallback_results(locale, product_slugs):
    """Return the top 20 articles by votes for the given product(s)."""
    products = []
    for slug in product_slugs:
        try:
            products.append(Product.objects.get(slug=slug))
        except Product.DoesNotExist:
            # Silently skip slugs that don't match a product.
            pass
    docs, fallback = documents_for(locale, products=products)
    return (docs + (fallback or []))[:20]
|
|
"""Support for interfacing to iTunes API."""
import requests
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "iTunes"
DEFAULT_PORT = 8181
DEFAULT_SSL = False
# Timeout (seconds) applied to every HTTP request to the itunes-api server.
DEFAULT_TIMEOUT = 10
DOMAIN = "itunes"
# Feature bitmask advertised for the main iTunes player entity.
SUPPORT_ITUNES = (
    SUPPORT_PAUSE
    | SUPPORT_VOLUME_SET
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_SEEK
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_PLAY
    | SUPPORT_TURN_OFF
    | SUPPORT_SHUFFLE_SET
)
# AirPlay endpoints only expose volume and on/off control.
SUPPORT_AIRPLAY = SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_TURN_OFF
# Platform configuration: host is required; name/port/ssl have defaults.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
    }
)
class Itunes:
    """Thin HTTP client for the itunes-api server.

    Every public method returns the parsed JSON body of the response.
    Network and HTTP failures are reported in-band as a dict whose
    ``player_state`` is ``"error"`` or ``"offline"`` so callers can treat
    them like any other state payload.
    """

    def __init__(self, host, port, use_ssl):
        """Initialize the iTunes device.

        :param host: hostname or IP address of the itunes-api server
        :param port: TCP port; a falsy value omits the port from URLs
        :param use_ssl: True for https, False for plain http
        """
        self.host = host
        self.port = port
        self.use_ssl = use_ssl

    @property
    def _base_url(self):
        """Return the base URL for endpoints."""
        uri_scheme = "https://" if self.use_ssl else "http://"
        if self.port:
            return f"{uri_scheme}{self.host}:{self.port}"
        return f"{uri_scheme}{self.host}"

    def _request(self, method, path, params=None):
        """Make the actual request and return the parsed response.

        :param method: one of "GET", "POST", "PUT" or "DELETE"
        :param path: endpoint path starting with "/"
        :param params: optional form payload for POST/PUT requests
        :raises ValueError: if *method* is not a supported verb
        """
        # BUG FIX: an unrecognized method previously fell through and
        # crashed with UnboundLocalError on ``response``.
        if method not in ("GET", "POST", "PUT", "DELETE"):
            raise ValueError(f"Unsupported HTTP method: {method}")
        url = f"{self._base_url}{path}"
        try:
            if method == "GET":
                response = requests.get(url, timeout=DEFAULT_TIMEOUT)
            elif method == "POST":
                # BUG FIX: POST was previously sent with requests.put.
                response = requests.post(url, params, timeout=DEFAULT_TIMEOUT)
            elif method == "PUT":
                response = requests.put(url, params, timeout=DEFAULT_TIMEOUT)
            else:  # DELETE
                response = requests.delete(url, timeout=DEFAULT_TIMEOUT)
            # Raise HTTPError for non-2xx replies; without this the
            # except-HTTPError branch below was unreachable.
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError:
            return {"player_state": "error"}
        except requests.exceptions.RequestException:
            return {"player_state": "offline"}

    def _command(self, named_command):
        """Make a request for a controlling command."""
        return self._request("PUT", f"/{named_command}")

    def now_playing(self):
        """Return the current state."""
        return self._request("GET", "/now_playing")

    def set_volume(self, level):
        """Set the volume and return the current state, level 0-100."""
        return self._request("PUT", "/volume", {"level": level})

    def set_muted(self, muted):
        """Mute and return the current state, muted True or False."""
        return self._request("PUT", "/mute", {"muted": muted})

    def set_shuffle(self, shuffle):
        """Set the shuffle mode, shuffle True or False."""
        return self._request(
            "PUT", "/shuffle", {"mode": ("songs" if shuffle else "off")}
        )

    def play(self):
        """Set playback to play and return the current state."""
        return self._command("play")

    def pause(self):
        """Set playback to paused and return the current state."""
        return self._command("pause")

    def next(self):
        """Skip to the next track and return the current state."""
        return self._command("next")

    def previous(self):
        """Skip back and return the current state."""
        return self._command("previous")

    def stop(self):
        """Stop playback and return the current state."""
        return self._command("stop")

    def play_playlist(self, playlist_id_or_name):
        """Set a playlist to be current and return the current state.

        Returns None when no playlist matches the given id or name.
        """
        response = self._request("GET", "/playlists")
        playlists = response.get("playlists", [])

        found_playlists = [
            playlist
            for playlist in playlists
            if (playlist_id_or_name in [playlist["name"], playlist["id"]])
        ]

        if found_playlists:
            playlist = found_playlists[0]
            path = f"/playlists/{playlist['id']}/play"
            return self._request("PUT", path)

    def artwork_url(self):
        """Return a URL of the current track's album art."""
        return f"{self._base_url}/artwork"

    def airplay_devices(self):
        """Return a list of AirPlay devices."""
        return self._request("GET", "/airplay_devices")

    def airplay_device(self, device_id):
        """Return an AirPlay device."""
        return self._request("GET", f"/airplay_devices/{device_id}")

    def toggle_airplay_device(self, device_id, toggle):
        """Toggle airplay device on or off, id, toggle True or False."""
        command = "on" if toggle else "off"
        path = f"/airplay_devices/{device_id}/{command}"
        return self._request("PUT", path)

    def set_volume_airplay_device(self, device_id, level):
        """Set volume, return current state of device, id, level 0-100."""
        path = f"/airplay_devices/{device_id}/volume"
        return self._request("PUT", path, {"level": level})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the iTunes platform."""
    device = ItunesDevice(
        config.get(CONF_NAME),
        config.get(CONF_HOST),
        config.get(CONF_PORT),
        config[CONF_SSL],
        add_entities,
    )
    add_entities([device])
class ItunesDevice(MediaPlayerEntity):
    """Representation of an iTunes API instance."""

    def __init__(self, name, host, port, use_ssl, add_entities):
        """Initialize the iTunes device."""
        self._name = name
        self._host = host
        self._port = port
        self._use_ssl = use_ssl
        self._add_entities = add_entities

        self.client = Itunes(self._host, self._port, self._use_ssl)

        # Cached copy of the most recent now_playing payload.
        self.current_volume = None
        self.muted = None
        self.shuffled = None
        self.current_title = None
        self.current_album = None
        self.current_artist = None
        self.current_playlist = None
        self.content_id = None
        self.player_state = None

        # AirPlay speakers discovered so far, keyed by their API id.
        self.airplay_devices = {}

        self.update()

    def update_state(self, state_hash):
        """Copy the fields of *state_hash* onto this entity's attributes."""
        fetch = state_hash.get
        self.player_state = fetch("player_state")
        self.current_volume = fetch("volume", 0)
        self.muted = fetch("muted")
        self.current_title = fetch("name")
        self.current_album = fetch("album")
        self.current_artist = fetch("artist")
        self.current_playlist = fetch("playlist")
        self.content_id = fetch("id")
        self.shuffled = fetch("shuffle") == "songs"

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        if self.player_state in (None, "offline"):
            return "offline"
        if self.player_state == "error":
            return "error"
        return {"stopped": STATE_IDLE, "paused": STATE_PAUSED}.get(
            self.player_state, STATE_PLAYING
        )

    def update(self):
        """Retrieve latest state."""
        self.update_state(self.client.now_playing())

        discovered = self.client.airplay_devices().get("airplay_devices", [])
        added = []
        for info in discovered:
            key = info.get("id")
            known = self.airplay_devices.get(key)
            if known:
                # Refresh the entity we already track.
                known.update_state(info)
            else:
                # First sighting: create, prime and remember the entity.
                speaker = AirPlayDevice(key, self.client)
                speaker.update_state(info)
                self.airplay_devices[key] = speaker
                added.append(speaker)

        if added:
            self._add_entities(added)

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self.muted

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self.current_volume / 100.0

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self.content_id

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        has_track = self.current_title is not None
        if has_track and self.player_state in (
            STATE_PLAYING,
            STATE_IDLE,
            STATE_PAUSED,
        ):
            return f"{self.client.artwork_url()}?id={self.content_id}"
        # Fallback artwork when nothing is playing.
        return (
            "https://cloud.githubusercontent.com/assets/260/9829355"
            "/33fab972-58cf-11e5-8ea2-2ca74bdaae40.png"
        )

    @property
    def media_title(self):
        """Title of current playing media."""
        return self.current_title

    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self.current_artist

    @property
    def media_album_name(self):
        """Album of current playing media (Music track only)."""
        return self.current_album

    @property
    def media_playlist(self):
        """Title of the currently playing playlist."""
        return self.current_playlist

    @property
    def shuffle(self):
        """Boolean if shuffle is enabled."""
        return self.shuffled

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_ITUNES

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self.update_state(self.client.set_volume(int(volume * 100)))

    def mute_volume(self, mute):
        """Mute (true) or unmute (false) media player."""
        self.update_state(self.client.set_muted(mute))

    def set_shuffle(self, shuffle):
        """Shuffle (true) or no shuffle (false) media player."""
        self.update_state(self.client.set_shuffle(shuffle))

    def media_play(self):
        """Send media_play command to media player."""
        self.update_state(self.client.play())

    def media_pause(self):
        """Send media_pause command to media player."""
        self.update_state(self.client.pause())

    def media_next_track(self):
        """Send media_next command to media player."""
        self.update_state(self.client.next())  # pylint: disable=not-callable

    def media_previous_track(self):
        """Send media_previous command media player."""
        self.update_state(self.client.previous())

    def play_media(self, media_type, media_id, **kwargs):
        """Send the play_media command to the media player."""
        if media_type == MEDIA_TYPE_PLAYLIST:
            self.update_state(self.client.play_playlist(media_id))

    def turn_off(self):
        """Turn the media player off."""
        self.update_state(self.client.stop())
class AirPlayDevice(MediaPlayerEntity):
    """Representation an AirPlay device via an iTunes API instance."""

    def __init__(self, device_id, client):
        """Initialize the AirPlay device."""
        self._id = device_id
        self.client = client

        self.device_name = "AirPlay"
        self.kind = None
        self.active = False
        self.selected = False
        self.volume = 0
        self.supports_audio = False
        self.supports_video = False
        self.player_state = None

    def update_state(self, state_hash):
        """Update all the state properties with the passed in dictionary."""
        # Fields that map one-to-one onto attributes of the same name.
        for field in (
            "player_state",
            "kind",
            "active",
            "selected",
            "supports_audio",
            "supports_video",
        ):
            if field in state_hash:
                setattr(self, field, state_hash[field])

        if "name" in state_hash:
            self.device_name = f"{state_hash['name']} AirTunes Speaker".strip()

        if "sound_volume" in state_hash:
            self.volume = state_hash["sound_volume"]

    @property
    def name(self):
        """Return the name of the device."""
        return self.device_name

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return "mdi:volume-high" if self.selected is True else "mdi:volume-off"

    @property
    def state(self):
        """Return the state of the device."""
        return STATE_ON if self.selected is True else STATE_OFF

    def update(self):
        """Retrieve latest state."""

    @property
    def volume_level(self):
        """Return the volume."""
        return float(self.volume) / 100.0

    @property
    def media_content_type(self):
        """Flag of media content that is supported."""
        return MEDIA_TYPE_MUSIC

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_AIRPLAY

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self.update_state(
            self.client.set_volume_airplay_device(self._id, int(volume * 100))
        )

    def turn_on(self):
        """Select AirPlay."""
        # Optimistically flip the state in HA before the API round-trip.
        self.update_state({"selected": True})
        self.schedule_update_ha_state()
        self.update_state(self.client.toggle_airplay_device(self._id, True))

    def turn_off(self):
        """Deselect AirPlay."""
        self.update_state({"selected": False})
        self.schedule_update_ha_state()
        self.update_state(self.client.toggle_airplay_device(self._id, False))
|
|
import const
from json_document import ResultSet
from util import prettify
class SearchQuery(object):
    """
    Container class for constructing and executing ElasticSearch queries for
    some SearchModel class.

    Each SearchQuery instance may contain a number of query expressions
    (filters, queries, facet fields, sorting expressions and limit/offset
    preferences).

    Query expressions may be chained, e.g.:

        query = Model.query()
        query.filter( field1 == value1 ).filter( field2 == value2).limit(10)

    Each added query expression generates a new SearchQuery instance, so base
    queries may be reused for different purposes, e.g.:

        query = Model.query.filter( field1 == value1)
        query_foo = query.match(field2=="foo") #filter field1, match field2
        query_bar = query.match(field3.like("bar")) #filter field1, match field3
    """

    def __init__(self, search_model_class, must_queries=None,
            must_not_queries=None, should_queries=None, and_filters=None,
            or_filters=None, facet_queries=None, limit=None, offset=0,
            page_size=None, sort=None):
        self.search_model_class = search_model_class
        self.must_queries = must_queries or []
        self.must_not_queries = must_not_queries or []
        self.should_queries = should_queries or []
        self.and_filters = and_filters or []
        self.or_filters = or_filters or []
        self.facet_queries = facet_queries or []
        self._offset = offset
        self._limit = limit
        self._page_size = page_size
        self.sort = sort or []

    def _generate_subquery(self, must_queries=None, must_not_queries=None,
            should_queries=None, and_filters=None, or_filters=None,
            facet_queries=None, limit=None, offset=None, page_size=None,
            sort=None):
        """
        Create a new query object based on this one, with extra arguments
        appended (or overridden, in the case of limit, offset, page_size,
        sort).
        """
        # Additive fields
        must_queries = self.must_queries + (must_queries or [])
        must_not_queries = self.must_not_queries + (must_not_queries or [])
        should_queries = self.should_queries + (should_queries or [])
        and_filters = self.and_filters + (and_filters or [])
        or_filters = self.or_filters + (or_filters or [])
        facet_queries = self.facet_queries + (facet_queries or [])
        sort = self.sort + (sort or [])

        # Last added takes precedence
        limit = limit if limit else self._limit
        offset = offset if offset else self._offset
        page_size = page_size if page_size else self._page_size

        return self.__class__(self.search_model_class, must_queries,
                must_not_queries, should_queries, and_filters, or_filters,
                facet_queries, limit, offset, page_size, sort)

    def _generate_es_query(self, count_query=False):
        """
        Create ES query dictionary from stored filters, query expressions and
        sort expressions.

        Note that this dictionary will NOT include limit, offset or any other
        "search api" related settings.

        :param count_query: if True, do not include facet/sort parameters.
        """
        es_dict = {}
        query_arguments = {}
        filter_arguments = {}

        # Fold the three query buckets into a single "bool" query, keeping
        # only the non-empty clauses.
        if self.must_queries or self.should_queries or self.must_not_queries:
            match_query = {}
            if self.must_queries:
                match_query[const.MUST] = self.must_queries
            if self.should_queries:
                match_query[const.SHOULD] = self.should_queries
            if self.must_not_queries:
                match_query[const.MUST_NOT] = self.must_not_queries
            query_arguments[const.BOOL] = match_query

        if self.and_filters or self.or_filters:
            # NOTE(review): when both AND and OR filters are present the OR
            # clause silently replaces the AND clause.  This is pre-existing
            # behavior, preserved here; combining both would require a nested
            # filter structure -- confirm the intended semantics.
            if len(self.and_filters):
                filter_arguments = { const.AND: self.and_filters }
            if len(self.or_filters):
                filter_arguments = { const.OR: self.or_filters }

        if query_arguments and filter_arguments:
            # Both queries and filters present -> wrap in a "filtered" query.
            es_dict[const.FILTERED] = { const.QUERY: query_arguments }
            es_dict[const.FILTERED][const.FILTER] = filter_arguments
        elif query_arguments:
            es_dict.update(query_arguments)
        elif filter_arguments:
            es_dict[const.FILTERED] = { const.FILTER : filter_arguments }
        else:
            es_dict[const.MATCH_ALL] = {}

        if count_query:
            # The count API takes the bare query, without the outer "query"
            # wrapper, facets or sorting.
            return es_dict

        es_dict = { const.QUERY: es_dict }

        if self.facet_queries:
            facets = {}
            for field in self.facet_queries:
                facets.update(field)
            es_dict[const.FACETS] = facets

        # Default to sorting by id ascending so paging is deterministic.
        if self.sort:
            sort = self.sort
        else:
            sort = [{const.ID: { const.ORDER: const.ASC }}]
        es_dict[const.SORT] = sort

        return es_dict

    # --- Sort / limit preferences -----------------------------------------

    def offset(self, amount):
        """
        Set the result offset (in number of documents from start).
        """
        return self._generate_subquery(offset=amount)

    def limit(self, amount):
        """
        Set the maximum number of documents returned by this query.
        """
        return self._generate_subquery(limit=amount)

    def order_by(self, sort_expr):
        """
        Add a sort order; sorting precedent is sequential, with each subsequent
        sort breaking ties from the previous.
        """
        return self._generate_subquery(sort=sort_expr)

    def page_size(self, amount):
        """
        Set the number of documents to return with each network request.
        """
        return self._generate_subquery(page_size=amount)

    # --- Filtering: the presence of one of these expressions generates a
    # --- "filtered" query on execution. -----------------------------------

    def filter_and(self, query_expression):
        """
        Add AND filter expression.
        """
        return self._generate_subquery(and_filters=[query_expression])

    def filter_or(self, query_expression):
        """
        Add OR filter expression.
        """
        return self._generate_subquery(or_filters=[query_expression])

    def filter(self, query_expression):
        """
        Alias for filter_and.
        """
        return self.filter_and(query_expression)

    # --- Matching: the presence of one of these expressions generates a
    # --- "bool" query on execution. ---------------------------------------

    def match(self, query_expression):
        """
        Alias for must_match.
        """
        return self.must_match(query_expression)

    def must_match(self, query_expression):
        """
        Add query expression that documents MUST match.
        """
        return self._generate_subquery(must_queries=[query_expression])

    def should_match(self, query_expression):
        """
        Add query expression that documents SHOULD match (mostly affects
        document scoring).
        """
        return self._generate_subquery(should_queries=[query_expression])

    def must_not_match(self, query_expression):
        """
        Add query expression that documents MUST NOT match.
        """
        return self._generate_subquery(must_not_queries=[query_expression])

    def _add_facet(self, facet_type, search_field, facet_name=None,
            facet_filters=None):
        """
        Build a single facet dictionary keyed by facet name.
        """
        if not facet_name:
            facet_name = search_field.hierarchy
        q = { facet_name: {
            facet_type: { const.FIELD: search_field.hierarchy }
        }}
        if facet_filters:
            facet_filter_dict = {}
            for facet_filter in facet_filters:
                facet_filter_dict.update(facet_filter)
            q[facet_name][const.FACET_FILTER] = facet_filter_dict
        return q

    # --- Faceting ---------------------------------------------------------

    def term_facet(self, search_field, facet_name=None, facet_filters=None,
            facet_size=None):
        """
        Add a term facet.

        :param search_field: SearchField to count distinct values for
        :param facet_name: string, optional name for the facet result
            (will default to the full name of the search field)
        :param facet_filters: list of QueryExpressions to filter facet results
        """
        q = self._add_facet(const.TERMS, search_field,
                facet_name=facet_name, facet_filters=facet_filters)
        return self._generate_subquery(facet_queries=[q])

    # --- Query execution --------------------------------------------------

    def count(self, **request_params):
        """
        Fetch the number of matching documents with the ES count API. See ES
        documentation for supported request_params.
        """
        es_query = self._generate_es_query(count_query=True)
        return self.search_model_class.count(es_query, **request_params)

    def all(self):
        """
        Fetch all documents for given query and return a ResultSet containing
        the total document count, the (de-duplicated) document sources and
        any facet results.

        Queries will be executed one <page_size> at a time, until there are no
        more documents, or a specific <limit> has been reached.
        """
        es_query = self._generate_es_query()
        page_size = self._page_size or const.DEFAULT_PAGE_SIZE
        es_query[const.SIZE] = page_size

        # BUG FIX: the offset was previously computed but then unconditionally
        # overwritten with start * page_size inside the loop, so it was
        # silently ignored.  The original page-index interpretation
        # (self._offset * page_size) is preserved, but now actually applied.
        offset = self._offset * page_size if self._offset else 0

        start = 0
        done = False
        unique_ids = set()
        result_set = ResultSet()
        while True:
            es_query[const.FROM] = offset + start * page_size
            results = self.search_model_class.search(es_query, return_raw=False)
            total = results.total
            if result_set.total is None:
                result_set.total = total
            for document in results.documents:
                # De-duplicate across pages by document id.
                if document._id not in unique_ids:
                    unique_ids.add(document._id)
                    result_set.documents.append(document)
                    if self._limit:
                        if len(unique_ids) >= self._limit:
                            done = True
                            break
            if results.facets and not result_set.facets:
                result_set.facets = results.facets
            if done:
                break
            if len(results.documents) == 0:
                break
            if total <= offset + (page_size * (start + 1)):
                break
            start += 1
        return result_set

    def delete(self, **request_params):
        """
        Delete all documents that match this query.
        """
        # BUG FIX: this method previously declared ``cls`` as its first
        # argument while the body used ``self``, and passed an undefined
        # ``doc_type`` name, so any invocation raised NameError.
        # NOTE(review): delete_by_query's exact signature is not visible from
        # here -- confirm it accepts the query dict as its first argument.
        es_query = self._generate_es_query()
        return self.search_model_class.delete_by_query(es_query,
                **request_params)

    def __repr__(self):
        return "SearchQuery:[\n%s\n]" % self._generate_es_query()

    def __str__(self):
        # Python 2 only: ``unicode`` does not exist on Python 3.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return repr(self)
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.optimizer import Optimizer
from paddle.fluid import core, framework, layers
from paddle.fluid.framework import Program, Variable
from paddle.fluid.layer_helper import LayerHelper
import paddle
import numpy as np
from paddle.fluid.dygraph import base as imperative_base
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
from paddle import _C_ops
__all__ = []
class ModelAverage(Optimizer):
r"""
The ModelAverage optimizer accumulates specific continuous historical
parameters during training. The accumulated historical range can be controlled
by the passed ``average_window_rate`` argument. The averaged ``Parameter`` are
used in the prediction, which usually can improve the accuracy of the prediction.
Accumulate the average of the ``Parameter`` in the sliding window, the result will be saved
in a temporary variable, can be applied to the current model's ``Parameter`` by calling
the ``apply()`` method, and the current model ``Parameter`` can be restored by calling
the ``restore()`` method.
The window size for calculating the average is determined by ``average_window_rate``,
``min_average_window``, ``max_average_window`` and the current ``Parameter`` update times (num_updates).
When the cumulative times (num_accumulates) is greater than the specific window
threshold (average_window), the accumulated ``Parameter`` temporary variable is set to 0.0.
The following example will help to understand the role of these arguments:
::
if num_accumulates >= min_average_window and num_accumulates >= min(max_average_window, num_updates * average_window_rate):
num_accumulates = 0
In the above conditional judgment statement, ``num_accumulates`` indicates the current
accumulated number, which can be abstractly understood as the length of the cumulative window.
The length of the window must be at least the length set by the ``min_average_window`` argument,
and cannot exceed the length specified by the ``max_average_window`` argument or
``num_updates * average_window_rate``, where ``num_updates`` indicates the current ``Parameter``
update times, ``average_window_rate`` is a coefficient that calculates the length of the window.
Args:
average_window_rate (float): The calculate ratio of the window length relative to ``Parameter`` update times.
parameters (list, optional): List of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. \
The default value is None in static mode, at this time all parameters will be updated.
min_average_window (int, optional): the minimum size of average window length. The default value is 10000.
max_average_window (int, optional): The maximum size of average window length. The default value is 10000.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
self.bias = self._linear.bias
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
def train(layer, loader, loss_fn, opt, model_average):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
model_average.step()
opt.clear_grad()
model_average.clear_grad()
print("Train Epoch {} batch {}: loss = {}, bias = {}".format(
epoch_id, batch_id, np.mean(loss.numpy()), layer.bias.numpy()))
def evaluate(layer, loader, loss_fn):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
print("Evaluate batch {}: loss = {}, bias = {}".format(
batch_id, np.mean(loss.numpy()), layer.bias.numpy()))
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
optimizer = opt.Momentum(learning_rate=0.2, momentum=0.1, parameters=layer.parameters())
model_average = paddle.incubate.ModelAverage(0.15,
parameters=layer.parameters(),
min_average_window=2,
max_average_window=10)
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# create data loader
eval_loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=1)
# train
train(layer, loader, loss_fn, optimizer, model_average)
print("\nEvaluate With ModelAverage")
with model_average.apply(need_restore=False):
evaluate(layer, eval_loader, loss_fn)
print("\nEvaluate With Restored Paramters")
model_average.restore()
evaluate(layer, eval_loader, loss_fn)
"""
    def __init__(self,
                 average_window_rate,
                 parameters=None,
                 min_average_window=10000,
                 max_average_window=10000,
                 name=None):
        """Create the ModelAverage optimizer.

        Args:
            average_window_rate (float): Ratio of the averaging window length
                relative to the number of parameter updates.
            parameters (list, optional): Parameters to average. Required in
                dygraph mode; in static mode ``None`` means all parameters of
                the default main program.
            min_average_window (int, optional): Minimum averaging window length.
            max_average_window (int, optional): Maximum averaging window length.
            name (str, optional): See :ref:`api_guide_Name`.
        """
        # learning_rate is irrelevant: this "optimizer" only accumulates
        # parameter averages and never applies gradients itself.
        super(ModelAverage, self).__init__(
            learning_rate=0.0,
            parameters=parameters,
            weight_decay=None,
            grad_clip=None,
            name=name)

        self.helper = LayerHelper(self.__class__.__name__)
        self.average_window = average_window_rate
        self.min_average_window = min_average_window
        self.max_average_window = max_average_window
        # Operator type appended by _append_optimize_op in static mode.
        self.type = "average_accumulates"

        if not framework.in_dygraph_mode():
            # Static graph mode: append the accumulation ops to the main
            # program now, and pre-build two auxiliary programs that apply
            # the averaged values / restore the originals on demand.
            global_block = framework.default_main_program().global_block()
            all_parameters = parameters if parameters else global_block.all_parameters(
            )

            self._create_accumulators(global_block, all_parameters)
            for param in all_parameters:
                # The gradient is unused by the accumulation op, hence None.
                self._append_optimize_op(global_block, [param, None])
            self.apply_program = Program()
            block = self.apply_program.global_block()
            with framework.program_guard(main_program=self.apply_program):
                for param in all_parameters:
                    self._add_average_apply_op(block, param)
            self.restore_program = Program()
            block = self.restore_program.global_block()
            with framework.program_guard(main_program=self.restore_program):
                for param in all_parameters:
                    self._add_average_restore_op(block, param)
def _create_accumulators(self, block, parameters):
assert isinstance(block, framework.Block)
for param in parameters:
self._add_accumulator('sum_1', param)
self._add_accumulator('sum_2', param)
self._add_accumulator('sum_3', param)
self._add_accumulator('restore', param)
self._add_accumulator(
'num_accumulates', param, dtype='int64', shape=[1])
self._add_accumulator(
'old_num_accumulates', param, dtype='int64', shape=[1])
self._add_accumulator(
'num_updates', param, dtype='int64', shape=[1])
    def _append_optimize_op(self, block, param_and_grad):
        """Accumulate one parameter into the moving-average state.

        In dygraph mode this runs the fused ``average_accumulates`` C++ op
        in-place on the accumulator tensors; in static graph mode it appends
        the equivalent operator to the default main program.

        Args:
            block (framework.Block): Block the op belongs to.
            param_and_grad (list): ``[parameter, gradient]`` pair; the
                gradient is not consumed by this op and may be None.

        Returns:
            The appended operator in static mode, or None in dygraph mode.
        """
        assert isinstance(block, framework.Block)
        sum_1 = self._get_accumulator('sum_1', param_and_grad[0])
        sum_2 = self._get_accumulator('sum_2', param_and_grad[0])
        sum_3 = self._get_accumulator('sum_3', param_and_grad[0])
        num_accumulates = self._get_accumulator('num_accumulates',
                                                param_and_grad[0])
        old_num_accumulates = self._get_accumulator('old_num_accumulates',
                                                    param_and_grad[0])
        num_updates = self._get_accumulator('num_updates', param_and_grad[0])
        if framework.in_dygraph_mode():
            # Inputs are repeated as outputs: the op updates the accumulator
            # tensors in place; the returned handles are discarded.
            _, _, _, _, _, _ = _C_ops.average_accumulates(
                param_and_grad[0], sum_1, sum_2, sum_3, num_accumulates,
                old_num_accumulates, num_updates, sum_1, sum_2, sum_3,
                num_accumulates, old_num_accumulates, num_updates,
                'average_window', self.average_window, 'min_average_window',
                self.min_average_window, 'max_average_window',
                self.max_average_window)
            return None

        # Static graph: always append to the default main program's global
        # block (the ``block`` argument is only validated above).
        block = framework.default_main_program().global_block()
        attrs = {
            "average_window": self.average_window,
            "min_average_window": self.min_average_window,
            "max_average_window": self.max_average_window,
        }

        inputs = {
            "param": param_and_grad[0],
            "in_sum_1": sum_1,
            "in_sum_2": sum_2,
            "in_sum_3": sum_3,
            "in_num_accumulates": num_accumulates,
            "in_old_num_accumulates": old_num_accumulates,
            "in_num_updates": num_updates
        }

        outputs = {
            "out_sum_1": sum_1,
            "out_sum_2": sum_2,
            "out_sum_3": sum_3,
            "out_num_accumulates": num_accumulates,
            "out_old_num_accumulates": old_num_accumulates,
            "out_num_updates": num_updates,
        }

        average_accumulates_op = block.append_op(
            type=self.type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=True)

        return average_accumulates_op
@imperative_base.no_grad
def minimize(self,
loss,
startup_program=None,
parameters=None,
no_grad_set=None):
"""
Add operations to minimize ``loss`` by updating ``parameters``.
Args:
loss (Tensor): A ``Tensor`` containing the value to minimize.
startup_program (Program, optional): :ref:`api_fluid_Program` for
initializing parameters in ``parameters``. The default value
is None, at this time :ref:`api_fluid_default_startup_program` will be used.
parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update
to minimize ``loss``. The default value is None, at this time all parameters
will be updated.
no_grad_set (set, optional): Set of ``Tensor`` or ``Tensor.name`` that don't need
to be updated. The default value is None.
Returns:
tuple: tuple (optimize_ops, params_grads), A list of operators appended
by minimize and a list of (param, grad) tensor pairs, param is
``Parameter``, grad is the gradient value corresponding to the parameter.
In static graph mode, the returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
indicate program pruning. If so, the program will be pruned by ``feed`` and
``fetch_list`` before run, see details in ``Executor``.
Examples:
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
loss.backward()
sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
sgd.minimize(loss)
modelaverage = paddle.incubate.ModelAverage(0.15,
parameters=linear.parameters(),
min_average_window=2,
max_average_window=4)
modelaverage.minimize(loss)
sgd.clear_grad()
modelaverage.clear_grad()
"""
if framework.in_dygraph_mode():
self.step()
@framework.dygraph_only
@imperative_base.no_grad
def step(self):
"""
Execute the optimizer and update parameters once.
Returns:
None
Examples:
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
modelaverage = paddle.incubate.ModelAverage(0.15,
parameters=linear.parameters(),
min_average_window=2,
max_average_window=4)
loss.backward()
sgd.step()
modelaverage.step()
sgd.clear_grad()
modelaverage.clear_grad()
"""
params_grads = []
for param in self._parameter_list:
if not param.trainable:
continue
if param._grad_ivar() is not None:
grad_var = param._grad_ivar()
params_grads.append((param, grad_var))
block = framework.default_main_program().global_block()
self._create_accumulators(block, self._parameter_list)
for param_and_grad in params_grads:
self._append_optimize_op(block, param_and_grad)
    @signature_safe_contextmanager
    @imperative_base.no_grad
    def apply(self, executor=None, need_restore=True):
        """
        Apply the average of the accumulated ``Parameter`` values to the
        parameters of the current model, for the duration of the context.

        The live parameter values are backed up into the 'restore'
        accumulator first so that :meth:`restore` can undo the averaging
        when the context exits (if ``need_restore`` is True).

        Args:
            executor(Executor): The network executor in static-graph mode.
                The default value is None in dygraph mode.
            need_restore(bool): If True, restore the original parameter
                values when leaving the context; if False, leave the
                averaged values in place. The default value is True.

        Raises:
            RuntimeError: If ``executor`` is None in static-graph mode.
        """
        if framework.in_dygraph_mode():
            for param in self._parameter_list:
                # Window counters maintained by the accumulation op.
                num_accumulates = self._get_accumulator('num_accumulates',
                                                        param)
                old_num_accumulates = self._get_accumulator(
                    'old_num_accumulates', param)
                # Partial sums of historical parameter values.
                sum_1 = self._get_accumulator('sum_1', param)
                sum_2 = self._get_accumulator('sum_2', param)
                sum_3 = self._get_accumulator('sum_3', param)
                # Back up the live value so restore() can put it back.
                param_restore = self._get_accumulator('restore', param)
                paddle.assign(param, param_restore)
                # average = (sum_1 + sum_2 + sum_3) / total accumulation count
                total_param = sum_1 + sum_2 + sum_3
                total_accumulates = num_accumulates + old_num_accumulates
                total_param = paddle.cast(total_param, dtype='float32')
                total_accumulates = paddle.cast(
                    total_accumulates, dtype='float32')
                average_param = total_param / total_accumulates
                paddle.assign(average_param, param)
            try:
                yield
            finally:
                # Leaving the context: optionally undo the averaging.
                if need_restore:
                    self.restore()
            return
        # Static-graph mode: the pre-built apply_program must be run by
        # an executor supplied by the caller.
        if executor is None:
            raise RuntimeError(
                "Executor should not be None in static graph mode.")
        executor.run(self.apply_program)
        try:
            yield
        finally:
            if need_restore:
                self.restore(executor)
@imperative_base.no_grad
def restore(self, executor=None):
"""
Restore ``Parameter`` values of current model.
Args:
executor(Executor): The network executor in static-graph mode. The default value is None in dygraph mode
Examples:
.. code-block:: python
import paddle
import numpy as np
inp = paddle.to_tensor(np.random.random([1, 10]).astype('float32'))
linear = paddle.nn.Linear(10, 1)
out = linear(inp)
loss = paddle.mean(out)
loss.backward()
sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
modelaverage = paddle.incubate.ModelAverage(0.15,
parameters=linear.parameters(),
min_average_window=2,
max_average_window=4)
sgd.step()
modelaverage.step()
with modelaverage.apply(need_restore=False):
for param in linear.parameters():
print(param)
for param in linear.parameters():
print(param)
modelaverage.restore()
for param in linear.parameters():
print(param)
"""
if framework.in_dygraph_mode():
for param in self._parameter_list:
param_restore = self._get_accumulator('restore', param)
paddle.assign(param_restore, param)
return
if executor is None:
raise RuntimeError(
"Executor should not be None in static graph mode.")
executor.run(self.restore_program)
def _add_average_apply_op(self, block, param):
param = block._clone_variable(param)
grad = block._clone_variable(self._get_accumulator('restore', param))
sum_1 = block._clone_variable(self._get_accumulator('sum_1', param))
sum_2 = block._clone_variable(self._get_accumulator('sum_2', param))
sum_3 = block._clone_variable(self._get_accumulator('sum_3', param))
num_accumulates = block._clone_variable(
self._get_accumulator('num_accumulates', param))
old_num_accumulates = block._clone_variable(
self._get_accumulator('old_num_accumulates', param))
# backup param value to grad
layers.assign(input=param, output=grad)
# param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
sum = layers.sum(x=[sum_1, sum_2, sum_3])
tmp = layers.cast(
x=tmp, dtype='float32' if self._dtype is None else self._dtype)
sum = layers.cast(
x=sum, dtype='float32' if self._dtype is None else self._dtype)
layers.ops._elementwise_div(x=sum, y=tmp, out=param)
def _add_average_restore_op(self, block, param):
param = block._clone_variable(param)
grad = block._clone_variable(self._get_accumulator('restore', param))
layers.assign(input=grad, output=param)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import shlex
import re
import os
try:
import simplejson as json
except Exception:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.common.base import KeyCertificateConnection
from libcloud.common.types import InvalidCredsError
from libcloud.container.base import (Container, ContainerDriver,
ContainerImage)
from libcloud.container.providers import Provider
from libcloud.container.types import ContainerState
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
class DockerResponse(JsonResponse):
    """Response class coping with the Docker API's JSON quirks.

    Docker may report errors together with an HTTP 200 status, and it
    streams chunked JSON lines for image pulls, so body parsing is more
    involved than in the plain ``JsonResponse``.
    """

    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_body(self):
        # Nothing to decode for an empty body.
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body
        try:
            content_type = self.headers.get('content-type', 'application/json')
            if content_type not in ('application/json', ''):
                # Non-JSON payloads are passed through untouched.
                return self.body
            chunked_pull = (
                self.headers.get('transfer-encoding') == 'chunked' and
                'fromImage' in self.request.url)
            if chunked_pull:
                # Image pulls stream one JSON document per line.
                lines = self.body.strip().replace('\r', '').split('\n')
                return [json.loads(line) for line in lines]
            return json.loads(self.body)
        except ValueError:
            # Try to surface Docker's own error text if present.
            match = re.search('Error: (.+?)"', self.body)
            if match:
                raise Exception(match.group(1))
            raise Exception(
                'ConnectionError: Failed to parse JSON response')

    def parse_error(self):
        if self.status == 401:
            raise InvalidCredsError('Invalid credentials')
        return self.body

    def success(self):
        return self.status in self.valid_response_codes
class DockerException(Exception):
    """Exception carrying the HTTP status code and the Docker error text."""

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "{0} {1}".format(self.code, self.message)

    def __repr__(self):
        return "DockerException {0} {1}".format(self.code, self.message)
class DockerConnection(ConnectionUserAndKey):
    """Plain (optionally basic-auth) HTTP connection to the Docker API."""

    responseCls = DockerResponse
    timeout = 60

    def add_default_headers(self, headers):
        """
        Attach the headers required by every Docker API request.

        All payloads are JSON; when a username and key are configured,
        an HTTP basic auth header is added as well.
        """
        headers['Content-Type'] = 'application/json'
        if self.user_id and self.key:
            credentials = b('%s:%s' % (self.user_id, self.key))
            encoded = base64.b64encode(credentials)
            headers['Authorization'] = 'Basic %s' % (encoded.decode('utf-8'))
        return headers
class DockertlsConnection(KeyCertificateConnection):
    """TLS connection to the Docker daemon using client PEM key/certificate."""

    responseCls = DockerResponse

    def __init__(self, key, secret, secure=True,
                 host='localhost',
                 port=4243, key_file='', cert_file='', **kwargs):
        # key/secret are accepted for interface compatibility with other
        # connections; authentication here is via the PEM file pair.
        super(DockertlsConnection, self).__init__(key_file=key_file,
                                                  cert_file=cert_file,
                                                  secure=secure, host=host,
                                                  port=port, url=None,
                                                  proxy_url=None,
                                                  timeout=None, backoff=None,
                                                  retry_delay=None)
        if key_file:
            # Validate that both PEM files exist on disk before connecting;
            # the key is checked first, then the certificate.
            keypath = os.path.expanduser(key_file)
            is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
            if not is_file_path:
                raise InvalidCredsError(
                    'You need an key PEM file to authenticate with '
                    'Docker tls. This can be found in the server.'
                )
            self.key_file = key_file
            certpath = os.path.expanduser(cert_file)
            is_file_path = os.path.exists(
                certpath) and os.path.isfile(certpath)
            if not is_file_path:
                raise InvalidCredsError(
                    'You need an certificate PEM file to authenticate with '
                    'Docker tls. This can be found in the server.'
                )
            self.cert_file = cert_file

    def add_default_headers(self, headers):
        # All Docker API payloads are JSON.
        headers['Content-Type'] = 'application/json'
        return headers
class DockerContainerDriver(ContainerDriver):
    """
    Docker container driver class.

    >>> from libcloud.container.providers import get_driver
    >>> driver = get_driver('docker')
    >>> conn = driver(host='198.61.239.128', port=4243)
    >>> conn.list_containers()

    or connecting to http basic auth protected https host:

    >>> conn = driver('user', 'pass', host='https://198.61.239.128', port=443)

    connect with tls authentication, by providing a hostname, port, a private
    key file (.pem) and certificate (.pem) file

    >>> conn = driver(host='https://198.61.239.128',
    >>>               port=4243, key_file='key.pem', cert_file='cert.pem')
    """
    type = Provider.DOCKER
    name = 'Docker'
    website = 'http://docker.io'
    connectionCls = DockerConnection
    supports_clusters = False
    version = '1.24'

    def __init__(self, key='', secret='', secure=False, host='localhost',
                 port=4243, key_file=None, cert_file=None):
        """
        :param key: API key or username to used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :param key_file: Path to private key for TLS connection (optional)
        :type key_file: ``str``

        :param cert_file: Path to public key for TLS connection (optional)
        :type cert_file: ``str``

        :return: ``None``
        """
        if key_file:
            self.connectionCls = DockertlsConnection
            self.key_file = key_file
            self.cert_file = cert_file
            secure = True
        if host.startswith('https://'):
            secure = True
        # Strip the URL scheme from the host if present.
        # Fix: the previous code used host.strip(prefix), but str.strip()
        # removes a *character set*, not a prefix — a hostname beginning or
        # ending with any of 'h', 't', 'p', 's', ':' or '/' would be mangled
        # (e.g. 'https://hub.example.com' -> 'ub.example.com'). Slice off
        # the exact prefix instead.
        prefixes = ['http://', 'https://']
        for prefix in prefixes:
            if host.startswith(prefix):
                host = host[len(prefix):]
        super(DockerContainerDriver, self).__init__(key=key, secret=secret,
                                                    secure=secure, host=host,
                                                    port=port,
                                                    key_file=key_file,
                                                    cert_file=cert_file)
        if key_file or cert_file:
            # docker tls authentication-
            # https://docs.docker.com/articles/https/
            # We pass two files, a key_file with the
            # private key and cert_file with the certificate
            # libcloud will handle them through LibcloudHTTPSConnection
            if not (key_file and cert_file):
                raise Exception(
                    'Needs both private key file and '
                    'certificate file for tls authentication')
        self.connection.secure = secure
        self.connection.host = host
        self.connection.port = port
        # set API version
        self.version = self._get_api_version()

    def _ex_connection_class_kwargs(self):
        # Forward the TLS file paths (when set) to the connection class.
        kwargs = {}
        if hasattr(self, 'key_file'):
            kwargs['key_file'] = self.key_file
        if hasattr(self, 'cert_file'):
            kwargs['cert_file'] = self.cert_file
        return kwargs

    def install_image(self, path):
        """
        Install a container image from a remote path.

        :param path: Path to the container image
        :type  path: ``str``

        :rtype: :class:`libcloud.container.base.ContainerImage`
        """
        payload = {
        }
        data = json.dumps(payload)
        result = self.connection.request('/v%s/images/create?fromImage=%s' %
                                         (self.version, path), data=data,
                                         method='POST')
        if "errorDetail" in result.body:
            raise DockerException(None, result.body)
        image_id = None
        # the response is slightly different if the image is already present
        # and it's not downloaded. both messages below indicate that the image
        # is available for use to the daemon
        if re.search(r'Downloaded newer image', result.body) or \
                re.search(r'"Status: Image is up to date', result.body):
            if re.search(r'sha256:(?P<id>[a-z0-9]{64})', result.body):
                image_id = re.findall(r'sha256:(?P<id>[a-z0-9]{64})',
                                      result.body)[-1]
        # if there is a failure message or if there is not an image id in the
        # response then throw an exception.
        if image_id is None:
            raise DockerException(None, 'failed to install image')
        image = ContainerImage(
            id=image_id,
            name=path,
            path=path,
            version=None,
            driver=self.connection.driver,
            extra={})
        return image

    def list_images(self):
        """
        List the installed container images

        :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage`
        """
        result = self.connection.request('/v%s/images/json' %
                                         (self.version)).object
        images = []
        for image in result:
            try:
                # Prefer the first repo tag as the human-readable name.
                name = image.get('RepoTags')[0]
            except Exception:
                name = image.get('Id')
            images.append(ContainerImage(
                id=image.get('Id'),
                name=name,
                path=name,
                version=None,
                driver=self.connection.driver,
                extra={
                    "created": image.get('Created'),
                    "size": image.get('Size'),
                    "virtual_size": image.get('VirtualSize'),
                },
            ))
        return images

    def list_containers(self, image=None, all=True):
        """
        List the deployed container images

        :param image: Filter to containers with a certain image
        :type  image: :class:`libcloud.container.base.ContainerImage`

        :param all: Show all container (including stopped ones)
        :type  all: ``bool``

        :rtype: ``list`` of :class:`libcloud.container.base.Container`
        """
        if all:
            ex = '?all=1'
        else:
            ex = ''
        try:
            result = self.connection.request(
                "/v%s/containers/json%s" % (self.version, ex)).object
        except Exception as exc:
            errno = getattr(exc, 'errno', None)
            # 111 == ECONNREFUSED: give a more actionable message.
            if errno == 111:
                raise DockerException(
                    errno,
                    'Make sure docker host is accessible'
                    'and the API port is correct')
            raise
        containers = [self._to_container(value) for value in result]
        return containers

    def deploy_container(self, name, image, parameters=None, start=True,
                         command=None, hostname=None, user='',
                         stdin_open=True, tty=True,
                         mem_limit=0, ports=None, environment=None, dns=None,
                         volumes=None, volumes_from=None,
                         network_disabled=False, entrypoint=None,
                         cpu_shares=None, working_dir='', domainname=None,
                         memswap_limit=0, port_bindings=None,
                         network_mode='bridge', labels=None):
        """
        Deploy an installed container image

        For details on the additional parameters see : http://bit.ly/1PjMVKV

        :param name: The name of the new container
        :type  name: ``str``

        :param image: The container image to deploy
        :type  image: :class:`libcloud.container.base.ContainerImage`

        :param parameters: Container Image parameters
        :type  parameters: ``str``

        :param start: Start the container on deployment
        :type  start: ``bool``

        :rtype: :class:`Container`
        """
        command = shlex.split(str(command))
        if port_bindings is None:
            port_bindings = {}
        params = {
            'name': name
        }
        payload = {
            'Hostname': hostname,
            'Domainname': domainname,
            'ExposedPorts': ports,
            'User': user,
            'Tty': tty,
            'OpenStdin': stdin_open,
            'StdinOnce': False,
            'Memory': mem_limit,
            'AttachStdin': True,
            'AttachStdout': True,
            'AttachStderr': True,
            'Env': environment,
            'Cmd': command,
            'Dns': dns,
            'Image': image.name,
            'Volumes': volumes,
            'VolumesFrom': volumes_from,
            'NetworkDisabled': network_disabled,
            'Entrypoint': entrypoint,
            'CpuShares': cpu_shares,
            'WorkingDir': working_dir,
            'MemorySwap': memswap_limit,
            'PublishAllPorts': True,
            'PortBindings': port_bindings,
            'NetworkMode': network_mode,
            'Labels': labels,
        }
        data = json.dumps(payload)
        try:
            result = self.connection.request('/v%s/containers/create'
                                             % (self.version),
                                             data=data,
                                             params=params, method='POST')
        except Exception as e:
            # Fix: Python 3 exceptions have no .message attribute, so the
            # old `e.message` raised AttributeError and masked the real
            # error. Fall back to str(e) when .message is absent/empty.
            message = getattr(e, 'message', None) or str(e)
            if message.startswith('No such image:'):
                raise DockerException(None, 'No such image: %s' % image.name)
            else:
                raise DockerException(None, e)
        id_ = result.object['Id']
        payload = {
            'Binds': [],
            'PublishAllPorts': True,
            'PortBindings': port_bindings,
        }
        data = json.dumps(payload)
        if start:
            # API > 1.22 rejects a start payload; older versions need it.
            if float(self._get_api_version()) > 1.22:
                result = self.connection.request(
                    '/v%s/containers/%s/start' %
                    (self.version, id_),
                    method='POST')
            else:
                result = self.connection.request(
                    '/v%s/containers/%s/start' %
                    (self.version, id_), data=data,
                    method='POST')
        return self.get_container(id_)

    def get_container(self, id):
        """
        Get a container by ID

        :param id: The ID of the container to get
        :type  id: ``str``

        :rtype: :class:`libcloud.container.base.Container`
        """
        result = self.connection.request("/v%s/containers/%s/json" %
                                         (self.version, id)).object
        return self._to_container(result)

    def start_container(self, container):
        """
        Start a container

        :param container: The container to be started
        :type  container: :class:`libcloud.container.base.Container`

        :return: The container refreshed with current data
        :rtype: :class:`libcloud.container.base.Container`
        """
        # API > 1.22 rejects a start payload; older versions need it.
        if float(self._get_api_version()) > 1.22:
            result = self.connection.request(
                '/v%s/containers/%s/start' %
                (self.version, container.id),
                method='POST')
        else:
            payload = {
                'Binds': [],
                'PublishAllPorts': True,
            }
            data = json.dumps(payload)
            result = self.connection.request(
                '/v%s/containers/%s/start' %
                (self.version, container.id),
                method='POST', data=data)
        if result.status in VALID_RESPONSE_CODES:
            return self.get_container(container.id)
        else:
            raise DockerException(result.status,
                                  'failed to start container')

    def stop_container(self, container):
        """
        Stop a container

        :param container: The container to be stopped
        :type  container: :class:`libcloud.container.base.Container`

        :return: The container refreshed with current data
        :rtype: :class:`libcloud.container.base.Container`
        """
        result = self.connection.request('/v%s/containers/%s/stop' %
                                         (self.version, container.id),
                                         method='POST')
        if result.status in VALID_RESPONSE_CODES:
            return self.get_container(container.id)
        else:
            raise DockerException(result.status,
                                  'failed to stop container')

    def restart_container(self, container):
        """
        Restart a container

        :param container: The container to be stopped
        :type  container: :class:`libcloud.container.base.Container`

        :return: The container refreshed with current data
        :rtype: :class:`libcloud.container.base.Container`
        """
        data = json.dumps({'t': 10})
        # number of seconds to wait before killing the container
        result = self.connection.request('/v%s/containers/%s/restart' %
                                         (self.version, container.id),
                                         data=data, method='POST')
        if result.status in VALID_RESPONSE_CODES:
            return self.get_container(container.id)
        else:
            raise DockerException(result.status,
                                  'failed to restart container')

    def destroy_container(self, container):
        """
        Remove a container

        :param container: The container to be destroyed
        :type  container: :class:`libcloud.container.base.Container`

        :return: True if the destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        result = self.connection.request('/v%s/containers/%s' % (self.version,
                                                                 container.id),
                                         method='DELETE')
        return result.status in VALID_RESPONSE_CODES

    def ex_list_processes(self, container):
        """
        List processes running inside a container

        :param container: The container to list processes for.
        :type  container: :class:`libcloud.container.base.Container`

        :rtype: ``str``
        """
        result = self.connection.request("/v%s/containers/%s/top" %
                                         (self.version, container.id)).object
        return result

    def ex_rename_container(self, container, name):
        """
        Rename a container

        :param container: The container to be renamed
        :type  container: :class:`libcloud.container.base.Container`

        :param name: The new name
        :type  name: ``str``

        :rtype: :class:`libcloud.container.base.Container`
        """
        result = self.connection.request('/v%s/containers/%s/rename?name=%s'
                                         % (self.version, container.id, name),
                                         method='POST')
        if result.status in VALID_RESPONSE_CODES:
            return self.get_container(container.id)

    def ex_get_logs(self, container, stream=False):
        """
        Get container logs

        If stream == True, logs will be yielded as a stream
        From Api Version 1.11 and above we need a GET request to get the logs
        Logs are in different format of those of Version 1.10 and below

        :param container: The container to list logs for
        :type  container: :class:`libcloud.container.base.Container`

        :param stream: Stream the output
        :type  stream: ``bool``

        :rtype: ``bool``
        """
        payload = {}
        data = json.dumps(payload)
        if float(self._get_api_version()) > 1.10:
            result = self.connection.request(
                "/v%s/containers/%s/logs?follow=%s&stdout=1&stderr=1" %
                (self.version, container.id, str(stream))).object
            logs = result
        else:
            result = self.connection.request(
                "/v%s/containers/%s/attach?logs=1&stream=%s&stdout=1&stderr=1"
                % (self.version, container.id, str(stream)),
                method='POST',
                data=data)
            logs = result.body
        return logs

    def ex_search_images(self, term):
        """Search for an image on Docker.io.
           Returns a list of ContainerImage objects

            >>> images = conn.ex_search_images(term='mistio')
            >>> images
            [<ContainerImage: id=rolikeusch/docker-mistio...>,
             <ContainerImage: id=mist/mistio, name=mist/mistio,
                 driver=Docker ...>]

            :param term: The search term
            :type  term: ``str``

            :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage`
        """
        term = term.replace(' ', '+')
        result = self.connection.request('/v%s/images/search?term=%s' %
                                         (self.version, term)).object
        images = []
        for image in result:
            name = image.get('name')
            images.append(
                ContainerImage(
                    id=name,
                    path=name,
                    version=None,
                    name=name,
                    driver=self.connection.driver,
                    extra={
                        "description": image.get('description'),
                        "is_official": image.get('is_official'),
                        "is_trusted": image.get('is_trusted'),
                        "star_count": image.get('star_count'),
                    },
                ))
        return images

    def ex_delete_image(self, image):
        """
        Remove image from the filesystem

        :param image: The image to remove
        :type  image: :class:`libcloud.container.base.ContainerImage`

        :rtype: ``bool``
        """
        result = self.connection.request('/v%s/images/%s' % (self.version,
                                                             image.name),
                                         method='DELETE')
        return result.status in VALID_RESPONSE_CODES

    def _to_container(self, data):
        """
        Convert container in Container instances
        """
        try:
            name = data.get('Name').strip('/')
        except Exception:
            try:
                name = data.get('Names')[0].strip('/')
            except Exception:
                name = data.get('Id')
        state = data.get('State')
        if isinstance(state, dict):
            status = data.get(
                'Status',
                state.get('Status')
                if state is not None else None)
        else:
            status = data.get('Status')
        # Fix: 'Status' may be missing entirely, in which case `status` is
        # None and the substring checks below would raise TypeError.
        # Treat a missing status as "stopped".
        if status is None:
            status = ''
        if 'Exited' in status:
            state = ContainerState.STOPPED
        elif status.startswith('Up '):
            state = ContainerState.RUNNING
        elif 'running' in status:
            state = ContainerState.RUNNING
        else:
            state = ContainerState.STOPPED
        image = data.get('Image')
        ports = data.get('Ports', [])
        created = data.get('Created')
        if isinstance(created, float):
            created = ts_to_str(created)
        extra = {
            'id': data.get('Id'),
            'status': data.get('Status'),
            'created': created,
            'image': image,
            'ports': ports,
            'command': data.get('Command'),
            'sizerw': data.get('SizeRw'),
            'sizerootfs': data.get('SizeRootFs'),
        }
        ips = []
        if ports is not None:
            for port in ports:
                if port.get('IP') is not None:
                    ips.append(port.get('IP'))
        return Container(
            id=data['Id'],
            name=name,
            image=ContainerImage(
                id=data.get('ImageID', None),
                path=image,
                name=image,
                version=None,
                driver=self.connection.driver
            ),
            ip_addresses=ips,
            state=state,
            driver=self.connection.driver,
            extra=extra)

    def _get_api_version(self):
        """
        Get the docker API version information
        """
        result = self.connection.request('/version').object
        result = result or {}
        api_version = result.get('ApiVersion')
        return api_version
def ts_to_str(timestamp):
"""
Return a timestamp as a nicely formated datetime string.
"""
date = datetime.datetime.fromtimestamp(timestamp)
date_string = date.strftime("%d/%m/%Y %H:%M %Z")
return date_string
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import io
import os
import os.path
import json
import shlex
import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
from fnmatch import fnmatch
from datetime import datetime
import requests
import six
from .. import constants
from .. import errors
from .. import tls
from .types import Ulimit, LogConfig
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
def mkbuildcontext(dockerfile):
    """Wrap a Dockerfile (file object or in-memory buffer) into a tar stream.

    Returns a seekable temporary file containing a tar archive with a
    single 'Dockerfile' member.
    """
    context = tempfile.NamedTemporaryFile()
    archive = tarfile.open(mode='w', fileobj=context)
    if isinstance(dockerfile, io.StringIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        if six.PY3:
            # StringIO has no byte length; tarfile needs bytes on Python 3.
            raise TypeError('Please use io.BytesIO to create in-memory '
                            'Dockerfiles with Python 3')
        dfinfo.size = len(dockerfile.getvalue())
        dockerfile.seek(0)
    elif isinstance(dockerfile, io.BytesIO):
        dfinfo = tarfile.TarInfo('Dockerfile')
        dfinfo.size = len(dockerfile.getvalue())
        dockerfile.seek(0)
    else:
        # A real file on disk: let tarfile derive the member metadata.
        dfinfo = archive.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
    archive.addfile(dfinfo, dockerfile)
    archive.close()
    context.seek(0)
    return context
def decode_json_header(header):
    """Decode a base64-encoded JSON HTTP header value into a Python object.

    :param header: base64 text (``str`` or ``bytes``) containing JSON.
    :return: the decoded JSON value.
    """
    data = base64.b64decode(header)
    # Fix: gate the decode on the actual type instead of the six.PY3 flag.
    # b64decode returns bytes on Python 3 (and str-which-is-bytes on
    # Python 2), so this is equivalent on both versions and removes the
    # runtime dependency on six for this function.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return json.loads(data)
def tar(path, exclude=None, dockerfile=None, fileobj=None):
    """Create a tar archive of ``path``, honouring .dockerignore excludes.

    Returns a seekable file object positioned at the start of the archive.
    """
    if not fileobj:
        fileobj = tempfile.NamedTemporaryFile()
    archive = tarfile.open(mode='w', fileobj=fileobj)
    root = os.path.abspath(path)
    exclude = exclude or []
    # Add entries one by one (sorted, non-recursive) so that excluded
    # children of included directories stay out of the archive.
    for relpath in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
        archive.add(os.path.join(root, relpath), arcname=relpath,
                    recursive=False)
    archive.close()
    fileobj.seek(0)
    return fileobj
def exclude_paths(root, patterns, dockerfile=None):
    """
    Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.

    All paths returned are relative to the root.
    """
    if dockerfile is None:
        dockerfile = 'Dockerfile'
    # Patterns prefixed with '!' re-include paths that would otherwise be
    # excluded; the Dockerfile and .dockerignore themselves are always kept.
    negated = [p for p in patterns if p.startswith('!')]
    include_patterns = [p[1:] for p in negated]
    include_patterns += [dockerfile, '.dockerignore']
    exclude_patterns = list(set(patterns) - set(negated))
    all_paths = get_paths(root)
    # Drop every path matched by an exclusion pattern.
    kept = [
        p for p in all_paths
        if not any(match_path(p, pattern) for pattern in exclude_patterns)
    ]
    # Re-add paths matched by an inclusion pattern, together with every
    # parent directory (adding back 'foo/bar' must also add 'foo').
    for p in all_paths:
        if any(match_path(p, pattern) for pattern in include_patterns):
            parts = p.split('/')
            kept += [
                '/'.join(parts[:end])
                for end in range(1, len(parts) + 1)
            ]
    return set(kept)
def get_paths(root):
    """Return every file and directory under ``root``, relative to it."""
    collected = []
    for parent, dirs, files in os.walk(root, followlinks=False):
        rel_parent = os.path.relpath(parent, root)
        # os.walk yields '.' for the root itself; use '' so joins are clean.
        if rel_parent == '.':
            rel_parent = ''
        for entry in dirs + files:
            collected.append(os.path.join(rel_parent, entry))
    return collected
def match_path(path, pattern):
    """True if ``path`` matches ``pattern``, compared component-wise.

    Only as many leading components of ``path`` as the pattern has are
    considered, so a pattern matching a directory also matches everything
    below that directory.
    """
    pattern = pattern.rstrip('/')
    depth = len(pattern.split('/'))
    prefix = '/'.join(path.split('/')[:depth])
    return fnmatch(prefix, pattern)
def compare_version(v1, v2):
    """Compare docker versions.

    NOTE: the sign convention is deliberately inverted relative to the
    usual cmp(): the *newer* version compares as smaller.

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    left = StrictVersion(v1)
    right = StrictVersion(v2)
    if left == right:
        return 0
    return -1 if left > right else 1
def version_lt(v1, v2):
    """True when version ``v1`` is older than ``v2``."""
    # compare_version returns 1 when v1 < v2 (inverted convention).
    return 0 < compare_version(v1, v2)
def version_gte(v1, v2):
    """True when version ``v1`` is the same as ``v2`` or newer."""
    return not version_lt(v1, v2)
def ping_registry(url):
    """Deprecated: probe whether a Docker registry answers on /v2/ or /v1/."""
    warnings.warn(
        'The `ping_registry` method is deprecated and will be removed.',
        DeprecationWarning
    )
    # A v2 registry replies 401 to unauthenticated pings, so 401 counts.
    return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
def ping(url, valid_4xx_statuses=None):
    """Truthy if ``url`` responds; selected 4xx codes may count as alive."""
    try:
        response = requests.get(url, timeout=3)
    except Exception:
        # Connection failure, DNS error, timeout, ... -> not reachable.
        return False
    else:
        # We don't send yet auth headers
        # and a v2 registry will respond with status 401
        return (
            response.status_code < 400 or
            (valid_4xx_statuses and
             response.status_code in valid_4xx_statuses)
        )
def _convert_port_binding(binding):
    """Normalise one port binding spec into {'HostIp': ..., 'HostPort': ...}.

    Accepts a (ip, port) tuple, a one-element tuple, a dict with
    'HostPort'/'HostIp' keys, or a bare port value.
    """
    result = {'HostIp': '', 'HostPort': ''}
    if isinstance(binding, tuple):
        if len(binding) == 2:
            result['HostPort'] = binding[1]
            result['HostIp'] = binding[0]
        elif isinstance(binding[0], six.string_types):
            # One-element tuple holding just the host IP.
            result['HostIp'] = binding[0]
        else:
            result['HostPort'] = binding[0]
    elif isinstance(binding, dict):
        if 'HostPort' in binding:
            result['HostPort'] = binding['HostPort']
            if 'HostIp' in binding:
                result['HostIp'] = binding['HostIp']
        else:
            raise ValueError(binding)
    else:
        result['HostPort'] = binding
    # Docker expects the port as a string; None means "let Docker choose".
    host_port = result['HostPort']
    result['HostPort'] = '' if host_port is None else str(host_port)
    return result
def convert_port_bindings(port_bindings):
    """Normalise {container_port: binding(s)} into the Docker API shape."""
    result = {}
    for port, binding in six.iteritems(port_bindings):
        key = str(port)
        if '/' not in key:
            # Bare port numbers default to TCP.
            key += '/tcp'
        if isinstance(binding, list):
            result[key] = [_convert_port_binding(item) for item in binding]
        else:
            result[key] = [_convert_port_binding(binding)]
    return result
def convert_volume_binds(binds):
    """Render volume binds as a list of 'host:container:mode' strings.

    A list input is assumed to already be in string form and is returned
    unchanged; a dict maps host paths to either container paths or
    {'bind': ..., 'ro'/'mode': ...} specs.
    """
    if isinstance(binds, list):
        return binds
    result = []
    for host_path, spec in binds.items():
        if isinstance(host_path, six.binary_type):
            host_path = host_path.decode('utf-8')
        if isinstance(spec, dict):
            if 'ro' in spec and 'mode' in spec:
                raise ValueError(
                    'Binding cannot contain both "ro" and "mode": {}'
                    .format(repr(spec))
                )
            container_path = spec['bind']
            if isinstance(container_path, six.binary_type):
                container_path = container_path.decode('utf-8')
            if 'ro' in spec:
                mode = 'ro' if spec['ro'] else 'rw'
            elif 'mode' in spec:
                mode = spec['mode']
            else:
                mode = 'rw'
            result.append(
                six.text_type('{0}:{1}:{2}').format(host_path, container_path,
                                                    mode)
            )
        else:
            if isinstance(spec, six.binary_type):
                spec = spec.decode('utf-8')
            result.append(
                six.text_type('{0}:{1}:rw').format(host_path, spec)
            )
    return result
def parse_repository_tag(repo_name):
    """Split an image reference into (repository, tag_or_digest_or_None)."""
    # Digest form: repo@sha256:...
    by_digest = repo_name.rsplit('@', 1)
    if len(by_digest) == 2:
        return tuple(by_digest)
    # Tag form: repo:tag. A ':' followed by a '/' belongs to a registry
    # host:port, not to a tag.
    by_tag = repo_name.rsplit(':', 1)
    if len(by_tag) == 2 and '/' not in by_tag[1]:
        return tuple(by_tag)
    return repo_name, None
# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, platform=None):
    """Normalise a Docker host address into a connection URL.

    Accepts ``tcp://``, ``unix://``, ``http://``, ``https://``,
    ``http+unix://`` and bare ``host:port[/path]`` forms; ``fd://`` is
    rejected. Returns either ``http+unix://<socket>`` or
    ``<proto>://<host>:<port><path>``.

    Raises ``errors.DockerException`` on malformed addresses.
    """
    proto = "http+unix"
    host = DEFAULT_HTTP_HOST
    port = None
    path = ''
    # Windows has no unix socket; default to tcp on localhost:2375.
    if not addr and platform == 'win32':
        addr = '{0}:{1}'.format(DEFAULT_HTTP_HOST, 2375)
    if not addr or addr.strip() == 'unix://':
        return DEFAULT_UNIX_SOCKET
    addr = addr.strip()
    # Protocol translation: http -> tcp, http+unix -> unix.
    if addr.startswith('http://'):
        addr = addr.replace('http://', 'tcp://')
    if addr.startswith('http+unix://'):
        addr = addr.replace('http+unix://', 'unix://')
    if addr == 'tcp://':
        raise errors.DockerException(
            "Invalid bind address format: {0}".format(addr))
    elif addr.startswith('unix://'):
        addr = addr[7:]
    elif addr.startswith('tcp://'):
        proto = "http"
        addr = addr[6:]
    elif addr.startswith('https://'):
        proto = "https"
        addr = addr[8:]
    elif addr.startswith('fd://'):
        raise errors.DockerException("fd protocol is not implemented")
    else:
        # No recognised scheme: any other "scheme://" is an error; a bare
        # host[:port] is treated as plain http.
        if "://" in addr:
            raise errors.DockerException(
                "Invalid bind address protocol: {0}".format(addr)
            )
        proto = "http"
    # For TCP-style protocols, split host:port and an optional /path.
    if proto != "http+unix" and ":" in addr:
        host_parts = addr.split(':')
        if len(host_parts) != 2:
            raise errors.DockerException(
                "Invalid bind address format: {0}".format(addr)
            )
        if host_parts[0]:
            host = host_parts[0]
        port = host_parts[1]
        # A path component may trail the port (e.g. 1.2.3.4:2375/prefix).
        if '/' in port:
            port, path = port.split('/', 1)
            path = '/{0}'.format(path)
        try:
            port = int(port)
        except Exception:
            raise errors.DockerException(
                "Invalid port: %s", addr
            )
    elif proto in ("http", "https") and ':' not in addr:
        raise errors.DockerException(
            "Bind address needs a port: {0}".format(addr))
    else:
        # unix socket path (or addr without ':' for http+unix).
        host = addr
    if proto == "http+unix":
        return "{0}://{1}".format(proto, host)
    return "{0}://{1}:{2}{3}".format(proto, host, port, path)
def parse_devices(devices):
    """Turn "host[:container[:permissions]]" device strings into the
    mapping dicts the Docker API expects.

    The container path defaults to the host path and permissions default
    to 'rwm' when omitted.
    """
    mappings = []
    for spec in devices:
        parts = spec.split(":")
        if not parts:
            continue
        host_path = parts[0]
        container_path = parts[1] if len(parts) > 1 else host_path
        permissions = parts[2] if len(parts) > 2 else 'rwm'
        mappings.append({"PathOnHost": host_path,
                         "PathInContainer": container_path,
                         "CgroupPermissions": permissions})
    return mappings
def kwargs_from_env(ssl_version=None, assert_hostname=None):
    """Build Client keyword arguments from the DOCKER_HOST,
    DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables.

    When TLS verification is requested, tcp:// URLs are rewritten to
    https:// and a TLSConfig pointing at the cert/key/CA files under
    the cert path (default ~/.docker) is included.
    """
    env = os.environ
    host = env.get('DOCKER_HOST')
    cert_path = env.get('DOCKER_CERT_PATH')
    tls_verify = env.get('DOCKER_TLS_VERIFY')
    params = {}
    if host:
        if tls_verify:
            params['base_url'] = host.replace('tcp://', 'https://')
        else:
            params['base_url'] = host
    if tls_verify and not cert_path:
        cert_path = os.path.join(os.path.expanduser('~'), '.docker')
    if tls_verify and cert_path:
        params['tls'] = tls.TLSConfig(
            client_cert=(os.path.join(cert_path, 'cert.pem'),
                         os.path.join(cert_path, 'key.pem')),
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            verify=True,
            ssl_version=ssl_version,
            assert_hostname=assert_hostname)
    return params
def convert_filters(filters):
    """JSON-encode a filters mapping for the Docker API.

    Booleans are lowered to the strings 'true'/'false' and every scalar
    value is wrapped in a single-element list, since the API expects
    {key: [values...]}.
    """
    normalized = {}
    for key, value in six.iteritems(filters):
        if isinstance(value, bool):
            value = 'true' if value else 'false'
        if not isinstance(value, list):
            value = [value, ]
        normalized[key] = value
    return json.dumps(normalized)
def datetime_to_timestamp(dt):
    """Convert a UTC datetime to a Unix timestamp (whole seconds).

    Microseconds are discarded; days/seconds are combined by hand so the
    result is a plain int.
    """
    epoch = datetime.utcfromtimestamp(0)
    delta = dt - epoch
    return delta.days * 24 * 3600 + delta.seconds
def parse_bytes(s):
    """Parse a human-readable size string (e.g. '512', '10k', '1gb')
    into a number of bytes, using the multipliers in BYTE_UNITS.

    Raises errors.DockerException when the digits cannot be parsed or
    the unit suffix is unknown. An empty string yields 0.
    """
    if len(s) == 0:
        s = 0
    else:
        # Two trailing letters means a two-character unit like 'kb'/'MB':
        # drop the final 'b'/'B' so only the multiplier letter remains.
        if s[-2:-1].isalpha() and s[-1].isalpha():
            if s[-1] == "b" or s[-1] == "B":
                s = s[:-1]
        units = BYTE_UNITS
        suffix = s[-1].lower()
        # Check if the variable is a string representation of an int
        # without a units part. Assuming that the units are bytes.
        if suffix.isdigit():
            digits_part = s
            suffix = 'b'
        else:
            digits_part = s[:-1]
        if suffix in units.keys() or suffix.isdigit():
            try:
                digits = int(digits_part)
            except ValueError:
                message = ('Failed converting the string value for'
                           'memory ({0}) to a number.')
                formatted_message = message.format(digits_part)
                raise errors.DockerException(formatted_message)
            # Scale by the unit multiplier ('b' maps to 1 — TODO confirm
            # against BYTE_UNITS, which is defined elsewhere in this file).
            s = digits * units[suffix]
        else:
            message = ('The specified value for memory'
                       ' ({0}) should specify the units. The postfix'
                       ' should be one of the `b` `k` `m` `g`'
                       ' characters')
            raise errors.DockerException(message.format(s))
    return s
def host_config_type_error(param, param_value, expected):
    """Build (not raise) a TypeError describing a bad host_config param type."""
    template = 'Invalid type for {0} param: expected {1} but found {2}'
    return TypeError(template.format(param, expected, type(param_value)))
def host_config_version_error(param, version, less_than=True):
    """Build (not raise) an InvalidVersion error for an unsupported param.

    `less_than` selects whether the param is unsupported below or above
    the given API version.
    """
    operator = '<' if less_than else '>'
    template = '{0} param is not supported in API versions {1} {2}'
    return errors.InvalidVersion(template.format(param, operator, version))
def host_config_value_error(param, param_value):
    """Build (not raise) a ValueError describing a bad host_config value."""
    template = 'Invalid value for {0} param: {1}'
    return ValueError(template.format(param, param_value))
def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
                       publish_all_ports=False, links=None, privileged=False,
                       dns=None, dns_search=None, volumes_from=None,
                       network_mode=None, restart_policy=None, cap_add=None,
                       cap_drop=None, devices=None, extra_hosts=None,
                       read_only=None, pid_mode=None, ipc_mode=None,
                       security_opt=None, ulimits=None, log_config=None,
                       mem_limit=None, memswap_limit=None, mem_swappiness=None,
                       cgroup_parent=None, group_add=None, cpu_quota=None,
                       cpu_period=None, oom_kill_disable=False, version=None):
    """Build the HostConfig dict for the Docker "create container" call.

    Each keyword maps onto one HostConfig key; options left at their
    default are omitted from the result. Type and minimum-API-version
    checks raise TypeError / ValueError / errors.InvalidVersion via the
    host_config_*_error helpers.

    Fix: the oom_kill_disable gate used to check version_lt(version,
    '1.20') while both its own error message and the Docker API say the
    option was introduced in 1.19; the check now matches.
    """
    host_config = {}
    # Calling this module-level function directly is deprecated; a version
    # must normally be supplied by Client.create_host_config().
    if not version:
        warnings.warn(
            'docker.utils.create_host_config() is deprecated. Please use '
            'Client.create_host_config() instead.'
        )
        version = constants.DEFAULT_DOCKER_API_VERSION
    # Memory limits accept human-readable strings ('512m', '1g').
    if mem_limit is not None:
        if isinstance(mem_limit, six.string_types):
            mem_limit = parse_bytes(mem_limit)
        host_config['Memory'] = mem_limit
    if memswap_limit is not None:
        if isinstance(memswap_limit, six.string_types):
            memswap_limit = parse_bytes(memswap_limit)
        host_config['MemorySwap'] = memswap_limit
    if mem_swappiness is not None:
        if version_lt(version, '1.20'):
            raise host_config_version_error('mem_swappiness', '1.20')
        if not isinstance(mem_swappiness, int):
            raise host_config_type_error(
                'mem_swappiness', mem_swappiness, 'int'
            )
        host_config['MemorySwappiness'] = mem_swappiness
    # Only the host PID namespace (or the default) is supported.
    if pid_mode not in (None, 'host'):
        raise host_config_value_error('pid_mode', pid_mode)
    elif pid_mode:
        host_config['PidMode'] = pid_mode
    if ipc_mode:
        host_config['IpcMode'] = ipc_mode
    if privileged:
        host_config['Privileged'] = privileged
    if oom_kill_disable:
        # OomKillDisable was introduced in API 1.19 (the old check
        # required 1.20, contradicting its own error message).
        if version_lt(version, '1.19'):
            raise host_config_version_error('oom_kill_disable', '1.19')
        host_config['OomKillDisable'] = oom_kill_disable
    if publish_all_ports:
        host_config['PublishAllPorts'] = publish_all_ports
    if read_only is not None:
        host_config['ReadonlyRootfs'] = read_only
    if dns_search:
        host_config['DnsSearch'] = dns_search
    if network_mode:
        host_config['NetworkMode'] = network_mode
    elif network_mode is None and compare_version('1.19', version) > 0:
        # Newer daemons expect an explicit 'default' network mode.
        host_config['NetworkMode'] = 'default'
    if restart_policy:
        if not isinstance(restart_policy, dict):
            raise host_config_type_error(
                'restart_policy', restart_policy, 'dict'
            )
        host_config['RestartPolicy'] = restart_policy
    if cap_add:
        host_config['CapAdd'] = cap_add
    if cap_drop:
        host_config['CapDrop'] = cap_drop
    if devices:
        host_config['Devices'] = parse_devices(devices)
    if group_add:
        if version_lt(version, '1.20'):
            raise host_config_version_error('group_add', '1.20')
        host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
    if dns is not None:
        host_config['Dns'] = dns
    if security_opt is not None:
        if not isinstance(security_opt, list):
            raise host_config_type_error('security_opt', security_opt, 'list')
        host_config['SecurityOpt'] = security_opt
    if volumes_from is not None:
        # A comma-separated string is accepted as shorthand for a list.
        if isinstance(volumes_from, six.string_types):
            volumes_from = volumes_from.split(',')
        host_config['VolumesFrom'] = volumes_from
    if binds is not None:
        host_config['Binds'] = convert_volume_binds(binds)
    if port_bindings is not None:
        host_config['PortBindings'] = convert_port_bindings(port_bindings)
    if extra_hosts is not None:
        # Dict form {host: ip} is flattened into sorted "host:ip" strings.
        if isinstance(extra_hosts, dict):
            extra_hosts = [
                '{0}:{1}'.format(k, v)
                for k, v in sorted(six.iteritems(extra_hosts))
            ]
        host_config['ExtraHosts'] = extra_hosts
    if links is not None:
        if isinstance(links, dict):
            links = six.iteritems(links)
        formatted_links = ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
        host_config['Links'] = formatted_links
    # LxcConf dicts are converted to the [{'Key': k, 'Value': v}] wire form.
    if isinstance(lxc_conf, dict):
        formatted = []
        for k, v in six.iteritems(lxc_conf):
            formatted.append({'Key': k, 'Value': str(v)})
        lxc_conf = formatted
    if lxc_conf is not None:
        host_config['LxcConf'] = lxc_conf
    if cgroup_parent is not None:
        host_config['CgroupParent'] = cgroup_parent
    if ulimits is not None:
        if not isinstance(ulimits, list):
            raise host_config_type_error('ulimits', ulimits, 'list')
        host_config['Ulimits'] = []
        for l in ulimits:
            if not isinstance(l, Ulimit):
                l = Ulimit(**l)
            host_config['Ulimits'].append(l)
    if log_config is not None:
        if not isinstance(log_config, LogConfig):
            if not isinstance(log_config, dict):
                raise host_config_type_error(
                    'log_config', log_config, 'LogConfig'
                )
            log_config = LogConfig(**log_config)
        host_config['LogConfig'] = log_config
    if cpu_quota:
        if not isinstance(cpu_quota, int):
            raise host_config_type_error('cpu_quota', cpu_quota, 'int')
        if version_lt(version, '1.19'):
            raise host_config_version_error('cpu_quota', '1.19')
        host_config['CpuQuota'] = cpu_quota
    if cpu_period:
        if not isinstance(cpu_period, int):
            raise host_config_type_error('cpu_period', cpu_period, 'int')
        if version_lt(version, '1.19'):
            raise host_config_version_error('cpu_period', '1.19')
        host_config['CpuPeriod'] = cpu_period
    return host_config
def parse_env_file(env_file):
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".

    Blank lines and lines starting with '#' are skipped. Values may
    themselves contain '=' characters; only the first one separates the
    key from the value.

    Fixes: a blank line used to raise IndexError (via line[0]), and
    split('=') used to reject any value containing '='.
    """
    environment = {}
    with open(env_file, 'r') as f:
        for line in f:
            # Skip full-line comments and blank lines.
            if line.startswith('#') or not line.strip():
                continue
            parse_line = line.strip().split('=', 1)
            if len(parse_line) == 2:
                k, v = parse_line
                environment[k] = v
            else:
                raise errors.DockerException(
                    'Invalid line in environment file {0}:\n{1}'.format(
                        env_file, line))
    return environment
def split_command(command):
    """Split a command string into an argv-style list using shell rules.

    On Python 2 the string is first encoded to UTF-8 bytes, presumably to
    avoid shlex's unicode handling issues there.
    """
    needs_bytes = six.PY2 and not isinstance(command, six.binary_type)
    return shlex.split(command.encode('utf-8') if needs_bytes else command)
def create_container_config(
    version, image, command, hostname=None, user=None, detach=False,
    stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
    dns=None, volumes=None, volumes_from=None, network_disabled=False,
    entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
    memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
    labels=None, volume_driver=None, stop_signal=None
):
    """Normalize the arguments into the dict payload for the Docker
    "create container" API endpoint.

    Strings are split into argv lists, env dicts become "K=V" lists,
    ports/volumes are converted to their mapping wire forms, and
    per-API-version rules are enforced (raising errors.InvalidVersion
    when an option is not available for `version`).
    """
    # Command / entrypoint strings become argv lists.
    if isinstance(command, six.string_types):
        command = split_command(command)
    if isinstance(entrypoint, six.string_types):
        entrypoint = split_command(entrypoint)
    # Environment dicts are flattened to "KEY=VALUE" strings.
    if isinstance(environment, dict):
        environment = [
            six.text_type('{0}={1}').format(k, v)
            for k, v in six.iteritems(environment)
        ]
    if labels is not None and compare_version('1.18', version) < 0:
        raise errors.InvalidVersion(
            'labels were only introduced in API version 1.18'
        )
    if stop_signal is not None and compare_version('1.21', version) < 0:
        raise errors.InvalidVersion(
            'stop_signal was only introduced in API version 1.21'
        )
    # From API 1.19 on, memory limits live in host_config instead.
    if compare_version('1.19', version) < 0:
        if volume_driver is not None:
            raise errors.InvalidVersion(
                'Volume drivers were only introduced in API version 1.19'
            )
        mem_limit = mem_limit if mem_limit is not None else 0
        memswap_limit = memswap_limit if memswap_limit is not None else 0
    else:
        if mem_limit is not None:
            raise errors.InvalidVersion(
                'mem_limit has been moved to host_config in API version 1.19'
            )
        if memswap_limit is not None:
            raise errors.InvalidVersion(
                'memswap_limit has been moved to host_config in API '
                'version 1.19'
            )
    # A list of label names becomes a dict with empty string values.
    if isinstance(labels, list):
        labels = dict((lbl, six.text_type('')) for lbl in labels)
    if isinstance(mem_limit, six.string_types):
        mem_limit = parse_bytes(mem_limit)
    if isinstance(memswap_limit, six.string_types):
        memswap_limit = parse_bytes(memswap_limit)
    # Ports may be ints or (port, proto) tuples; the API wants a
    # {"port/proto": {}} mapping.
    if isinstance(ports, list):
        exposed_ports = {}
        for port_definition in ports:
            port = port_definition
            proto = 'tcp'
            if isinstance(port_definition, tuple):
                if len(port_definition) == 2:
                    proto = port_definition[1]
                port = port_definition[0]
            exposed_ports['{0}/{1}'.format(port, proto)] = {}
        ports = exposed_ports
    # Volumes: a single string or list of paths becomes {path: {}}.
    if isinstance(volumes, six.string_types):
        volumes = [volumes, ]
    if isinstance(volumes, list):
        volumes_dict = {}
        for vol in volumes:
            volumes_dict[vol] = {}
        volumes = volumes_dict
    if volumes_from:
        if not isinstance(volumes_from, six.string_types):
            volumes_from = ','.join(volumes_from)
    else:
        # Force None, an empty list or dict causes client.start to fail
        volumes_from = None
    # Attach std streams only when running in the foreground.
    attach_stdin = False
    attach_stdout = False
    attach_stderr = False
    stdin_once = False
    if not detach:
        attach_stdout = True
        attach_stderr = True
        if stdin_open:
            attach_stdin = True
            stdin_once = True
    # dns / volumes_from moved to start() as of API 1.10.
    if compare_version('1.10', version) >= 0:
        message = ('{0!r} parameter has no effect on create_container().'
                   ' It has been moved to start()')
        if dns is not None:
            raise errors.InvalidVersion(message.format('dns'))
        if volumes_from is not None:
            raise errors.InvalidVersion(message.format('volumes_from'))
    return {
        'Hostname': hostname,
        'Domainname': domainname,
        'ExposedPorts': ports,
        'User': six.text_type(user) if user else None,
        'Tty': tty,
        'OpenStdin': stdin_open,
        'StdinOnce': stdin_once,
        'Memory': mem_limit,
        'AttachStdin': attach_stdin,
        'AttachStdout': attach_stdout,
        'AttachStderr': attach_stderr,
        'Env': environment,
        'Cmd': command,
        'Dns': dns,
        'Image': image,
        'Volumes': volumes,
        'VolumesFrom': volumes_from,
        'NetworkDisabled': network_disabled,
        'Entrypoint': entrypoint,
        'CpuShares': cpu_shares,
        # Both keys are populated; 'Cpuset' predates 'CpusetCpus'.
        'Cpuset': cpuset,
        'CpusetCpus': cpuset,
        'WorkingDir': working_dir,
        'MemorySwap': memswap_limit,
        'HostConfig': host_config,
        'MacAddress': mac_address,
        'Labels': labels,
        'VolumeDriver': volume_driver,
        'StopSignal': stop_signal
    }
|
|
"""
Script used to test the adaptive interpolation and
the evaluation of said interpolant
"""
from __future__ import absolute_import
import os
import time
import numpy as np
import numpy.linalg as la
import scipy.special as spec
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import adaptive_interpolation.adapt as adapt
import adaptive_interpolation.approximator as app
import adaptive_interpolation.generate as generate
import adaptive_interpolation.adaptive_interpolation as adapt_i
# Bessel function of the first kind — the target function for the
# interpolation tests below.
def f(x, order=0):
    """Return J_order(x), the Bessel function of the first kind."""
    return spec.jn(order, x)
def f0(x, v):
    """Select one of several special functions by the index v.

    0 -> J0, 1 -> J10, 2/3 -> Hankel H1 of order 0/10,
    4/5 -> Hankel H2 of order 0/10, anything else -> Airy.
    """
    if v == 0:
        return f(x)
    if v == 1:
        return spec.jn(10, x)
    if v == 2:
        return spec.hankel1(0, x)
    if v == 3:
        return spec.hankel1(10, x)
    if v == 4:
        return spec.hankel2(0, x)
    if v == 5:
        return spec.hankel2(10, x)
    return spec.airy(x)
# Piecewise test function defined on [0, 11]; inputs above 11 are
# silently dropped from the output (as in the original elif chain).
def f1(x0):
    """Evaluate the piecewise test function element-wise over x0."""
    def _piece(x):
        # First matching interval wins, mirroring the elif chain.
        if x < 1:
            return 1 + x
        if x < 2.02:
            return 1 + x**2
        if x < 3.5:
            return -3*np.log(x)
        if x < 4.4:
            return np.exp(np.sqrt(x))
        if x < 7.001:
            return 3
        if x < 9.306:
            return np.sqrt(x**4.4) / 100.
        return x - 3
    return np.array([_piece(x) for x in x0 if x <= 11])
# Plot the approximation against the true values, plus the absolute
# error on a log scale.
def my_plot(x, actual, approximation, abs_errors):
    """Show two figures: actual vs. approximate values, and |error|."""
    plt.figure()
    plt.title('Actual and Approximate values Graphed')
    plt.plot(x, actual, 'r')
    plt.plot(x, approximation, 'b')

    plt.figure()
    plt.yscale('log')
    plt.title('Absolute Error in Interpolated Values')
    # The 1e-17 offset keeps exact zeros visible on the log axis.
    plt.plot(x, abs_errors + 1e-17, 'gs')

    plt.show()
# Given a specific Approximator class, this will test how the
# performance and accuracy varies when the code is varied from branching
# and vectorized to not branching and not vectorized
def test_parallel(approx):
    """Time run_code over the generated variants and check they agree.

    Fixes relative to the original: `inverval` -> `interval` (NameError),
    the undefined `val00`-style names -> the `val_00` variables actually
    bound, and an integer sample count for np.linspace.
    """
    size = int(1e7)
    interval = approx.heap[1][3]
    x = np.linspace(interval[0], interval[1], size, dtype=np.float64)
    # NOTE(review): the three non-vectorized variants are generated with
    # identical arguments — confirm the intended branching/vectorized flags.
    nb_nv = adapt_i.generate_code(approx)
    nb_v = adapt_i.generate_code(approx)
    b_nv = adapt_i.generate_code(approx)
    b_v = adapt_i.generate_code(approx, 1, 1, size)
    # time run_code functions and return times
    t00 = time.time()
    val_00 = adapt_i.run_code(x, approx=0, vectorized=False)
    t00 = time.time() - t00
    t01 = time.time()
    val_01 = adapt_i.run_code(x, approx, vectorized=True)
    t01 = time.time() - t01
    t10 = time.time()
    val_10 = adapt_i.run_code(x, approx=0, vectorized=False)
    t10 = time.time() - t10
    t11 = time.time()
    val_11 = adapt_i.run_code(x, approx, vectorized=True)
    t11 = time.time() - t11
    # function values are independent of generative method
    assert la.norm(val_00 - val_01, np.inf) < 1e-15
    assert la.norm(val_00 - val_10, np.inf) < 1e-15
    assert la.norm(val_00 - val_11, np.inf) < 1e-15
    assert la.norm(val_01 - val_10, np.inf) < 1e-15
    assert la.norm(val_01 - val_11, np.inf) < 1e-15
    assert la.norm(val_10 - val_11, np.inf) < 1e-15
    print("nb_nv\tnb_v\tb_nv\tb_v")
    print(t00, '\t', t01, '\t', t10, '\t', t11)
    return [t00, t01, t10, t11]
def test_all_parallel_methods():
    """Build an interpolant in each supported basis and benchmark each
    one with test_parallel."""
    a, b = 0, 10
    est1 = adapt_i.make_interpolant(a, b, f, 3, 1e-9, "monomial")
    est2 = adapt_i.make_interpolant(a, b, f, 3, 1e-9, "chebyshev")
    est3 = adapt_i.make_interpolant(a, b, f, 3, 1e-9, "legendre")
    for est in (est1, est2, est3):
        test_parallel(est)
def test_exact_interpolants():
    """Interpolating exact polynomials of degree <= the interpolant order
    must reproduce them to near machine precision.

    Fix: np.linspace requires an integer sample count; 1e5 is a float and
    raises a TypeError on modern numpy.
    """
    order1 = lambda x: 3*x + 7
    order4 = lambda x: 4.123*x**4 - 5.6*x**3 - x**2 + 4.5
    order6 = lambda x: x**6 - 3*x**5 - 2*x**4 + x - 3
    order8 = lambda x: x**8 - 42*x**7 + 7.5*x**5 - 4.1234*x**4 - 1.2*x**2
    a, b = -10, 10
    x = np.linspace(a, b, int(1e5), dtype=np.float64)
    est1 = adapt_i.make_interpolant(a, b, order1, 1, 1e-9, "monomial").evaluate(x)
    est4 = adapt_i.make_interpolant(a, b, order4, 4, 1e-9, "monomial").evaluate(x)
    est6 = adapt_i.make_interpolant(a, b, order6, 6, 1e-9, "monomial").evaluate(x)
    est8 = adapt_i.make_interpolant(a, b, order8, 8, 1e-9, "monomial").evaluate(x)
    print(la.norm(est1-order1(x), np.inf)/la.norm(order1(x), np.inf))
    print(la.norm(est4-order4(x), np.inf)/la.norm(order4(x), np.inf))
    print(la.norm(est6-order6(x), np.inf)/la.norm(order6(x), np.inf))
    print(la.norm(est8-order8(x), np.inf)/la.norm(order8(x), np.inf))
    # Relative sup-norm errors must be at machine-epsilon level.
    assert la.norm(est1-order1(x), np.inf)/la.norm(order1(x), np.inf) < 1e-15
    assert la.norm(est4-order4(x), np.inf)/la.norm(order4(x), np.inf) < 1e-15
    assert la.norm(est6-order6(x), np.inf)/la.norm(order6(x), np.inf) < 1e-15
    assert la.norm(est8-order8(x), np.inf)/la.norm(order8(x), np.inf) < 1e-15
# tests that the returned interpolant is below the given error
def test_guaranteed_accuracy():
    """Check that every interpolant honors its requested error bound for
    several awkward functions, bases and tolerances.

    Fix: np.linspace requires an integer sample count; 1e5 is a float and
    raises a TypeError on modern numpy.
    """
    func1 = lambda x: np.sin(1./(x))
    func2 = lambda x: np.abs(x*np.sin(x))
    func3 = lambda x: np.sqrt(x)
    func4 = lambda x: np.abs(x*np.cos(x))
    # Start slightly above zero so func1/func3 stay finite.
    a, b = 0.01, 10
    x = np.linspace(a, b, int(1e5), dtype=np.float64)
    for func in [func4, func2, func3, func1]:
        for err in [1e-3, 1e-6, 1e-9]:
            for interpolant in ["monomial", "chebyshev", "legendre"]:
                est = adapt_i.make_interpolant(a, b, func, 6, err, interpolant).evaluate(x)
                abs_err = la.norm(est-func(x), np.inf)
                rel_err = abs_err/la.norm(func(x), np.inf)
                print(interpolant, err, rel_err)
                plt.figure()
                plt.plot(x, func(x), 'r')
                plt.plot(x, est, 'b')
                plt.show()
                # The achieved relative error must beat the request.
                assert rel_err < err
def test_cheb_surf_speed():
    """Benchmark evaluation time as a function of interpolant order and
    input size, then plot the (order, size) -> time surface.

    Fixes relative to the original, which did not run: `approx` was
    referenced but never constructed (NameError), `index_y` was never
    initialized, and np.linspace was given a float sample count.
    """
    n = 4
    a, b = 0, 10
    orders = np.arange(8, 20, 2)
    sizes = np.arange(2, 8)
    # z[i][j] accumulates run time for orders[i] at input size 10**sizes[j].
    z = [[0 for _ in sizes] for _ in orders]
    for trial in range(n):
        index_x = 0
        for order in orders:
            # Build the interpolant for this order (the original referenced
            # an undefined `approx` here) — TODO confirm intended arguments.
            approx = adapt_i.make_interpolant(a, b, f, order, 1e-9, "chebyshev")
            adapt_i.generate_code(approx, 0, 1)
            y = np.linspace(a, b, 1000)
            print("rel_error",
                  la.norm(approx.evaluate(y)-f(y), np.inf)/la.norm(f(y), np.inf))
            index_y = 0
            for i in sizes:
                x = np.linspace(a, b, 10**i)
                start_time = time.time()
                val = adapt_i.run_code(x, approx, vectorized=True)
                run_time = time.time() - start_time
                print(z)
                if trial > 1:  # throw out first two trials
                    z[index_x][index_y] += run_time
                index_y += 1
            index_x += 1
    # Average the retained (n-2) timed trials.
    for x in range(len(z)):
        for y in range(len(z[x])):
            z[x][y] = z[x][y]/(n-2)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    x, y = np.meshgrid(orders, sizes)
    ax.plot_surface(x, y, np.array(z))
    plt.show()
def test_speed():
    """Benchmark interpolant evaluation time against scipy's Bessel
    evaluation over a range of array sizes, and save a timing plot.

    NOTE(review): this benchmark appears bit-rotted — np.linspace is given
    the float count 8*5e3, and generate.build_code/run_single are called
    with different argument shapes than in test_throughput; confirm the
    generate module's current API before relying on it.
    """
    n = 10
    # Warm-up runs discarded before timing starts.
    throw_out=40
    a, b = 0, 20
    sizes = 2**np.arange(1, 13)
    #sizes = np.linspace(1e2, 5e6, 5, dtype=np.int)
    tests = []
    orders = [9, 16]
    # tests[0] is scipy's time; tests[j+1] is the j-th order's time.
    tests.append(0*np.zeros(sizes.shape))
    tests.append(0*np.zeros(sizes.shape))
    for j in range(len(orders)):
        tests.append(0*np.zeros(sizes.shape))
        approx = adapt_i.make_interpolant(a, b, f, orders[j], 1e-9, 'chebyshev')
        if True: # test interpolant is accurate
            # NOTE(review): 8*5e3 is a float sample count — modern numpy
            # requires an int here.
            y = np.linspace(a, b, 8*5e3)
            adapt_i.generate_code(approx, 8*5e3, 32)
            knl, q, xd, yd, treed = generate.build_code(y, approx)
            _, z = generate.run_single(approx)
            rel_err = la.norm(z-f(y),np.inf)/la.norm(f(y), np.inf)
            print("rel_error", orders[j], rel_err)
        for i in range(sizes.shape[0]):
            index = 0
            x = np.linspace(a, b, sizes[i])
            adapt_i.generate_code(approx, sizes[i], 1)
            knl, q, xd, yd, treed = generate.build_code(x, approx)
            for trial in range(n+throw_out):
                print("order: "+repr(j)+"/"+repr(len(orders))+"\ttrial:"+repr(trial+1)+"/"+repr(n+throw_out)+"\r")
                run_time, _ = generate.run_single(approx)
                # run code multiple times before actually adding to tests
                if trial >= throw_out:
                    tests[j+1][i] += run_time
                    # Time scipy only once (while benchmarking order 0).
                    if j == 0:
                        if trial-throw_out > 17:
                            print("mine",run_time)
                        start_time = time.time()
                        val = f(x)
                        run_time = time.time() - start_time
                        #print(la.norm(_-val,np.inf)/la.norm(val, np.inf))
                        if trial - throw_out > 17:
                            print("scipy", run_time, sizes[i], trial-throw_out)
                        tests[0][i] += run_time
            print()
    # average out each test
    for i in range(len(tests)):
        tests[i] /= float(n)
    fig = plt.figure()
    plt.title("Runtimes 1E-9 prec. bessel 0-20, {0} trials, vw=1".format(n))
    plt.xlabel("Size of evaluated array")
    plt.ylabel("Time to evaluate (seconds)")
    #plt.yscale("log")
    #plt.xscale("log")
    sp, = plt.plot(sizes, tests[0], 'r', label='scipy bessel')
    i = 0
    hand=[sp]
    colors = ['b', 'g', 'y', 'k', 'm', 'c']
    for order in orders:
        a1, = plt.plot(sizes, tests[i+1], colors[i],label="{0}th order approx".format(order))
        i+=1
        hand.append(a1)
    plt.legend(handles =hand)
    #plt.show()
    string = "data/t"+repr(time.time())+"n"+repr(n)+"vw1o"+repr(orders[0])+".png"
    fig.savefig(string)
def test_throughput(n, d, precision, size):
    """Benchmark evaluation throughput for each vector width and compare
    it with scipy, then save a bar chart.

    n: timed trials per configuration; d: '32' or '64' (float width);
    precision: interpolant tolerance; size: evaluated array length.

    Fixes relative to the original: the `size` argument was immediately
    overwritten with 2**14 (silently ignoring the caller's value), and
    `trial > throw_out` accumulated only n-1 samples while dividing by n.
    """
    # Only single/double precision widths are supported.
    if d != '32' and d != '64':
        return
    throw_out = 20
    a, b = 0, 20
    vws = [1, 2, 4, 8, 16, 32, 64]
    # Array footprint used to normalize times; float(d) is bits/element.
    # NOTE(review): dividing by 8*2**20 yields MiB, not GB, despite the name.
    GB = size * float(d) / (8*2**20)
    orders = [9]
    # tests[0] holds scipy's throughput; tests[j+1] the j-th order's.
    tests = [[0 for __ in range(len(vws))] for _ in range(len(orders) + 1)]
    for j in range(len(orders)):
        approx = adapt_i.make_interpolant(a, b, f, orders[j], precision,
                                          'chebyshev', dtype=d)
        for v in range(len(vws)):
            # see how much time to process array
            adapt_i.generate_code(approx, size, vws[v])
            print()
            knl, q, treed = generate.build_code(approx)
            print(approx.code)
            for trial in range(n+throw_out):
                print("order: "+repr(j)+"/"+repr(len(orders))+"\ttrial:"+repr(trial+1)+"/"+repr(n+throw_out)+"\r")
                o = np.float32 if d == '32' else np.float64
                x = np.random.uniform(a, b, size=size).astype(o)
                run_time, _ = generate.run_single(x, approx)
                # Discard warm-up runs; `>=` keeps exactly n samples so the
                # averaging below divides by the right count.
                if trial >= throw_out:
                    tests[j+1][v] += GB/run_time
                    # only evaluate scipy's speed the first time
                    if j == 0:
                        start_time = time.time()
                        val = f(x, v)
                        run_time = time.time() - start_time
                        tests[0][v] += GB/run_time
    print()
    # average out each test
    for i in range(len(tests)):
        for j in range(len(vws)):
            tests[i][j] /= float(n)
    fig = plt.figure()
    plt.title("throughput {0} single, {1} trials, vw={2}".format(precision, n, vws[0]))
    plt.xlabel("Function Evaluated")
    plt.ylabel("Average Throughput (GB/s)")
    #plt.yscale("log")
    #plt.xscale("log")
    i = 0
    z = np.linspace(-.2, .2, len(vws))
    colors = ['b', 'g', 'y', 'k', 'm', 'c']
    for v in range(len(vws)):
        plt.bar(i+z[v], tests[i][v], width=.3/len(vws), align='center', color=colors[i])
    xticks = ['scipy specials']
    for order in orders:
        z = np.linspace(-.2, .2, len(vws))
        for v in range(len(vws)):
            plt.bar(i+1+z[v], tests[i+1][v], width=.3/len(vws), align='center', color=colors[i])
        i += 1
        xticks.append("{0}th order approx".format(order))
    plt.xticks(range(len(orders)+1), xticks)
    #plt.show()
    string = "../data/00"+repr(d)+"t"+repr(time.time()%100)+"n"+repr(n)+"+vw"+repr(vws[0])+repr(vws[-1])+"o"+repr(orders[0])+repr(precision)+repr(size)+".png"
    fig.savefig(string)
# run the main program
if __name__ == "__main__":
    # Other benchmarks, kept around for manual runs:
    #test_speed()
    #test_cheb_surf_speed()
    #test_exact_interpolants()
    #test_guaranteed_accuracy()
    #test_all_parallel_methods()
    precision = 1e-6
    # Benchmark both float widths at two array sizes.
    for width in ('32', '64'):
        for array_size in (2**10, 2**14):
            test_throughput(25, width, precision, array_size)
|
|
import mimetypes
import os
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.errors import InvalidHeaderDefect, NonASCIILocalPartDefect
from email.header import Header
from email.headerregistry import Address
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, getaddresses, make_msgid, parseaddr
from io import BytesIO, StringIO
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
# Quoted-Printable variant, chosen by SafeMIMEText.set_payload when the
# body contains lines longer than the RFC 5322 limit below.
utf8_charset_qp = Charset.Charset('utf-8')
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
# Maximum line length tolerated in a message body (per RFC 5322).
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
    """Raised when a header value contains a CR/LF, which would allow
    header injection (see forbid_multi_line_headers)."""
    pass
# Header names that contain structured address data (RFC #5322).
# Non-ASCII values for these headers are sanitized address-by-address in
# forbid_multi_line_headers() rather than MIME-word encoded wholesale.
ADDRESS_HEADERS = {
    'from',
    'sender',
    'reply-to',
    'to',
    'cc',
    'bcc',
    'resent-from',
    'resent-sender',
    'resent-to',
    'resent-cc',
    'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
    """Forbid multi-line headers, to prevent header injection.

    Returns the (name, value) pair with the value encoded for safe use
    as a header; raises BadHeaderError on embedded newlines.
    """
    encoding = encoding or settings.DEFAULT_CHARSET
    val = force_text(val)
    if '\n' in val or '\r' in val:
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    try:
        val.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII value: address headers are sanitized per address,
        # everything else is MIME-word encoded wholesale.
        if name.lower() in ADDRESS_HEADERS:
            parsed = getaddresses((val,))
            val = ', '.join(sanitize_address(addr, encoding) for addr in parsed)
        else:
            val = Header(val, encoding).encode()
    else:
        # Pure ASCII; only Subject still gets wrapped for line folding.
        if name.lower() == 'subject':
            val = Header(val).encode()
    return name, val
def split_addr(addr, encoding):
"""
Split the address into local part and domain, properly encoded.
When non-ascii characters are present in the local part, it must be
MIME-word encoded. The domain name must be idna-encoded if it contains
non-ascii characters.
"""
if '@' in addr:
localpart, domain = addr.split('@', 1)
# Try to get the simplest encoding - ascii if possible so that
# to@example.com doesn't become =?utf-8?q?to?=@example.com. This
# makes unit testing a bit easier and more readable.
try:
localpart.encode('ascii')
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = domain.encode('idna').decode('ascii')
else:
localpart = Header(addr, encoding).encode()
domain = ''
return (localpart, domain)
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
if not isinstance(addr, tuple):
addr = parseaddr(force_text(addr))
nm, addr = addr
localpart, domain = None, None
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN or non-ascii in the local part
localpart, domain = split_addr(addr, encoding)
# An `email.headerregistry.Address` object is used since
# email.utils.formataddr() naively encodes the name as ascii (see #25986).
if localpart and domain:
address = Address(nm, username=localpart, domain=domain)
return str(address)
try:
address = Address(nm, addr_spec=addr)
except (InvalidHeaderDefect, NonASCIILocalPartDefect):
localpart, domain = split_addr(addr, encoding)
address = Address(nm, username=localpart, domain=domain)
return str(address)
class MIMEMixin:
    """Mixin overriding as_string()/as_bytes() so lines beginning with
    'From ' are not mangled (see bug #13433)."""

    def as_string(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as a string.

        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.
        """
        buf = StringIO()
        gen = generator.Generator(buf, mangle_from_=False)
        gen.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return buf.getvalue()

    def as_bytes(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as bytes.

        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.
        """
        buf = BytesIO()
        gen = generator.BytesGenerator(buf, mangle_from_=False)
        gen.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return buf.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
    """message/rfc822 wrapper whose headers reject injection attempts."""

    def __setitem__(self, name, val):
        # message/rfc822 attachments must be ASCII
        name, val = forbid_multi_line_headers(name, val, 'ascii')
        super().__setitem__(name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
    """MIMEText that rejects header injection and picks a UTF-8 body
    encoding based on line length."""

    def __init__(self, _text, _subtype='plain', _charset=None):
        # Remember the charset so __setitem__ can encode headers with it.
        self.encoding = _charset
        super().__init__(_text, _subtype=_subtype, _charset=_charset)

    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        super().__setitem__(name, val)

    def set_payload(self, payload, charset=None):
        if charset == 'utf-8':
            limit = RFC5322_EMAIL_LINE_LENGTH_LIMIT
            has_long_lines = any(
                len(line.encode('utf-8')) > limit
                for line in payload.splitlines()
            )
            # Quoted-Printable encoding has the side effect of shortening long
            # lines, if any (#22561).
            charset = utf8_charset_qp if has_long_lines else utf8_charset
        super().set_payload(payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
    """MIMEMultipart whose headers reject injection attempts."""

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
        # Must be set before super().__init__, which already adds headers.
        self.encoding = encoding
        super().__init__(_subtype, boundary, _subparts, **_params)

    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        super().__setitem__(name, val)
class EmailMessage:
    """
    A container for email information.
    """
    # MIME subtype of the body: text/<content_subtype>.
    content_subtype = 'plain'
    # Multipart subtype used when attachments are present.
    mixed_subtype = 'mixed'
    encoding = None  # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
    """
    Attaches a file with the given filename and content. The filename can
    be omitted and the mimetype is guessed, if not provided.
    If the first parameter is a MIMEBase subclass it is inserted directly
    into the resulting message attachments.
    For a text/* mimetype (guessed or specified), when a bytes object is
    specified as content, it will be decoded as UTF-8. If that fails,
    the mimetype will be set to DEFAULT_ATTACHMENT_MIME_TYPE and the
    content is not decoded.
    """
    if isinstance(filename, MIMEBase):
        # Ready-made MIME object: content/mimetype must not be supplied.
        assert content is None
        assert mimetype is None
        self.attachments.append(filename)
        return
    assert content is not None
    # Explicit mimetype wins; otherwise guess from the filename, falling
    # back to the generic binary type.
    mimetype = (mimetype
                or mimetypes.guess_type(filename)[0]
                or DEFAULT_ATTACHMENT_MIME_TYPE)
    basetype, _subtype = mimetype.split('/', 1)
    if basetype == 'text' and isinstance(content, bytes):
        try:
            content = content.decode('utf-8')
        except UnicodeDecodeError:
            # If mimetype suggests the file is text but it's actually
            # binary, read() will raise a UnicodeDecodeError on Python 3.
            mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
    self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
    """
    Attaches a file from the filesystem.
    The mimetype will be set to the DEFAULT_ATTACHMENT_MIME_TYPE if it is
    not specified and cannot be guessed.
    For a text/* mimetype (guessed or specified), the file's content
    will be decoded as UTF-8. If that fails, the mimetype will be set to
    DEFAULT_ATTACHMENT_MIME_TYPE and the content is not decoded.
    """
    # Read raw bytes; attach() decides whether/how to decode them.
    with open(path, 'rb') as attachment_file:
        payload = attachment_file.read()
    self.attach(os.path.basename(path), payload, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
    """
    Converts the content, mimetype pair into a MIME attachment object.
    If the mimetype is message/rfc822, content may be an
    email.Message or EmailMessage object, as well as a str.
    """
    basetype, subtype = mimetype.split('/', 1)
    if basetype == 'text':
        text_encoding = self.encoding or settings.DEFAULT_CHARSET
        return SafeMIMEText(content, subtype, text_encoding)
    if basetype == 'message' and subtype == 'rfc822':
        # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
        # must not be base64 encoded.
        if isinstance(content, EmailMessage):
            # convert content into an email.Message first
            content = content.message()
        elif not isinstance(content, Message):
            # For compatibility with existing code, parse the message
            # into an email.Message object if it is not one already.
            content = message_from_string(content)
        return SafeMIMEMessage(content, subtype)
    # Everything else is treated as binary and base64 encoded.
    attachment = MIMEBase(basetype, subtype)
    attachment.set_payload(content)
    Encoders.encode_base64(attachment)
    return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    alternative_subtype = 'alternative'

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, alternatives=None,
                 cc=None, reply_to=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).
        All strings used to create the message can be unicode strings (or UTF-8
        bytestrings). The SafeMIMEText class will handle any necessary encoding
        conversions.
        """
        super(EmailMultiAlternatives, self).__init__(
            subject, body, from_email, to, bcc, connection, attachments,
            headers, cc, reply_to,
        )
        self.alternatives = alternatives or []

    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))

    def _create_message(self, msg):
        # Alternatives wrap the body first; attachments wrap the result.
        return self._create_attachments(self._create_alternatives(msg))

    def _create_alternatives(self, msg):
        """Wrap *msg* in a multipart/alternative container when needed."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if not self.alternatives:
            return msg
        container = SafeMIMEMultipart(_subtype=self.alternative_subtype,
                                      encoding=encoding)
        if self.body:
            container.attach(msg)
        for alternative in self.alternatives:
            container.attach(self._create_mime_attachment(*alternative))
        return container
|
|
import networkx as nx
import re
import matplotlib.pyplot as plt
import networkx.algorithms.isomorphism as iso
import math
import numpy as np
import time
from itertools import cycle
from collections import deque, defaultdict
from .generalFunctions import *
from .path import Path
class SuperG():
    """Directed, time-expanded transmission graph over all context elements.

    Nodes are named '<element name>.<phase>' for phases 0..5; an edge is
    added wherever one element can transmit to another in that phase.
    """

    def __init__(self, context):
        # Build the global graph first; per-element views derive from it.
        self.Graph = self.createGraph(context)
        self.createElementGraphs(context)
        # self.drawGraph()

    def createElementGraphs(self, context):
        """Attach an ElementG view of this graph to every space element."""
        for e in [element for element in context.elements if element.isSpace()]:
            e.elementG = ElementG(self, e)

    def canTransmit(self, txElement, rxElement):
        """Return True when txElement may transmit to rxElement.

        Space->space links work between adjacent sections (with a 0/5
        wrap-around); space->ground links only within the same section.
        Only a space element can transmit.
        """
        # NOTE(review): the regex keeps only the *last* digit of the
        # location string, so section numbers >= 10 would be misread --
        # assumes sections are single-digit; confirm with getLocation().
        txsection = int(re.search(r'.+(\d)', txElement.getLocation()).group(1))
        rxsection = int(re.search(r'.+(\d)', rxElement.getLocation()).group(1))
        canT = False
        if txElement.isSpace() and rxElement.isSpace():
            # adjacent sections, or the wrap-around pair (0, 5)
            if abs(txsection - rxsection) <= 1 or abs(txsection - rxsection) == 5:
                canT = True
        elif txElement.isSpace() and rxElement.isGround():
            # downlink only within the same section
            if txsection == rxsection:
                canT = True
        return canT

    def createGraph(self, context):
        """Build the 6-phase digraph; zero-weight edges for feasible links.

        The phase order is rotated so iteration starts at the current time
        slot, and the context is propagated between phases so element
        positions match the phase being built.
        """
        G = nx.DiGraph()
        torder = context.time%6
        d = deque(range(6))
        d.rotate(-torder)
        for graphorder in d:
            enames = [e.name+'.%d'%graphorder for e in context.elements]
            G.add_nodes_from(enames)
            for tx in enames:
                for rx in [e for e in enames if e != tx]:
                    # node names end in '.<digit>'; strip the 2-char suffix
                    # to look the element up again
                    telement = context.nodeElementDict[tx[:-2]]
                    relement = context.nodeElementDict[rx[:-2]]
                    if self.canTransmit(telement, relement):
                        # txowner = telement.federateOwner
                        # rxowner = relement.federateOwner
                        cost = 0.
                        # if txowner != rxowner:
                        #     if relement.isSpace():
                        #         cost = rxowner.getCost('oISL', txowner)
                        #     elif relement.isGround():
                        #         cost = rxowner.getCost('oSGL', txowner)
                        # print("new edge:", telement.name, relement.name)
                        G.add_edge(tx, rx, weight=cost)
            # advance element positions before building the next phase
            context.propagate()
        return G

    def drawGraph(self):
        """Render the graph with matplotlib (debugging aid)."""
        plt.figure()
        # nx.draw_networkx_nodes(self.Graph, pos)
        # nx.draw_networkx_edges(self.Graph, pos)
        nx.draw(self.Graph)
        plt.show()
class ElementG():
    """Per-element copy of the super graph with storage-penalty self edges.

    Adds '<name>.<p>' -> '<name>.<p+1>' edges so holding data on board
    between phases carries an explicit cost, then precomputes all paths
    to ground stations for every phase.
    """

    def __init__(self, SuperG, element):
        # One storage-penalty slot per time phase.
        self.storagePenalty = deque(6 * [0])
        self.Graph = None
        self.elementOwner = element
        self.orderPathDict = defaultdict(list)
        self.createGraph(SuperG.Graph)
        self.addPaths()

    def getPaths(self, time):
        """Return the precomputed paths for the phase matching *time*."""
        return self.orderPathDict[time % 6]

    def createGraph(self, G):
        """Copy the super graph and add storage edges between phases."""
        self.Graph = G.copy()
        owner_name = self.elementOwner.name
        for phase, penalty in enumerate(self.storagePenalty):
            src = '%s.%d' % (owner_name, phase % 6)
            dst = '%s.%d' % (owner_name, (phase + 1) % 6)
            self.Graph.add_edge(src, dst, weight=penalty)

    def updateGraph(self, context, taskvaluelist):
        """Refresh storage penalties from the federate and reweight edges."""
        owner = self.elementOwner
        self.storagePenalty = deque(owner.federateOwner.getStorageCostList(
            owner, taskvaluelist=taskvaluelist))
        # Align slot 0 with the federate's current time phase.
        self.storagePenalty.rotate(-(owner.federateOwner.time % 6))
        for phase, penalty in enumerate(self.storagePenalty):
            src = '%s.%d' % (owner.name, phase % 6)
            dst = '%s.%d' % (owner.name, (phase + 1) % 6)
            self.Graph[src][dst]['weight'] = penalty

    def bfs_paths(self, source, destination):
        """Yield every loop-free path from source to destination (BFS order)."""
        queue = [(source, [source])]
        while queue:
            node, path = queue.pop(0)
            for neighbor in set(self.Graph.neighbors(node)) - set(path):
                if neighbor == destination:
                    yield path + [neighbor]
                else:
                    queue.append((neighbor, path + [neighbor]))

    def findAllPaths(self, source, destinations):
        """Collect the paths from *source* to each destination node."""
        found = []
        for destination in destinations:
            found.extend(self.bfs_paths(source, destination))
        return found

    def addPaths(self):
        """Precompute Path objects from each phase node to all ground stations."""
        nodes = self.Graph.nodes()
        own_name = self.elementOwner.name
        sources = [n for n in nodes if own_name in n]
        destinations = [n for n in nodes if 'GS' in n]
        for src in sources:
            nodelists = self.findAllPaths(src, destinations)
            # The trailing character of the node name is the phase digit.
            phase = int(src[-1])
            self.orderPathDict[phase] = (
                [Path(self.elementOwner, nl) for nl in nodelists]
                if nodelists else [])

    def findcheapestpath(self, deltatime):
        pass
# def findShortestPathes(self, Graph):
# nodes = Graph.nodes()
# # print "nodes:", nodes
# sourcename = '%s.%d'%(self.elementOwner.name, self.graphOrder)
#
# groundstations = [n for n in nodes if 'GS' in n]
# # print "ground stations:", groundstations
# temppathlist = []
# pathcostlist = []
# for i in range(len(self.storagePenalty)):
# for g in groundstations:
# # print sourcename, g
# if nx.has_path(Graph, source=sourcename,target=g):
# sh = nx.shortest_path(Graph, sourcename, g)
# temppathlist.append(sh)
# tuplist = convertPath2Edge(sh)
# # print tuplist
# costlist = []
# for (source, target) in tuplist:
# cost = (0 if (self.elementOwners[sourcename[:-2]] == self.elementOwners[target[:-2]] and sourcename[:-2] != target[:-2])
# else Graph[source][target]['weight'])
# costlist.append(cost)
#
# pathcostlist.append(costlist)
#
# # print pathcostlist
# # print "find shortest paths:", temppathlist, pathcostlist
# return temppathlist, pathcostlist
# def findcheapestpath(self, deltatime):
# future = (self.graphOrder + deltatime)%6
# futurename = '%s.%d'%(self.elementOwner.name, future)
#
# pathlist = self.superShorestPaths
# costlist = self.superPathsCost
# pathcost = [tup for tup in zip(costlist, pathlist) if futurename in tup[1]]
#
# sortedpath = sorted([(sum(x), y) for x,y in pathcost])
# # print "cost vs path:", sorted(zip([sum(c) for c in costlist], pathlist))
#
# # return convertPath2Edge(sortedpath[0])
# return sortedpath[0]
# def setGraphList(self, context):
# self.graphList = context.Graph.graphList
# self.graphOrder = context.Graph.graphOrder
|
|
# coding: ascii
# pygame - Python Game Library
# Copyright (C) 2000-2003 Pete Shinners
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Pete Shinners
# pete@shinners.org
"""sysfont, used in the font module to find system fonts"""
import os
import sys
from pygame.compat import xrange_, PY_MAJOR_VERSION
from os.path import basename, dirname, exists, join, splitext
# Font file extensions the loaders accept.
OpenType_extensions = frozenset(('.ttf', '.ttc', '.otf'))
# Global caches filled lazily by initsysfonts():
#   Sysfonts: simplified font name -> {(bold, italic): font file path}
#   Sysalias: common font names absent from the system -> installed styles
Sysfonts = {}
Sysalias = {}
# Python 3 compatibility
if PY_MAJOR_VERSION >= 3:
    def toascii(raw):
        """convert bytes to ASCII-only string"""
        return raw.decode('ascii', 'ignore')
    if os.name == 'nt':
        # the registry module was renamed in Python 3; alias it so the
        # rest of the module can keep using the Python 2 name
        import winreg as _winreg
    else:
        # fc-list is queried through subprocess on non-Windows systems
        import subprocess
else:
    def toascii(raw):
        """return ASCII characters of a given unicode or 8-bit string"""
        return raw.decode('ascii', 'ignore')
    if os.name == 'nt':
        import _winreg
    else:
        import subprocess
def _simplename(name):
"""create simple version of the font name"""
# return alphanumeric characters of a string (converted to lowercase)
return ''.join(c.lower() for c in name if c.isalnum())
def _addfont(name, bold, italic, font, fontdict):
"""insert a font and style into the font dictionary"""
if name not in fontdict:
fontdict[name] = {}
fontdict[name][bold, italic] = font
def initsysfonts_win32():
    """initialize fonts dictionary on Windows

    Reads (display name, filename) pairs from the registry and returns a
    dict mapping simplified family names to {(bold, italic): path}.
    """
    fontdir = join(os.environ.get('WINDIR', 'C:\\Windows'), 'Fonts')
    TrueType_suffix = '(TrueType)'
    # style/vendor tokens that are not part of the family name
    mods = ('demibold', 'narrow', 'light', 'unicode', 'bt', 'mt')
    fonts = {}
    # add fonts entered in the registry
    # find valid registry keys containing font information.
    # http://docs.python.org/lib/module-sys.html
    # 0 (VER_PLATFORM_WIN32s) Win32s on Windows 3.1
    # 1 (VER_PLATFORM_WIN32_WINDOWS) Windows 95/98/ME
    # 2 (VER_PLATFORM_WIN32_NT) Windows NT/2000/XP
    # 3 (VER_PLATFORM_WIN32_CE) Windows CE
    if sys.getwindowsversion()[0] == 1:
        key_name = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Fonts"
    else:
        key_name = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts"
    key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name)
    for i in xrange_(_winreg.QueryInfoKey(key)[1]):
        try:
            # name is the font's name e.g. Times New Roman (TrueType)
            # font is the font's filename e.g. times.ttf
            name, font = _winreg.EnumValue(key, i)[0:2]
        except EnvironmentError:
            break
        # try to handle windows unicode strings for file names with
        # international characters
        if PY_MAJOR_VERSION < 3:
            # here are two documents with some information about it:
            # http://www.python.org/peps/pep-0277.html
            # https://www.microsoft.com/technet/archive/interopmigration/linux/mvc/lintowin.mspx#ECAA
            try:
                font = str(font)
            except UnicodeEncodeError:
                # MBCS is the windows encoding for unicode file names.
                try:
                    font = font.encode('MBCS')
                except Exception:
                    # no success with str or MBCS encoding... skip this font.
                    continue
        if splitext(font)[1].lower() not in OpenType_extensions:
            continue
        if not dirname(font):
            font = join(fontdir, font)
        if name.endswith(TrueType_suffix):
            # Bug fix: str.rstrip(TrueType_suffix) strips *characters* from
            # the suffix's character set, which can eat trailing letters of
            # the family name itself (e.g. a name ending in 'e', 'p', 'y',
            # 'T', 'r' or 'u').  Slice the suffix off instead.
            name = name[:-len(TrueType_suffix)].rstrip()
        name = name.lower().split()
        bold = italic = 0
        for m in mods:
            if m in name:
                name.remove(m)
        if 'bold' in name:
            name.remove('bold')
            bold = 1
        if 'italic' in name:
            name.remove('italic')
            italic = 1
        name = ''.join(name)
        name = _simplename(name)
        _addfont(name, bold, italic, font, fonts)
    return fonts
def initsysfonts_darwin():
    """read the fonts on OS X. X11 is required for this to work."""
    # Check the post-10.4 X11 location first, then the X11 shipped on the
    # OS X 10.3 installation disc; without X11 there is nothing to scan.
    for fc_list_path in ("/usr/X11/bin/fc-list", "/usr/X11R6/bin/fc-list"):
        if exists(fc_list_path):
            return initsysfonts_unix(fc_list_path)
    return {}
# read the fonts on unix
def initsysfonts_unix(path="fc-list"):
    """use the fc-list from fontconfig to get a list of fonts"""
    fonts = {}
    try:
        # capture stderr so a missing fc-list does not print noise
        proc = subprocess.Popen('%s : file family style' % path, shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                close_fds=True)
        flout, flerr = proc.communicate()
    except Exception:
        return fonts
    entries = toascii(flout)
    try:
        for line in entries.split('\n'):
            try:
                filename, family, style = line.split(':', 2)
                if splitext(filename)[1].lower() not in OpenType_extensions:
                    continue
                bold = 'Bold' in style
                italic = 'Italic' in style
                oblique = 'Oblique' in style
                # first non-empty family component, else the file stem
                name = next((part for part in family.split(',') if part),
                            splitext(basename(filename))[0])
                _addfont(_simplename(name), bold, italic or oblique,
                         filename, fonts)
            except Exception:
                # malformed line -- try the next one.
                pass
    except Exception:
        pass
    return fonts
def create_aliases():
    """map common fonts that are absent from the system to similar fonts that are installed in the system"""
    alias_groups = (
        ('monospace', 'misc-fixed', 'courier', 'couriernew', 'console',
         'fixed', 'mono', 'freemono', 'bitstreamverasansmono',
         'verasansmono', 'monotype', 'lucidaconsole'),
        ('sans', 'arial', 'helvetica', 'swiss', 'freesans',
         'bitstreamverasans', 'verasans', 'verdana', 'tahoma'),
        ('serif', 'times', 'freeserif', 'bitstreamveraserif', 'roman',
         'timesroman', 'timesnewroman', 'dutch', 'veraserif',
         'georgia'),
        ('wingdings', 'wingbats'),
    )
    _not_found = object()
    for alias_set in alias_groups:
        # pick the first member of the group that is actually installed
        representative = _not_found
        for name in alias_set:
            if name in Sysfonts:
                representative = Sysfonts[name]
                break
        if representative is _not_found:
            continue
        # point every missing member of the group at the installed one
        for name in alias_set:
            if name not in Sysfonts:
                Sysalias[name] = representative
# initialize it all, called once
def initsysfonts():
    """Scan the platform's fonts into the Sysfonts cache (called once)."""
    if sys.platform == 'win32':
        loader = initsysfonts_win32
    elif sys.platform == 'darwin':
        loader = initsysfonts_darwin
    else:
        loader = initsysfonts_unix
    Sysfonts.update(loader())
    create_aliases()
    if not Sysfonts:
        # dummy entry so we don't try to reinit on every call
        Sysfonts[None] = None
# pygame.font specific declarations
def font_constructor(fontpath, size, bold, italic):
    """Default factory: build a pygame.font.Font and apply the styles."""
    import pygame.font
    new_font = pygame.font.Font(fontpath, size)
    if bold:
        new_font.set_bold(1)
    if italic:
        new_font.set_italic(1)
    return new_font
# the exported functions
def SysFont(name, size, bold=False, italic=False, constructor=None):
    """pygame.font.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font
    create a pygame Font from system font resources
    This will search the system fonts for the given font
    name. You can also enable bold or italic styles, and
    the appropriate system font will be selected if available.
    This will always return a valid Font object, and will
    fallback on the builtin pygame font if the given font
    is not found.
    Name can also be a comma separated list of names, in
    which case set of names will be searched in order. Pygame
    uses a small set of common font aliases, if the specific
    font you ask for is not available, a reasonable alternative
    may be used.
    if optional constructor is provided, it must be a function with
    signature constructor(fontpath, size, bold, italic) which returns
    a Font instance. If None, a pygame.font.Font object is created.
    """
    if constructor is None:
        constructor = font_constructor
    if not Sysfonts:
        initsysfonts()
    # gotbold/gotitalic record whether the chosen file already carries the
    # requested style, so we don't fake it again at render time.
    gotbold = gotitalic = False
    fontname = None
    if name:
        allnames = name
        for name in allnames.split(','):
            name = _simplename(name)
            # exact match first, then the alias table
            styles = Sysfonts.get(name)
            if not styles:
                styles = Sysalias.get(name)
            if styles:
                plainname = styles.get((False, False))
                fontname = styles.get((bold, italic))
                if not fontname and not plainname:
                    # Neither requested style, nor plain font exists, so
                    # return a font with the name requested, but an
                    # arbitrary style.
                    (style, fontname) = list(styles.items())[0]
                    # Attempt to style it as requested. This can't
                    # unbold or unitalicize anything, but it can
                    # fake bold and/or fake italicize.
                    if bold and style[0]:
                        gotbold = True
                    if italic and style[1]:
                        gotitalic = True
                elif not fontname:
                    # requested style missing: fall back to the plain file
                    fontname = plainname
                elif plainname != fontname:
                    # a dedicated file for the requested style was found
                    gotbold = bold
                    gotitalic = italic
            if fontname:
                break
    # ask the renderer to fake whatever style the file itself lacks
    set_bold = set_italic = False
    if bold and not gotbold:
        set_bold = True
    if italic and not gotitalic:
        set_italic = True
    return constructor(fontname, size, set_bold, set_italic)
def get_fonts():
    """pygame.font.get_fonts() -> list
    get a list of system font names
    Returns the list of all found system fonts. Note that
    the names of the fonts will be all lowercase with spaces
    removed. This is how pygame internally stores the font
    names for matching.
    """
    if not Sysfonts:
        initsysfonts()
    return list(Sysfonts.keys())
def match_font(name, bold=0, italic=0):
    """pygame.font.match_font(name, bold=0, italic=0) -> name
    find the filename for the named system font
    This performs the same font search as the SysFont()
    function, only it returns the path to the TTF file
    that would be loaded. The font name can be a comma
    separated list of font names to try.
    If no match is found, None is returned.
    """
    if not Sysfonts:
        initsysfonts()
    fontname = None
    for single_name in name.split(','):
        simple = _simplename(single_name)
        # exact match first, then the alias table
        styles = Sysfonts.get(simple) or Sysalias.get(simple)
        if not styles:
            continue
        # degrade the requested style step by step: drop italic, then
        # bold, then take an arbitrary style as a last resort
        while not fontname:
            fontname = styles.get((bold, italic))
            if italic:
                italic = 0
            elif bold:
                bold = 0
            elif not fontname:
                fontname = list(styles.values())[0]
        if fontname:
            break
    return fontname
|
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for deploy_board project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import logging
logger = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(__file__)
PROJECT_PATH = BASE_DIR
# Template engine: project-level templates dir plus per-app templates.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS' : [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
            ],
        },
    },
]
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRET_KEY", None)
# Client-side sessions signed with SECRET_KEY -- no session table needed.
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# OAuth
# Any value other than "OFF" (the default) enables OAuth.
OAUTH_ENABLED_STR = os.getenv("OAUTH_ENABLED", "OFF")
if OAUTH_ENABLED_STR == "OFF":
    OAUTH_ENABLED = False
else:
    OAUTH_ENABLED = True
# OAuth provider configuration, all supplied through the environment.
OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET")
OAUTH_CALLBACK = os.getenv("OAUTH_CALLBACK")
OAUTH_DOMAIN = os.getenv("OAUTH_DOMAIN")
OAUTH_CLIENT_TYPE = os.getenv("OAUTH_CLIENT_TYPE")
OAUTH_USER_INFO_URI = os.getenv("OAUTH_USER_INFO_URI")
OAUTH_USER_INFO_KEY = os.getenv("OAUTH_USER_INFO_KEY")
OAUTH_ACCESS_TOKEN_URL = os.getenv("OAUTH_ACCESS_TOKEN_URL")
OAUTH_AUTHORIZE_URL = os.getenv("OAUTH_AUTHORIZE_URL")
OAUTH_DEFAULT_SCOPE = os.getenv("OAUTH_DEFAULT_SCOPE")
OAUTH_USERNAME_INFO_KEY = os.getenv("OAUTH_USERNAME_INFO_KEY")
OAUTH_EXTRACT_USERNAME_FROM_EMAIL = os.getenv("OAUTH_EXTRACT_USERNAME_FROM_EMAIL")
# Teletraan backend service url
TELETRAAN_SERVICE_URL = os.getenv("TELETRAAN_SERVICE_URL")
TELETRAAN_SERVICE_VERSION = os.getenv("TELETRAAN_SERVICE_VERSION")
# When set, a fixed token is used instead of per-user OAuth (see the
# middleware selection further down in this module).
TELETRAAN_SERVICE_FIXED_OAUTH_TOKEN = os.getenv("TELETRAAN_SERVICE_FIXED_OAUTH_TOKEN", None)
TELETRAAN_HOST_INFORMATION_URL = os.getenv("HOST_INFORMATION_URL")
# CMDB vars
CMDB_API_HOST = os.getenv("CMDB_API_HOST", "http://localhost:8080/")
CMDB_INSTANCE_URL = os.getenv("CMDB_INSTANCE_URL", "api/cmdb/getinstance/")
CMDB_UI_HOST = os.getenv("CMDB_UI_HOST", "localhost")
PHOBOS_URL = os.getenv("PHOBOS_URL")
# Serviceframework add-on vars
SERVICE_RATELIMIT_CONFIG_URL = os.getenv("SERVICE_RATELIMIT_CONFIG_URL")
STATSBOARD_API_FORMAT = os.getenv("STATSBOARD_API_FORMAT", "OFF")
RATELIMIT_ENABLED_METRIC_FORMAT = os.getenv("RATELIMIT_ENABLED_METRIC_FORMAT", "OFF")
ENABLING_SERVICE_RATELIMIT_URL = os.getenv("ENABLING_SERVICE_RATELIMIT_URL", "OFF")
KAFKA_MSGS_DELIVERED_METRIC = os.getenv("KAFKA_MSGS_DELIVERED_METRIC", "OFF")
DASHBOARD_URL_ENDPOINT_FORMAT = os.getenv("DASHBOARD_URL_ENDPOINT_FORMAT","OFF")
# For rolling out new features
# Comma-separated env names; note "".split(",") yields [''], not [].
GUINEA_PIG_ENVS = os.getenv("GUINEA_PIG_ENVS", "").split(",")
KAFKA_LOGGING_ADD_ON_ENVS = os.getenv("KAFKA_LOGGING_ADD_ON_ENVS", "").split(",")
# NOTE(review): LOG_DIR has no default; if unset, the log filenames below
# become 'None/service.log' -- confirm deployments always provide it.
LOG_DIR = os.getenv("LOG_DIR")
LOG_LEVEL = os.getenv("LOG_LEVEL")
# Change to your domain or hosts
if LOG_LEVEL == 'DEBUG':
    # DEBUG log level doubles as the switch for Django debug mode.
    DEBUG = True
    TEMPLATE_DEBUG = True
    ALLOWED_HOSTS = ['*']
else:
    ALLOWED_HOSTS = ['*']
# Rotating file logs for the app and for Django request errors, plus a
# console handler; all levels driven by LOG_LEVEL.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'default': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '%s/service.log' % LOG_DIR,
            'maxBytes': 1024 * 1024 * 5, # 5 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        'request_handler': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '%s/access.log' % LOG_DIR,
            'maxBytes': 1024 * 1024 * 5, # 5 MB
            'backupCount': 5,
            'formatter': 'standard',
        },
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        '': {
            'handlers': ['default', 'console'],
            'level': LOG_LEVEL,
            'propagate': True
        },
        'django.request': {
            'handlers': ['request_handler'],
            'level': LOG_LEVEL,
            'propagate': False
        },
    }
}
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'deploy_board.webapp',
)
# Choose the auth middleware: a fixed service token (when configured)
# or per-user delegated OAuth.
oauth_middleware = 'deploy_board.webapp.security.DelegatedOAuthMiddleware'
if TELETRAAN_SERVICE_FIXED_OAUTH_TOKEN:
    oauth_middleware = 'deploy_board.webapp.security.FixedOAuthMiddleware'
MIDDLEWARE_CLASSES = (
    'csp.middleware.CSPMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    oauth_middleware,
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'deploy_board.webapp.error_views.ExceptionHandlerMiddleware',
    'deploy_board.webapp.security.PRRMiddleware'
)
# Legacy (pre-1.8) context processor setting kept alongside TEMPLATES.
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.request",
)
ROOT_URLCONF = 'deploy_board.urls'
WSGI_APPLICATION = 'deploy_board.wsgi.application'
# Internationalization / timezone defaults.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# The number of days since the build publish date required to trigger an old build version warning message
OLD_BUILD_WARNING_THRESHOLD_DAYS = 10
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, "static"),
)
#STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Site global metrics
# Empty by default; populated below when IS_PINTEREST is on.
SITE_METRICS_CONFIGS = []
# Deep Teletraan backend health check url
TELETRAAN_SERVICE_HEALTHCHECK_URL = os.getenv("TELETRAAN_SERVICE_HEALTHCHECK_URL", None)
# Show hosts that are STOPPING or STOPPED in the environments page
DISPLAY_STOPPING_HOSTS = os.getenv("DISPLAY_STOPPING_HOSTS", "true")
# Pinterest specific settings
IS_PINTEREST = True if os.getenv("IS_PINTEREST", "false") == "true" else False
BUILD_URL = os.getenv("BUILD_URL", None)
USER_DATA_CONFIG_SETTINGS_WIKI = os.getenv("USER_DATA_CONFIG_SETTINGS_WIKI", None)
TELETRAAN_DISABLE_CREATE_ENV_PAGE = True if os.getenv("TELETRAAN_DISABLE_CREATE_ENV_PAGE", "false") == "true" else False
TELETRAAN_REDIRECT_CREATE_ENV_PAGE_URL = os.getenv("TELETRAAN_REDIRECT_CREATE_ENV_PAGE_URL", None)
IS_DURING_CODE_FREEZE = True if os.getenv("TELETRAAN_CODE_FREEZE", "false") == "true" else False
TELETRAAN_CODE_FREEZE_URL = os.getenv("TELETRAAN_CODE_FREEZE_URL", None)
TELETRAAN_JIRA_SOURCE_URL = os.getenv("TELETRAAN_JIRA_SOURCE_URL", None)
TELETRAAN_TRANSFER_OWNERSHIP_URL = os.getenv("TELETRAAN_TRANSFER_OWNERSHIP_URL", None)
TELETRAAN_RESOURCE_OWNERSHIP_WIKI_URL = os.getenv("TELETRAAN_RESOURCE_OWNERSHIP_WIKI_URL", None)
# use Rodimus if present
RODIMUS_SERVICE_URL = os.getenv("RODIMUS_SERVICE_URL", None)
RODIMUS_SERVICE_VERSION = os.getenv("RODIMUS_SERVICE_VERSION", None)
if IS_PINTEREST:
    # Pinterest-only settings: secrets come from knox, site health metric
    # dashboards are configured, and CSP rules are tightened.
    # use knox if present
    KNOX_SESSION_ID = os.getenv("KNOX_SESSION_ID")
    if KNOX_SESSION_ID:
        from knox import Knox
        SECRET_KEY = Knox().get_primary(KNOX_SESSION_ID)
    ADMIN_OAUTH_SECRET_KNOX_ID = os.getenv("ADMIN_OAUTH_SECRET_KNOX_ID")
    if ADMIN_OAUTH_SECRET_KNOX_ID:
        from knox import Knox
        OAUTH_CLIENT_SECRET = Knox().get_primary(ADMIN_OAUTH_SECRET_KNOX_ID)
    # Site health metrics
    REQUESTS_URL = os.getenv("REQUESTS_URL")
    SUCCESS_RATE_URL = os.getenv("SUCCESS_RATE_URL")
    LATENCY_URL = os.getenv("LATENCY_URL")
    # Each entry defines a gauge with red/yellow/green value bands.
    SITE_METRICS_CONFIGS = [
        {"title": "Requests", "url": REQUESTS_URL,
         "specs": [{"min": 0, "max": 50000, "color": "Red"},
                   {"min": 50000, "max": 80000, "color": "Yellow"},
                   {"min": 80000, "max": 200000, "color": "Green"}]},
        {"title": "Success", "url": SUCCESS_RATE_URL,
         "specs": [{"min": 90, "max": 98, "color": "Red"},
                   {"min": 98, "max": 99, "color": "Yellow"},
                   {"min": 99, "max": 100, "color": "Green"}]},
        {"title": "Latency", "url": LATENCY_URL,
         "specs": [{"min": 800, "max": 1000, "color": "Red"},
                   {"min": 600, "max": 800, "color": "Yellow"},
                   {"min": 300, "max": 600, "color": "Green"}]}
    ]
    # Pinterest ngapp2 status file
    NGAPP_PRE_DEPLOY_STATUS_NODE = "varnish_pre_deploy_status"
    NGAPP_POST_DEPLOY_STATUS_NODE = "varnish_post_deploy_status"
    NGAPP_ROLLBACK_STATUS_NODE = "varnish_rollback_status"
    NGAPP_DEPLOY_CHANNEL = "deploys"
    DEFAULT_START_TIME = "-1d"
    #Pinterest Default Cloud Provider
    DEFAULT_PROVIDER = 'AWS'
    #Pinterest Default AMI image name
    DEFAULT_CMP_IMAGE = 'cmp_base-ebs'
    #Pinterest Default Host Type
    DEFAULT_CMP_HOST_TYPE = 'EbsComputeLo(Recommended)'
    DEFAULT_CELL = 'aws-us-east-1'
    DEFAULT_PLACEMENT = os.getenv('DEFAULT_CMP_PLACEMENT')
    #Pinterest Default Puppet Environment
    DEFAULT_CMP_PINFO_ENVIRON = os.getenv('DEFAULT_CMP_PINFO_ENVIRON')
    DEFAULT_CMP_ACCESS_ROLE = os.getenv('DEFAULT_CMP_ACCESS_ROLE')
    #CSP Config
    CSP_SCRIPT_SRC = ("'self'", "https://www.google.com/ 'unsafe-inline' 'unsafe-eval'")
    # Bug fix: ("'self'") is just a parenthesized string, not a tuple.
    # django-csp expects a sequence of source expressions and joins the
    # elements with spaces, so a bare string would be emitted as its
    # individual characters. The trailing comma makes a 1-tuple.
    CSP_DEFAULT_SRC = ("'self'",)
    CSP_CONNECT_SRC = ("'self'",)
    CSP_EXCLUDE_URL_PREFIXES = ('/api-docs',)
    CSP_STYLE_SRC = ("'self'", "'unsafe-inline'")
    # Nimbus service url
    NIMBUS_SERVICE_URL = os.getenv("NIMBUS_SERVICE_URL", None)
    NIMBUS_SERVICE_VERSION = os.getenv("NIMBUS_SERVICE_VERSION", None)
|
|
import os
import time
import json
import struct
import logging
import platform
import subprocess
from fcntl import ioctl
from pyroute2.common import map_namespace
from pyroute2.common import ANCIENT
# from pyroute2.netlink import NLMSG_ERROR
from pyroute2.netlink import nla
from pyroute2.netlink import nlmsg
from pyroute2.netlink import nlmsg_atoms
from pyroute2.netlink.rtnl.iw_event import iw_event
# it's simpler to double constants here, than to change all the
# module layout; but it is a subject of the future refactoring
# RTNL message types duplicated here to avoid a circular module dependency.
RTM_NEWLINK = 16
RTM_DELLINK = 17
#
# Delay (seconds) to let sysfs-based fallbacks settle on ancient kernels.
_ANCIENT_BARRIER = 0.3
# sysfs paths used by the compat_* fallbacks below.
_BONDING_MASTERS = '/sys/class/net/bonding_masters'
_BONDING_SLAVES = '/sys/class/net/%s/bonding/slaves'
_BRIDGE_MASTER = '/sys/class/net/%s/brport/bridge/ifindex'
_BONDING_MASTER = '/sys/class/net/%s/master/ifindex'
IFNAMSIZ = 16
TUNDEV = '/dev/net/tun'
# TUNSET* ioctl numbers are arch-dependent (the ioctl direction bits differ).
# NOTE(review): on unknown arches only TUNSETIFF is defined (as None); the
# other TUNSET* names stay undefined, which manage_tuntap() guards against
# by checking TUNSETIFF first.
arch = platform.machine()
if arch == 'x86_64':
    TUNSETIFF = 0x400454ca
    TUNSETPERSIST = 0x400454cb
    TUNSETOWNER = 0x400454cc
    TUNSETGROUP = 0x400454ce
elif arch == 'ppc64':
    TUNSETIFF = 0x800454ca
    TUNSETPERSIST = 0x800454cb
    TUNSETOWNER = 0x800454cc
    TUNSETGROUP = 0x800454ce
else:
    TUNSETIFF = None
##
#
# tuntap flags
#
IFT_TUN = 0x0001
IFT_TAP = 0x0002
IFT_NO_PI = 0x1000
IFT_ONE_QUEUE = 0x2000
IFT_VNET_HDR = 0x4000
IFT_TUN_EXCL = 0x8000
IFT_MULTI_QUEUE = 0x0100
IFT_ATTACH_QUEUE = 0x0200
IFT_DETACH_QUEUE = 0x0400
# read-only
IFT_PERSIST = 0x0800
IFT_NOFILTER = 0x1000
##
#
# normal flags
#
IFF_UP = 0x1  # interface is up
IFF_BROADCAST = 0x2  # broadcast address valid
IFF_DEBUG = 0x4  # turn on debugging
IFF_LOOPBACK = 0x8  # is a loopback net
IFF_POINTOPOINT = 0x10  # interface is has p-p link
IFF_NOTRAILERS = 0x20  # avoid use of trailers
IFF_RUNNING = 0x40  # interface RFC2863 OPER_UP
IFF_NOARP = 0x80  # no ARP protocol
IFF_PROMISC = 0x100  # receive all packets
IFF_ALLMULTI = 0x200  # receive all multicast packets
IFF_MASTER = 0x400  # master of a load balancer
IFF_SLAVE = 0x800  # slave of a load balancer
IFF_MULTICAST = 0x1000  # Supports multicast
IFF_PORTSEL = 0x2000  # can set media type
IFF_AUTOMEDIA = 0x4000  # auto media select active
IFF_DYNAMIC = 0x8000  # dialup device with changing addresses
IFF_LOWER_UP = 0x10000  # driver signals L1 up
IFF_DORMANT = 0x20000  # driver signals dormant
IFF_ECHO = 0x40000  # echo sent packets
# Build name<->value lookup dicts from all IFF_* names defined above.
(IFF_NAMES, IFF_VALUES) = map_namespace('IFF', globals())
# Flags a user may set; the rest are volatile (kernel-controlled).
IFF_MASK = IFF_UP |\
    IFF_DEBUG |\
    IFF_NOTRAILERS |\
    IFF_NOARP |\
    IFF_PROMISC |\
    IFF_ALLMULTI
IFF_VOLATILE = IFF_LOOPBACK |\
    IFF_POINTOPOINT |\
    IFF_BROADCAST |\
    IFF_ECHO |\
    IFF_MASTER |\
    IFF_SLAVE |\
    IFF_RUNNING |\
    IFF_LOWER_UP |\
    IFF_DORMANT
# RFC 2863 operational states, indexed by their numeric code.
states = ('UNKNOWN',
          'NOTPRESENT',
          'DOWN',
          'LOWERLAYERDOWN',
          'TESTING',
          'DORMANT',
          'UP')
state_by_name = dict(((i[1], i[0]) for i in enumerate(states)))
state_by_code = dict(enumerate(states))
# Field order of struct rtnl_link_stats / rtnl_link_stats64.
stats_names = ('rx_packets',
               'tx_packets',
               'rx_bytes',
               'tx_bytes',
               'rx_errors',
               'tx_errors',
               'rx_dropped',
               'tx_dropped',
               'multicast',
               'collisions',
               'rx_length_errors',
               'rx_over_errors',
               'rx_crc_errors',
               'rx_frame_errors',
               'rx_fifo_errors',
               'rx_missed_errors',
               'tx_aborted_errors',
               'tx_carrier_errors',
               'tx_fifo_errors',
               'tx_heartbeat_errors',
               'tx_window_errors',
               'rx_compressed',
               'tx_compressed')
class ifinfbase(object):
    '''
    Network interface message
    struct ifinfomsg {
        unsigned char  ifi_family; /* AF_UNSPEC */
        unsigned short ifi_type;   /* Device type */
        int            ifi_index;  /* Interface index */
        unsigned int   ifi_flags;  /* Device flags */
        unsigned int   ifi_change; /* change mask */
    };

    Mixin with the fixed-size header fields and the NLA map shared by
    ifinfmsg (a standalone message) and ifinfveth (a nested NLA).
    '''
    prefix = 'IFLA_'
    fields = (('family', 'B'),
              ('__align', 'B'),
              ('ifi_type', 'H'),
              ('index', 'i'),
              ('flags', 'I'),
              ('change', 'I'))
    nla_map = (('IFLA_UNSPEC', 'none'),
               ('IFLA_ADDRESS', 'l2addr'),
               ('IFLA_BROADCAST', 'l2addr'),
               ('IFLA_IFNAME', 'asciiz'),
               ('IFLA_MTU', 'uint32'),
               ('IFLA_LINK', 'uint32'),
               ('IFLA_QDISC', 'asciiz'),
               ('IFLA_STATS', 'ifstats'),
               ('IFLA_COST', 'hex'),
               ('IFLA_PRIORITY', 'hex'),
               ('IFLA_MASTER', 'uint32'),
               ('IFLA_WIRELESS', 'wireless'),
               ('IFLA_PROTINFO', 'hex'),
               ('IFLA_TXQLEN', 'uint32'),
               ('IFLA_MAP', 'ifmap'),
               ('IFLA_WEIGHT', 'hex'),
               ('IFLA_OPERSTATE', 'state'),
               ('IFLA_LINKMODE', 'uint8'),
               ('IFLA_LINKINFO', 'ifinfo'),
               ('IFLA_NET_NS_PID', 'uint32'),
               ('IFLA_IFALIAS', 'hex'),
               ('IFLA_NUM_VF', 'uint32'),
               ('IFLA_VFINFO_LIST', 'hex'),
               ('IFLA_STATS64', 'ifstats64'),
               ('IFLA_VF_PORTS', 'hex'),
               ('IFLA_PORT_SELF', 'hex'),
               ('IFLA_AF_SPEC', 'af_spec'),
               ('IFLA_GROUP', 'uint32'),
               ('IFLA_NET_NS_FD', 'netns_fd'),
               ('IFLA_EXT_MASK', 'hex'),
               ('IFLA_PROMISCUITY', 'uint32'),
               ('IFLA_NUM_TX_QUEUES', 'uint32'),
               ('IFLA_NUM_RX_QUEUES', 'uint32'),
               ('IFLA_CARRIER', 'uint8'),
               ('IFLA_PHYS_PORT_ID', 'hex'),
               ('IFLA_CARRIER_CHANGES', 'uint32'))

    @staticmethod
    def flags2names(flags, mask=0xffffffff):
        """Translate an IFF_* bitmask into a list of flag names."""
        ret = []
        for flag in IFF_VALUES:
            if (flag & mask & flags) == flag:
                ret.append(IFF_VALUES[flag])
        return ret

    @staticmethod
    def names2flags(flags):
        """Translate flag names into an (IFF bitmask, change mask) pair.

        A leading '!' on a name clears the flag: it goes into the change
        mask but not into the value.
        """
        ret = 0
        mask = 0
        for flag in flags:
            if flag[0] == '!':
                flag = flag[1:]
            else:
                ret |= IFF_NAMES[flag]
            mask |= IFF_NAMES[flag]
        return (ret, mask)

    def encode(self):
        # convert symbolic flags into numeric (flags, change) before encoding
        if isinstance(self['flags'], (set, tuple, list)):
            self['flags'], self['change'] = self.names2flags(self['flags'])
        return super(ifinfbase, self).encode()

    class netns_fd(nla):
        """IFLA_NET_NS_FD: a file descriptor for a network namespace."""
        fields = [('value', 'I')]
        netns_run_dir = '/var/run/netns'
        netns_fd = None

        def encode(self):
            self.close()
            #
            # There are two ways to specify netns
            #
            # 1. provide fd to an open file
            # 2. provide a file name
            #
            # In the first case, the value is passed to the kernel
            # as is. In the second case, the object opens appropriate
            # file from `self.netns_run_dir` and closes it upon
            # `__del__(self)`
            if isinstance(self.value, int):
                self['value'] = self.value
            else:
                self.netns_fd = os.open('%s/%s' % (self.netns_run_dir,
                                                   self.value), os.O_RDONLY)
                self['value'] = self.netns_fd
                nla.encode(self)
                self.register_clean_cb(self.close)

        def close(self):
            # Fix: reset the fd after closing so that close() is idempotent
            # -- it is registered as a clean callback AND called again from
            # encode(), which previously could double-close the descriptor.
            if self.netns_fd is not None:
                os.close(self.netns_fd)
                self.netns_fd = None

    class wireless(iw_event):
        # IFLA_WIRELESS payload is a wireless-extensions event
        pass

    class state(nla):
        """IFLA_OPERSTATE: RFC 2863 state, symbolic on the Python side."""
        fields = (('value', 'B'), )

        def encode(self):
            self['value'] = state_by_name[self.value]
            nla.encode(self)

        def decode(self):
            nla.decode(self)
            self.value = state_by_code[self['value']]

    class ifstats(nla):
        # struct rtnl_link_stats (32-bit counters)
        fields = [(i, 'I') for i in stats_names]

    class ifstats64(nla):
        # struct rtnl_link_stats64 (64-bit counters)
        fields = [(i, 'Q') for i in stats_names]

    class ifmap(nla):
        # struct rtnl_link_ifmap
        fields = (('mem_start', 'Q'),
                  ('mem_end', 'Q'),
                  ('base_addr', 'Q'),
                  ('irq', 'H'),
                  ('dma', 'B'),
                  ('port', 'B'))

    class ifinfo(nla):
        """IFLA_LINKINFO: link kind plus kind-specific data."""
        nla_map = (('IFLA_INFO_UNSPEC', 'none'),
                   ('IFLA_INFO_KIND', 'asciiz'),
                   ('IFLA_INFO_DATA', 'info_data'),
                   ('IFLA_INFO_XSTATS', 'hex'),
                   ('IFLA_INFO_SLAVE_KIND', 'asciiz'),
                   ('IFLA_INFO_SLAVE_DATA', 'info_data'))

        def info_data(self, *argv, **kwarg):
            '''
            The function returns appropriate IFLA_INFO_DATA
            type according to IFLA_INFO_KIND info. Return
            'hex' type for all unknown kind's and when the
            kind is not known.
            '''
            kind = self.get_attr('IFLA_INFO_KIND')
            slave = self.get_attr('IFLA_INFO_SLAVE_KIND')
            data_map = {'vlan': self.vlan_data,
                        'vxlan': self.vxlan_data,
                        'macvlan': self.macvlan_data,
                        'macvtap': self.macvtap_data,
                        'gre': self.gre_data,
                        'bond': self.bond_data,
                        'veth': self.veth_data,
                        'tuntap': self.tuntap_data,
                        'bridge': self.bridge_data}
            slave_map = {'openvswitch': self.ovs_data}
            return data_map.get(kind, slave_map.get(slave, self.hex))

        class tuntap_data(nla):
            '''
            Fake data type
            '''
            prefix = 'IFTUN_'
            nla_map = (('IFTUN_UNSPEC', 'none'),
                       ('IFTUN_MODE', 'asciiz'),
                       ('IFTUN_UID', 'uint32'),
                       ('IFTUN_GID', 'uint32'),
                       ('IFTUN_IFR', 'flags'))

            class flags(nla):
                fields = (('no_pi', 'B'),
                          ('one_queue', 'B'),
                          ('vnet_hdr', 'B'),
                          ('tun_excl', 'B'),
                          ('multi_queue', 'B'),
                          ('persist', 'B'),
                          ('nofilter', 'B'))

        class veth_data(nla):
            nla_map = (('VETH_INFO_UNSPEC', 'none'),
                       ('VETH_INFO_PEER', 'info_peer'))

            def info_peer(self, *argv, **kwarg):
                # forward reference to the module-level ifinfveth class
                return ifinfveth

        class ovs_data(nla):
            prefix = 'IFLA_'
            nla_map = (('IFLA_OVS_UNSPEC', 'none'),
                       ('IFLA_OVS_MASTER_IFNAME', 'asciiz'))

        class vxlan_data(nla):
            prefix = 'IFLA_'
            nla_map = (('IFLA_VXLAN_UNSPEC', 'none'),
                       ('IFLA_VXLAN_ID', 'uint32'),
                       ('IFLA_VXLAN_GROUP', 'ip4addr'),
                       ('IFLA_VXLAN_LINK', 'uint32'),
                       ('IFLA_VXLAN_LOCAL', 'ip4addr'),
                       ('IFLA_VXLAN_TTL', 'uint8'),
                       ('IFLA_VXLAN_TOS', 'uint8'),
                       ('IFLA_VXLAN_LEARNING', 'uint8'),
                       ('IFLA_VXLAN_AGEING', 'uint32'),
                       ('IFLA_VXLAN_LIMIT', 'uint32'),
                       ('IFLA_VXLAN_PORT_RANGE', 'port_range'),
                       ('IFLA_VXLAN_PROXY', 'uint8'),
                       ('IFLA_VXLAN_RSC', 'uint8'),
                       ('IFLA_VXLAN_L2MISS', 'uint8'),
                       ('IFLA_VXLAN_L3MISS', 'uint8'),
                       ('IFLA_VXLAN_PORT', 'uint16'),
                       ('IFLA_VXLAN_GROUP6', 'ip6addr'),
                       ('IFLA_VXLAN_LOCAL6', 'ip6addr'),
                       ('IFLA_VXLAN_UDP_CSUM', 'uint8'),
                       ('IFLA_VXLAN_UDP_ZERO_CSUM6_TX', 'uint8'),
                       ('IFLA_VXLAN_UDP_ZERO_CSUM6_RX', 'uint8'))

            class port_range(nla):
                # big-endian port pair
                fields = (('low', '>H'),
                          ('high', '>H'))

        class gre_data(nla):
            prefix = 'IFLA_'
            nla_map = (('IFLA_GRE_UNSPEC', 'none'),
                       ('IFLA_GRE_LINK', 'uint32'),
                       ('IFLA_GRE_IFLAGS', 'uint16'),
                       ('IFLA_GRE_OFLAGS', 'uint16'),
                       ('IFLA_GRE_IKEY', 'uint32'),
                       ('IFLA_GRE_OKEY', 'uint32'),
                       ('IFLA_GRE_LOCAL', 'ip4addr'),
                       ('IFLA_GRE_REMOTE', 'ip4addr'),
                       ('IFLA_GRE_TTL', 'uint8'),
                       ('IFLA_GRE_TOS', 'uint8'),
                       ('IFLA_GRE_PMTUDISC', 'uint8'),
                       ('IFLA_GRE_ENCAP_LIMIT', 'uint8'),
                       ('IFLA_GRE_FLOWINFO', 'uint32'),
                       ('IFLA_GRE_FLAGS', 'uint32'))

        class macvx_data(nla):
            # shared base for macvlan / macvtap
            prefix = 'IFLA_'

            class mode(nlmsg_atoms.uint32):
                value_map = {0: 'none',
                             1: 'private',
                             2: 'vepa',
                             4: 'bridge',
                             8: 'passthru'}

            class flags(nlmsg_atoms.uint16):
                value_map = {0: 'none',
                             1: 'nopromisc'}

        class macvtap_data(macvx_data):
            nla_map = (('IFLA_MACVTAP_UNSPEC', 'none'),
                       ('IFLA_MACVTAP_MODE', 'mode'),
                       ('IFLA_MACVTAP_FLAGS', 'flags'))

        class macvlan_data(macvx_data):
            nla_map = (('IFLA_MACVLAN_UNSPEC', 'none'),
                       ('IFLA_MACVLAN_MODE', 'mode'),
                       ('IFLA_MACVLAN_FLAGS', 'flags'))

        class vlan_data(nla):
            nla_map = (('IFLA_VLAN_UNSPEC', 'none'),
                       ('IFLA_VLAN_ID', 'uint16'),
                       ('IFLA_VLAN_FLAGS', 'vlan_flags'),
                       ('IFLA_VLAN_EGRESS_QOS', 'hex'),
                       ('IFLA_VLAN_INGRESS_QOS', 'hex'))

            class vlan_flags(nla):
                fields = (('flags', 'I'),
                          ('mask', 'I'))

        class bridge_data(nla):
            prefix = 'IFLA_BRIDGE_'
            nla_map = (('IFLA_BRIDGE_STP_STATE', 'uint32'),
                       ('IFLA_BRIDGE_MAX_AGE', 'uint32'))

        class bond_data(nla):
            prefix = 'IFLA_BOND_'
            nla_map = (('IFLA_BOND_UNSPEC', 'none'),
                       ('IFLA_BOND_MODE', 'uint8'),
                       ('IFLA_BOND_ACTIVE_SLAVE', 'uint32'),
                       ('IFLA_BOND_MIIMON', 'uint32'),
                       ('IFLA_BOND_UPDELAY', 'uint32'),
                       ('IFLA_BOND_DOWNDELAY', 'uint32'),
                       ('IFLA_BOND_USE_CARRIER', 'uint8'),
                       ('IFLA_BOND_ARP_INTERVAL', 'uint32'),
                       ('IFLA_BOND_ARP_IP_TARGET', 'arp_ip_target'),
                       ('IFLA_BOND_ARP_VALIDATE', 'uint32'),
                       ('IFLA_BOND_ARP_ALL_TARGETS', 'uint32'),
                       ('IFLA_BOND_PRIMARY', 'uint32'),
                       ('IFLA_BOND_PRIMARY_RESELECT', 'uint8'),
                       ('IFLA_BOND_FAIL_OVER_MAC', 'uint8'),
                       ('IFLA_BOND_XMIT_HASH_POLICY', 'uint8'),
                       ('IFLA_BOND_RESEND_IGMP', 'uint32'),
                       ('IFLA_BOND_NUM_PEER_NOTIF', 'uint8'),
                       ('IFLA_BOND_ALL_SLAVES_ACTIVE', 'uint8'),
                       ('IFLA_BOND_MIN_LINKS', 'uint32'),
                       ('IFLA_BOND_LP_INTERVAL', 'uint32'),
                       ('IFLA_BOND_PACKETS_PER_SLAVE', 'uint32'),
                       ('IFLA_BOND_AD_LACP_RATE', 'uint8'),
                       ('IFLA_BOND_AD_SELECT', 'uint8'),
                       ('IFLA_BOND_AD_INFO', 'ad_info'))

            class ad_info(nla):
                nla_map = (('IFLA_BOND_AD_INFO_UNSPEC', 'none'),
                           ('IFLA_BOND_AD_INFO_AGGREGATOR', 'uint16'),
                           ('IFLA_BOND_AD_INFO_NUM_PORTS', 'uint16'),
                           ('IFLA_BOND_AD_INFO_ACTOR_KEY', 'uint16'),
                           ('IFLA_BOND_AD_INFO_PARTNER_KEY', 'uint16'),
                           ('IFLA_BOND_AD_INFO_PARTNER_MAC', 'l2addr'))

            class arp_ip_target(nla):
                fields = (('targets', '16I'), )

    class af_spec(nla):
        """IFLA_AF_SPEC: per-address-family configuration blobs."""
        nla_map = (('AF_UNSPEC', 'none'),
                   ('AF_UNIX', 'hex'),
                   ('AF_INET', 'inet'),
                   ('AF_AX25', 'hex'),
                   ('AF_IPX', 'hex'),
                   ('AF_APPLETALK', 'hex'),
                   ('AF_NETROM', 'hex'),
                   ('AF_BRIDGE', 'hex'),
                   ('AF_ATMPVC', 'hex'),
                   ('AF_X25', 'hex'),
                   ('AF_INET6', 'inet6'))

        class inet(nla):
            # ./include/linux/inetdevice.h: struct ipv4_devconf
            field_names = ('sysctl',
                           'forwarding',
                           'mc_forwarding',
                           'proxy_arp',
                           'accept_redirects',
                           'secure_redirects',
                           'send_redirects',
                           'shared_media',
                           'rp_filter',
                           'accept_source_route',
                           'bootp_relay',
                           'log_martians',
                           'tag',
                           'arp_filter',
                           'medium_id',
                           'disable_xfrm',
                           'disable_policy',
                           'force_igmp_version',
                           'arp_announce',
                           'arp_ignore',
                           'promote_secondaries',
                           'arp_accept',
                           'arp_notify',
                           'accept_local',
                           'src_valid_mark',
                           'proxy_arp_pvlan',
                           'route_localnet')
            fields = [(i, 'I') for i in field_names]

        class inet6(nla):
            nla_map = (('IFLA_INET6_UNSPEC', 'none'),
                       ('IFLA_INET6_FLAGS', 'uint32'),
                       ('IFLA_INET6_CONF', 'ipv6_devconf'),
                       ('IFLA_INET6_STATS', 'ipv6_stats'),
                       ('IFLA_INET6_MCAST', 'hex'),
                       ('IFLA_INET6_CACHEINFO', 'ipv6_cache_info'),
                       ('IFLA_INET6_ICMP6STATS', 'icmp6_stats'),
                       ('IFLA_INET6_TOKEN', 'ip6addr'),
                       ('IFLA_INET6_ADDR_GEN_MODE', 'uint8'))

            class ipv6_devconf(nla):
                # ./include/uapi/linux/ipv6.h
                # DEVCONF_
                field_names = ('forwarding',
                               'hop_limit',
                               'mtu',
                               'accept_ra',
                               'accept_redirects',
                               'autoconf',
                               'dad_transmits',
                               'router_solicitations',
                               'router_solicitation_interval',
                               'router_solicitation_delay',
                               'use_tempaddr',
                               'temp_valid_lft',
                               'temp_prefered_lft',
                               'regen_max_retry',
                               'max_desync_factor',
                               'max_addresses',
                               'force_mld_version',
                               'accept_ra_defrtr',
                               'accept_ra_pinfo',
                               'accept_ra_rtr_pref',
                               'router_probe_interval',
                               'accept_ra_rt_info_max_plen',
                               'proxy_ndp',
                               'optimistic_dad',
                               'accept_source_route',
                               'mc_forwarding',
                               'disable_ipv6',
                               'accept_dad',
                               'force_tllao',
                               'ndisc_notify')
                fields = [(i, 'I') for i in field_names]

            class ipv6_cache_info(nla):
                # ./include/uapi/linux/if_link.h: struct ifla_cacheinfo
                fields = (('max_reasm_len', 'I'),
                          ('tstamp', 'I'),
                          ('reachable_time', 'I'),
                          ('retrans_time', 'I'))

            class ipv6_stats(nla):
                field_names = ('inoctets',
                               'fragcreates',
                               'indiscards',
                               'num',
                               'outoctets',
                               'outnoroutes',
                               'inbcastoctets',
                               'outforwdatagrams',
                               'outpkts',
                               'reasmtimeout',
                               'inhdrerrors',
                               'reasmreqds',
                               'fragfails',
                               'outmcastpkts',
                               'inaddrerrors',
                               'inmcastpkts',
                               'reasmfails',
                               'outdiscards',
                               'outbcastoctets',
                               'inmcastoctets',
                               'inpkts',
                               'fragoks',
                               'intoobigerrors',
                               'inunknownprotos',
                               'intruncatedpkts',
                               'outbcastpkts',
                               'reasmoks',
                               'inbcastpkts',
                               'innoroutes',
                               'indelivers',
                               'outmcastoctets')
                fields = [(i, 'I') for i in field_names]

            class icmp6_stats(nla):
                fields = (('num', 'Q'),
                          ('inerrors', 'Q'),
                          ('outmsgs', 'Q'),
                          ('outerrors', 'Q'),
                          ('inmsgs', 'Q'))
class ifinfmsg(ifinfbase, nlmsg):
    """RTM_*LINK message: ifinfbase header/NLAs as a standalone netlink
    message (with its own netlink header)."""
    pass
class ifinfveth(ifinfbase, nla):
    """ifinfbase payload embedded as a nested NLA -- used for the peer
    device inside VETH_INFO_PEER."""
    pass
def compat_fix_attrs(msg):
    """Patch an ifinfmsg in place with attributes old kernels omit.

    On ancient kernels, adds IFLA_MASTER from sysfs, ensures
    IFLA_LINKINFO / IFLA_INFO_KIND exist, and fills IFLA_INFO_DATA for
    OVS slaves (via ovs-vsctl) and for bridges/bonds (via sysfs).
    """
    kind = None
    ifname = msg.get_attr('IFLA_IFNAME')
    # fix master
    if ANCIENT:
        master = compat_get_master(ifname)
        if master is not None:
            msg['attrs'].append(['IFLA_MASTER', master])
    # fix linkinfo & kind
    li = msg.get_attr('IFLA_LINKINFO')
    if li is not None:
        kind = li.get_attr('IFLA_INFO_KIND')
        slave_kind = li.get_attr('IFLA_INFO_SLAVE_KIND')
        if kind is None:
            kind = get_interface_type(ifname)
            li['attrs'].append(['IFLA_INFO_KIND', kind])
    else:
        kind = get_interface_type(ifname)
        slave_kind = None
        msg['attrs'].append(['IFLA_LINKINFO',
                             {'attrs': [['IFLA_INFO_KIND', kind]]}])
        li = msg.get_attr('IFLA_LINKINFO')
    # fetch specific interface data
    if slave_kind == 'openvswitch':
        # fix master for the OVS slave
        proc = subprocess.Popen(['ovs-vsctl', 'iface-to-br', ifname],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        ret = proc.communicate()
        if ret[1]:
            logging.warning("ovs communication error: %s" % ret[1])
        commands = [['IFLA_OVS_MASTER_IFNAME', ret[0].strip()]]
        li['attrs'].append(['IFLA_INFO_DATA', {'attrs': commands}])
    # NOTE(review): this condition appends sysfs-derived IFLA_INFO_DATA only
    # when an IFLA_INFO_DATA is already present; confirm against upstream
    # whether a `not` was intended here.
    if (kind in ('bridge', 'bond')) and \
            [x for x in li['attrs'] if x[0] == 'IFLA_INFO_DATA']:
        if kind == 'bridge':
            t = '/sys/class/net/%s/bridge/%s'
            ifdata = ifinfmsg.ifinfo.bridge_data
        elif kind == 'bond':
            t = '/sys/class/net/%s/bonding/%s'
            ifdata = ifinfmsg.ifinfo.bond_data
        commands = []
        for cmd, _ in ifdata.nla_map:
            try:
                # `with` closes the sysfs file even when parsing fails
                with open(t % (ifname, ifdata.nla2name(cmd)), 'r') as f:
                    value = f.read()
                if cmd == 'IFLA_BOND_MODE':
                    # sysfs reports e.g. "balance-rr 0": keep the number
                    value = value.split()[1]
                commands.append([cmd, int(value)])
            except (IOError, OSError, IndexError, ValueError):
                # narrowed from a bare `except:` -- skip attributes that are
                # missing from sysfs or are not plain integers
                pass
        if commands:
            li['attrs'].append(['IFLA_INFO_DATA', {'attrs': commands}])
def proxy_linkinfo(data, nl):
    """Proxy RTM_NEWLINK data coming from the kernel.

    Splits the raw netlink byte stream into ifinfmsg messages, lets
    compat_fix_attrs() add attributes ancient kernels omit, re-encodes
    everything and forwards the patched stream.
    """
    offset = 0
    inbox = []
    # walk the stream message by message using each netlink header's length
    while offset < len(data):
        msg = ifinfmsg(data[offset:])
        msg.decode()
        inbox.append(msg)
        offset += msg['header']['length']
    data = b''
    for msg in inbox:
        # Sysfs operations can require root permissions,
        # but the script can be run under a normal user
        # Bug-Url: https://github.com/svinota/pyroute2/issues/113
        try:
            compat_fix_attrs(msg)
        except OSError:
            # We can safely ignore here any OSError.
            # In the worst case, we just return what we have got
            # from the kernel via netlink
            pass
        msg.reset()
        msg.encode()
        data += msg.buf.getvalue()
    return {'verdict': 'forward',
            'data': data}
def proxy_setlink(data, nl):
    """Proxy an RTM_SETLINK request.

    Bond/bridge option changes and port add/delete operations are routed
    to sysfs or userspace tools (teamd, ovs-vsctl, brctl) when needed.
    Returns a forward verdict dict, or None when the request was fully
    handled here and must not reach the kernel.
    """
    def get_interface(index):
        # Fetch the current state of the interface `index` from the kernel.
        msg = nl.get_links(index)[0]
        try:
            ovs_master = msg.\
                get_attr('IFLA_LINKINFO').\
                get_attr('IFLA_INFO_DATA').\
                get_attr('IFLA_OVS_MASTER_IFNAME')
        except Exception:
            # any link in the chain may be missing -- not an OVS slave then
            ovs_master = None
        return {'ifname': msg.get_attr('IFLA_IFNAME'),
                'master': msg.get_attr('IFLA_MASTER'),
                'ovs-master': ovs_master,
                'kind': msg.
                get_attr('IFLA_LINKINFO').
                get_attr('IFLA_INFO_KIND')}

    msg = ifinfmsg(data)
    msg.decode()
    forward = True

    kind = None
    infodata = None

    # resolve the interface name, querying the kernel if the request
    # carries only an index
    ifname = msg.get_attr('IFLA_IFNAME') or \
        get_interface(msg['index'])['ifname']
    linkinfo = msg.get_attr('IFLA_LINKINFO')
    if linkinfo:
        kind = linkinfo.get_attr('IFLA_INFO_KIND')
        infodata = linkinfo.get_attr('IFLA_INFO_DATA')

    if kind in ('bond', 'bridge'):
        # apply bond/bridge options through sysfs, accumulating the worst
        # exit code; raise it as an OSError when anything failed
        code = 0
        #
        if kind == 'bond':
            func = compat_set_bond
        elif kind == 'bridge':
            func = compat_set_bridge
        #
        for (cmd, value) in infodata.get('attrs', []):
            cmd = infodata.nla2name(cmd)
            code = func(ifname, cmd, value) or code
        #
        if code:
            err = OSError()
            err.errno = code
            raise err

    # is it a port setup?
    master = msg.get_attr('IFLA_MASTER')
    if master is not None:

        if master == 0:
            # port delete
            # 1. get the current master
            iface = get_interface(msg['index'])
            if iface['ovs-master'] is not None:
                master = {'ifname': iface['ovs-master'],
                          'kind': 'openvswitch'}
            else:
                master = get_interface(iface['master'])
            cmd = 'del'
        else:
            # port add
            # 1. get the master
            master = get_interface(master)
            cmd = 'add'

        # 2. manage the port
        forward_map = {'team': manage_team_port,
                       'bridge': compat_bridge_port,
                       'bond': compat_bond_port,
                       'openvswitch': manage_ovs_port}
        forward = forward_map[master['kind']](cmd, master['ifname'], ifname)

    if forward is not None:
        return {'verdict': 'forward',
                'data': data}
def proxy_dellink(data, nl):
    """Proxy an RTM_DELLINK request.

    OVS bridges are deleted via ovs-vsctl; on ancient kernels bridges and
    bonds are deleted via brctl/sysfs.  Returns a forward verdict dict, or
    None when the request was fully handled here.
    """
    orig_msg = ifinfmsg(data)
    orig_msg.decode()

    # get full interface description
    msg = nl.get_links(orig_msg['index'])[0]
    msg['header']['type'] = orig_msg['header']['type']

    # get the interface kind
    kind = None
    li = msg.get_attr('IFLA_LINKINFO')
    if li is not None:
        kind = li.get_attr('IFLA_INFO_KIND')

    if kind in ('ovs-bridge', 'openvswitch'):
        return manage_ovs(msg)

    if ANCIENT and kind in ('bridge', 'bond'):
        # route the request
        if kind == 'bridge':
            compat_del_bridge(msg.get_attr('IFLA_IFNAME'))
        elif kind == 'bond':
            compat_del_bond(msg.get_attr('IFLA_IFNAME'))
        # while RTM_NEWLINK is not intercepted -- sleep
        time.sleep(_ANCIENT_BARRIER)
        return

    return {'verdict': 'forward',
            'data': data}
def proxy_newlink(data, nl):
    """Proxy an RTM_NEWLINK request.

    tuntap, team and OVS devices are created with ioctl/teamd/ovs-vsctl;
    on ancient kernels bridges and bonds go through brctl/sysfs.  Returns
    a forward verdict dict, or None when handled entirely here.
    """
    msg = ifinfmsg(data)
    msg.decode()
    kind = None

    # get the interface kind
    linkinfo = msg.get_attr('IFLA_LINKINFO')
    if linkinfo is not None:
        kind = [x[1] for x in linkinfo['attrs']
                if x[0] == 'IFLA_INFO_KIND']
        if kind:
            kind = kind[0]

    if kind == 'tuntap':
        return manage_tuntap(msg)
    elif kind == 'team':
        return manage_team(msg)
    elif kind in ('ovs-bridge', 'openvswitch'):
        return manage_ovs(msg)

    if ANCIENT and kind in ('bridge', 'bond'):
        # route the request
        if kind == 'bridge':
            compat_create_bridge(msg.get_attr('IFLA_IFNAME'))
        elif kind == 'bond':
            compat_create_bond(msg.get_attr('IFLA_IFNAME'))
        # while RTM_NEWLINK is not intercepted -- sleep
        time.sleep(_ANCIENT_BARRIER)
        return

    return {'verdict': 'forward',
            'data': data}
def manage_team(msg):
    """Create a team device by spawning a daemonized teamd process.

    Team devices are managed in userspace, so RTNL cannot create them.
    Only RTM_NEWLINK requests are meaningful here.
    """
    assert msg['header']['type'] == RTM_NEWLINK

    # minimal teamd config: active-backup runner, ethtool link watch
    config = {'device': msg.get_attr('IFLA_IFNAME'),
              'runner': {'name': 'activebackup'},
              'link_watch': {'name': 'ethtool'}}

    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(['teamd', '-d', '-n', '-c', json.dumps(config)],
                              stdout=fnull,
                              stderr=fnull)
def manage_team_port(cmd, master, ifname):
    """Add or remove port `ifname` on team device `master` via teamdctl.

    `cmd` is 'del' for removal; anything else means add.
    """
    action = 'remove' if cmd == 'del' else 'add'
    argv = ['teamdctl', master, 'port', action, ifname]
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(argv, stdout=fnull, stderr=fnull)
def manage_ovs_port(cmd, master, ifname):
    """Run `ovs-vsctl {cmd}-port master ifname` ('add'/'del'), silencing
    the tool's output."""
    argv = ['ovs-vsctl', '%s-port' % cmd, master, ifname]
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(argv, stdout=fnull, stderr=fnull)
def manage_ovs(msg):
    """Create or delete an OVS bridge with ovs-vsctl.

    The operation is chosen from the netlink message type:
    RTM_NEWLINK -> add-br, RTM_DELLINK -> del-br.
    """
    linkinfo = msg.get_attr('IFLA_LINKINFO')
    ifname = msg.get_attr('IFLA_IFNAME')
    kind = linkinfo.get_attr('IFLA_INFO_KIND')

    # operations map
    op_map = {RTM_NEWLINK: {'ovs-bridge': 'add-br',
                            'openvswitch': 'add-br'},
              RTM_DELLINK: {'ovs-bridge': 'del-br',
                            'openvswitch': 'del-br'}}
    op = op_map[msg['header']['type']][kind]

    # make a call
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(['ovs-vsctl', op, ifname],
                              stdout=fnull,
                              stderr=fnull)
def manage_tuntap(msg):
    """Create a persistent tun/tap device via ioctl() on /dev/net/tun.

    Builds a struct ifreq from the message's IFTUN_* attributes, then
    applies TUNSETIFF / TUNSETOWNER / TUNSETGROUP / TUNSETPERSIST.
    Raises on unsupported architectures, non-NEWLINK events, bad mode
    or an over-long interface name.
    """
    if TUNSETIFF is None:
        raise Exception('unsupported arch')
    if msg['header']['type'] != RTM_NEWLINK:
        raise Exception('unsupported event')

    ifru_flags = 0
    linkinfo = msg.get_attr('IFLA_LINKINFO')
    infodata = linkinfo.get_attr('IFLA_INFO_DATA')

    flags = infodata.get_attr('IFTUN_IFR', None)
    if infodata.get_attr('IFTUN_MODE') == 'tun':
        ifru_flags |= IFT_TUN
    elif infodata.get_attr('IFTUN_MODE') == 'tap':
        ifru_flags |= IFT_TAP
    else:
        raise ValueError('invalid mode')
    if flags is not None:
        if flags['no_pi']:
            ifru_flags |= IFT_NO_PI
        if flags['one_queue']:
            ifru_flags |= IFT_ONE_QUEUE
        if flags['vnet_hdr']:
            ifru_flags |= IFT_VNET_HDR
        if flags['multi_queue']:
            ifru_flags |= IFT_MULTI_QUEUE

    # build struct ifreq: NUL-padded name followed by the flags word
    ifr = msg.get_attr('IFLA_IFNAME')
    if len(ifr) > IFNAMSIZ:
        raise ValueError('ifname too long')
    ifr += (IFNAMSIZ - len(ifr)) * '\0'
    ifr = ifr.encode('ascii')
    ifr += struct.pack('H', ifru_flags)

    user = infodata.get_attr('IFTUN_UID')
    group = infodata.get_attr('IFTUN_GID')
    #
    fd = os.open(TUNDEV, os.O_RDWR)
    try:
        ioctl(fd, TUNSETIFF, ifr)
        if user is not None:
            ioctl(fd, TUNSETOWNER, user)
        if group is not None:
            ioctl(fd, TUNSETGROUP, group)
        ioctl(fd, TUNSETPERSIST, 1)
    finally:
        # Fix: dropped a redundant `except Exception: raise` clause --
        # try/finally already re-raises and guarantees the fd is closed
        # on both success and error paths.
        os.close(fd)
def compat_create_bridge(name):
    """Create bridge `name` with brctl (fallback for kernels without
    RTNL bridge support), discarding the tool's output."""
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(['brctl', 'addbr', name],
                              stdout=fnull, stderr=fnull)
def compat_create_bond(name):
    """Create bonding master `name` by writing '+<name>' to the sysfs
    bonding_masters control file."""
    with open(_BONDING_MASTERS, 'w') as masters:
        masters.write('+%s' % name)
def compat_set_bond(name, cmd, value):
    """Set bonding option `cmd` to `value` for bond `name` via sysfs.

    Returns the shell exit code (0 on success), which the caller
    aggregates into an OSError.
    """
    # FIXME: join with bridge
    # FIXME: use internal IO, not bash
    # NOTE(review): `value`, `name` and `cmd` are interpolated into a shell
    # command line; they come from netlink attributes, which are assumed
    # trusted here -- confirm before exposing this path to external input.
    t = 'echo %s >/sys/class/net/%s/bonding/%s'
    with open(os.devnull, 'w') as fnull:
        return subprocess.call(['bash', '-c', t % (value, name, cmd)],
                               stdout=fnull,
                               stderr=fnull)
def compat_set_bridge(name, cmd, value):
    """Set bridge option `cmd` to `value` for bridge `name` via sysfs.

    Returns the shell exit code (0 on success).  Same shell-interpolation
    caveat as compat_set_bond().
    """
    t = 'echo %s >/sys/class/net/%s/bridge/%s'
    with open(os.devnull, 'w') as fnull:
        return subprocess.call(['bash', '-c', t % (value, name, cmd)],
                               stdout=fnull,
                               stderr=fnull)
def compat_del_bridge(name):
    """Bring bridge `name` down and delete it with brctl.

    Fix: the `ip link set ... down` call now has its output redirected to
    /dev/null like every other compat_* subprocess call here -- it
    previously leaked onto the proxy's stdout/stderr.
    """
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(['ip', 'link', 'set',
                               'dev', name, 'down'],
                              stdout=fnull,
                              stderr=fnull)
        subprocess.check_call(['brctl', 'delbr', name],
                              stdout=fnull,
                              stderr=fnull)
def compat_del_bond(name):
    """Bring bond `name` down, then remove it through the sysfs
    bonding_masters control file ('-<name>')."""
    subprocess.check_call(['ip', 'link', 'set', 'dev', name, 'down'])
    with open(_BONDING_MASTERS, 'w') as masters:
        masters.write('-%s' % name)
def compat_bridge_port(cmd, master, port):
    """On ancient kernels, add/del `port` on bridge `master` via brctl.

    Returns True (meaning: forward the request to the kernel) when the
    kernel is recent enough to handle bridge ports itself.
    """
    if not ANCIENT:
        return True
    argv = ['brctl', cmd + 'if', master, port]
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(argv, stdout=fnull, stderr=fnull)
def compat_bond_port(cmd, master, port):
    """On ancient kernels, add/del slave `port` on bond `master` via the
    sysfs slaves file ('+<port>' / '-<port>').

    Returns True (forward to the kernel) on modern kernels.
    """
    if not ANCIENT:
        return True
    sign = {'add': '+', 'del': '-'}[cmd]
    with open(_BONDING_SLAVES % (master), 'w') as slaves:
        slaves.write(sign + port)
def compat_get_master(name):
    """Return the master ifindex of interface `name` from sysfs, or None.

    Tries the bridge port path first, then the bonding path.  Fixes two
    issues in the original: the file descriptor leaked when int() raised,
    and the function fell off the end (implicit None) without an explicit
    return when no master exists.
    """
    for template in (_BRIDGE_MASTER, _BONDING_MASTER):
        try:
            with open(template % (name)) as f:
                return int(f.read())
        except IOError:
            # this sysfs path does not exist -- try the next one
            continue
    return None
def get_interface_type(name):
    '''
    Utility function to get interface type.

    Unfortunately, we can not rely on RTNL or even ioctl().
    RHEL doesn't support interface type in RTNL and doesn't
    provide extended (private) interface flags via ioctl().

    Args:
        * name (str): interface name

    Returns:
        * str -- 'bond', 'bridge', or 'unknown' (also returned when the
          sysfs entry for the interface does not exist)

    Any OSError other than "no such file or directory" propagates.
    '''
    # FIXME: support all interface types? Right now it is
    # not needed
    sysfs_path = '/sys/class/net/%s/' % (name)
    try:
        entries = os.listdir(sysfs_path)
    except OSError as e:
        if e.errno != 2:  # anything but ENOENT is unexpected
            raise
        return 'unknown'
    if 'bonding' in entries:
        return 'bond'
    if 'bridge' in entries:
        return 'bridge'
    return 'unknown'
|
|
"""Text feature extraction"""
__author__ = 'thor'
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import CountVectorizer
from pandas import Series
import numpy as np
from urllib.parse import urlsplit
import re
from collections import Counter
from itertools import chain
from operator import add
from ut.ml.sk.feature_extraction.text import TreeTokenizer
from functools import reduce
# Pre-compiled patterns used by the url tokenizers below.
# Fix: use raw strings for patterns containing backslash escapes --
# "\w" in a plain string is an invalid string escape (SyntaxWarning on
# modern Pythons) even though it happens to reach re unchanged.
path_separator_pattern = re.compile(r'/+')  # runs of slashes between path sections
word_inclusion_pattern = re.compile(r"\w+")  # maximal runs of word characters
path_inclusion_pattern = re.compile(r"[^/]+")  # non-slash runs, i.e. path sections
def mk_url_count_vectorizer(preprocessor=lambda url: urlsplit(url.lower()).path,
                            max_df=1.0, min_df=1, max_features=None,
                            binary=True, **kwargs):
    """Build a CountVectorizer over url paths, tokenized hierarchically
    with a TreeTokenizer url tree tokenizer.

    By default urls are lower-cased and reduced to their path component
    before tokenization; extra kwargs go straight to CountVectorizer.
    """
    url_tokenizer = TreeTokenizer.mk_url_tree_tokenizer(max_df=max_df,
                                                        min_df=min_df).tokenize
    return CountVectorizer(preprocessor=preprocessor,
                           tokenizer=url_tokenizer,
                           max_df=max_df,
                           min_df=min_df,
                           max_features=max_features,
                           binary=binary,
                           **kwargs)
class MultiVarTextVectorizer(BaseEstimator):
    """
    MultiVarTextVectorizer is the "multi-dimensional corpus" version of CountVectorizer.

    You use this when instead of items being text documents, items are a collection of documents of different types.

    A MultiVarTextVectorizer is vectorizes as CountVectorizer does, but:
        * where CountVectorizer fits from a corpus of docs, MultiVarTextVectorizer fits from a dataframe with some
        specified text columns (or array of dicts with specified text fields).
        * where CountVectorizer transforms text to a vector, MultiVarTextVectorizer transforms Series, or dicts, into
        vectors, applying a different tokenizer to each different field/variable of the input item,
        returning the union of the resulting tokens as the set to be vectorized.

    Input:
        * var_tokenizers: a dict of {var_name: Tokenizer} pairs that specifies how to tokenize each
        * count_vectorizer_kwargs: dict of extra kwargs forwarded to the underlying CountVectorizer
    """

    def __init__(self, var_tokenizers, count_vectorizer_kwargs):
        # Fix: count_vectorizer_kwargs was accepted but silently dropped;
        # it is now stored and forwarded to CountVectorizer in fit().
        self.count_vectorizer_kwargs = count_vectorizer_kwargs
        self.set_params(var_tokenizers=var_tokenizers)

    def set_params(self, **parameters):
        for parameter, value in list(parameters.items()):
            # Fix: was `self.setattr(parameter, value)`, which raises
            # AttributeError -- objects have no `setattr` method.
            setattr(self, parameter, value)
        self.tokenized_cols_ = list(self.var_tokenizers.keys())
        return self

    def get_params(self, deep=True):
        # both constructor params are reported so sklearn clone() can
        # reconstruct the estimator
        return {
            'var_tokenizers': self.var_tokenizers,
            'count_vectorizer_kwargs': self.count_vectorizer_kwargs
        }

    def fit(self, X, y=None):
        if not set(self.tokenized_cols_).issubset(X.columns):
            raise ValueError("All keys of var_tokenizers must be present as columns of X")
        X = X[self.tokenized_cols_]
        # tokenize every configured field of an item and concatenate the results
        self.tokenizer_ = \
            lambda d: reduce(add, [tokenize(d[var]) for var, tokenize in self.var_tokenizers.items()], list())
        self.count_vectorizer = CountVectorizer(tokenizer=self.tokenizer_,
                                                **(self.count_vectorizer_kwargs or {}))
        return self  # sklearn convention: fit returns self
# class DeepTokenizer()
def mk_deep_tokenizer(text_collection=None,
                      tokenizers=[lambda x: [x], word_inclusion_pattern.findall],
                      token_prefixes='',
                      max_df=1.0,
                      min_df=1,
                      return_tokenizer_info=False):
    """
    Makes a tokenizer that is the result of multiple different tokenizers that might either all be applied to the
    same text, or are used recursively to break up the text into finner pieces.

    In a deep_tokenizer tokenizers[0] tokenizes the text, then the next tokenizer, tokeizers[1] is applied to these
    tokens, and so forth. By default, the union of the tokens are returned. If token_prefixes is specified (usually,
    a different one for each tokenizer), they are prepended to the tokens to distinguish what level of tokenization
    they come from.

    If text_collection is specified, along with max_df and/or min_df, the text_collection will serve to learn a
    vocabulary for each tokenizer by collecting only those tokens whose frequency is at least min_df and no more than
    max_df.

    Input:
        * text_collection: a collection of the text to learn the vocabulary with
        * tokenizers: A list of tokenizers (function taking text and outputing a list of strings
        * token_prefixes: A list of prefixes to add in front of the tokens matched for each tokenizer
        (or a single string that will be used for all tokenizers
        * max_df and min_df: Only relevant when leaning text_collection is specified.
        These are respectively the max and min frequency that tokens should have to be included.
        The frequency can be expressed as a count, or a ratio of the total count.
        Note that in the case of max_df, it will always be relative to the total count of tokens at the current level.
        * return_tokenizer_info: Boolean (default False) indicating whether to return the tokenizer_info_list as well
    """
    # Fix: the original did `raise DeprecationWarning(...)`, which made the
    # function unconditionally unusable (everything below was dead code and
    # the documented examples could never run).  A deprecation should warn,
    # not raise.
    import warnings
    warnings.warn("It's probably a better idea to use "
                  "ut.ml.sk.feature_extraction.text.TreeTokenizer().tokenizer",
                  DeprecationWarning)
    n_tokenizers = len(tokenizers)
    if not isinstance(token_prefixes, str):
        assert n_tokenizers == len(token_prefixes), \
            "Either all tokenizers must have the same prefix, " \
            "or you should specify as many prefixes as there are tokenizers"
    else:
        # a single string prefix applies to every tokenizer level
        token_prefixes = [token_prefixes] * n_tokenizers

    if text_collection is None:
        # no vocabulary learning: keep every token at every level
        def tokenizer(text):
            tokens = []
            to_be_tokenized_further = [text]
            for level_tokenizer, token_prefix in zip(tokenizers, token_prefixes):
                to_be_tokenized_further = list(chain(*map(level_tokenizer, to_be_tokenized_further)))
                if len(to_be_tokenized_further) > 0:  # if any tokens were matched...
                    # ... keep them
                    tokens.extend([token_prefix + x for x in to_be_tokenized_further])
                else:  # if not, we're done
                    break
            return tokens
        return tokenizer
    else:
        n = len(text_collection)
        # normalize max_df to a ratio and min_df to a count
        if max_df > 1:
            max_df /= n
        if min_df < 1:
            min_df *= n

        # make the needed data structures: one info dict per tokenizer level
        tokenizer_info_list = list()
        for i in range(n_tokenizers):
            this_tokenizer_info = dict()
            this_tokenizer_info['tokenize'] = tokenizers[i]
            this_tokenizer_info['token_prefix'] = token_prefixes[i]
            this_tokenizer_info['vocab'] = set([])
            tokenizer_info_list.append(this_tokenizer_info)

        # initialize remaining_element_counts to everything (with counts set to 1)
        remaining_element_counts = Counter(text_collection)
        max_df_thresh = max_df * len(text_collection)
        for i, tokenizer_info in enumerate(tokenizer_info_list):
            # initialize tokens_count
            tokens_count = Counter()
            # accumulate the counts of the tokens created by the current tokenizer
            list(filter(tokens_count.update,
                        chain(*map(lambda kv: [{token: kv[1]} for token in tokenizer_info['tokenize'](kv[0])],
                                   iter(remaining_element_counts.items())))))
            if len(tokens_count) > 0:  # if we got anything...
                # ... remember the vocabulary
                tokens_count = Series(tokens_count)
                # get rid of what's too frequent
                tokens_count = tokens_count[tokens_count <= max_df_thresh]
                # add anything frequent enough in this tokenizer's vocabulary
                min_lidx = tokens_count >= min_df
                tokenizer_info['vocab'] = set(tokens_count[min_lidx].index.values)
                # what's not frequent enough will be treated by the next tokenizer
                remaining_element_counts = tokens_count[~min_lidx].to_dict()
                max_df_thresh = max_df * len(remaining_element_counts)
            else:  # no need to go further
                break

        def tokenizer(text):
            tokens = []
            to_be_tokenized_further = [text]
            for tokenizer_info in tokenizer_info_list:
                if len(to_be_tokenized_further) > 0:
                    to_be_tokenized_further = \
                        set(chain(*map(tokenizer_info['tokenize'], to_be_tokenized_further)))
                    matched_tokens = to_be_tokenized_further.intersection(tokenizer_info['vocab'])
                    if len(matched_tokens) > 0:  # if any tokens were matched...
                        # ... keep them
                        tokens.extend([tokenizer_info['token_prefix'] + x for x in matched_tokens])
                        # and don't tokenize them further
                        to_be_tokenized_further = to_be_tokenized_further.difference(matched_tokens)
                else:
                    break
            return tokens

        if return_tokenizer_info:
            return tokenizer, tokenizer_info_list
        else:
            return tokenizer
# def mk_multiVar_text_vectorizer(var_tokenizers, count_vectorizer_kwargs):
# tokenized_cols_ = var_tokenizers.keys()
def mk_url_tokenizer(urls=None, max_df=1.0, min_df=1, return_tokenizer_info=False):
    """Construct a deep tokenizer specialised for URLs.

    Tokenization proceeds in three stages of decreasing granularity:
    the whole URL, then path sections, then individual words, each
    stage tagging its tokens with a distinguishing prefix.
    """
    stage_tokenizers = [
        lambda url: [url],                # whole-URL stage
        path_inclusion_pattern.findall,   # path-section stage
        word_inclusion_pattern.findall,   # word stage
    ]
    stage_prefixes = ['url=', 'url_section=', 'url_word=']
    return mk_deep_tokenizer(
        urls,
        tokenizers=stage_tokenizers,
        token_prefixes=stage_prefixes,
        max_df=max_df,
        min_df=min_df,
        return_tokenizer_info=return_tokenizer_info,
    )
|
|
from __future__ import absolute_import
import re
import os
import json
import tempfile
import subprocess
import io
from six.moves import map
from six.moves import range
from six.moves import zip
import IPython.nbformat.v4.nbbase as nbbase
import IPython.nbformat.v4 as v4
from IPython.nbformat.v4.rwbase import NotebookReader
from IPython.nbformat.v4.rwbase import NotebookWriter
from IPython.nbformat.v4.nbjson import BytesEncoder
from IPython.nbconvert.preprocessors.execute import ExecutePreprocessor
from IPython.utils import py3compat
from IPython.utils.py3compat import unicode_type
from IPython.nbconvert import MarkdownExporter
from pandocattributes import PandocAttributes
# Code-block languages recognised when parsing fenced markdown blocks
# (consulted by MarkdownReader.process_code_block to pick a cell language).
languages = ['python', 'r', 'ruby', 'bash']
def strip(notebook):
    """Remove outputs from a notebook."""
    code_cells = (c for c in notebook.cells if c.cell_type == 'code')
    for code_cell in code_cells:
        code_cell.outputs = []
        code_cell.execution_count = None
def run(notebook):
    """Execute all code cells in *notebook* and return the executed notebook.

    The preprocessor mutates the notebook it is given, but the original
    version of this function also discarded the (notebook, resources)
    result entirely; returning the notebook makes the function usable
    in an expression while remaining backward compatible.
    """
    executor = ExecutePreprocessor()
    notebook, resources = executor.preprocess(notebook, resources={})
    return notebook
# you can think of notedown as a document converter that uses the
# ipython notebook as its internal format
class MarkdownReader(NotebookReader):
    """Import markdown to IPython Notebook.

    The markdown is split into blocks: code and not-code. These
    blocks are used as the source for cells in the notebook. Code
    blocks become code cells; not-code blocks become markdown cells.

    Only supports two kinds of notebook cell: code and markdown.
    """
    # type identifiers
    code = u'code'
    markdown = u'markdown'
    python = u'python'

    # regular expressions to match a code block, splitting into groups
    # N.B you can't share group names between these patterns.
    # this is necessary for format agnostic code block detection.
    # These two pattern strings are ORed to create a master pattern
    # and the python re module doesn't allow sharing group names
    # in a single regular expression.
    re_flags = re.MULTILINE | re.VERBOSE

    # fenced code
    fenced_regex = r"""
    ^(?P<raw>
    (?P<fence>`{3,}|~{3,})  # a line starting with a fence of 3 or more ` or ~
    (?P<attributes>.*)      # followed by the group 'attributes'
    \n                      # followed by a newline
    (?P<content>            # start a group 'content'
    [\s\S]*?)               # that includes anything
    \n(?P=fence)$\n)        # up until the same fence that we started with
    """

    # indented code
    indented_regex = r"""
    ^\s*$\n                     # a blank line followed by
    (?P<icontent>               # start group 'icontent'
    (?P<indent>^([ ]{4,}|\t))   # an indent of at least four spaces or one tab
    [\s\S]*?)                   # any code
    \n(\Z|                      # followed by the end of the string or
    ^[ \t]*\n)                  # a blank line that is
    (?!((?P=indent)[ \t]*\S+)   # not followed by a line beginning with the
                                # indent
    |\n[ \t]*)                  # or another blank line
    """

    def __init__(self, code_regex=None, precode='', magic=True,
                 match='all', caption_comments=False):
        """
        code_regex - Either 'fenced' or 'indented' or
                     a regular expression that matches code blocks in
                     markdown text. Will be passed to re.compile with
                     re.VERBOSE and re.MULTILINE flags.

                     Default is to look for both indented and fenced
                     code blocks.

        precode - string, lines of code to put at the start of the
                  document, e.g.
                  '%matplotlib inline\nimport numpy as np'

        magic - whether to use code cell language magic, e.g.
                put '%bash' at start of cells that have language
                'bash'

        match - one of 'all', 'fenced' or 'strict' or a specific
                language name

        caption_comments - whether to derive a caption and id from the
                           cell contents
        """
        if not code_regex:
            # default: detect both fenced and indented blocks
            self.code_regex = r"({}|{})".format(self.fenced_regex,
                                                self.indented_regex)
        elif code_regex == 'fenced':
            self.code_regex = self.fenced_regex
        elif code_regex == 'indented':
            self.code_regex = self.indented_regex
        elif code_regex == 'old fenced':
            # NOTE(review): old_fenced_regex is not defined on this class
            # in the visible source — confirm it is provided elsewhere
            # before relying on this mode (otherwise AttributeError).
            self.code_regex = self.old_fenced_regex
        else:
            # caller supplied a custom pattern string
            self.code_regex = code_regex

        self.code_pattern = re.compile(self.code_regex, self.re_flags)

        self.precode = precode
        self.magic = magic
        self.match = match
        self.caption_comments = caption_comments

    def new_code_block(self, **kwargs):
        """Create a new code block."""
        proto = {'content': '',
                 'type': self.code,
                 'IO': '',
                 'attributes': ''}
        proto.update(**kwargs)
        return proto

    def new_text_block(self, **kwargs):
        """Create a new text block."""
        proto = {'content': '', 'type': self.markdown}
        proto.update(**kwargs)
        return proto

    @property
    def pre_code_block(self):
        """Code block to place at the start of the document."""
        return self.new_code_block(content=self.precode.strip('\n'),
                                   IO='input')

    @staticmethod
    def pre_process_code_block(block):
        """Preprocess the content of a code block, modifying the code
        block in place.

        Just dedents indented code.
        """
        if 'indent' in block and block['indent']:
            indent = r'^' + block['indent']
            block['content'] = re.sub(indent, '', block['icontent'],
                                      flags=re.MULTILINE)

    @staticmethod
    def pre_process_text_block(block):
        """Apply pre processing to text blocks.

        Currently just strips whitespace from the beginning
        and end of the block.
        """
        block['content'] = block['content'].strip()

    def process_code_block(self, block):
        """Parse block attributes and decide whether the block stays a
        code cell or is demoted back to a markdown (text) block,
        depending on self.match.
        """
        if block['type'] != self.code:
            return block

        attr = PandocAttributes(block['attributes'], 'markdown')

        try:
            # the (single) recognised language class, if present
            language = set(attr.classes).intersection(languages).pop()
            attr.classes.remove(language)
        except KeyError:
            language = None

        if self.match == 'all':
            pass
        elif self.match == 'fenced':
            if block.get('indent'):
                # indented blocks are treated as plain text in this mode
                return self.new_text_block(content=('\n' +
                                                    block['icontent']
                                                    + '\n'))
        elif self.match == 'strict':
            if 'input' not in attr.classes:
                return self.new_text_block(content=block['raw'])
        elif self.match != language:
            return self.new_text_block(content=block['raw'])

        # set input / output status of cell
        if 'output' in attr.classes and 'json' in attr.classes:
            block['IO'] = 'output'
        elif 'input' in attr.classes:
            block['IO'] = 'input'
            attr.classes.remove('input')
        else:
            block['IO'] = 'input'

        if self.caption_comments:
            # override attributes id and caption with those set in
            # comments, if they exist
            id, caption = get_caption_comments(block['content'])
            if id:
                attr.id = id
            if caption:
                attr['caption'] = caption

        block['language'] = language
        block['attributes'] = attr

        # ensure one identifier for python code
        if language in ('python', 'py', '', None):
            block['language'] = self.python
        # add alternate language execution magic
        elif language != self.python and self.magic:
            block['content'] = CodeMagician.magic(language) + block['content']
            block['language'] = language

        return self.new_code_block(**block)

    def parse_blocks(self, text):
        """Extract the code and non-code blocks from given markdown text.

        Returns a list of block dictionaries.

        Each dictionary has at least the keys 'type' and 'content',
        containing the type of the block ('markdown', 'code') and
        the contents of the block.

        Additional keys may be parsed as well.

        We should switch to an external markdown library if this
        gets much more complicated!
        """
        code_matches = [m for m in self.code_pattern.finditer(text)]

        # determine where the limits of the non code bits are
        # based on the code block edges
        text_starts = [0] + [m.end() for m in code_matches]
        text_stops = [m.start() for m in code_matches] + [len(text)]
        text_limits = list(zip(text_starts, text_stops))

        # list of the groups from the code blocks
        code_blocks = [self.new_code_block(**m.groupdict())
                       for m in code_matches]

        text_blocks = [self.new_text_block(content=text[i:j])
                       for i, j in text_limits]

        # remove indents
        list(map(self.pre_process_code_block, code_blocks))
        # remove blank line at start and end of markdown
        list(map(self.pre_process_text_block, text_blocks))

        # create a list of the right length
        all_blocks = list(range(len(text_blocks) + len(code_blocks)))

        # NOTE: the behaviour here is a bit fragile in that we
        # assume that cells must alternate between code and
        # markdown. This isn't the case, as we could have
        # consecutive code cells, and we get around this by
        # stripping out empty cells. i.e. two consecutive code cells
        # have an empty markdown cell between them which is stripped
        # out because it is empty.
        # cells must alternate in order
        all_blocks[::2] = text_blocks
        all_blocks[1::2] = code_blocks

        # remove possible empty text cells
        all_blocks = [cell for cell in all_blocks if cell['content']]

        return all_blocks

    @staticmethod
    def create_code_cell(block):
        """Create a notebook code cell from a block."""
        code_cell = nbbase.new_code_cell(source=block['content'])

        attr = block['attributes']
        if not attr.is_empty:
            code_cell.metadata \
                = nbbase.NotebookNode({'attributes': attr.to_dict()})
            # execution count comes from the 'n' key-value attribute
            execution_count = attr.kvs.get('n')
            if not execution_count:
                code_cell.execution_count = None
            else:
                code_cell.execution_count = int(execution_count)

        return code_cell

    @staticmethod
    def create_markdown_cell(block):
        """Create a markdown cell from a block."""
        kwargs = {'cell_type': block['type'],
                  'source': block['content']}
        markdown_cell = nbbase.new_markdown_cell(**kwargs)
        return markdown_cell

    @staticmethod
    def create_outputs(block):
        """Create a set of outputs from the contents of a json code
        block.
        """
        return [nbbase.NotebookNode(output)
                for output in json.loads(block['content'])]

    def create_cells(self, blocks):
        """Turn the list of blocks into a list of notebook cells."""
        cells = []
        for block in blocks:
            if (block['type'] == self.code) and (block['IO'] == 'input'):
                code_cell = self.create_code_cell(block)
                cells.append(code_cell)
            elif (block['type'] == self.code and block['IO'] == 'output'
                  and cells[-1].cell_type == 'code'):
                # attach json output blocks to the preceding code cell
                cells[-1].outputs = self.create_outputs(block)
            elif block['type'] == self.markdown:
                markdown_cell = self.create_markdown_cell(block)
                cells.append(markdown_cell)
            else:
                # NOTE(review): the adjacent string literals concatenate
                # to "...as a celltype" — missing space in the message.
                raise NotImplementedError("{} is not supported as a cell"
                                          "type".format(block['type']))

        return cells

    def to_notebook(self, s, **kwargs):
        """Convert the markdown string s to an IPython notebook.

        Returns a notebook.
        """
        all_blocks = self.parse_blocks(s)

        if self.pre_code_block['content']:
            # TODO: if first block is markdown, place after?
            all_blocks.insert(0, self.pre_code_block)

        blocks = [self.process_code_block(block) for block in all_blocks]

        cells = self.create_cells(blocks)

        nb = nbbase.new_notebook(cells=cells)

        return nb

    def reads(self, s, **kwargs):
        """Read string s to notebook. Returns a notebook."""
        return self.to_notebook(s, **kwargs)
class MarkdownWriter(NotebookWriter):
    """Write a notebook into markdown."""

    def __init__(self, template_file, strip_outputs=True,
                 write_outputs=False):
        """template_file - location of jinja template to use for export

        strip_outputs - whether to remove output cells from the output

        write_outputs - whether to write extracted output resources
                        (e.g. images) to disk
        """
        self.exporter = MarkdownExporter()
        # register the filter methods below so the jinja template can
        # call them during export
        self.exporter.register_filter('string2json', self.string2json)
        self.exporter.register_filter('create_input_codeblock',
                                      self.create_input_codeblock)
        self.exporter.register_filter('create_output_codeblock',
                                      self.create_output_codeblock)
        self.exporter.register_filter('create_output_block',
                                      self.create_output_block)
        self.exporter.register_filter('create_attributes',
                                      self.create_attributes)
        self.exporter.register_filter('dequote', self.dequote)
        self.exporter.register_filter('data2uri', self.data2uri)
        self.load_template(template_file)
        self.strip_outputs = strip_outputs
        self.write_outputs = write_outputs
        # directory that write_resources writes extracted outputs into
        self.output_dir = './figures/'

    def load_template(self, template_file):
        """IPython cannot load a template from an absolute path. If
        we want to include templates in our package they will be
        placed on an absolute path. Here we create a temporary file
        on a relative path and read from there after copying the
        template to it.
        """
        tmp = tempfile.NamedTemporaryFile(dir='./', mode='w+')
        tmp_path = os.path.relpath(tmp.name)
        with io.open(template_file, encoding='utf-8') as orig:
            tmp.file.write(orig.read())
        tmp.file.flush()
        self.exporter.template_file = tmp_path
        # NOTE(review): _load_template is a private IPython API —
        # may break across IPython versions.
        self.exporter._load_template()
        tmp.close()

    def write_from_json(self, notebook_json):
        """Convert a v4 notebook JSON string to markdown text."""
        notebook = v4.reads_json(notebook_json)
        return self.write(notebook)

    def writes(self, notebook):
        """Render *notebook* through the exporter and return the
        markdown text. Also stashes the exporter resources on
        self.resources and optionally writes them to disk.
        """
        body, resources = self.exporter.from_notebook_node(notebook)
        self.resources = resources

        if self.write_outputs:
            self.write_resources(resources)

        # remove any blank lines added at start and end by template
        text = re.sub(r'\A\s*\n|^\s*\Z', '', body)

        if not py3compat.PY3 and not isinstance(text, unicode_type):
            # this branch is likely only taken for JSON on Python 2
            text = py3compat.str_to_unicode(text)

        return text

    def write_resources(self, resources):
        """Write the output data in resources returned by exporter
        to files.
        """
        for filename, data in list(resources.get('outputs', {}).items()):
            # Determine where to write the file to
            dest = os.path.join(self.output_dir, filename)
            path = os.path.dirname(dest)
            if path and not os.path.isdir(path):
                os.makedirs(path)
            # Write file
            with open(dest, 'wb') as f:
                f.write(data)

    # --- filter functions to be used in the output template --- #
    def string2json(self, string):
        """Convert json into its string representation.
        Used for writing outputs to markdown."""
        kwargs = {
            'cls': BytesEncoder,  # use the IPython bytes encoder
            'indent': 1,
            'sort_keys': True,
            'separators': (',', ': '),
        }
        return py3compat.str_to_unicode(json.dumps(string, **kwargs), 'utf-8')

    def create_input_codeblock(self, cell):
        """Render a code cell as a fenced markdown code block."""
        codeblock = ('{fence}{attributes}\n'
                     '{cell.source}\n'
                     '{fence}')
        attrs = self.create_attributes(cell, cell_type='input')
        return codeblock.format(attributes=attrs, fence='```', cell=cell)

    def create_output_block(self, cell):
        """Render a cell's outputs, or nothing when stripping outputs."""
        if self.strip_outputs:
            return ''
        else:
            return self.create_output_codeblock(cell)

    def create_output_codeblock(self, cell):
        """Render cell outputs as a fenced json code block."""
        codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
                     '{contents}\n'
                     '{fence}')
        return codeblock.format(fence='```',
                                execution_count=cell.execution_count,
                                contents=self.string2json(cell.outputs))

    def create_attributes(self, cell, cell_type=None):
        """Turn the attribute dict into an attribute string
        for the code block.
        """
        if self.strip_outputs or not hasattr(cell, 'execution_count'):
            return 'python'

        attrs = cell.metadata.get('attributes')
        attr = PandocAttributes(attrs, 'dict')

        # drop classes that are re-added below in a fixed order
        if 'python' in attr.classes:
            attr.classes.remove('python')
        if 'input' in attr.classes:
            attr.classes.remove('input')

        if cell_type == 'figure':
            attr.kvs.pop('caption', '')
            attr.classes.append('figure')
            attr.classes.append('output')
            return attr.to_html()

        elif cell_type == 'input':
            # ensure python goes first so that github highlights it
            attr.classes.insert(0, 'python')
            attr.classes.insert(1, 'input')
            if cell.execution_count:
                attr.kvs['n'] = cell.execution_count
            return attr.to_markdown(format='{classes} {id} {kvs}')

        else:
            return attr.to_markdown()

    @staticmethod
    def dequote(s):
        """Remove excess quotes from a string."""
        if len(s) < 2:
            return s
        elif (s[0] == s[-1]) and s.startswith(('"', "'")):
            return s[1: -1]
        else:
            return s

    @staticmethod
    def data2uri(data, data_type):
        """Convert base64 data into a data uri with the given data_type."""
        MIME_MAP = {
            'image/jpeg': 'jpeg',
            'image/png': 'png',
            'text/plain': 'text',
            'text/html': 'html',
            'text/latex': 'latex',
            'application/javascript': 'html',
            'image/svg+xml': 'svg',
        }
        # invert so we can look up the mime type from the short name
        inverse_map = {v: k for k, v in list(MIME_MAP.items())}
        mime_type = inverse_map[data_type]
        uri = r"data:{mime};base64,{data}"
        return uri.format(mime=mime_type,
                          data=data[mime_type].replace('\n', ''))
class CodeMagician(object):
    """Map language aliases to IPython cell-magic strings."""

    # aliases to different languages: several spellings map to one magic
    many_aliases = {('r', 'R'): '%%R\n'}

    # convert to a many-to-one lookup (found as cls.aliases).
    # A dict comprehension keeps the helper loop variables out of the
    # class namespace (the original for-loop leaked k, v and key as
    # class attributes).
    aliases = {key: v for k, v in many_aliases.items() for key in k}

    @classmethod
    def magic(cls, alias):
        """Return the appropriate IPython code magic when
        called with an alias for a language.

        Known aliases use their registered magic; anything else falls
        back to a generic cell magic of the same name.
        """
        # first parameter renamed self -> cls: this is a classmethod
        if alias in cls.aliases:
            return cls.aliases[alias]
        else:
            return "%%{}\n".format(alias)
class Knitr(object):
    """Thin wrapper around the R 'knitr' package, run via Rscript."""

    class KnitrError(Exception):
        """Raised when Rscript or the knitr package is unavailable."""
        pass

    def __init__(self):
        # raise exception if R or knitr not installed
        cmd = ['Rscript', '-e', 'require(knitr)']
        try:
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        except OSError:
            message = "Rscript was not found on your path."
            raise self.KnitrError(message)
        stdout, stderr = p.communicate()
        stderr = stderr.decode()  # cast to unicode (python 3 compatible)
        # require() reports a missing package as a Warning on stderr
        if 'Warning' in stderr:
            message = ("Could not load knitr (needs manual installation).\n\n"
                       "$ {cmd}\n"
                       "{error}").format(cmd=' '.join(cmd), error=stderr)
            raise self.KnitrError(message)

    def knit(self, input_file, opts_chunk='eval=FALSE'):
        """Use Knitr to convert the r-markdown input_file
        into markdown, returning a file object.

        input_file - file object containing r-markdown
        opts_chunk - string, knitr chunk options
        """
        # use temporary files at both ends to allow stdin / stdout
        tmp_in = tempfile.NamedTemporaryFile(mode='w+')
        tmp_out = tempfile.NamedTemporaryFile(mode='w+')

        tmp_in.file.write(input_file.read())
        tmp_in.file.flush()
        tmp_in.file.seek(0)

        # BUG FIX: opts_chunk was previously passed positionally, which
        # bound it to _knit's opts_knit parameter; pass it by keyword so
        # the chunk options actually reach opts_chunk$set().
        self._knit(tmp_in.name, tmp_out.name, opts_chunk=opts_chunk)
        tmp_out.file.flush()
        return tmp_out

    @staticmethod
    def _knit(fin, fout,
              opts_knit='progress=FALSE, verbose=FALSE',
              opts_chunk='eval=FALSE'):
        """Use knitr to convert r markdown (or anything knitr supports)
        to markdown.

        fin / fout - strings, input / output filenames.
        opts_knit - string, options to pass to knit
        opts_chunk - string, chunk options

        options are passed verbatim to knitr:knit running in Rscript.
        """
        script = ('sink("/dev/null");'
                  'library(knitr);'
                  'opts_knit$set({opts_knit});'
                  'opts_chunk$set({opts_chunk});'
                  'knit("{input}", output="{output}")')
        rcmd = ('Rscript', '-e',
                script.format(input=fin, output=fout,
                              opts_knit=opts_knit, opts_chunk=opts_chunk)
                )
        p = subprocess.Popen(rcmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # consume the pipes so the child can exit; errors are not
        # surfaced here — knitr reports problems in the output document
        stdout, stderr = p.communicate()
def get_caption_comments(content):
    """Retrieve an id and a caption from a code cell.

    If the code cell content begins with a commented
    block that looks like

    ## fig:id
    # multi-line or single-line
    # caption

    then the 'fig:id' and the caption will be returned.
    The '#' are stripped.
    """
    if not content.startswith('## fig:'):
        return None, None

    lines = content.splitlines()

    # BUG FIX: slice the '## ' prefix off rather than str.strip('## '),
    # which strips a *character set* and would also eat literal '#' or
    # spaces from the end of the id.
    fig_id = lines[0][len('## '):].strip()

    caption = []
    for line in lines[1:]:
        if not line.startswith('# '):
            break
        # same reasoning: slice off the '# ' prefix instead of
        # lstrip('# '), which would also remove literal '#' characters
        # from the start of the caption text.
        caption.append(line[len('# '):].strip())

    # add " around the caption. TODO: consider doing this upstream
    # in pandoc-attributes
    caption = '"' + ' '.join(caption) + '"'
    return fig_id, caption
|
|
"""test_resources"""
import unittest
from nose.tools import eq_, assert_raises
from routes import *
class TestResourceGeneration(unittest.TestCase):
    """Tests for URL *generation* from Mapper.resource RESTful routes."""

    def _assert_restful_routes(self, m, options, path_prefix=''):
        # Check that the standard RESTful actions (index/new/show/edit/
        # create/update/delete, plus .xml formatted variants) generate
        # the expected URLs from mapper *m*.
        baseroute = '/' + path_prefix + options['controller']
        eq_(baseroute, m.generate(action='index', **options))
        eq_(baseroute + '.xml', m.generate(action='index', format='xml', **options))
        eq_(baseroute + '/new', m.generate(action='new', **options))
        eq_(baseroute + '/1', m.generate(action='show', id='1', **options))
        eq_(baseroute + '/1/edit', m.generate(action='edit',id='1', **options))
        eq_(baseroute + '/1.xml', m.generate(action='show', id='1',format='xml', **options))
        eq_(baseroute, m.generate(action='create', method='post', **options))
        eq_(baseroute + '/1', m.generate(action='update', method='put', id='1', **options))
        eq_(baseroute + '/1', m.generate(action='delete', method='delete', id='1', **options))

    def test_resources(self):
        # basic resource: named routes and formatted variants
        m = Mapper()
        m.resource('message', 'messages')
        m.resource('massage', 'massages')
        m.resource('passage', 'passages')
        m.create_regs(['messages'])
        options = dict(controller='messages')
        eq_('/messages', url_for('messages'))
        eq_('/messages.xml', url_for('formatted_messages', format='xml'))
        eq_('/messages/1', url_for('message', id=1))
        eq_('/messages/1.xml', url_for('formatted_message', id=1, format='xml'))
        eq_('/messages/new', url_for('new_message'))
        eq_('/messages/1.xml', url_for('formatted_message', id=1, format='xml'))
        eq_('/messages/1/edit', url_for('edit_message', id=1))
        eq_('/messages/1/edit.xml', url_for('formatted_edit_message', id=1, format='xml'))
        self._assert_restful_routes(m, options)

    def test_resources_with_path_prefix(self):
        # path_prefix with a routing variable (:threadid)
        m = Mapper()
        m.resource('message', 'messages', path_prefix='/thread/:threadid')
        m.create_regs(['messages'])
        options = dict(controller='messages', threadid='5')
        self._assert_restful_routes(m, options, path_prefix='thread/5/')

    def test_resources_with_collection_action(self):
        # extra collection-level action ('rss') on top of the defaults
        m = Mapper()
        m.resource('message', 'messages', collection=dict(rss='GET'))
        m.create_regs(['messages'])
        options = dict(controller='messages')
        self._assert_restful_routes(m, options)
        eq_('/messages/rss', m.generate(controller='messages', action='rss'))
        eq_('/messages/rss', url_for('rss_messages'))
        eq_('/messages/rss.xml', m.generate(controller='messages', action='rss', format='xml'))
        eq_('/messages/rss.xml', url_for('formatted_rss_messages', format='xml'))

    def test_resources_with_member_action(self):
        # extra member-level action ('mark'), for both PUT and POST
        for method in ['put', 'post']:
            m = Mapper()
            m.resource('message', 'messages', member=dict(mark=method))
            m.create_regs(['messages'])
            options = dict(controller='messages')
            self._assert_restful_routes(m, options)
            eq_('/messages/1/mark', m.generate(method=method, action='mark', id='1', **options))
            eq_('/messages/1/mark.xml',
                m.generate(method=method, action='mark', id='1', format='xml', **options))

    def test_resources_with_new_action(self):
        # extra action under /new ('preview')
        m = Mapper()
        m.resource('message', 'messages/', new=dict(preview='POST'))
        m.create_regs(['messages'])
        options = dict(controller='messages')
        self._assert_restful_routes(m, options)
        eq_('/messages/new/preview', m.generate(controller='messages', action='preview', method='post'))
        eq_('/messages/new/preview', url_for('preview_new_message'))
        eq_('/messages/new/preview.xml',
            m.generate(controller='messages', action='preview', method='post', format='xml'))
        eq_('/messages/new/preview.xml', url_for('formatted_preview_new_message', format='xml'))

    def test_resources_with_name_prefix(self):
        # named routes get the given prefix; wrong method must not match
        m = Mapper()
        m.resource('message', 'messages', name_prefix='category_', new=dict(preview='POST'))
        m.create_regs(['messages'])
        options = dict(controller='messages')
        self._assert_restful_routes(m, options)
        eq_('/messages/new/preview', url_for('category_preview_new_message'))
        assert_raises(Exception, url_for, 'category_preview_new_message', method='get')

    def test_resources_with_requirements(self):
        # path variables constrained by regex requirements
        m = Mapper()
        m.resource('message', 'messages', path_prefix='/{project_id}/{user_id}/',
                   requirements={'project_id': r'[0-9a-f]{4}', 'user_id': r'\d+'})
        options = dict(controller='messages', project_id='cafe', user_id='123')
        self._assert_restful_routes(m, options, path_prefix='cafe/123/')

        # in addition to the positive tests we need to guarantee we
        # are not matching when the requirements don't match.
        eq_({'action': u'create', 'project_id': u'cafe', 'user_id': u'123', 'controller': u'messages'},
            m.match('/cafe/123/messages'))
        eq_(None, m.match('/extensions/123/messages'))
        eq_(None, m.match('/b0a3/123b/messages'))
        eq_(None, m.match('/foo/bar/messages'))
class TestResourceRecognition(unittest.TestCase):
def test_resource(self):
m = Mapper()
m.resource('person', 'people')
m.create_regs(['people'])
con = request_config()
con.mapper = m
def test_path(path, method):
env = dict(HTTP_HOST='example.com', PATH_INFO=path, REQUEST_METHOD=method)
con.mapper_dict = {}
con.environ = env
test_path('/people', 'GET')
eq_({'controller':'people', 'action':'index'}, con.mapper_dict)
test_path('/people.xml', 'GET')
eq_({'controller':'people', 'action':'index', 'format':'xml'}, con.mapper_dict)
test_path('/people', 'POST')
eq_({'controller':'people', 'action':'create'}, con.mapper_dict)
test_path('/people.html', 'POST')
eq_({'controller':'people', 'action':'create', 'format':'html'}, con.mapper_dict)
test_path('/people/2.xml', 'GET')
eq_({'controller':'people', 'action':'show', 'id':'2', 'format':'xml'}, con.mapper_dict)
test_path('/people/2', 'GET')
eq_({'controller':'people', 'action':'show', 'id':'2'}, con.mapper_dict)
test_path('/people/2/edit', 'GET')
eq_({'controller':'people', 'action':'edit', 'id':'2'}, con.mapper_dict)
test_path('/people/2/edit.xml', 'GET')
eq_({'controller':'people', 'action':'edit', 'id':'2', 'format':'xml'}, con.mapper_dict)
test_path('/people/2', 'DELETE')
eq_({'controller':'people', 'action':'delete', 'id':'2'}, con.mapper_dict)
test_path('/people/2', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'2'}, con.mapper_dict )
test_path('/people/2.json', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'2', 'format':'json'}, con.mapper_dict )
# Test for dots in urls
test_path('/people/2\.13', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'2\.13'}, con.mapper_dict)
test_path('/people/2\.13.xml', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'2\.13', 'format':'xml'}, con.mapper_dict)
test_path('/people/user\.name', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.name'}, con.mapper_dict)
test_path('/people/user\.\.\.name', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.\.\.name'}, con.mapper_dict)
test_path('/people/user\.name\.has\.dots', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.name\.has\.dots'}, con.mapper_dict)
test_path('/people/user\.name\.is\.something.xml', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.name\.is\.something', 'format':'xml'}, con.mapper_dict)
test_path('/people/user\.name\.ends\.with\.dot\..xml', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.name\.ends\.with\.dot\.', 'format':'xml'}, con.mapper_dict)
test_path('/people/user\.name\.ends\.with\.dot\.', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.name\.ends\.with\.dot\.'}, con.mapper_dict)
test_path('/people/\.user\.name\.starts\.with\.dot', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'\.user\.name\.starts\.with\.dot'}, con.mapper_dict)
test_path('/people/user\.name.json', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'user\.name', 'format':'json'}, con.mapper_dict)
def test_resource_with_nomin(self):
m = Mapper()
m.minimization = False
m.resource('person', 'people')
m.create_regs(['people'])
con = request_config()
con.mapper = m
def test_path(path, method):
env = dict(HTTP_HOST='example.com', PATH_INFO=path, REQUEST_METHOD=method)
con.mapper_dict = {}
con.environ = env
test_path('/people', 'GET')
eq_({'controller':'people', 'action':'index'}, con.mapper_dict)
test_path('/people', 'POST')
eq_({'controller':'people', 'action':'create'}, con.mapper_dict)
test_path('/people/2', 'GET')
eq_({'controller':'people', 'action':'show', 'id':'2'}, con.mapper_dict)
test_path('/people/2/edit', 'GET')
eq_({'controller':'people', 'action':'edit', 'id':'2'}, con.mapper_dict)
test_path('/people/2', 'DELETE')
eq_({'controller':'people', 'action':'delete', 'id':'2'}, con.mapper_dict)
test_path('/people/2', 'PUT')
eq_({'controller':'people', 'action':'update', 'id':'2'}, con.mapper_dict)
def test_resource_created_with_parent_resource(self):
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'))
m.create_regs(['locations'])
con = request_config()
con.mapper = m
def test_path(path, method):
env = dict(HTTP_HOST='example.com', PATH_INFO=path,
REQUEST_METHOD=method)
con.mapper_dict = {}
con.environ = env
test_path('/regions/13/locations', 'GET')
eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
'action': 'index'})
url = url_for('region_locations', region_id=13)
eq_(url, '/regions/13/locations')
test_path('/regions/13/locations', 'POST')
eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
'action': 'create'})
# new
url = url_for('region_new_location', region_id=13)
eq_(url, '/regions/13/locations/new')
# create
url = url_for('region_locations', region_id=13)
eq_(url, '/regions/13/locations')
test_path('/regions/13/locations/60', 'GET')
eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
'id': '60', 'action': 'show'})
url = url_for('region_location', region_id=13, id=60)
eq_(url, '/regions/13/locations/60')
test_path('/regions/13/locations/60/edit', 'GET')
eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
'id': '60', 'action': 'edit'})
url = url_for('region_edit_location', region_id=13, id=60)
eq_(url, '/regions/13/locations/60/edit')
test_path('/regions/13/locations/60', 'DELETE')
eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
'id': '60', 'action': 'delete'})
url = url_for('region_location', region_id=13, id=60)
eq_(url, '/regions/13/locations/60')
test_path('/regions/13/locations/60', 'PUT')
eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
'id': '60', 'action': 'update'})
url = url_for('region_location', region_id=13, id=60)
eq_(url, '/regions/13/locations/60')
# Make sure ``path_prefix`` overrides work
# empty ``path_prefix`` (though I'm not sure why someone would do this)
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
path_prefix='')
url = url_for('region_locations')
eq_(url, '/locations')
# different ``path_prefix``
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
path_prefix='areas/:area_id')
url = url_for('region_locations', area_id=51)
eq_(url, '/areas/51/locations')
# Make sure ``name_prefix`` overrides work
# empty ``name_prefix``
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
name_prefix='')
url = url_for('locations', region_id=51)
eq_(url, '/regions/51/locations')
# different ``name_prefix``
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
name_prefix='area_')
url = url_for('area_locations', region_id=51)
eq_(url, '/regions/51/locations')
# Make sure ``path_prefix`` and ``name_prefix`` overrides work together
# empty ``path_prefix``
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
path_prefix='',
name_prefix='place_')
url = url_for('place_locations')
eq_(url, '/locations')
# empty ``name_prefix``
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
path_prefix='areas/:area_id',
name_prefix='')
url = url_for('locations', area_id=51)
eq_(url, '/areas/51/locations')
# different ``path_prefix`` and ``name_prefix``
m = Mapper()
m.resource('location', 'locations',
parent_resource=dict(member_name='region',
collection_name='regions'),
path_prefix='areas/:area_id',
name_prefix='place_')
url = url_for('place_locations', area_id=51)
eq_(url, '/areas/51/locations')
    def test_resource_created_with_parent_resource_nomin(self):
        """Exercise a nested resource with Mapper minimization disabled.

        Mirrors the minimized variant of this test but sets
        ``m.minimization = False``: checks recognition and generation of the
        nested collection/member routes, then the ``path_prefix`` and
        ``name_prefix`` overrides (alone and combined).
        """
        m = Mapper()
        # Disable route minimization so every path component is required.
        m.minimization = False
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'))
        m.create_regs(['locations'])
        # Install the mapper into the thread-local request config so that
        # url_for() and path recognition below use it.
        con = request_config()
        con.mapper = m
        def test_path(path, method):
            # Simulate an incoming request: assigning con.environ triggers
            # route recognition, which fills con.mapper_dict.
            env = dict(HTTP_HOST='example.com', PATH_INFO=path,
                       REQUEST_METHOD=method)
            con.mapper_dict = {}
            con.environ = env
        # index
        test_path('/regions/13/locations', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                              'action': 'index'})
        url = url_for('region_locations', region_id=13)
        eq_(url, '/regions/13/locations')
        test_path('/regions/13/locations', 'POST')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                              'action': 'create'})
        # new
        url = url_for('region_new_location', region_id=13)
        eq_(url, '/regions/13/locations/new')
        # create
        url = url_for('region_locations', region_id=13)
        eq_(url, '/regions/13/locations')
        # show
        test_path('/regions/13/locations/60', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                              'id': '60', 'action': 'show'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')
        # edit
        test_path('/regions/13/locations/60/edit', 'GET')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                              'id': '60', 'action': 'edit'})
        url = url_for('region_edit_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60/edit')
        # delete
        test_path('/regions/13/locations/60', 'DELETE')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                              'id': '60', 'action': 'delete'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')
        # update
        test_path('/regions/13/locations/60', 'PUT')
        eq_(con.mapper_dict, {'region_id': '13', 'controller': 'locations',
                              'id': '60', 'action': 'update'})
        url = url_for('region_location', region_id=13, id=60)
        eq_(url, '/regions/13/locations/60')
        # Each fresh Mapper() below presumably re-registers itself with the
        # global request config so url_for() picks it up -- confirm against
        # routes' Mapper(register=...) default.
        # Make sure ``path_prefix`` overrides work
        # empty ``path_prefix`` (though I'm not sure why someone would do this)
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='/')
        url = url_for('region_locations')
        eq_(url, '/locations')
        # different ``path_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id')
        url = url_for('region_locations', area_id=51)
        eq_(url, '/areas/51/locations')
        # Make sure ``name_prefix`` overrides work
        # empty ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   name_prefix='')
        url = url_for('locations', region_id=51)
        eq_(url, '/regions/51/locations')
        # different ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   name_prefix='area_')
        url = url_for('area_locations', region_id=51)
        eq_(url, '/regions/51/locations')
        # Make sure ``path_prefix`` and ``name_prefix`` overrides work together
        # empty ``path_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='',
                   name_prefix='place_')
        url = url_for('place_locations')
        eq_(url, '/locations')
        # empty ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id',
                   name_prefix='')
        url = url_for('locations', area_id=51)
        eq_(url, '/areas/51/locations')
        # different ``path_prefix`` and ``name_prefix``
        m = Mapper()
        m.resource('location', 'locations',
                   parent_resource=dict(member_name='region',
                                        collection_name='regions'),
                   path_prefix='areas/:area_id',
                   name_prefix='place_')
        url = url_for('place_locations', area_id=51)
        eq_(url, '/areas/51/locations')
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
|
"""multipart/form-data encoding module
This module provides functions that faciliate encoding name/value pairs
as multipart/form-data suitable for a HTTP POST or PUT request.
multipart/form-data is the standard way to upload files over HTTP"""
unicode = str
__all__ = ['gen_boundary', 'encode_and_quote', 'MultipartParam',
'encode_string', 'encode_file_header', 'get_body_size', 'get_headers',
'multipart_encode']
try:
    import uuid
    def gen_boundary():
        """Returns a random string to use as the boundary for a message"""
        return uuid.uuid4().hex
except ImportError:
    # Fallback for interpreters without the uuid module: hash 160 random
    # bits.  BUGFIX: the old code used the long-removed Python 2 ``sha``
    # module and passed it text; hashlib.sha1 is its replacement and
    # requires bytes, so the decimal string is encoded first.
    import random
    import hashlib
    def gen_boundary():
        """Returns a random string to use as the boundary for a message"""
        bits = random.getrandbits(160)
        return hashlib.sha1(str(bits).encode("ascii")).hexdigest()
import urllib, re, os, mimetypes
import urllib.parse

try:
    from email.header import Header
except ImportError:
    # Python 2.4
    from email.Header import Header
def encode_and_quote(data):
    """URL-quote *data* with quote_plus, UTF-8-encoding text first.

    ``None`` is passed through unchanged so optional values survive.
    """
    if data is None:
        return None
    payload = data.encode("utf-8") if isinstance(data, str) else data
    return urllib.parse.quote_plus(payload)
def _strify(s):
"""If s is a unicode string, encode it to UTF-8 and return the results,
otherwise return str(s), or None if s is None"""
if s is None:
return None
if isinstance(s, unicode):
return s.encode("utf-8")
return str(s)
class MultipartParam(object):
    """Represents a single parameter in a multipart/form-data request

    ``name`` is the name of this parameter.

    If ``value`` is set, it must be a string or unicode object to use as the
    data for this parameter.

    If ``filename`` is set, it is what to say that this parameter's filename
    is. Note that this does not have to be the actual filename any local file.

    If ``filetype`` is set, it is used as the Content-Type for this parameter.
    If unset it defaults to "text/plain; charset=utf8"

    If ``filesize`` is set, it specifies the length of the file ``fileobj``

    If ``fileobj`` is set, it must be a file-like object that supports
    .read().

    Both ``value`` and ``fileobj`` must not be set, doing so will
    raise a ValueError assertion.

    If ``fileobj`` is set, and ``filesize`` is not specified, then
    the file's size will be determined first by stat'ing ``fileobj``'s
    file descriptor, and if that fails, by seeking to the end of the file,
    recording the current position as the size, and then by seeking back to the
    beginning of the file.

    ``cb`` is a callable which will be called from iter_encode with (self,
    current, total), representing the current parameter, current amount
    transferred, and the total size.
    """
    def __init__(self, name, value=None, filename=None, filetype=None,
            filesize=None, fileobj=None, cb=None):
        # MIME-encode the field name so non-ASCII names survive in headers.
        self.name = Header(name).encode()
        self.value = _strify(value)
        if filename is None:
            self.filename = None
        else:
            if isinstance(filename, unicode):
                # Encode with XML entities
                self.filename = filename.encode("ascii", "xmlcharrefreplace")
            else:
                self.filename = str(filename)
            # Escape embedded double quotes so the Content-Disposition
            # filename="..." attribute stays well-formed.
            # NOTE(review): str(filename) yields text on Python 3, so this
            # bytes-level replace() only works for the unicode branch above;
            # confirm intended behavior for plain-str filenames.
            self.filename = self.filename.replace(b'"', b'\\"')
        self.filetype = _strify(filetype)
        self.filesize = filesize
        self.fileobj = fileobj
        self.cb = cb

        # A parameter carries either inline data or a file, never both.
        if self.value is not None and self.fileobj is not None:
            raise ValueError("Only one of value or fileobj may be specified")

        if fileobj is not None and filesize is None:
            # Try and determine the file size
            try:
                # Cheapest first: stat the underlying file descriptor.
                self.filesize = os.fstat(fileobj.fileno()).st_size
            except (OSError, AttributeError):
                try:
                    # Fall back to seek-to-end / tell / rewind.
                    fileobj.seek(0, 2)
                    self.filesize = fileobj.tell()
                    fileobj.seek(0)
                except:
                    raise ValueError("Could not determine filesize")

    def __cmp__(self, other):
        # Compare by the full attribute tuple.
        # NOTE(review): __cmp__ and the builtin cmp() exist only on
        # Python 2; on Python 3 this method is never invoked and would
        # raise NameError if called -- confirm whether rich comparison
        # methods (__eq__/__lt__) are needed instead.
        attrs = ['name', 'value', 'filename', 'filetype', 'filesize', 'fileobj']
        myattrs = [getattr(self, a) for a in attrs]
        oattrs = [getattr(other, a) for a in attrs]
        return cmp(myattrs, oattrs)

    def reset(self):
        """Rewind this parameter so it can be encoded again (retries)."""
        if self.fileobj is not None:
            self.fileobj.seek(0)
        elif self.value is None:
            raise ValueError("Don't know how to reset this parameter")

    @classmethod
    def from_file(cls, paramname, filename):
        """Returns a new MultipartParam object constructed from the local
        file at ``filename``.

        ``filesize`` is determined by os.path.getsize(``filename``)

        ``filetype`` is determined by mimetypes.guess_type(``filename``)[0]

        ``filename`` is set to os.path.basename(``filename``)
        """
        return cls(paramname, filename=os.path.basename(filename),
                filetype=mimetypes.guess_type(filename)[0],
                filesize=os.path.getsize(filename),
                fileobj=open(filename, "rb"))

    @classmethod
    def from_params(cls, params):
        """Returns a list of MultipartParam objects from a sequence of
        name, value pairs, MultipartParam instances,
        or from a mapping of names to values

        The values may be strings or file objects, or MultipartParam objects.
        MultipartParam object names must match the given names in the
        name,value pairs or mapping, if applicable."""
        if hasattr(params, 'items'):
            params = params.items()

        retval = []
        for item in params:
            # Already-built params pass straight through.
            if isinstance(item, cls):
                retval.append(item)
                continue
            name, value = item
            if isinstance(value, cls):
                assert value.name == name
                retval.append(value)
                continue
            if hasattr(value, 'read'):
                # Looks like a file object
                filename = getattr(value, 'name', None)
                if filename is not None:
                    filetype = mimetypes.guess_type(filename)[0]
                else:
                    filetype = None

                retval.append(cls(name=name, filename=filename,
                    filetype=filetype, fileobj=value))
            else:
                retval.append(cls(name, value))
        return retval

    def encode_hdr(self, boundary):
        """Returns the header of the encoding of this parameter"""
        boundary = encode_and_quote(boundary)

        headers = ["--%s" % boundary]

        if self.filename:
            disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
                    self.filename.decode("utf-8"))
        else:
            disposition = 'form-data; name="%s"' % self.name

        headers.append("Content-Disposition: %s" % disposition)

        if self.filetype:
            filetype = self.filetype
        else:
            filetype = "text/plain; charset=utf-8"

        # filetype may be bytes (from _strify) or str; decode when possible.
        try:
            headers.append("Content-Type: %s" % filetype.decode("utf-8"))
        except:
            headers.append("Content-Type: %s" % filetype)

        # Two empty entries produce the blank line separating headers
        # from the body once joined with CRLF.
        headers.append("")
        headers.append("")

        return "\r\n".join(headers)

    def encode(self, boundary):
        """Returns the string encoding of this parameter"""
        if self.value is None:
            value = self.fileobj.read()
        else:
            value = self.value

        # Reject data that would be misparsed as a part separator.
        # TypeError (bytes value vs. str pattern) is deliberately ignored.
        try:
            if re.search("^--%s$" % re.escape(boundary), value, re.M):
                raise ValueError("boundary found in encoded string")
        except TypeError:
            pass

        return "%s%s\r\n" % (self.encode_hdr(boundary), value)

    def iter_encode(self, boundary, blocksize=4096):
        """Yields the encoding of this parameter
        If self.fileobj is set, then blocks of ``blocksize`` bytes are read and
        yielded."""
        total = self.get_size(boundary)
        current = 0
        if self.value is not None:
            # Inline value: emit everything in a single block.
            block = self.encode(boundary)
            current += len(block)
            yield block
            if self.cb:
                self.cb(self, current, total)
        else:
            # File-backed value: emit the header, then stream the file.
            block = self.encode_hdr(boundary)
            current += len(block)
            yield block
            if self.cb:
                self.cb(self, current, total)
            # Sliding window over the last bytes read, used to detect the
            # boundary even when it straddles two read() blocks.
            last_block = ""
            encoded_boundary = "--%s" % encode_and_quote(boundary)
            boundary_exp = re.compile("^%s$" % re.escape(encoded_boundary),
                    re.M)
            while True:
                block = self.fileobj.read(blocksize)
                if not block:
                    # End of file: emit the trailing CRLF (the +2 accounted
                    # for in get_size).
                    current += 2
                    yield "\r\n"
                    if self.cb:
                        self.cb(self, current, total)
                    break
                try:
                    last_block += block
                except:
                    # block was bytes while last_block is str; coerce.
                    last_block += str(block)
                if boundary_exp.search(last_block):
                    raise ValueError("boundary found in file data")
                # Keep only enough of the tail to catch a straddling match.
                last_block = last_block[-len(encoded_boundary)-2:]
                current += len(block)
                yield block
                if self.cb:
                    self.cb(self, current, total)

    def get_size(self, boundary):
        """Returns the size in bytes that this param will be when encoded
        with the given boundary."""
        if self.filesize is not None:
            valuesize = self.filesize
        else:
            valuesize = len(self.value)

        # +2 is the trailing CRLF appended after the value.
        return len(self.encode_hdr(boundary)) + 2 + valuesize
def encode_string(boundary, name, value):
    """Encode the pair (``name``, ``value``) as a single multipart/form-data
    field, using ``boundary`` as the MIME boundary separating fields."""
    param = MultipartParam(name, value)
    return param.encode(boundary)
def encode_file_header(boundary, paramname, filesize, filename=None,
        filetype=None):
    """Return the leading multipart/form-data block for a file field.

    ``boundary`` separates fields within one request; ``paramname`` names
    the variable; ``filesize`` is the byte length of the file data that
    will follow.  ``filename`` (informational only) and ``filetype`` (the
    MIME type) are optional.  Send the actual file data after this header.
    """
    param = MultipartParam(paramname, filesize=filesize, filename=filename,
                           filetype=filetype)
    return param.encode_hdr(boundary)
def get_body_size(params, boundary):
    """Return the total byte count of the multipart/form-data encoding of
    ``params`` with the given ``boundary``."""
    total = 0
    for param in MultipartParam.from_params(params):
        total += param.get_size(boundary)
    # Plus the terminating "--boundary--\r\n" line (6 framing bytes).
    return total + len(boundary) + 6
def get_headers(params, boundary):
    """Build the Content-Type and Content-Length headers for the
    multipart/form-data encoding of ``params``."""
    quoted = urllib.parse.quote_plus(boundary)
    return {
        'Content-Type': "multipart/form-data; boundary=%s" % quoted,
        'Content-Length': str(get_body_size(params, quoted)),
    }
class multipart_yielder:
    """Iterator that yields the multipart/form-data encoding of ``params``
    block by block, finishing with the closing ``--boundary--`` line.

    Supports ``reset()`` so the body can be regenerated for a retry.
    """
    def __init__(self, params, boundary, cb):
        # params: list of MultipartParam objects.
        # cb: optional progress callback, called as cb(param, current, total).
        self.params = params
        self.boundary = boundary
        self.cb = cb

        self.i = 0                 # index of the next param to encode
        self.p = None              # param currently being streamed
        self.param_iter = None     # its iter_encode() generator
        self.current = 0           # bytes yielded so far
        self.total = get_body_size(params, boundary)

    def __iter__(self):
        return self

    def __next__(self):
        """generator function to yield multipart/form-data representation
        of parameters"""
        if self.param_iter is not None:
            try:
                block = self.param_iter.__next__()
                self.current += len(block)
                if self.cb:
                    self.cb(self.p, self.current, self.total)
                return block
            except StopIteration:
                # Current param is exhausted; fall through to the next one.
                self.p = None
                self.param_iter = None

        # self.i becomes None once the terminating boundary has been yielded.
        if self.i is None:
            raise StopIteration
        elif self.i >= len(self.params):
            self.param_iter = None
            self.p = None
            self.i = None

            # Final terminating boundary line.
            block = "--%s--\r\n" % self.boundary
            self.current += len(block)
            if self.cb:
                self.cb(self.p, self.current, self.total)
            return block

        self.p = self.params[self.i]
        self.param_iter = self.p.iter_encode(self.boundary)
        self.i += 1
        # Recurse once to pull the first block from the new param iterator.
        return self.__next__()

    def reset(self):
        """Rewind so the whole body can be produced again."""
        self.i = 0
        self.current = 0
        for param in self.params:
            param.reset()
def multipart_encode(params, boundary=None, cb=None):
    """Encode ``params`` as a multipart/form-data request body.

    ``params`` may be a sequence of (name, value) pairs or MultipartParam
    objects, or a mapping of names to values.  Values are either strings or
    file-like objects supporting ``.read()`` plus either ``.fileno()`` or
    both ``.seek()`` and ``.tell()``.

    ``boundary``, when given, is used as the MIME boundary; otherwise a
    random one is generated.  Either way, a ValueError is raised if the
    boundary string appears in the parameter values.

    ``cb``, when given, is called as ``cb(param, current, total)`` as blocks
    of data are encoded.

    Returns a tuple ``(datagen, headers)``: an iterable yielding the encoded
    body blocks, and a dict with the matching Content-Type and
    Content-Length headers.

    Examples:

    >>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
    >>> s = "".join(datagen)
    >>> assert "value2" in s and "value1" in s
    """
    boundary = gen_boundary() if boundary is None \
        else urllib.parse.quote_plus(boundary)
    headers = get_headers(params, boundary)
    encoded_params = MultipartParam.from_params(params)
    return multipart_yielder(encoded_params, boundary, cb), headers
|
|
"""Implements the Astropy TestRunner which is a thin wrapper around py.test."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import inspect
import os
import copy
import shlex
import sys
import tempfile
import warnings
from collections import OrderedDict
from ..config.paths import set_temp_config, set_temp_cache
from ..extern import six
from ..utils import wraps, find_current_module
from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
class keyword(object):
"""
A decorator to mark a method as keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
class TestRunnerBase(object):
    """
    The base class for the TestRunner.

    A test runner can be constructed by creating a subclass of this class and
    defining 'keyword' methods. These are methods that have the
    `~astropy.tests.runner.keyword` decorator, these methods are used to
    construct allowed keyword arguments to the
    `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
    customization of individual keyword arguments (and associated logic)
    without having to re-implement the whole
    `~astropy.tests.runner.TestRunnerBase.run_tests` method.

    Examples
    --------

    A simple keyword method::

        class MyRunner(TestRunnerBase):

            @keyword('default_value'):

            def spam(self, spam, kwargs):
                \"\"\"
                spam : `str`
                    The parameter description for the run_tests docstring.
                \"\"\"
                # Return value must be a list with a CLI parameter for pytest.
                return ['--spam={}'.format(spam)]
    """

    def __init__(self, base_path):
        # All tests are discovered relative to this absolute base path.
        self.base_path = os.path.abspath(base_path)

    def __new__(cls, *args, **kwargs):
        # Before constructing the class parse all the methods that have been
        # decorated with ``keyword``.

        # The objective of this method is to construct a default set of keyword
        # arguments to the ``run_tests`` method. It does this by inspecting the
        # methods of the class for functions with the name ``keyword`` which is
        # the name of the decorator wrapping function. Once it has created this
        # dictionary, it also formats the docstring of ``run_tests`` to be
        # comprised of the docstrings for the ``keyword`` methods.

        # To add a keyword argument to the ``run_tests`` method, define a new
        # method decorated with ``@keyword`` and with the ``self, name, kwargs``
        # signature.

        # Get all 'function' members as the wrapped methods are functions
        if six.PY2:
            functions = inspect.getmembers(cls, predicate=inspect.ismethod)
        else:
            functions = inspect.getmembers(cls, predicate=inspect.isfunction)

        # Filter out anything that's not got the name 'keyword'
        keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
        # Sort all keywords based on the priority flag.
        sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)

        cls.keywords = OrderedDict()
        doc_keywords = ""
        for name, func in sorted_keywords:
            # Here we test if the function has been overloaded to return
            # NotImplemented which is the way to disable arguments on
            # subclasses. If it has been disabled we need to remove it from the
            # default keywords dict. We do it in the try except block because
            # we do not have access to an instance of the class, so this is
            # going to error unless the method is just doing `return
            # NotImplemented`.
            try:
                # Second argument is False, as it is normally a bool.
                # The other two are placeholders for objects.
                if func(None, False, None) is NotImplemented:
                    continue
            except Exception:
                pass

            # Construct the default kwargs dict and docstring
            cls.keywords[name] = func._default_value
            if func.__doc__:
                doc_keywords += ' '*8
                doc_keywords += func.__doc__.strip()
                doc_keywords += '\n\n'

        # Fill the {keywords} placeholder in the run_tests docstring with
        # the collected per-keyword documentation.
        if six.PY2:
            cls.run_tests.__func__.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
        else:
            cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)

        return super(TestRunnerBase, cls).__new__(cls)

    def _generate_args(self, **kwargs):
        """Translate run_tests kwargs into a pytest CLI argument list."""
        # Update default values with passed kwargs
        # but don't modify the defaults
        keywords = copy.deepcopy(self.keywords)
        keywords.update(kwargs)
        # Iterate through the keywords (in order of priority)
        args = []
        # NOTE: the loop variable shadows the module-level ``keyword``
        # decorator class; it is only used as a dict key here.
        for keyword in keywords.keys():
            func = getattr(self, keyword)
            result = func(keywords[keyword], keywords)

            # Allow disabling of options in a subclass
            if result is NotImplemented:
                raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))

            # keyword methods must return a list
            if not isinstance(result, list):
                raise TypeError("{} keyword method must return a list".format(keyword))

            args += result

        if six.PY2:
            args = [x.encode('utf-8') for x in args]

        return args

    # Template for the run_tests docstring; {keywords} is filled in __new__.
    RUN_TESTS_DOCSTRING = \
        """
        Run the tests for the package.

        Parameters
        ----------
        {keywords}

        See Also
        --------
        pytest.main : This method builds arguments for and then calls this function.
        """

    def run_tests(self, **kwargs):
        # The docstring for this method is defined as a class variable.
        # This allows it to be built for each subclass in __new__.

        # Don't import pytest until it's actually needed to run the tests
        import pytest

        # Raise error for undefined kwargs
        allowed_kwargs = set(self.keywords.keys())
        passed_kwargs = set(kwargs.keys())
        if not passed_kwargs.issubset(allowed_kwargs):
            wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
            raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))

        args = self._generate_args(**kwargs)

        # override the config locations to not make a new directory nor use
        # existing cache or config
        astropy_config = tempfile.mkdtemp('astropy_config')
        astropy_cache = tempfile.mkdtemp('astropy_cache')

        # Have to use nested with statements for cross-Python support
        # Note, using these context managers here is superfluous if the
        # config_dir or cache_dir options to py.test are in use, but it's
        # also harmless to nest the contexts
        with set_temp_config(astropy_config, delete=True):
            with set_temp_cache(astropy_cache, delete=True):
                return pytest.main(args=args, plugins=self.keywords['plugins'])

    @classmethod
    def make_test_runner_in(cls, path):
        """
        Constructs a `TestRunner` to run in the given path, and returns a
        ``test()`` function which takes the same arguments as
        `TestRunner.run_tests`.

        The returned ``test()`` function will be defined in the module this
        was called from.  This is used to implement the ``astropy.test()``
        function (or the equivalent for affiliated packages).
        """

        runner = cls(path)

        @wraps(runner.run_tests, ('__doc__',), exclude_args=('self',))
        def test(**kwargs):
            return runner.run_tests(**kwargs)

        # Attribute the generated test() function to the caller's module so
        # it shows up as e.g. ``mypackage.test``.
        module = find_current_module(2)
        if module is not None:
            test.__module__ = module.__name__

        # A somewhat unusual hack, but delete the attached __wrapped__
        # attribute--although this is normally used to tell if the function
        # was wrapped with wraps, on some version of Python this is also
        # used to determine the signature to display in help() which is
        # not useful in this case.  We don't really care in this case if the
        # function was wrapped either
        if hasattr(test, '__wrapped__'):
            del test.__wrapped__

        return test
class TestRunner(TestRunnerBase):
    """
    A test runner for astropy tests
    """

    # No docstring on purpose: keyword methods without a docstring are left
    # out of the generated ``run_tests`` documentation, keeping this
    # unusable option hidden.
    # Increase priority so this warning is displayed first.
    @keyword(priority=1000)
    def coverage(self, coverage, kwargs):
        # Coverage cannot be enabled after the code under test has already
        # been imported, so warn and ignore the option.
        if coverage:
            warnings.warn(
                "The coverage option is ignored on run_tests, since it "
                "can not be made to work in that context. Use "
                "'python setup.py test --coverage' instead.",
                AstropyWarning)

        return []

    # test_path depends on self.package_path so make sure this runs before
    # test_path.
    @keyword(priority=1)
    def package(self, package, kwargs):
        """
        package : str, optional
            The name of a specific package to test, e.g. 'io.fits' or 'utils'.
            If nothing is specified all default Astropy tests are run.
        """
        if package is None:
            self.package_path = self.base_path
        else:
            self.package_path = os.path.join(self.base_path,
                                             package.replace('.', os.path.sep))

            if not os.path.isdir(self.package_path):
                raise ValueError('Package not found: {0}'.format(package))

        # Only pass the package path to pytest when no explicit test_path
        # was given; otherwise test_path decides what to run.
        if not kwargs['test_path']:
            return [self.package_path]

        return []

    @keyword()
    def test_path(self, test_path, kwargs):
        """
        test_path : str, optional
            Specify location to test by path. May be a single file or
            directory. Must be specified absolutely or relative to the
            calling directory.
        """
        all_args = []
        # Ensure that the package kwarg has been run.
        self.package(kwargs['package'], kwargs)
        if test_path:
            ext = os.path.splitext(test_path)[1]

            if ext in ('.rst', ''):
                if kwargs['docs_path'] is None:
                    # This shouldn't happen from "python setup.py test"
                    raise ValueError(
                        "Can not test .rst files without a docs_path "
                        "specified.")

                # Resolve the .rst path relative to the parent of docs_path
                # and make sure it actually lives inside the docs tree.
                abs_docs_path = os.path.abspath(kwargs['docs_path'])
                abs_test_path = os.path.abspath(
                    os.path.join(abs_docs_path, os.pardir, test_path))

                common = os.path.commonprefix((abs_docs_path, abs_test_path))

                if os.path.exists(abs_test_path) and common == abs_docs_path:
                    # Since we aren't testing any Python files within
                    # the astropy tree, we need to forcibly load the
                    # astropy py.test plugins, and then turn on the
                    # doctest_rst plugin.
                    all_args.extend(['-p', 'astropy.tests.pytest_plugins',
                                     '--doctest-rst'])
                    test_path = abs_test_path

            if not (os.path.isdir(test_path) or ext in ('.py', '.rst')):
                raise ValueError("Test path must be a directory or a path to "
                                 "a .py or .rst file")

            return all_args + [test_path]

        return []

    @keyword()
    def args(self, args, kwargs):
        """
        args : str, optional
            Additional arguments to be passed to ``pytest.main`` in the ``args``
            keyword argument.
        """
        if args:
            # On Windows, posix=False keeps backslashes in paths intact.
            return shlex.split(args, posix=not sys.platform.startswith('win'))

        return []

    @keyword()
    def plugins(self, plugins, kwargs):
        """
        plugins : list, optional
            Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
            argument.
        """
        # Plugin objects are passed to pytest.main directly in run_tests,
        # not on the command line.
        return []

    @keyword()
    def verbose(self, verbose, kwargs):
        """
        verbose : bool, optional
            Convenience option to turn on verbose output from py.test. Passing
            True is the same as specifying ``-v`` in ``args``.
        """
        if verbose:
            return ['-v']

        return []

    @keyword()
    def pastebin(self, pastebin, kwargs):
        """
        pastebin : ('failed', 'all', None), optional
            Convenience option for turning on py.test pastebin output. Set to
            'failed' to upload info for failed tests, or 'all' to upload info
            for all tests.
        """
        if pastebin is not None:
            if pastebin in ['failed', 'all']:
                return ['--pastebin={0}'.format(pastebin)]
            else:
                raise ValueError("pastebin should be 'failed' or 'all'")

        return []

    @keyword(default_value='none')
    def remote_data(self, remote_data, kwargs):
        """
        remote_data : {'none', 'astropy', 'any'}, optional
            Controls whether to run tests marked with @remote_data. This can be
            set to run no tests with remote data (``none``), only ones that use
            data from http://data.astropy.org (``astropy``), or all tests that
            use remote data (``any``). The default is ``none``.
        """
        # Accept the legacy boolean form for backward compatibility.
        if remote_data is True:
            remote_data = 'any'
        elif remote_data is False:
            remote_data = 'none'
        elif remote_data not in ('none', 'astropy', 'any'):
            warnings.warn("The remote_data option should be one of "
                          "none/astropy/any (found {0}). For backward-compatibility, "
                          "assuming 'any', but you should change the option to be "
                          "one of the supported ones to avoid issues in "
                          "future.".format(remote_data),
                          AstropyDeprecationWarning)
            remote_data = 'any'

        return ['--remote-data={0}'.format(remote_data)]

    @keyword()
    def pep8(self, pep8, kwargs):
        """
        pep8 : bool, optional
            Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
            tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
        """
        if pep8:
            try:
                import pytest_pep8  # pylint: disable=W0611
            except ImportError:
                raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
                                  'http://pypi.python.org/pypi/pytest-pep8')
            else:
                return ['--pep8', '-k', 'pep8']

        return []

    @keyword()
    def pdb(self, pdb, kwargs):
        """
        pdb : bool, optional
            Turn on PDB post-mortem analysis for failing tests. Same as
            specifying ``--pdb`` in ``args``.
        """
        if pdb:
            return ['--pdb']
        return []

    @keyword()
    def open_files(self, open_files, kwargs):
        """
        open_files : bool, optional
            Fail when any tests leave files open.  Off by default, because
            this adds extra run time to the test suite.  Requires the
            ``psutil`` package.
        """
        if open_files:
            if kwargs['parallel'] != 0:
                raise SystemError(
                    "open file detection may not be used in conjunction with "
                    "parallel testing.")

            try:
                import psutil  # pylint: disable=W0611
            except ImportError:
                raise SystemError(
                    "open file detection requested, but psutil package "
                    "is not installed.")

            # BUGFIX: this message previously appeared after the return
            # statement and was therefore never printed.
            print("Checking for unclosed files")

            return ['--open-files']

        return []

    @keyword(0)
    def parallel(self, parallel, kwargs):
        """
        parallel : int, optional
            When provided, run the tests in parallel on the specified
            number of CPUs.  If parallel is negative, it will use the all
            the cores on the machine.  Requires the ``pytest-xdist`` plugin.
        """
        if parallel != 0:
            return ['-n', six.text_type(parallel)]

        return []

    @keyword()
    def docs_path(self, docs_path, kwargs):
        """
        docs_path : str, optional
            The path to the documentation .rst files.
        """
        if docs_path is not None and not kwargs['skip_docs']:
            if kwargs['package'] is not None:
                # Narrow doc testing to the docs of the selected package.
                docs_path = os.path.join(
                    docs_path, kwargs['package'].replace('.', os.path.sep))
            if not os.path.exists(docs_path):
                warnings.warn(
                    "Can not test .rst docs, since docs path "
                    "({0}) does not exist.".format(docs_path))
                docs_path = None
        if docs_path and not kwargs['skip_docs'] and not kwargs['test_path']:
            return [docs_path, '--doctest-rst']
        return []

    @keyword()
    def skip_docs(self, skip_docs, kwargs):
        """
        skip_docs : `bool`, optional
            When `True`, skips running the doctests in the .rst files.
        """
        # Skip docs is a bool used by docs_path only.
        return []

    @keyword()
    def repeat(self, repeat, kwargs):
        """
        repeat : `int`, optional
            If set, specifies how many times each test should be run. This is
            useful for diagnosing sporadic failures.
        """
        if repeat:
            return ['--repeat={0}'.format(repeat)]

        return []

    # Override run_tests for astropy-specific fixes
    def run_tests(self, **kwargs):
        # The docstring is built in TestRunnerBase.__new__ from the keyword
        # methods above.

        # This prevents cyclical import problems that make it
        # impossible to test packages that define Table types on their
        # own.
        from ..table import Table  # pylint: disable=W0611

        return super(TestRunner, self).run_tests(**kwargs)
|
|
#!/usr/bin/env python
'''
test.test_wind_flags
Tests for Flag Setting and Checking for In-situ temperature and salinity
'''
from ioos_qartod.flags import QARTODEval
from ioos_qartod.flags.temp_salinity import *
import numpy as np
import unittest
class TestWindFlags(unittest.TestCase):
    def setUp(self):
        # Fresh evaluator per test so flag state never leaks between tests.
        self.flags = QARTODEval()
def test_timing_gap(self):
'''
Tests the TimingGap Test Flags
'''
self.flags.mark_test(TimingGap.passes())
assert self.flags.check(TimingGap) == TimingGap.pass_flag
assert self.flags.check_str(TimingGap) == 'pass_flag'
self.flags.mark_test(TimingGap.fail())
assert self.flags.check(TimingGap) == TimingGap.fail_flag
assert self.flags.check_str(TimingGap) == 'fail_flag'
self.flags.mark_test(TimingGap.not_eval())
assert self.flags.check(TimingGap) == TimingGap.not_eval_flag
assert self.flags.check_str(TimingGap) == 'not_eval_flag'
def test_syntax_check(self):
'''
Tests the SyntaxCheck Test Flags
'''
self.flags.mark_test(SyntaxCheck.passes())
assert self.flags.check(SyntaxCheck) == SyntaxCheck.pass_flag
assert self.flags.check_str(SyntaxCheck) == 'pass_flag'
self.flags.mark_test(SyntaxCheck.fail())
assert self.flags.check(SyntaxCheck) == SyntaxCheck.fail_flag
assert self.flags.check_str(SyntaxCheck) == 'fail_flag'
self.flags.mark_test(SyntaxCheck.not_eval())
assert self.flags.check(SyntaxCheck) == SyntaxCheck.not_eval_flag
assert self.flags.check_str(SyntaxCheck) == 'not_eval_flag'
def test_location_check(self):
'''
Tests the LocationTest Test Flags
'''
self.flags.mark_test(LocationTest.passes())
assert self.flags.check(LocationTest) == LocationTest.pass_flag
assert self.flags.check_str(LocationTest) == 'pass_flag'
self.flags.mark_test(LocationTest.fail())
assert self.flags.check(LocationTest) == LocationTest.fail_flag
assert self.flags.check_str(LocationTest) == 'fail_flag'
self.flags.mark_test(LocationTest.not_eval())
assert self.flags.check(LocationTest) == LocationTest.not_eval_flag
assert self.flags.check_str(LocationTest) == 'not_eval_flag'
self.flags.mark_test(LocationTest.suspect())
assert self.flags.check(LocationTest) == LocationTest.suspect_flag
assert self.flags.check_str(LocationTest) == 'suspect_flag'
def test_gross_range(self):
'''
Tests the GrossRangeTest Test Flags
'''
self.flags.mark_test(GrossRangeTest.passes())
assert self.flags.check(GrossRangeTest) == GrossRangeTest.pass_flag
assert self.flags.check_str(GrossRangeTest) == 'pass_flag'
self.flags.mark_test(GrossRangeTest.fail())
assert self.flags.check(GrossRangeTest) == GrossRangeTest.fail_flag
assert self.flags.check_str(GrossRangeTest) == 'fail_flag'
self.flags.mark_test(GrossRangeTest.not_eval())
assert self.flags.check(GrossRangeTest) == GrossRangeTest.not_eval_flag
assert self.flags.check_str(GrossRangeTest) == 'not_eval_flag'
self.flags.mark_test(GrossRangeTest.suspect())
assert self.flags.check(GrossRangeTest) == GrossRangeTest.suspect_flag
assert self.flags.check_str(GrossRangeTest) == 'suspect_flag'
def test_climatology(self):
'''
Tests the ClimatologyTest Test Flags
'''
self.flags.mark_test(ClimatologyTest.passes())
assert self.flags.check(ClimatologyTest) == ClimatologyTest.pass_flag
assert self.flags.check_str(ClimatologyTest) == 'pass_flag'
self.flags.mark_test(ClimatologyTest.fail())
assert self.flags.check(ClimatologyTest) == ClimatologyTest.fail_flag
assert self.flags.check_str(ClimatologyTest) == 'fail_flag'
self.flags.mark_test(ClimatologyTest.not_eval())
assert self.flags.check(ClimatologyTest) == ClimatologyTest.not_eval_flag
assert self.flags.check_str(ClimatologyTest) == 'not_eval_flag'
self.flags.mark_test(ClimatologyTest.suspect())
assert self.flags.check(ClimatologyTest) == ClimatologyTest.suspect_flag
assert self.flags.check_str(ClimatologyTest) == 'suspect_flag'
def test_spike(self):
'''
Tests the SpikeTest Test Flags
'''
self.flags.mark_test(SpikeTest.passes())
assert self.flags.check(SpikeTest) == SpikeTest.pass_flag
assert self.flags.check_str(SpikeTest) == 'pass_flag'
self.flags.mark_test(SpikeTest.fail())
assert self.flags.check(SpikeTest) == SpikeTest.fail_flag
assert self.flags.check_str(SpikeTest) == 'fail_flag'
self.flags.mark_test(SpikeTest.not_eval())
assert self.flags.check(SpikeTest) == SpikeTest.not_eval_flag
assert self.flags.check_str(SpikeTest) == 'not_eval_flag'
self.flags.mark_test(SpikeTest.suspect())
assert self.flags.check(SpikeTest) == SpikeTest.suspect_flag
assert self.flags.check_str(SpikeTest) == 'suspect_flag'
def test_rate_of_change(self):
'''
Tests the RateOfChangeTest Test Flags
'''
self.flags.mark_test(RateOfChangeTest.passes())
assert self.flags.check(RateOfChangeTest) == RateOfChangeTest.pass_flag
assert self.flags.check_str(RateOfChangeTest) == 'pass_flag'
self.flags.mark_test(RateOfChangeTest.not_eval())
assert self.flags.check(RateOfChangeTest) == RateOfChangeTest.not_eval_flag
assert self.flags.check_str(RateOfChangeTest) == 'not_eval_flag'
self.flags.mark_test(RateOfChangeTest.suspect())
assert self.flags.check(RateOfChangeTest) == RateOfChangeTest.suspect_flag
assert self.flags.check_str(RateOfChangeTest) == 'suspect_flag'
def test_flatline(self):
'''
Tests the FlatLineTest Test Flags
'''
self.flags.mark_test(FlatLineTest.passes())
assert self.flags.check(FlatLineTest) == FlatLineTest.pass_flag
assert self.flags.check_str(FlatLineTest) == 'pass_flag'
self.flags.mark_test(FlatLineTest.fail())
assert self.flags.check(FlatLineTest) == FlatLineTest.fail_flag
assert self.flags.check_str(FlatLineTest) == 'fail_flag'
self.flags.mark_test(FlatLineTest.not_eval())
assert self.flags.check(FlatLineTest) == FlatLineTest.not_eval_flag
assert self.flags.check_str(FlatLineTest) == 'not_eval_flag'
self.flags.mark_test(FlatLineTest.suspect())
assert self.flags.check(FlatLineTest) == FlatLineTest.suspect_flag
assert self.flags.check_str(FlatLineTest) == 'suspect_flag'
def test_multivariate(self):
'''
Tests the MultivariateTest Test Flags
'''
self.flags.mark_test(MultivariateTest.passes())
assert self.flags.check(MultivariateTest) == MultivariateTest.pass_flag
assert self.flags.check_str(MultivariateTest) == 'pass_flag'
self.flags.mark_test(MultivariateTest.not_eval())
assert self.flags.check(MultivariateTest) == MultivariateTest.not_eval_flag
assert self.flags.check_str(MultivariateTest) == 'not_eval_flag'
self.flags.mark_test(MultivariateTest.suspect())
assert self.flags.check(MultivariateTest) == MultivariateTest.suspect_flag
assert self.flags.check_str(MultivariateTest) == 'suspect_flag'
def test_attenuated_signal(self):
'''
Tests the AttenuatedSignalTest Test Flags
'''
self.flags.mark_test(AttenuatedSignalTest.passes())
assert self.flags.check(AttenuatedSignalTest) == AttenuatedSignalTest.pass_flag
assert self.flags.check_str(AttenuatedSignalTest) == 'pass_flag'
self.flags.mark_test(AttenuatedSignalTest.fail())
assert self.flags.check(AttenuatedSignalTest) == AttenuatedSignalTest.fail_flag
assert self.flags.check_str(AttenuatedSignalTest) == 'fail_flag'
self.flags.mark_test(AttenuatedSignalTest.not_eval())
assert self.flags.check(AttenuatedSignalTest) == AttenuatedSignalTest.not_eval_flag
assert self.flags.check_str(AttenuatedSignalTest) == 'not_eval_flag'
self.flags.mark_test(AttenuatedSignalTest.suspect())
assert self.flags.check(AttenuatedSignalTest) == AttenuatedSignalTest.suspect_flag
assert self.flags.check_str(AttenuatedSignalTest) == 'suspect_flag'
def test_neighbor(self):
'''
Tests the NeighborTest Test Flags
'''
self.flags.mark_test(NeighborTest.passes())
assert self.flags.check(NeighborTest) == NeighborTest.pass_flag
assert self.flags.check_str(NeighborTest) == 'pass_flag'
self.flags.mark_test(NeighborTest.not_eval())
assert self.flags.check(NeighborTest) == NeighborTest.not_eval_flag
assert self.flags.check_str(NeighborTest) == 'not_eval_flag'
self.flags.mark_test(NeighborTest.suspect())
assert self.flags.check(NeighborTest) == NeighborTest.suspect_flag
assert self.flags.check_str(NeighborTest) == 'suspect_flag'
def test_aggregate(self):
'''
Tests the AggregateTest Test Flags
'''
self.flags.mark_test(AggregateTest.passes())
assert self.flags.check(AggregateTest) == AggregateTest.pass_flag
assert self.flags.check_str(AggregateTest) == 'pass_flag'
self.flags.mark_test(AggregateTest.fail())
assert self.flags.check(AggregateTest) == AggregateTest.fail_flag
assert self.flags.check_str(AggregateTest) == 'fail_flag'
self.flags.mark_test(AggregateTest.not_eval())
assert self.flags.check(AggregateTest) == AggregateTest.not_eval_flag
assert self.flags.check_str(AggregateTest) == 'not_eval_flag'
self.flags.mark_test(AggregateTest.suspect())
assert self.flags.check(AggregateTest) == AggregateTest.suspect_flag
assert self.flags.check_str(AggregateTest) == 'suspect_flag'
def test_ts_curve_space(self):
'''
Tests the TSCurveSpaceTest Test Flags
'''
self.flags.mark_test(TSCurveSpaceTest.passes())
assert self.flags.check(TSCurveSpaceTest) == TSCurveSpaceTest.pass_flag
assert self.flags.check_str(TSCurveSpaceTest) == 'pass_flag'
self.flags.mark_test(TSCurveSpaceTest.fail())
assert self.flags.check(TSCurveSpaceTest) == TSCurveSpaceTest.fail_flag
assert self.flags.check_str(TSCurveSpaceTest) == 'fail_flag'
self.flags.mark_test(TSCurveSpaceTest.not_eval())
assert self.flags.check(TSCurveSpaceTest) == TSCurveSpaceTest.not_eval_flag
assert self.flags.check_str(TSCurveSpaceTest) == 'not_eval_flag'
self.flags.mark_test(TSCurveSpaceTest.suspect())
assert self.flags.check(TSCurveSpaceTest) == TSCurveSpaceTest.suspect_flag
assert self.flags.check_str(TSCurveSpaceTest) == 'suspect_flag'
def test_density_inversion(self):
'''
Tests the DensityInversionTest Test Flags
'''
self.flags.mark_test(DensityInversionTest.passes())
assert self.flags.check(DensityInversionTest) == DensityInversionTest.pass_flag
assert self.flags.check_str(DensityInversionTest) == 'pass_flag'
self.flags.mark_test(DensityInversionTest.fail())
assert self.flags.check(DensityInversionTest) == DensityInversionTest.fail_flag
assert self.flags.check_str(DensityInversionTest) == 'fail_flag'
self.flags.mark_test(DensityInversionTest.not_eval())
assert self.flags.check(DensityInversionTest) == DensityInversionTest.not_eval_flag
assert self.flags.check_str(DensityInversionTest) == 'not_eval_flag'
def test_numpy(self):
'''
Tests the flags for numpy support
'''
flags = np.zeros(20, dtype='uint32')
self.flags = QARTODEval(flags)
self.flags.mark_test(NeighborTest.passes())
expected = np.ones(20, dtype='uint32') * NeighborTest.pass_flag
result = self.flags.check(NeighborTest)
np.testing.assert_array_equal(expected, result)
expected = ['pass_flag' for i in xrange(20) ]
result = self.flags.check_str(NeighborTest)
assert expected == result
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
if sys.version_info >= (3, 0):
from unittest.mock import patch
else:
from mock import patch
from pyfakefs import fake_filesystem_unittest
from shellfoundry.utilities.shell_package_builder import ShellPackageBuilder
from tests.asserts import assertFileExists
from tests.test_utilities.test_package_builder import TestPackageBuilder
class TestShellPackageBuilder(fake_filesystem_unittest.TestCase):
    """Pack TOSCA-based shells from a pyfakefs filesystem and verify output.

    The six tests previously duplicated the same TOSCA.meta and
    shell-definition fixture strings verbatim; they are now assembled from
    the byte-identical fragments below, so the service-visible file
    contents are unchanged.
    """

    # --- fixture fragments (concatenations reproduce the original strings
    # --- byte-for-byte, including the deliberate trailing spaces/newlines)
    _TOSCA_META = (
        "TOSCA-Meta-File-Version: 1.0 \n"
        "CSAR-Version: 1.1 \n"
        "Created-By: Anonymous\n"
        "Entry-Definitions: shell-definition.yml"
    )
    _DEFINITION_METADATA = (
        "tosca_definitions_version: tosca_simple_yaml_1_0\n"
        "metadata:\n"
        "  template_name: NutShell\n"
        "  template_author: Anonymous\n"
        "  template_version: 1.0.0\n"
    )
    _NODE_TYPE = (
        "node_types:\n"
        "  vendor.switch.NXOS:\n"
        "    derived_from: cloudshell.nodes.Switch"
    )
    _ICON_ARTIFACT = (
        "\n    artifacts:\n"
        "      icon:\n"
        "        file: nxos.png\n"
        "        type: tosca.artifacts.File\n"
    )
    _DRIVER_ARTIFACT = (
        "      driver:\n"
        "        file: NutShellDriver.zip\n"
        "        type: tosca.artifacts.File"
    )
    # Complete shell-definition.yml variants used by the tests.
    _FULL_DEFINITION = _DEFINITION_METADATA + _NODE_TYPE + _ICON_ARTIFACT + _DRIVER_ARTIFACT
    _ICON_TEMPLATE_DEFINITION = (
        _DEFINITION_METADATA
        + "  template_icon: shell.png\n"
        + _NODE_TYPE + _ICON_ARTIFACT + _DRIVER_ARTIFACT
    )
    _NO_ARTIFACTS_DEFINITION = _DEFINITION_METADATA + _NODE_TYPE
    _ICON_ONLY_DEFINITION = _DEFINITION_METADATA + _NODE_TYPE + _ICON_ARTIFACT

    def setUp(self):
        self.setUpPyfakefs()

    def _create_shell(self, root, definition, extra_files=()):
        """Lay out a fake shell directory under *root* and chdir into it.

        :param root: shell directory name (no leading slash)
        :param definition: full shell-definition.yml contents
        :param extra_files: (relative path, contents) pairs to create
        """
        self.fs.create_file(root + "/TOSCA-Metadata/TOSCA.meta",
                            contents=self._TOSCA_META)
        self.fs.create_file(root + "/shell-definition.yml",
                            contents=definition)
        for rel_path, contents in extra_files:
            self.fs.create_file(root + "/" + rel_path, contents=contents)
        os.chdir(root)

    def _pack(self, root):
        """Run the packer on */root* with console output suppressed."""
        shell_package_builder = ShellPackageBuilder()
        with patch("click.echo"):
            shell_package_builder.pack("/" + root)

    def _assert_package(self, *members):
        """Assert dist/NutShell.zip exists and contains every *member*."""
        assertFileExists(self, "dist/NutShell.zip")
        TestPackageBuilder.unzip("dist/NutShell.zip", "dist/package_content")
        for member in members:
            assertFileExists(self, "dist/package_content/" + member)

    def test_tosca_based_shell_packed(self):
        # Arrange
        self._create_shell(
            "nut-shell",
            self._FULL_DEFINITION,
            extra_files=[
                ("nxos.png", "IMAGE"),
                ("src/driver.py", "DRIVER CONTENT"),
            ],
        )
        # Act
        self._pack("nut-shell")
        # Assert
        self._assert_package(
            "TOSCA-Metadata/TOSCA.meta",
            "shell-definition.yml",
            "nxos.png",
            "NutShellDriver.zip",
        )

    def test_tosca_based_shell_with_icon_packed(self):
        # Arrange: metadata declares an extra template_icon artifact.
        self._create_shell(
            "nut-shell",
            self._ICON_TEMPLATE_DEFINITION,
            extra_files=[
                ("shell.png", "SHELL_IMAGE"),
                ("nxos.png", "IMAGE"),
                ("src/driver.py", "DRIVER CONTENT"),
            ],
        )
        # Act
        self._pack("nut-shell")
        # Assert
        self._assert_package(
            "TOSCA-Metadata/TOSCA.meta",
            "shell-definition.yml",
            "nxos.png",
            "shell.png",
            "NutShellDriver.zip",
        )

    def test_tosca_based_shell_with_underscore_packed(self):
        # Arrange: same layout but with an underscore in the directory name.
        self._create_shell(
            "nut_shell",
            self._FULL_DEFINITION,
            extra_files=[
                ("nxos.png", "IMAGE"),
                ("src/driver.py", "DRIVER CONTENT"),
            ],
        )
        # Act
        self._pack("nut_shell")
        # Assert
        self._assert_package(
            "TOSCA-Metadata/TOSCA.meta",
            "shell-definition.yml",
            "nxos.png",
            "NutShellDriver.zip",
        )

    def test_tosca_based_shell_packed_when_artifacts_missing_in_yaml_file(self):
        # Arrange: definition declares no artifacts at all.
        self._create_shell("nut-shell", self._NO_ARTIFACTS_DEFINITION)
        # Act
        self._pack("nut-shell")
        # Assert
        self._assert_package(
            "TOSCA-Metadata/TOSCA.meta",
            "shell-definition.yml",
        )

    def test_tosca_based_shell_packed_when_some_artifacts_missing_in_directory(self):
        # Arrange: icon artifact declared but nxos.png never created on disk.
        self._create_shell("nut-shell", self._ICON_ONLY_DEFINITION)
        # Act
        self._pack("nut-shell")
        # Assert
        self._assert_package(
            "TOSCA-Metadata/TOSCA.meta",
            "shell-definition.yml",
        )

    def test_tosca_based_shell_failed(self):
        # Arrange: driver artifact declared but no src/ directory exists,
        # so the driver zip cannot be built.
        self._create_shell(
            "nut-shell",
            self._FULL_DEFINITION,
            extra_files=[("nxos.png", "IMAGE")],
        )
        # Act / Assert
        with self.assertRaisesRegex(Exception, "Invalid driver structure."):
            self._pack("nut-shell")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectorsOperations:
    """ConnectorsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.security.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated code -- manual edits are lost on regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ConnectorSettingList"]:
        """Cloud accounts connectors of a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ConnectorSettingList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.ConnectorSettingList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectorSettingList"]
        # Map auth/missing/conflict statuses to typed exceptions; callers can
        # extend or override through the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01-preview"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page (from operation
            # metadata) or a continuation page (service-supplied nextLink).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # nextLink already carries path and query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('ConnectorSettingList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page through the pipeline, raising on non-200.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/connectors'}  # type: ignore
    async def get(
        self,
        connector_name: str,
        **kwargs: Any
    ) -> "_models.ConnectorSetting":
        """Details of a specific cloud account connector.
        :param connector_name: Name of the cloud account connector.
        :type connector_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConnectorSetting, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.ConnectorSetting
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectorSetting"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'connectorName': self._serialize.url("connector_name", connector_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ConnectorSetting', pipeline_response)
        if cls:
            # Custom hook receives the raw response, the model, and headers.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/connectors/{connectorName}'}  # type: ignore
    async def create_or_update(
        self,
        connector_name: str,
        connector_setting: "_models.ConnectorSetting",
        **kwargs: Any
    ) -> "_models.ConnectorSetting":
        """Create a cloud account connector or update an existing one. Connect to your cloud account. For
        AWS, use either account credentials or role-based authentication. For GCP, use account
        organization credentials.
        :param connector_name: Name of the cloud account connector.
        :type connector_name: str
        :param connector_setting: Settings for the cloud account connector.
        :type connector_setting: ~azure.mgmt.security.models.ConnectorSetting
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConnectorSetting, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.ConnectorSetting
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectorSetting"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'connectorName': self._serialize.url("connector_name", connector_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        # Serialize the request body against the ConnectorSetting model.
        body_content = self._serialize.body(connector_setting, 'ConnectorSetting')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ConnectorSetting', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/connectors/{connectorName}'}  # type: ignore
    async def delete(
        self,
        connector_name: str,
        **kwargs: Any
    ) -> None:
        """Delete a cloud account connector from a subscription.
        :param connector_name: Name of the cloud account connector.
        :type connector_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'connectorName': self._serialize.url("connector_name", connector_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 (no content) is also a success for delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/connectors/{connectorName}'}  # type: ignore
|
|
"""Convert epydoc markup into renderable content."""
import __builtin__
import exceptions
import inspect
import itertools
import os
import re
import sys
import urllib
from twisted.web.template import Tag, tags, XMLString
from pydoctor import model
STDLIB_DIR = os.path.dirname(os.__file__)
STDLIB_URL = 'http://docs.python.org/library/'
def link(o):
    """Return the URL of *o*'s documentation page (urlprefix + quoted name)."""
    page_name = urllib.quote(o.fullName() + '.html')
    return o.system.urlprefix + page_name
def get_parser(formatname):
    """Look up epydoc's docstring parser for *formatname* (e.g. 'epytext').

    Returns a ``(parse_docstring, error)`` pair: the parser callable and
    ``None`` on success, or ``(None, ImportError)`` so callers can report
    why the markup module is unavailable.
    """
    try:
        mod = __import__('epydoc.markup.' + formatname,
                         globals(), locals(), ['parse_docstring'])
    except ImportError, e:  # Python 2 syntax; this module predates Python 3
        return None, e
    else:
        return mod.parse_docstring, None
def boringDocstring(doc, summary=False):
    """Render a docstring as unparsed preformatted HTML.

    Missing or whitespace-only docstrings yield the standard
    'Undocumented' marker; otherwise the dedented text is wrapped in a
    ``pre`` tag (or ``tt`` when *summary* is true).
    """
    if doc is None or not doc.strip():
        return '<pre class="undocumented">Undocumented</pre>'
    # inspect.getdoc only dedents via a __doc__ attribute, not a plain
    # string, so hang the docstring on a throwaway function.
    def _carrier(): pass
    _carrier.__doc__ = doc
    wrapper = tags.tt if summary else tags.pre
    return wrapper(inspect.getdoc(_carrier))
def stdlib_doc_link_for_name(name):
    """Return a docs.python.org URL for *name* if it names stdlib content.

    Tries successively shorter dotted prefixes of *name* as module paths
    under the stdlib directory, then falls back to the builtin namespace.
    Returns None when *name* does not look like a standard-library name.
    """
    parts = name.split('.')
    # Longest prefix first: for "os.path.join" try os/path/join, os/path, os.
    for i in range(len(parts), 0, -1):
        prefix = parts[:i]
        filename = '/'.join(prefix)
        modname = '.'.join(prefix)
        is_stdlib_module = (
            modname == 'os.path'
            or os.path.exists(os.path.join(STDLIB_DIR, filename) + '.py')
            or os.path.exists(os.path.join(STDLIB_DIR, filename, '__init__.py'))
            or os.path.exists(os.path.join(STDLIB_DIR, 'lib-dynload', filename) + '.so')
            or modname in sys.builtin_module_names
        )
        if is_stdlib_module:
            return STDLIB_URL + modname + '.html#' + name
    root = parts[0]
    # Not a module: maybe a builtin (but never dunder names).
    if root in __builtin__.__dict__ and not root.startswith('__'):
        builtin_obj = __builtin__.__dict__[root]
        if root in exceptions.__dict__:
            return STDLIB_URL + 'exceptions.html#exceptions.' + name
        elif isinstance(builtin_obj, type):
            return STDLIB_URL + 'stdtypes.html#' + name
        elif callable(builtin_obj):
            return STDLIB_URL + 'functions.html#' + name
        else:
            return STDLIB_URL + 'constants.html#' + name
    return None
class _EpydocLinker(object):
    """Resolve epydoc cross-references (``L{...}``/``X{...}``) into HTML.

    Instances serve as the linker epydoc hands to ``to_html``; ``self.obj``
    is the documented object whose scope anchors name resolution.
    """
    def __init__(self, obj):
        self.obj = obj
    def translate_indexterm(self, something):
        # X{foobar} is meant to put foobar in an index page (like, a
        # proper end-of-the-book index). Should we support that? There
        # are like 2 uses in Twisted.
        # NOTE(review): de_p is defined elsewhere in this module; it
        # presumably strips the wrapping <p> element -- confirm.
        return de_p(something.to_html(self))
    def _objLink(self, obj, prettyID):
        """Build an ``<a>`` tag to *obj*, labeled *prettyID*."""
        # Objects documented on their parent's page get a fragment anchor;
        # objects with their own page link straight to it.
        if obj.documentation_location == model.DocLocation.PARENT_PAGE:
            p = obj.parent
            if isinstance(p, model.Module) and p.name == '__init__':
                p = p.parent
            linktext = link(p) + '#' + urllib.quote(obj.name)
        elif obj.documentation_location == model.DocLocation.OWN_PAGE:
            linktext = link(obj)
        else:
            raise AssertionError(
                "Unknown documentation_location: %s" % obj.documentation_location)
        return '<a href="%s"><code>%s</code></a>'%(linktext, prettyID)
    def look_for_name(self, name, candidates):
        """Resolve *name* against each object in *candidates*.

        Returns the unique target found, or None (emitting a warning
        when the reference is ambiguous).
        """
        part0 = name.split('.')[0]
        potential_targets = []
        for src in candidates:
            # Cheap pre-filter: skip containers that cannot hold the name.
            if part0 not in src.contents:
                continue
            target = src.resolveName(name)
            if target is not None and target not in potential_targets:
                potential_targets.append(target)
        if len(potential_targets) == 1:
            return potential_targets[0]
        elif len(potential_targets) > 1:
            self.obj.system.msg(
                "translate_identifier_xref", "%s:%s ambiguous ref to %s, could be %s" % (
                    self.obj.fullName(), self.obj.linenumber, name,
                    ', '.join([ob.fullName() for ob in potential_targets])),
                thresh=-1)
        return None
    def look_for_intersphinx(self, name):
        """
        Return link for `name` based on intersphinx inventory.
        Return None if link is not found.
        """
        return self.obj.system.intersphinx.getLink(name)
    def translate_identifier_xref(self, fullID, prettyID):
        """Figure out what ``L{fullID}`` should link to.
        There is a lot of DWIM here. The order goes:
        1. Check if fullID refers to an object by Python name resolution in
        our context.
        2. Walk up the object tree and see if fullID refers to an object by
        Python name resolution in each context.
        3. Check if fullID is the fullName of an object.
        4. Check to see if fullID names a builtin or standard library
        module.
        4. Walk up the object tree again and see if fullID refers to an
        object in an "uncle" object. (So if p.m1 has a class C, the
        docstring for p.m2 can say L{C} to refer to the class in m1). If
        at any level fullID refers to more than one object, complain.
        5. Examine every module and package in the system and see if fullID
        names an object in each one. Again, if more than one object is
        found, complain.
        """
        # Steps 1 and 2: Python name resolution here and in each ancestor.
        src = self.obj
        while src is not None:
            target = src.resolveName(fullID)
            if target is not None:
                return self._objLink(target, prettyID)
            src = src.parent
        # Step 3: exact fullName match anywhere in the system.
        target = self.obj.system.objForFullName(fullID)
        if target is not None:
            return self._objLink(target, prettyID)
        # Step 4: builtin or standard-library documentation.
        fullerID = self.obj.expandName(fullID)
        linktext = stdlib_doc_link_for_name(fullerID)
        if linktext is not None:
            return '<a href="%s"><code>%s</code></a>'%(linktext, prettyID)
        # "Uncle" lookup: contents of each ancestor's children.
        src = self.obj
        while src is not None:
            target = self.look_for_name(fullID, src.contents.values())
            if target is not None:
                return self._objLink(target, prettyID)
            src = src.parent
        # Step 5: every module and package in the system.
        target = self.look_for_name(fullID, itertools.chain(
            self.obj.system.objectsOfType(model.Module),
            self.obj.system.objectsOfType(model.Package)))
        if target is not None:
            return self._objLink(target, prettyID)
        # Last resort: intersphinx inventories of external projects.
        target = self.look_for_intersphinx(fullID)
        if target:
            return '<a href="%s"><code>%s</code></a>'%(target, prettyID)
        # Give up: warn and emit unlinked monospace text.
        self.obj.system.msg(
            "translate_identifier_xref", "%s:%s invalid ref to %s" % (
                self.obj.fullName(), self.obj.linenumber, fullID),
            thresh=-1)
        return '<code>%s</code>'%(prettyID,)
class FieldDesc(object):
    """Value object describing a single documented field (a parameter,
    raised exception, ...) with an optional name, type and body."""

    def __init__(self):
        self.kind = None
        self.name = None
        self.type = None
        self.body = None

    def format(self):
        """Return the renderable body, with the type appended when known."""
        formatted = self.body if self.body is not None else ''
        if self.type is None:
            return formatted
        # A flattened tuple of renderable fragments, consumed by stan.
        return formatted, ' (type: ', self.type, ')'

    def __repr__(self):
        attrs = ', '.join("%s=%r" % pair for pair in self.__dict__.iteritems())
        return "<%s(%s)>" % (self.__class__.__name__, attrs)
def format_desc_list(singular, descs, plural=None):
    """Render a list of FieldDescs as table rows; the first row carries
    the (singular or plural) label in its leading cell."""
    if plural is None:
        plural = singular + 's'
    if not descs:
        return ''
    label = plural if len(descs) > 1 else singular
    rows = []
    for index, desc in enumerate(descs):
        if index == 0:
            row = tags.tr(class_="fieldStart")
            row(tags.td(class_="fieldName")(label))
        else:
            row = tags.tr()
            row(tags.td())
        if desc.name is None:
            row(tags.td(colspan="2")(desc.format()))
        else:
            row(tags.td(class_="fieldArg")(desc.name), tags.td(desc.format()))
        rows.append(row)
    return rows
def format_field_list(obj, singular, fields, plural=None):
    """Render free-form fields (notes, authors, ...) as table rows; the
    first row carries the (singular or plural) label."""
    if plural is None:
        plural = singular + 's'
    if not fields:
        return ''
    label = plural if len(fields) > 1 else singular
    rows = []
    for index, field in enumerate(fields):
        if index == 0:
            row = tags.tr(class_="fieldStart")
            row(tags.td(class_="fieldName")(label))
        else:
            row = tags.tr()
            row(tags.td())
        row(tags.td(colspan="2")(field.body))
        rows.append(row)
    return rows
_ok_chars = '\r\n\t\f'
_class = ''
for _c in map(chr, range(0, 32)):
if _c not in _ok_chars:
_class += _c
_control_pat = re.compile('[' + _class + ']')
def html2stan(crap):
    """Parse an HTML string into a list of stan nodes, escaping stray
    control characters so the XML parser does not choke, and dropping a
    single trailing newline node."""
    escaped = _control_pat.sub(lambda m: '\\x%02x' % ord(m.group()), crap)
    nodes = XMLString("<div>" + escaped + "</div>").load()[0].children
    if nodes and nodes[-1] == u'\n':
        del nodes[-1]
    return nodes
class Field(object):
    """Like epydoc.markup.Field, but without the gross accessor
    methods and with a formatted body."""

    def __init__(self, field, obj):
        self.tag = field.tag()
        self.arg = field.arg()
        self.body = html2stan(field.body().to_html(_EpydocLinker(obj)))

    def __repr__(self):
        # Truncate long bodies so reprs stay readable in logs.
        body_repr = repr(self.body)
        if len(body_repr) > 25:
            body_repr = body_repr[:20] + '...' + body_repr[-2:]
        return "<%s %r %r %s>" % (
            self.__class__.__name__, self.tag, self.arg, body_repr)
class FieldHandler(object):
    """Collects the epytext fields of one documentable and renders them
    as a single HTML field table."""

    def __init__(self, obj):
        # obj: the documentable whose fields are being processed.
        self.obj = obj
        # Maps parameter name -> type body collected from @type fields.
        self.types = {}
        self.parameter_descs = []
        self.return_desc = None
        self.raise_descs = []
        self.seealsos = []
        self.notes = []
        self.authors = []
        self.sinces = []
        # Fields whose tag has no handle_* method, rendered verbatim.
        self.unknowns = []
        self.unattached_types = {}

    def redef(self, field):
        """Warn that a @type was given twice for the same name."""
        self.obj.system.msg(
            "epytext",
            "on %r: redefinition of @type %s"%(self.obj.fullName(), field.arg),
            thresh=-1)

    def handle_return(self, field):
        if not self.return_desc:
            self.return_desc = FieldDesc()
        if self.return_desc.body:
            self.obj.system.msg('epydoc2stan', 'XXX')
        self.return_desc.body = field.body
    handle_returns = handle_return

    def handle_returntype(self, field):
        if not self.return_desc:
            self.return_desc = FieldDesc()
        if self.return_desc.type:
            self.obj.system.msg('epydoc2stan', 'XXX')
        self.return_desc.type = field.body
    handle_rtype = handle_returntype

    def add_type_info(self, desc_list, field):
        """Attach a type to the most recent matching desc, or start a new
        one (warning on redefinition)."""
        #print desc_list, field
        if desc_list and desc_list[-1].name == field.arg:
            if desc_list[-1].type is not None:
                self.redef(field)
            desc_list[-1].type = field.body
        else:
            d = FieldDesc()
            d.kind = field.tag
            d.name = field.arg
            d.type = field.body
            desc_list.append(d)

    def add_info(self, desc_list, field):
        """Append a new FieldDesc built from field to desc_list."""
        d = FieldDesc()
        d.kind = field.tag
        d.name = field.arg
        d.body = field.body
        desc_list.append(d)

    def handle_type(self, field):
        self.types[field.arg] = field.body

    def handle_param(self, field):
        self.add_info(self.parameter_descs, field)
    handle_arg = handle_param
    handle_keyword = handle_param

    def handled_elsewhere(self, field):
        # Some fields are handled by extract_fields below.
        pass
    handle_ivar = handled_elsewhere
    handle_cvar = handled_elsewhere
    handle_var = handled_elsewhere

    def handle_raises(self, field):
        self.add_info(self.raise_descs, field)
    handle_raise = handle_raises

    def handle_seealso(self, field):
        self.seealsos.append(field)
    handle_see = handle_seealso

    def handle_note(self, field):
        self.notes.append(field)

    def handle_author(self, field):
        self.authors.append(field)

    def handle_since(self, field):
        self.sinces.append(field)

    def handleUnknownField(self, field):
        """Fallback for tags with no dedicated handler: warn and render
        the field verbatim in the 'Unknown Field' section."""
        self.obj.system.msg(
            'epydoc2stan',
            'found unknown field on %r: %r'%(self.obj.fullName(), field),
            thresh=-1)
        self.add_info(self.unknowns, field)

    def handle(self, field):
        # Dispatch on the field tag name, e.g. @param -> handle_param.
        m = getattr(self, 'handle_' + field.tag, self.handleUnknownField)
        m(field)

    def resolve_types(self):
        """Copy collected @type bodies onto the matching parameters."""
        for pd in self.parameter_descs:
            if pd.name in self.types:
                pd.type = self.types[pd.name]

    def format(self):
        """Render every collected field as rows of one field table."""
        r = []
        r.append(format_desc_list('Parameters', self.parameter_descs, 'Parameters'))
        if self.return_desc:
            r.append(tags.tr(class_="fieldStart")(tags.td(class_="fieldName")('Returns'),
                               tags.td(colspan="2")(self.return_desc.format())))
        r.append(format_desc_list("Raises", self.raise_descs, "Raises"))
        for s, p, l in (('Author', 'Authors', self.authors),
                        ('See Also', 'See Also', self.seealsos),
                        ('Present Since', 'Present Since', self.sinces),
                        ('Note', 'Notes', self.notes)):
            r.append(format_field_list(self.obj, s, l, p))
        # Group unknown fields by tag, preserving first-seen order.
        unknowns = {}
        unknownsinorder = []
        for fieldinfo in self.unknowns:
            tag = fieldinfo.kind
            if tag in unknowns:
                unknowns[tag].append(fieldinfo)
            else:
                unknowns[tag] = [fieldinfo]
                unknownsinorder.append(unknowns[tag])
        for fieldlist in unknownsinorder:
            label = "Unknown Field: " + fieldlist[0].kind
            r.append(format_desc_list(label, fieldlist, label))
        return tags.table(class_='fieldTable')(r)
def de_p(s):
    """Strip the single wrapping paragraph tag that reST rendering emits
    around inline markup, plus any remaining trailing newline."""
    if s.startswith('<p>') and s.endswith('</p>\n'):
        s = s[3:-5]  # argh reST
    return s[:-1] if s.endswith('\n') else s
def reportErrors(obj, errs):
    """Log epytext parse errors for obj and, the first time, record the
    object as problematic and dump its docstring with line numbers."""
    for err in errs:
        if isinstance(err, str):
            linenumber = '??'
            descr = err
        else:
            linenumber = obj.linenumber + err.linenum()
            descr = err._descr
        obj.system.msg(
            'epydoc2stan2',
            '%s:%s epytext error %r' % (obj.fullName(), linenumber, descr))
    if not errs:
        return
    if obj.fullName() not in obj.system.epytextproblems:
        obj.system.epytextproblems.append(obj.fullName())
        obj.system.msg('epydoc2stan',
                       'epytext error in %s'%(obj,), thresh=1)
        emit = lambda m: obj.system.msg('epydoc2stan', m, thresh=2)
        for i, line in enumerate(obj.docstring.splitlines()):
            emit("%4s"%(i+1) + ' ' + line)
        for err in errs:
            emit(err)
def doc2stan(obj, summary=False, docstring=None):
    """Generate an HTML representation of a docstring.

    Returns a (stan, errors) pair. When summary is true, a condensed
    one-span rendering is produced instead of the full div + field table.
    """
    # Pre-parsed docstrings (e.g. attributes built by extract_fields)
    # short-circuit the whole parsing machinery.
    if getattr(obj, 'parsed_docstring', None) is not None:
        r = html2stan(obj.parsed_docstring.to_html(_EpydocLinker(obj)))
        if getattr(obj, 'parsed_type', None) is not None:
            r = [r, ' (type: ', html2stan(obj.parsed_type.to_html(_EpydocLinker(obj))), ')']
        return r, []
    origobj = obj
    if isinstance(obj, model.Package):
        obj = obj.contents['__init__']
    if docstring is None:
        doc = None
        # Walk the docsources (e.g. overridden methods) for the first
        # docstring available; 'source' is who the doc really belongs to.
        for source in obj.docsources():
            if source.docstring is not None:
                doc = source.docstring
                break
    else:
        source = obj
        doc = docstring
    if doc is None or not doc.strip():
        # Undocumented object: report how much of its contents is
        # documented instead.
        text = "Undocumented"
        subdocstrings = {}
        subcounts = {}
        for subob in origobj.contents.itervalues():
            k = subob.kind.lower()
            subcounts[k] = subcounts.get(k, 0) + 1
            if subob.docstring is not None:
                subdocstrings[k] = subdocstrings.get(k, 0) + 1
        if isinstance(origobj, model.Package):
            # Don't count the package's own __init__ module.
            subcounts["module"] -= 1
        if subdocstrings:
            plurals = {'class':'classes'}
            text = "No %s docstring"%origobj.kind.lower()
            if summary:
                u = []
                for k in sorted(subcounts):
                    u.append("%s/%s %s"%(subdocstrings.get(k, 0), subcounts[k],
                                         plurals.get(k, k+'s')))
                text += '; ' + ', '.join(u) + " documented"
        if summary:
            return tags.span(class_="undocumented")(text), []
        else:
            return tags.div(class_="undocumented")(text), []
    if summary:
        # Use up to three first non-empty lines of doc string as summary.
        lines = itertools.dropwhile(lambda line: not line.strip(),
                                    doc.split('\n'))
        lines = itertools.takewhile(lambda line: line.strip(), lines)
        lines = [ line.strip() for line in lines ]
        if len(lines) > 3:
            return tags.span(class_="undocumented")('No summary'), []
        else:
            doc = ' '.join(lines)
    parse_docstring, e = get_parser(obj.system.options.docformat)
    if not parse_docstring:
        # The configured markup parser failed to import; fall back to
        # plain text rendering.
        msg = 'Error trying to import %r parser:\n\n %s: %s\n\nUsing plain text formatting only.'%(
            obj.system.options.docformat, e.__class__.__name__, e)
        obj.system.msg('epydoc2stan', msg, thresh=-1, once=True)
        return boringDocstring(doc, summary), []
    errs = []
    # Route the raw docstring through inspect.getdoc to normalize
    # indentation before parsing.
    def crappit(): pass
    crappit.__doc__ = doc
    doc = inspect.getdoc(crappit)
    try:
        pdoc = parse_docstring(doc, errs)
    except Exception, e:
        errs = [e.__class__.__name__ +': ' + str(e)]
    if errs:
        reportErrors(source, errs)
        return boringDocstring(doc, summary), errs
    pdoc, fields = pdoc.split_fields()
    if pdoc is not None:
        try:
            crap = pdoc.to_html(_EpydocLinker(source))
        except Exception, e:
            reportErrors(source, [e.__class__.__name__ +': ' + str(e)])
            return (boringDocstring(doc, summary),
                    [e.__class__.__name__ +': ' + str(e)])
    else:
        crap = ''
    if isinstance(crap, unicode):
        crap = crap.encode('utf-8')
    if summary:
        if not crap:
            return (), []
        stan = html2stan(crap)
        # Unwrap a single paragraph so the summary fits inline in a span.
        if len(stan) == 1 and isinstance(stan[0], Tag) and stan[0].tagName == 'p':
            stan = stan[0].children
        s = tags.span(stan)
    else:
        if not crap and not fields:
            return (), []
        stan = html2stan(crap)
        s = tags.div(stan)
        # Full rendering also appends the field table (params, returns...).
        fh = FieldHandler(obj)
        for field in fields:
            fh.handle(Field(field, obj))
        fh.resolve_types()
        s(fh.format())
    return s, []
# Maps epytext variable field tags to the human-readable attribute kind
# assigned by extract_fields below.
field_name_to_human_name = {
    'ivar': 'Instance Variable',
    'cvar': 'Class Variable',
    'var': 'Variable',
    }
def extract_fields(obj):
    """Build Attribute documentables from ivar/cvar/var docstring fields,
    attaching any matching @type bodies."""
    if isinstance(obj, model.Package):
        obj = obj.contents['__init__']
    if isinstance(obj, model.Function):
        return []
    doc = obj.docstring
    if doc is None or not doc.strip():
        return []
    parse_docstring, e = get_parser(obj.system.options.docformat)
    if not parse_docstring:
        return []
    # Normalize docstring indentation via inspect.getdoc before parsing.
    def crappit(): pass
    crappit.__doc__ = doc
    doc = inspect.getdoc(crappit)
    try:
        pdoc = parse_docstring(doc, [])
    except Exception:
        return []
    pdoc, fields = pdoc.split_fields()
    if not fields:
        return []
    types = dict((field.arg(), field.body())
                 for field in fields if field.tag() == 'type')
    attrs = []
    for field in fields:
        if field.tag() not in ('ivar', 'cvar', 'var'):
            continue
        attrobj = obj.system.Attribute(obj.system, field.arg(), None, obj)
        attrobj.parsed_docstring = field.body()
        attrobj.parsed_type = types.get(field.arg())
        attrobj.kind = field_name_to_human_name[field.tag()]
        attrs.append(attrobj)
    return attrs
|
|
"""Support to interface with the Plex API."""
from datetime import timedelta
import json
import logging
import requests
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import (
DEVICE_DEFAULT_NAME, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import dt as dt_util
from homeassistant.util.json import load_json, save_json
# Pending configurator request ids, keyed by server host.
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)

# Throttle intervals applied to the update_devices/update_sessions pollers.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)

# Flat-file storage of host/token/ssl settings written by setup_plexserver.
PLEX_CONFIG_FILE = 'plex.conf'
PLEX_DATA = 'plex'

# Platform configuration option keys.
CONF_INCLUDE_NON_CLIENTS = 'include_non_clients'
CONF_USE_EPISODE_ART = 'use_episode_art'
CONF_USE_CUSTOM_ENTITY_IDS = 'use_custom_entity_ids'
CONF_SHOW_ALL_CONTROLS = 'show_all_controls'
CONF_REMOVE_UNAVAILABLE_CLIENTS = 'remove_unavailable_clients'
CONF_CLIENT_REMOVE_INTERVAL = 'client_remove_interval'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_INCLUDE_NON_CLIENTS, default=False): cv.boolean,
    vol.Optional(CONF_USE_EPISODE_ART, default=False): cv.boolean,
    vol.Optional(CONF_USE_CUSTOM_ENTITY_IDS, default=False): cv.boolean,
    vol.Optional(CONF_SHOW_ALL_CONTROLS, default=False): cv.boolean,
    vol.Optional(CONF_REMOVE_UNAVAILABLE_CLIENTS, default=True): cv.boolean,
    vol.Optional(CONF_CLIENT_REMOVE_INTERVAL, default=timedelta(seconds=600)):
        vol.All(cv.time_period, cv.positive_timedelta),
})
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
    """Set up the Plex platform."""
    if PLEX_DATA not in hass.data:
        hass.data[PLEX_DATA] = {}

    # Prefer a previously saved configuration from plex.conf.
    file_config = load_json(hass.config.path(PLEX_CONFIG_FILE))
    if file_config:
        # Setup a configured PlexServer
        host, host_config = file_config.popitem()
        token = host_config['token']
        has_ssl = host_config.get('ssl', False)
        verify_ssl = host_config.get('verify', True)
    # Via discovery
    elif discovery_info is not None:
        # Parse discovery data
        discovered_host = discovery_info.get('host')
        discovered_port = discovery_info.get('port')
        host = '%s:%s' % (discovered_host, discovered_port)
        _LOGGER.info("Discovered PLEX server: %s", host)

        # A configurator flow for this host is already in progress.
        if host in _CONFIGURING:
            return
        token = None
        has_ssl = False
        verify_ssl = True
    else:
        return

    setup_plexserver(
        host, token, has_ssl, verify_ssl,
        hass, config, add_entities_callback
    )
def setup_plexserver(
        host, token, has_ssl, verify_ssl, hass, config, add_entities_callback):
    """Set up a plexserver based on host parameter.

    Connects to the server, persists the working configuration to
    plex.conf, and installs the throttled device/session pollers.
    """
    import plexapi.server
    import plexapi.exceptions

    cert_session = None
    http_prefix = 'https' if has_ssl else 'http'
    if has_ssl and (verify_ssl is False):
        # User explicitly opted out of certificate verification.
        _LOGGER.info("Ignoring SSL verification")
        cert_session = requests.Session()
        cert_session.verify = False
    try:
        plexserver = plexapi.server.PlexServer(
            '%s://%s' % (http_prefix, host),
            token, cert_session
        )
        _LOGGER.info("Discovery configuration done (no token needed)")
    except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized,
            plexapi.exceptions.NotFound) as error:
        _LOGGER.info(error)
        # No token or wrong token
        request_configuration(host, hass, config, add_entities_callback)
        return

    # If we came here and configuring this host, mark as done
    if host in _CONFIGURING:
        request_id = _CONFIGURING.pop(host)
        configurator = hass.components.configurator
        configurator.request_done(request_id)
        _LOGGER.info("Discovery configuration done")

    # Save config
    save_json(
        hass.config.path(PLEX_CONFIG_FILE), {host: {
            'token': token,
            'ssl': has_ssl,
            'verify': verify_ssl,
        }})

    _LOGGER.info('Connected to: %s://%s', http_prefix, host)

    plex_clients = hass.data[PLEX_DATA]
    plex_sessions = {}
    # The lambda defers resolution, so update_devices may be defined below.
    track_utc_time_change(hass, lambda now: update_devices(), second=30)

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_devices():
        """Update the devices objects."""
        try:
            devices = plexserver.clients()
        except plexapi.exceptions.BadRequest:
            _LOGGER.exception("Error listing plex devices")
            return
        except requests.exceptions.RequestException as ex:
            _LOGGER.warning(
                "Could not connect to plex server at http://%s (%s)", host, ex)
            return

        new_plex_clients = []
        available_client_ids = []
        for device in devices:
            # For now, let's allow all deviceClass types
            if device.deviceClass in ['badClient']:
                continue

            available_client_ids.append(device.machineIdentifier)

            if device.machineIdentifier not in plex_clients:
                new_client = PlexClient(
                    config, device, None, plex_sessions, update_devices,
                    update_sessions)
                plex_clients[device.machineIdentifier] = new_client
                _LOGGER.debug("New device: %s", device.machineIdentifier)
                new_plex_clients.append(new_client)
            else:
                _LOGGER.debug("Refreshing device: %s",
                              device.machineIdentifier)
                plex_clients[device.machineIdentifier].refresh(device, None)

        # add devices with a session and no client (ex. PlexConnect Apple TV's)
        if config.get(CONF_INCLUDE_NON_CLIENTS):
            # To avoid errors when plex sessions created during iteration
            sessions = list(plex_sessions.items())
            for machine_identifier, (session, player) in sessions:
                if machine_identifier in available_client_ids:
                    # Avoid using session if already added as a device.
                    _LOGGER.debug("Skipping session, device exists: %s",
                                  machine_identifier)
                    continue

                if (machine_identifier not in plex_clients
                        and machine_identifier is not None):
                    new_client = PlexClient(
                        config, player, session, plex_sessions, update_devices,
                        update_sessions)
                    plex_clients[machine_identifier] = new_client
                    _LOGGER.debug("New session: %s", machine_identifier)
                    new_plex_clients.append(new_client)
                else:
                    _LOGGER.debug("Refreshing session: %s", machine_identifier)
                    plex_clients[machine_identifier].refresh(None, session)

        clients_to_remove = []
        for client in plex_clients.values():
            # force devices to idle that do not have a valid session
            if client.session is None:
                client.force_idle()

            client.set_availability(client.machine_identifier
                                    in available_client_ids
                                    or client.machine_identifier
                                    in plex_sessions)

            if not config.get(CONF_REMOVE_UNAVAILABLE_CLIENTS) \
                    or client.available:
                continue

            # Remove entities unavailable longer than the configured
            # removal interval.
            if (dt_util.utcnow() - client.marked_unavailable) >= \
                    (config.get(CONF_CLIENT_REMOVE_INTERVAL)):
                hass.add_job(client.async_remove())
                clients_to_remove.append(client.machine_identifier)

        while clients_to_remove:
            del plex_clients[clients_to_remove.pop()]

        if new_plex_clients:
            add_entities_callback(new_plex_clients)

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_sessions():
        """Update the sessions objects."""
        try:
            sessions = plexserver.sessions()
        except plexapi.exceptions.BadRequest:
            _LOGGER.exception("Error listing plex sessions")
            return
        except requests.exceptions.RequestException as ex:
            _LOGGER.warning(
                "Could not connect to plex server at http://%s (%s)", host, ex)
            return

        # Rebuild the machine-identifier -> (session, player) map.
        plex_sessions.clear()
        for session in sessions:
            for player in session.players:
                plex_sessions[player.machineIdentifier] = session, player

    update_sessions()
    update_devices()
def request_configuration(host, hass, config, add_entities_callback):
    """Request configuration steps from the user.

    Shows a configurator card asking for the X-Plex-Token and SSL
    options, then re-runs setup_plexserver with the submitted values.
    """
    configurator = hass.components.configurator
    # We got an error if this method is called while we are configuring
    if host in _CONFIGURING:
        configurator.notify_errors(_CONFIGURING[host],
                                   'Failed to register, please try again.')
        return

    def plex_configuration_callback(data):
        """Handle configuration changes."""
        # BUGFIX: the submitted data is keyed by the field ids below, so
        # the SSL-verification answer arrives as 'do_not_verify_ssl' (the
        # old code read a non-existent 'do_not_verify' key). It must also
        # be negated, because setup_plexserver expects verify_ssl.
        setup_plexserver(
            host, data.get('token'),
            cv.boolean(data.get('has_ssl')),
            not cv.boolean(data.get('do_not_verify_ssl')),
            hass, config, add_entities_callback
        )

    _CONFIGURING[host] = configurator.request_config(
        'Plex Media Server',
        plex_configuration_callback,
        description='Enter the X-Plex-Token',
        entity_picture='/static/images/logo_plex_mediaserver.png',
        submit_caption='Confirm',
        fields=[{
            'id': 'token',
            'name': 'X-Plex-Token',
            'type': ''
        }, {
            'id': 'has_ssl',
            'name': 'Use SSL',
            'type': ''
        }, {
            'id': 'do_not_verify_ssl',
            'name': 'Do not verify SSL',
            'type': ''
        }])
class PlexClient(MediaPlayerDevice):
"""Representation of a Plex device."""
    def __init__(self, config, device, session, plex_sessions,
                 update_devices, update_sessions):
        """Initialize the Plex device.

        device and session are plexapi objects (either may be None); the
        update_* callables are the throttled pollers created in
        setup_plexserver.
        """
        self._app_name = ''
        self._device = None
        self._available = False
        self._marked_unavailable = None
        self._device_protocol_capabilities = None
        self._is_player_active = False
        self._is_player_available = False
        self._player = None
        self._machine_identifier = None
        self._make = ''
        self._name = None
        self._player_state = 'idle'
        self._previous_volume_level = 1  # Used in fake muting
        self._session = None
        self._session_type = None
        self._session_username = None
        self._state = STATE_IDLE
        self._volume_level = 1  # since we can't retrieve remotely
        self._volume_muted = False  # since we can't retrieve remotely
        self.config = config
        self.plex_sessions = plex_sessions
        self.update_devices = update_devices
        self.update_sessions = update_sessions
        # General
        self._media_content_id = None
        self._media_content_rating = None
        self._media_content_type = None
        self._media_duration = None
        self._media_image_url = None
        self._media_title = None
        self._media_position = None
        self._media_position_updated_at = None
        # Music
        self._media_album_artist = None
        self._media_album_name = None
        self._media_artist = None
        self._media_track = None
        # TV Show
        self._media_episode = None
        self._media_season = None
        self._media_series_title = None

        # Populate the attributes above from the plexapi objects.
        self.refresh(device, session)

        # Assign custom entity ID if desired
        if self.config.get(CONF_USE_CUSTOM_ENTITY_IDS):
            prefix = ''
            # allow for namespace prefixing when using custom entity names
            if config.get("entity_namespace"):
                prefix = config.get("entity_namespace") + '_'

            # rename the entity id
            if self.machine_identifier:
                self.entity_id = "%s.%s%s" % (
                    'media_player', prefix,
                    self.machine_identifier.lower().replace('-', '_'))
            else:
                if self.name:
                    self.entity_id = "%s.%s%s" % (
                        'media_player', prefix,
                        self.name.lower().replace('-', '_'))
def _clear_media_details(self):
"""Set all Media Items to None."""
# General
self._media_content_id = None
self._media_content_rating = None
self._media_content_type = None
self._media_duration = None
self._media_image_url = None
self._media_title = None
# Music
self._media_album_artist = None
self._media_album_name = None
self._media_artist = None
self._media_track = None
# TV Show
self._media_episode = None
self._media_season = None
self._media_series_title = None
# Clear library Name
self._app_name = ''
    def refresh(self, device, session):
        """Refresh key device data.

        Either argument may be None; fresh device data takes precedence
        over a bare session.
        """
        import plexapi.exceptions

        # new data refresh
        self._clear_media_details()

        if session:  # Not being triggered by Chrome or FireTablet Plex App
            self._session = session
        if device:
            self._device = device
            try:
                device_url = self._device.url("/")
            except plexapi.exceptions.BadRequest:
                device_url = '127.0.0.1'
            # A loopback URL means the client is not directly reachable;
            # route commands through the Plex server instead.
            if "127.0.0.1" in device_url:
                self._device.proxyThroughServer()
            self._session = None
            self._machine_identifier = self._device.machineIdentifier
            self._name = self._device.title or DEVICE_DEFAULT_NAME
            self._device_protocol_capabilities = (
                self._device.protocolCapabilities)

            # set valid session, preferring device session
            if self._device.machineIdentifier in self.plex_sessions:
                self._session = self.plex_sessions.get(
                    self._device.machineIdentifier, [None, None])[0]

        if self._session:
            if self._device is not None and\
                    self._device.machineIdentifier is not None and \
                    self._session.players:
                self._is_player_available = True
                # Pick the session player matching this device.
                self._player = [p for p in self._session.players
                                if p.machineIdentifier ==
                                self._device.machineIdentifier][0]
                self._name = self._player.title
                self._player_state = self._player.state
                self._session_username = self._session.usernames[0]
                self._make = self._player.device
            else:
                self._is_player_available = False

            # Calculate throttled position for proper progress display.
            position = int(self._session.viewOffset / 1000)
            now = dt_util.utcnow()
            if self._media_position is not None:
                pos_diff = (position - self._media_position)
                time_diff = now - self._media_position_updated_at
                # Only refresh the timestamp when the reported position
                # jumped relative to wall-clock time (seek or restart).
                if (pos_diff != 0 and
                        abs(time_diff.total_seconds() - pos_diff) > 5):
                    self._media_position_updated_at = now
                    self._media_position = position
            else:
                self._media_position_updated_at = now
                self._media_position = position

            self._media_content_id = self._session.ratingKey
            self._media_content_rating = getattr(
                self._session, 'contentRating', None)

        self._set_player_state()

        if self._is_player_active and self._session is not None:
            self._session_type = self._session.type
            self._media_duration = int(self._session.duration / 1000)
            # title (movie name, tv episode name, music song name)
            self._media_title = self._session.title
            # media type
            self._set_media_type()
            self._app_name = self._session.section().title \
                if self._session.section() is not None else ''
            self._set_media_image()
        else:
            self._session_type = None
def _set_media_image(self):
thumb_url = self._session.thumbUrl
if (self.media_content_type is MEDIA_TYPE_TVSHOW
and not self.config.get(CONF_USE_EPISODE_ART)):
thumb_url = self._session.url(self._session.grandparentThumb)
if thumb_url is None:
_LOGGER.debug("Using media art because media thumb "
"was not found: %s", self.entity_id)
thumb_url = self.session.url(self._session.art)
self._media_image_url = thumb_url
def set_availability(self, available):
"""Set the device as available/unavailable noting time."""
if not available:
self._clear_media_details()
if self._marked_unavailable is None:
self._marked_unavailable = dt_util.utcnow()
else:
self._marked_unavailable = None
self._available = available
def _set_player_state(self):
if self._player_state == 'playing':
self._is_player_active = True
self._state = STATE_PLAYING
elif self._player_state == 'paused':
self._is_player_active = True
self._state = STATE_PAUSED
elif self.device:
self._is_player_active = False
self._state = STATE_IDLE
else:
self._is_player_active = False
self._state = STATE_OFF
    def _set_media_type(self):
        """Derive media_content_type and type-specific metadata from the
        current plexapi session."""
        if self._session_type in ['clip', 'episode']:
            self._media_content_type = MEDIA_TYPE_TVSHOW

            # season number (00)
            if callable(self._session.season):
                self._media_season = str(
                    (self._session.season()).index).zfill(2)
            elif self._session.parentIndex is not None:
                # NOTE(review): assumes parentIndex is a string -- .zfill
                # would raise on an int; verify against plexapi.
                self._media_season = self._session.parentIndex.zfill(2)
            else:
                self._media_season = None
            # show name
            self._media_series_title = self._session.grandparentTitle

            # episode number (00)
            if self._session.index is not None:
                self._media_episode = str(self._session.index).zfill(2)
        elif self._session_type == 'movie':
            self._media_content_type = MEDIA_TYPE_MOVIE
            # Append the release year to the title, e.g. "Heat (1995)".
            if self._session.year is not None and \
                    self._media_title is not None:
                self._media_title += ' (' + str(self._session.year) + ')'
        elif self._session_type == 'track':
            self._media_content_type = MEDIA_TYPE_MUSIC
            self._media_album_name = self._session.parentTitle
            self._media_album_artist = self._session.grandparentTitle
            self._media_track = self._session.index
            self._media_artist = self._session.originalTitle
            # use album artist if track artist is missing
            if self._media_artist is None:
                _LOGGER.debug("Using album artist because track artist "
                              "was not found: %s", self.entity_id)
                self._media_artist = self._media_album_artist
    def force_idle(self):
        """Force client to idle."""
        # Called by update_devices when no valid session exists: drop the
        # session reference and cached metadata so the entity reports idle.
        self._state = STATE_IDLE
        self._session = None
        self._clear_media_details()
    # --- Simple read-only accessors over cached client state ---

    @property
    def unique_id(self):
        """Return the id of this plex client."""
        return self.machine_identifier

    @property
    def available(self):
        """Return the availability of the client."""
        return self._available

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def machine_identifier(self):
        """Return the machine identifier of the device."""
        return self._machine_identifier

    @property
    def app_name(self):
        """Return the library name of playing media."""
        return self._app_name

    @property
    def device(self):
        """Return the device, if any."""
        return self._device

    @property
    def marked_unavailable(self):
        """Return time device was marked unavailable."""
        return self._marked_unavailable

    @property
    def session(self):
        """Return the session, if any."""
        return self._session

    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    def update(self):
        """Get the latest details."""
        # Bypass the shared Throttle so an explicit entity update always
        # refreshes both device and session data.
        self.update_devices(no_throttle=True)
        self.update_sessions(no_throttle=True)
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return 'music'
return 'video'
    @property
    def media_content_id(self):
        """Return the content ID of current playing media."""
        return self._media_content_id

    @property
    def media_content_type(self):
        """Return the content type of current playing media."""
        # Clips (trailers/extras) are reported as TV shows for best-effort
        # display compatibility.
        if self._session_type == 'clip':
            _LOGGER.debug("Clip content type detected, "
                          "compatibility may vary: %s", self.entity_id)
            return MEDIA_TYPE_TVSHOW
        if self._session_type == 'episode':
            return MEDIA_TYPE_TVSHOW
        if self._session_type == 'movie':
            return MEDIA_TYPE_MOVIE
        if self._session_type == 'track':
            return MEDIA_TYPE_MUSIC
        return None
    # --- Media metadata accessors populated by refresh() ---

    @property
    def media_artist(self):
        """Return the artist of current playing media, music track only."""
        return self._media_artist

    @property
    def media_album_name(self):
        """Return the album name of current playing media, music track only."""
        return self._media_album_name

    @property
    def media_album_artist(self):
        """Return the album artist of current playing media, music only."""
        return self._media_album_artist

    @property
    def media_track(self):
        """Return the track number of current playing media, music only."""
        return self._media_track

    @property
    def media_duration(self):
        """Return the duration of current playing media in seconds."""
        return self._media_duration

    @property
    def media_position(self):
        """Return the position of current playing media in seconds."""
        return self._media_position

    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        return self._media_position_updated_at

    @property
    def media_image_url(self):
        """Return the image URL of current playing media."""
        return self._media_image_url

    @property
    def media_title(self):
        """Return the title of current playing media."""
        return self._media_title

    @property
    def media_season(self):
        """Return the season of current playing media (TV Show only)."""
        return self._media_season

    @property
    def media_series_title(self):
        """Return the title of the series of current playing media."""
        return self._media_series_title

    @property
    def media_episode(self):
        """Return the episode of current playing media (TV Show only)."""
        return self._media_episode

    @property
    def make(self):
        """Return the make of the device (ex. SHIELD Android TV)."""
        return self._make
@property
def supported_features(self):
"""Flag media player features that are supported."""
if not self._is_player_active:
return 0
# force show all controls
if self.config.get(CONF_SHOW_ALL_CONTROLS):
return (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE)
# only show controls when we know what device is connecting
if not self._make:
return 0
# no mute support
if self.make.lower() == "shield android tv":
_LOGGER.debug(
"Shield Android TV client detected, disabling mute "
"controls: %s", self.entity_id)
return (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_TURN_OFF)
# Only supports play,pause,stop (and off which really is stop)
if self.make.lower().startswith("tivo"):
_LOGGER.debug(
"Tivo client detected, only enabling pause, play, "
"stop, and off controls: %s", self.entity_id)
return (SUPPORT_PAUSE | SUPPORT_PLAY | SUPPORT_STOP |
SUPPORT_TURN_OFF)
# Not all devices support playback functionality
# Playback includes volume, stop/play/pause, etc.
if self.device and 'playback' in self._device_protocol_capabilities:
return (SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK |
SUPPORT_NEXT_TRACK | SUPPORT_STOP |
SUPPORT_VOLUME_SET | SUPPORT_PLAY |
SUPPORT_TURN_OFF | SUPPORT_VOLUME_MUTE)
return 0
def set_volume_level(self, volume):
    """Set volume level, range 0..1."""
    can_control = (self.device and
                   'playback' in self._device_protocol_capabilities)
    if not can_control:
        return
    plex_volume = int(volume * 100)
    self.device.setVolume(plex_volume, self._active_media_plexapi_type)
    # Plex does not let us read the volume back, so remember what we set.
    self._volume_level = volume
@property
def volume_level(self):
    """Return the volume level of the client (0..1)."""
    playable = (self.device and
                'playback' in self._device_protocol_capabilities)
    if self._is_player_active and playable:
        return self._volume_level
    # Implicitly None when the client cannot report a volume.
@property
def is_volume_muted(self):
    """Return boolean if volume is currently muted."""
    if not (self._is_player_active and self.device):
        return None
    return self._volume_muted
def mute_volume(self, mute):
    """Emulate muting for clients without a real mute control.

    On mute the current level is saved and volume is driven to 0; on
    unmute the saved level is restored.
    """
    can_control = (self.device and
                   'playback' in self._device_protocol_capabilities)
    if not can_control:
        return
    self._volume_muted = mute
    if not mute:
        self.set_volume_level(self._previous_volume_level)
        return
    # Remember the level so a later unmute can restore it.
    self._previous_volume_level = self._volume_level
    self.set_volume_level(0)
def media_play(self):
    """Send play command."""
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        return
    self.device.play(self._active_media_plexapi_type)
def media_pause(self):
    """Send pause command."""
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        return
    self.device.pause(self._active_media_plexapi_type)
def media_stop(self):
    """Send stop command."""
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        return
    self.device.stop(self._active_media_plexapi_type)
def turn_off(self):
    """Turn the client off.

    Plex clients expose no real power control, so this simply stops
    playback, which Home Assistant then reports as "off".
    """
    # Fake it since we can't turn the client off
    self.media_stop()
def media_next_track(self):
    """Send next track command."""
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        return
    self.device.skipNext(self._active_media_plexapi_type)
def media_previous_track(self):
    """Send previous track command."""
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        return
    self.device.skipPrevious(self._active_media_plexapi_type)
def play_media(self, media_type, media_id, **kwargs):
    """Play a piece of media.

    ``media_id`` is a JSON string; the keys it must carry depend on
    ``media_type`` (MUSIC, EPISODE, PLAYLIST or VIDEO). The media item
    is looked up on the owning Plex server's library, then handed to
    _client_play_media. NOTE(review): every branch assumes its src keys
    (and 'shuffle') are present — a malformed payload raises KeyError.
    """
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        return
    src = json.loads(media_id)
    media = None
    if media_type == 'MUSIC':
        media = self.device.server.library.section(
            src['library_name']).get(src['artist_name']).album(
                src['album_name']).get(src['track_name'])
    elif media_type == 'EPISODE':
        media = self._get_tv_media(
            src['library_name'], src['show_name'],
            src['season_number'], src['episode_number'])
    elif media_type == 'PLAYLIST':
        media = self.device.server.playlist(src['playlist_name'])
    elif media_type == 'VIDEO':
        media = self.device.server.library.section(
            src['library_name']).get(src['video_name'])
    # Imported lazily so the component loads without plexapi installed.
    import plexapi.playlist
    # _get_tv_media returns a temporary Playlist when no season/episode
    # was given; that playlist must be removed once queued.
    if (media and media_type == 'EPISODE' and
            isinstance(media, plexapi.playlist.Playlist)):
        # delete episode playlist after being loaded into a play queue
        self._client_play_media(media=media, delete=True,
                                shuffle=src['shuffle'])
    elif media:
        self._client_play_media(media=media, shuffle=src['shuffle'])
def _get_tv_media(self, library_name, show_name, season_number,
                  episode_number):
    """Find TV media and return a Plex media object.

    Resolution is hierarchical: no season -> a temporary playlist of all
    episodes of the show; season but no episode -> a temporary playlist
    of that season; otherwise the single episode. Returns None when the
    requested season exists but the episode does not (after logging).
    """
    target_season = None
    target_episode = None
    show = self.device.server.library.section(library_name).get(
        show_name)
    if not season_number:
        # Whole show requested: build a throwaway playlist of episodes.
        playlist_name = "{} - {} Episodes".format(
            self.entity_id, show_name)
        return self.device.server.createPlaylist(
            playlist_name, show.episodes())
    for season in show.seasons():
        if int(season.seasonNumber) == int(season_number):
            target_season = season
            break
    if target_season is None:
        _LOGGER.error("Season not found: %s\\%s - S%sE%s", library_name,
                      show_name,
                      str(season_number).zfill(2),
                      str(episode_number).zfill(2))
    else:
        if not episode_number:
            # Whole season requested: playlist of the season's episodes.
            playlist_name = "{} - {} Season {} Episodes".format(
                self.entity_id, show_name, str(season_number))
            return self.device.server.createPlaylist(
                playlist_name, target_season.episodes())
        for episode in target_season.episodes():
            if int(episode.index) == int(episode_number):
                target_episode = episode
                break
        if target_episode is None:
            _LOGGER.error("Episode not found: %s\\%s - S%sE%s",
                          library_name, show_name,
                          str(season_number).zfill(2),
                          str(episode_number).zfill(2))
    # None when season lookup failed or episode was not found.
    return target_episode
def _client_play_media(self, media, delete=False, **params):
    """Instruct Plex client to play a piece of media.

    Builds a server-side play queue for ``media`` and sends a
    playback/playMedia command pointing the client at it. When
    ``delete`` is set, ``media`` (a temporary playlist) is removed once
    it has been loaded into the queue.
    """
    if not (self.device and
            'playback' in self._device_protocol_capabilities):
        _LOGGER.error("Client cannot play media: %s", self.entity_id)
        return
    # Imported lazily so the component loads without plexapi installed.
    import plexapi.playqueue
    playqueue = plexapi.playqueue.PlayQueue.create(
        self.device.server, media, **params)
    # Delete dynamic playlists used to build playqueue (ex. play tv season)
    if delete:
        media.delete()
    # assumes baseurl looks like "scheme://host:port", so splitting on
    # ':' yields [scheme, //host, port] — TODO confirm for all servers.
    server_url = self.device.server.baseurl.split(':')
    self.device.sendCommand('playback/playMedia', **dict({
        'machineIdentifier': self.device.server.machineIdentifier,
        'address': server_url[1].strip('/'),
        'port': server_url[-1],
        'key': media.key,
        'containerKey':
            '/playQueues/{}?window=100&own=1'.format(
                playqueue.playQueueID),
    }, **params))
@property
def device_state_attributes(self):
    """Return the scene state attributes."""
    return {
        'media_content_rating': self._media_content_rating,
        'session_username': self._session_username,
        'media_library_name': self._app_name,
    }
|
|
#author Philippe Raipin
#licence : apache v2
from flask import Flask, request,Response
from pymongo import MongoClient, MongoReplicaSetClient
from pymongo.read_preferences import ReadPreference
import json
from bson.dbref import DBRef
from bson.json_util import dumps
from bson import ObjectId
import time
configfile = "/opt/inkscope/etc/inkscope.conf"
def load_conf(config):
    '''
    Load the JSON configuration file and return it as a parsed object.

    config : path of the JSON configuration file to read.
    Returns the deserialized configuration (normally a dict).

    Fixes two defects in the previous version: the ``config`` argument
    was ignored (the module-level ``configfile`` was always read) and
    the file handle was never closed (``datasource.close`` was missing
    its call parentheses). ``with`` now guarantees the close.
    '''
    with open(config, "r") as datasource:
        return json.load(datasource)
def getClient(conf):
    '''
    conf : json conf objet
    conf=load_conf(configfile)
    db = getClient(conf)['ceph']
    collection =db['cluster']
    cursor=collection.find_one()
    Return a connexion to database specified in conf file
    take care with authentication
    '''
    mongodb_host = conf.get("mongodb_host", "127.0.0.1")
    mongodb_port = conf.get("mongodb_port", "27017")
    mongodb_URL = "mongodb://"+mongodb_host+":"+str(mongodb_port)
    #mongodb replication
    is_mongo_replicat = conf.get("is_mongo_replicat", 0)
    # The value is wrapped in quotes and later passed through eval();
    # eval() just re-produces the original string here.
    mongodb_set = "'"+conf.get("mongodb_set","")+"'"
    mongodb_replicaSet =conf.get("mongodb_replicaSet",None)
    mongodb_read_preference = conf.get("mongodb_read_preference",None)
    cluster = conf.get("cluster", "ceph")
    if is_mongo_replicat == 1:
        # SECURITY NOTE(review): eval() executes arbitrary expressions
        # from the config file; the config must be trusted. The second
        # eval resolves a string such as "ReadPreference.SECONDARY".
        client = MongoReplicaSetClient(eval(mongodb_set), replicaSet=mongodb_replicaSet, read_preference=eval(mongodb_read_preference))
    else:
        #if not replicated
        client = MongoClient(mongodb_URL)
    # mongo db authentication
    is_mongo_authenticate = conf.get("is_mongo_authenticate", 0)
    mongodb_user = conf.get("mongodb_user", "ceph")
    mongodb_passwd = conf.get("mongodb_passwd", "empty")
    if is_mongo_authenticate == 1:
        # Authenticates against the cluster database named in the conf.
        client[cluster].authenticate(mongodb_user,mongodb_passwd)
    return client
def getObject(db, collection, objectId, depth, branch):
    """
    Fetch one document from the mongo database and expand its DBRefs.

    db : mongo database handle.
    collection : name of the collection holding the document.
    objectId : the document _id.
    depth : how many levels of DBRef to dereference.
    branch : set of "collection:id" markers already visited on this
        path (cycle guard), or None.

    Returns the document with references expanded, or None if absent.
    """
    br = None
    # PEP 8: identity test against None, not equality.
    if branch is not None:
        # Copy so sibling branches do not share visited state.
        br = branch.copy()
        br.add(collection + ":" + str(objectId))
    obj = db[collection].find_one({"_id": objectId})
    return _getObject(db, obj, depth, br)
def _getObject(db, obj, depth, branch):
    """Recursively expand DBRef fields of ``obj`` up to ``depth`` levels.

    ``branch`` holds "collection:id" markers of the documents already on
    the current path so reference cycles are not followed forever. The
    document is mutated in place and returned.
    """
    if obj is None:
        return None
    if (depth <= 0):
        # Depth exhausted: serialise references and ids instead of
        # following them, so the result is plain JSON-compatible data.
        for key in obj :
            if isinstance(obj[key], DBRef):
                if isinstance(obj[key].id, ObjectId):
                    obj[key] = {'$ref': obj[key].collection, '$id' : {'$oid': str(obj[key].id)}}
                else :
                    obj[key] = {'$ref': obj[key].collection, '$id' : obj[key].id}
            elif isinstance(obj[key], ObjectId):
                obj[key] = {'$oid': str(obj[key])}
            elif isinstance(obj[key], list):
                obj[key] = _listObjects(db, obj[key], depth-1, branch)
        return obj
    for key in obj :
        if isinstance(obj[key], DBRef):
            # Follow the reference unless it is already on this path
            # (would otherwise recurse through a cycle).
            if (obj[key].collection+":"+str(obj[key].id) not in branch) :
                obj[key] = getObject(db, obj[key].collection, obj[key].id, depth - 1, branch)
        elif isinstance(obj[key], ObjectId):
            obj[key] = {'$oid': str(obj[key])}
        elif isinstance(obj[key], list):
            obj[key] = _listObjects(db, obj[key], depth, branch)
    return obj
def _listObjects(db, objs, depth, branch):
    """Expand DBRefs inside a list of values up to ``depth`` levels.

    Entries may be scalars (left untouched), nested lists, DBRefs or
    embedded documents. Uses Python 2 types (long/unicode). ``branch``
    is the cycle-guard set shared with getObject/_getObject.
    """
    if (depth <= 0):
        # Depth exhausted: serialise references instead of following.
        r_objs = []
        for obj in objs:
            if isinstance(obj, int) or isinstance(obj, long) or isinstance(obj, float) or isinstance(obj, bool) or isinstance(obj, str) or isinstance(obj, unicode) :
                pass
            elif isinstance(obj, list):
                obj = _listObjects(db, obj, depth, branch)
            elif isinstance(obj, DBRef):
                if isinstance(obj.id, ObjectId):
                    obj = {'$ref': obj.collection, '$id' : {'$oid': str(obj.id)}}
                else :
                    obj = {'$ref': obj.collection, '$id' : obj.id}
            else:
                # Embedded document: serialise its refs/ids field by field.
                for key in obj :
                    if isinstance(obj[key], DBRef):
                        if isinstance(obj[key].id, ObjectId):
                            obj[key] = {'$ref': obj[key].collection, '$id' : {'$oid': str(obj[key].id)}}
                        else :
                            obj[key] = {'$ref': obj[key].collection, '$id' : obj[key].id}
                    elif isinstance(obj[key], ObjectId):
                        obj[key] = {'$oid': str(obj[key])}
                    elif isinstance(obj[key], list):
                        obj[key] = _listObjects(db, obj[key], depth-1, branch)
            r_objs.append(obj)
        return r_objs
    r_objs = []
    for obj in objs:
        if isinstance(obj, int) or isinstance(obj, long) or isinstance(obj, float) or isinstance(obj, bool) or isinstance(obj, str) or isinstance(obj, unicode) :
            pass
        elif isinstance(obj, list):
            obj = _listObjects(db, obj, depth, branch)
        elif isinstance(obj, DBRef):
            # Follow the reference unless already on this path (cycle).
            if (obj.collection+":"+str(obj.id) not in branch) :
                obj = getObject(db, obj.collection, obj.id, depth - 1, branch)
        else:
            # Embedded document: dereference each field.
            for key in obj :
                if isinstance(obj[key], DBRef):
                    if (obj[key].collection+":"+str(obj[key].id) not in branch) :
                        obj[key] = getObject(db, obj[key].collection, obj[key].id, depth - 1, branch)
                elif isinstance(obj[key], ObjectId):
                    obj[key] = {'$oid': str(obj[key])}
                elif isinstance(obj[key], list):
                    obj[key] = _listObjects(db, obj[key], depth-1, branch)
        r_objs.append(obj)
    return r_objs
def listObjects(db, filters, collection, depth):
    """
    Return filtered documents of ``collection`` with DBRefs expanded.

    db : mongo database handle.
    filters : either a plain mongo query dict, or a wrapper dict using
        the special keys "$select" (query) and/or "$template"
        (projection), or None for all documents.
    collection : collection name.
    depth : DBRef dereferencing depth (see _listObjects).
    """
    select = None
    template = None
    # PEP 8: identity test against None, not equality.
    if filters is not None:
        _complex = False
        if "$select" in filters:
            select = filters["$select"]
            _complex = True
        if "$template" in filters:
            template = filters["$template"]
            _complex = True
        if not _complex:
            # No wrapper keys: treat the whole dict as the query.
            select = filters
            template = None
    objs = list(db[collection].find(select, template))
    return _listObjects(db, objs, depth, set())
def execute(db, command, keyvalues):
    """Run one declarative command dict against the database.

    Supported "action" values: "get" (evaluate a field expression),
    "find" / "findOne" (query a collection) and "aggregate" (run a
    pipeline). "@path" placeholders inside select/pipeline/field are
    resolved against ``keyvalues`` by evaluate(). Returns None for an
    unknown or malformed command.
    """
    if "action" not in command :
        return None
    action = command["action"]
    if action == "get":
        return evaluate(command.get("field", None), keyvalues)
    elif action == "find":
        if "collection" not in command :
            return None
        collection = command["collection"]
        depth = command.get("depth", 0)
        select = evaluate(command.get("select", None), keyvalues)
        template = command.get("template", None)
        objs = list(db[collection].find(select, template))
        return _listObjects(db, objs, depth, set())
    elif action == "findOne":
        if "collection" not in command :
            return None
        depth = command.get("depth", 0)
        collection = command["collection"]
        select = evaluate(command.get("select", None), keyvalues)
        template = command.get("template", None)
        objs = list(db[collection].find(select, template))
        r = _listObjects(db, objs, depth, set())
        # "findOne" still runs a full find(); return the first match.
        if r :
            return r[0]
        else:
            return None
    elif action == "aggregate":
        if "collection" not in command :
            return None
        depth = command.get("depth", 0)
        collection = command["collection"]
        pipeline = evaluate(command.get("pipeline", None), keyvalues)
        if not pipeline :
            return None
        objs = list(db[collection].aggregate(pipeline))
        return _listObjects(db, objs, depth, set())
def evaluate(obj, keyvalues):
    """Recursively resolve "@path" placeholders against ``keyvalues``.

    Strings beginning with "@" are replaced by the value getValue()
    finds at that path; lists and dicts are walked recursively (dicts
    via a shallow copy); every other value is returned unchanged.
    """
    if not obj:
        return obj
    if isinstance(obj, basestring):
        return getValue(keyvalues, obj[1:]) if obj.startswith("@") else obj
    if isinstance(obj, list):
        return [evaluate(entry, keyvalues) for entry in obj]
    if isinstance(obj, dict):
        resolved = obj.copy()
        for name in resolved:
            resolved[name] = evaluate(resolved[name], keyvalues)
        return resolved
    return obj
def getValue(res, path):
    """Walk ``res`` along a dotted ``path`` and return the value found.

    Each dot-separated segment is a dict key; a segment of the form
    "name#i" first looks up "name" and then indexes the result with the
    integer i (e.g. "a.b#1" yields res["a"]["b"][1]).
    """
    steps = []
    for segment in path.split("."):
        if '#' in segment:
            name, _, index = segment.partition('#')
            steps.append(name)
            steps.append(int(index))
        else:
            steps.append(segment)
    current = res
    for step in steps:
        current = current[step]
    return current
def build(db, obj):
    """Execute a multi-command request and collect the results.

    ``obj`` maps result keys to command dicts (see execute()). Commands
    are grouped by their optional "step" number and run in ascending
    step order, so later steps can reference earlier results via
    "@key" placeholders. Keys starting with "__" are intermediate:
    usable by later steps but omitted from the returned dict.
    (Python 2: uses dict.iterkeys().)
    """
    res = {}
    allres = {}
    steps = {}
    for key in obj:
        command = obj[key]
        command["key"] = key
        c_step= command.get("step", 0)
        step = steps.get(c_step, [])
        step.append(command)
        steps[c_step] = step
    for step in sorted(steps.iterkeys()):
        for command in steps[step]:
            resp = execute(db, command, allres)
            # "__"-prefixed keys stay internal to the request.
            if not command["key"].startswith("__"):
                res[command["key"]] = resp
            allres[command["key"]] = resp
    return res
#@app.route('/<db>/<collection>', methods=['GET', 'POST'])
def find(conf, db, collection):
    """Flask view: return documents of ``collection`` as a JSON response.

    GET lists all documents; POST reads a filter/template body (see
    listObjects). ``depth`` comes from the query string. The response
    carries a millisecond "timestamp" header, and bson's dumps() handles
    ObjectId/DBRef serialisation.
    """
    depth = int(request.args.get('depth', '0'))
    if request.method == 'POST':
        body_json = request.get_json(force=True)
        db = getClient(conf)[db]
        response_body = dumps(listObjects(db, body_json, collection, depth))
        return Response(response_body, headers = {"timestamp" : int(round(time.time() * 1000))}, mimetype='application/json')
    else:
        db = getClient(conf)[db]
        response_body = dumps(listObjects(db, None, collection, depth))
        return Response(response_body, headers = {"timestamp" : int(round(time.time() * 1000))}, mimetype='application/json')
# @app.route('/<db>', methods=['POST'])
def full(conf, db):
    """Flask view: execute a multi-command request body (see build()).

    POST only; the JSON body maps result keys to command dicts.
    NOTE(review): returns None implicitly for non-POST methods.
    """
    if request.method == 'POST':
        body_json = request.get_json(force=True)
        db = getClient(conf)[db]
        response_body = dumps(build(db, body_json))
        return Response(response_body, headers = {"timestamp" : int(round(time.time() * 1000))}, mimetype='application/json')
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#,============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TopologyConstructionTest(test.TestCase):
def test_get_updates_for(self):
  """Updates registered with inputs are keyed by those inputs."""
  a = keras.layers.Input(shape=(2,))
  dense_layer = keras.layers.Dense(1)
  dense_layer.add_update(0, inputs=a)
  dense_layer.add_update(1, inputs=None)
  self.assertListEqual(dense_layer.get_updates_for(a), [0])
  self.assertListEqual(dense_layer.get_updates_for(None), [1])
def test_get_losses_for(self):
  """Losses registered with inputs are keyed by those inputs."""
  a = keras.layers.Input(shape=(2,))
  dense_layer = keras.layers.Dense(1)
  dense_layer.add_loss(0, inputs=a)
  dense_layer.add_loss(1, inputs=None)
  self.assertListEqual(dense_layer.get_losses_for(a), [0])
  self.assertListEqual(dense_layer.get_losses_for(None), [1])
def test_trainable_weights(self):
  """Toggling model/layer `.trainable` moves weights between the
  trainable and non-trainable collections, for both functional and
  sequential models."""
  a = keras.layers.Input(shape=(2,))
  b = keras.layers.Dense(1)(a)
  model = keras.models.Model(a, b)
  weights = model.weights
  self.assertListEqual(model.trainable_weights, weights)
  self.assertListEqual(model.non_trainable_weights, [])
  model.trainable = False
  self.assertListEqual(model.trainable_weights, [])
  self.assertListEqual(model.non_trainable_weights, weights)
  model.trainable = True
  self.assertListEqual(model.trainable_weights, weights)
  self.assertListEqual(model.non_trainable_weights, [])
  # Freezing a single layer has the same effect as freezing the model
  # when that layer holds all the weights.
  model.layers[1].trainable = False
  self.assertListEqual(model.trainable_weights, [])
  self.assertListEqual(model.non_trainable_weights, weights)
  # sequential model
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(1, input_dim=2))
  weights = model.weights
  self.assertListEqual(model.trainable_weights, weights)
  self.assertListEqual(model.non_trainable_weights, [])
  model.trainable = False
  self.assertListEqual(model.trainable_weights, [])
  self.assertListEqual(model.non_trainable_weights, weights)
  model.trainable = True
  self.assertListEqual(model.trainable_weights, weights)
  self.assertListEqual(model.non_trainable_weights, [])
  model.layers[0].trainable = False
  self.assertListEqual(model.trainable_weights, [])
  self.assertListEqual(model.non_trainable_weights, weights)
def test_weight_loading(self):
  """Round-tripping weights via set_weights and via an HDF5 file
  preserves predictions; mismatched weight lists raise ValueError."""
  with self.test_session():
    a = keras.layers.Input(shape=(2,))
    x = keras.layers.Dense(3)(a)
    b = keras.layers.Dense(1)(x)
    model = keras.models.Model(a, b)
    x = np.random.random((3, 2))
    ref_y = model.predict(x)
    weights = model.get_weights()
    model.set_weights(weights)
    y = model.predict(x)
    self.assertAllClose(ref_y, y)
    # Wrong number of arrays and wrong shapes must be rejected.
    with self.assertRaises(ValueError):
      model.set_weights(weights[1:])
    with self.assertRaises(ValueError):
      model.set_weights(weights[::-1])
    if h5py is None:
      return  # Skip rest of test if H5py isn't available.
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    h5_path = os.path.join(temp_dir, 'test.h5')
    model.save_weights(h5_path)
    model.load_weights(h5_path)
    y = model.predict(x)
    self.assertAllClose(ref_y, y)
    # by_name loading must reproduce the same weights here too.
    model.load_weights(h5_path, by_name=True)
    y = model.predict(x)
    self.assertAllClose(ref_y, y)
def test_learning_phase(self):
  """The learning-phase flag propagates through layers, merges and
  nested models, and Dropout behaves differently per phase."""
  with self.test_session():
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')
    a_2 = keras.layers.Dense(16, name='dense_1')(a)
    dp = keras.layers.Dropout(0.5, name='dropout')
    b_2 = dp(b)
    # Only the Dropout output depends on the learning phase.
    self.assertFalse(a_2._uses_learning_phase)
    self.assertTrue(b_2._uses_learning_phase)
    # test merge
    m = keras.layers.concatenate([a_2, b_2])
    self.assertTrue(m._uses_learning_phase)
    # Test recursion
    model = keras.models.Model([a, b], [a_2, b_2])
    self.assertTrue(model.uses_learning_phase)
    c = keras.layers.Input(shape=(32,), name='input_c')
    d = keras.layers.Input(shape=(32,), name='input_d')
    c_2, b_2 = model([c, d])
    self.assertTrue(c_2._uses_learning_phase)
    self.assertTrue(b_2._uses_learning_phase)
    # try actually running graph
    fn = keras.backend.function(
        model.inputs + [keras.backend.learning_phase()], model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
    fn_outputs_dp = fn([input_a_np, input_b_np, 1])
    # output a: nothing changes
    self.assertEqual(fn_outputs_no_dp[0].sum(), fn_outputs_dp[0].sum())
    # output b: dropout applied
    self.assertNotEqual(fn_outputs_no_dp[1].sum(), fn_outputs_dp[1].sum())
def test_layer_call_arguments(self):
  """Extra `call` kwargs (here training=True) survive model reuse and
  config round-trips."""
  # Test the ability to pass and serialize arguments to `call`.
  inp = keras.layers.Input(shape=(2,))
  x = keras.layers.Dense(3)(inp)
  x = keras.layers.Dropout(0.5)(x, training=True)
  model = keras.models.Model(inp, x)
  # training=True hard-wires dropout, so no learning-phase dependency.
  self.assertFalse(model.uses_learning_phase)
  # Test that argument is kept when applying the model
  inp2 = keras.layers.Input(shape=(2,))
  out2 = model(inp2)
  self.assertFalse(out2._uses_learning_phase)
  # Test that argument is kept after loading a model
  config = model.get_config()
  model = keras.models.Model.from_config(config)
  self.assertFalse(model.uses_learning_phase)
def test_node_construction(self):
  """Input/Dense layers record their topology in inbound/outbound
  nodes, and per-node accessors return the right tensors and shapes."""
  # test basics
  a = keras.layers.Input(shape=(32,), name='input_a')
  b = keras.layers.Input(shape=(32,), name='input_b')
  with self.assertRaises(ValueError):
    _ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
  with self.assertRaises(ValueError):
    # The misspelled kwarg is deliberate: unknown kwargs must raise.
    _ = keras.layers.Input(shape=(32,), unknwon_kwarg=None)
  self.assertListEqual(a.get_shape().as_list(), [None, 32])
  a_layer, a_node_index, a_tensor_index = a._keras_history
  b_layer, _, _ = b._keras_history
  self.assertEqual(len(a_layer.inbound_nodes), 1)
  self.assertEqual(a_tensor_index, 0)
  node = a_layer.inbound_nodes[a_node_index]
  # An Input layer's node has no inbound layers and is its own output.
  self.assertEqual(node.outbound_layer, a_layer)
  self.assertListEqual(node.inbound_layers, [])
  self.assertListEqual(node.input_tensors, [a])
  self.assertListEqual(node.input_shapes, [(None, 32)])
  self.assertListEqual(node.output_tensors, [a])
  self.assertListEqual(node.output_shapes, [(None, 32)])
  dense = keras.layers.Dense(16, name='dense_1')
  a_2 = dense(a)
  b_2 = dense(b)
  # A shared layer gets one inbound node per call.
  self.assertEqual(len(dense.inbound_nodes), 2)
  self.assertEqual(len(dense.outbound_nodes), 0)
  self.assertListEqual(dense.inbound_nodes[0].inbound_layers, [a_layer])
  self.assertEqual(dense.inbound_nodes[0].outbound_layer, dense)
  self.assertListEqual(dense.inbound_nodes[1].inbound_layers, [b_layer])
  self.assertEqual(dense.inbound_nodes[1].outbound_layer, dense)
  self.assertListEqual(dense.inbound_nodes[0].input_tensors, [a])
  self.assertListEqual(dense.inbound_nodes[1].input_tensors, [b])
  # test layer properties
  test_layer = keras.layers.Dense(16, name='test_layer')
  a_test = test_layer(a)
  self.assertListEqual(test_layer.kernel.get_shape().as_list(), [32, 16])
  self.assertEqual(test_layer.input, a)
  self.assertEqual(test_layer.output, a_test)
  self.assertEqual(test_layer.input_shape, (None, 32))
  self.assertEqual(test_layer.output_shape, (None, 16))
  # Per-call accessors on the shared layer.
  self.assertEqual(dense.get_input_at(0), a)
  self.assertEqual(dense.get_input_at(1), b)
  self.assertEqual(dense.get_output_at(0), a_2)
  self.assertEqual(dense.get_output_at(1), b_2)
  self.assertEqual(dense.get_input_shape_at(0), (None, 32))
  self.assertEqual(dense.get_input_shape_at(1), (None, 32))
  self.assertEqual(dense.get_output_shape_at(0), (None, 16))
  self.assertEqual(dense.get_output_shape_at(1), (None, 16))
  self.assertEqual(dense.get_input_mask_at(0), None)
  self.assertEqual(dense.get_input_mask_at(1), None)
  self.assertEqual(dense.get_output_mask_at(0), None)
  self.assertEqual(dense.get_output_mask_at(1), None)
def test_multi_input_layer(self):
  """A two-input functional model builds the expected topology, runs,
  and survives JSON serialization."""
  with self.test_session():
    # test multi-input layer
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')
    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    self.assertListEqual(merged.get_shape().as_list(), [None, 16 * 2])
    merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
    self.assertEqual(merge_node_index, 0)
    self.assertEqual(merge_tensor_index, 0)
    self.assertEqual(len(merge_layer.inbound_nodes), 1)
    self.assertEqual(len(merge_layer.outbound_nodes), 0)
    self.assertEqual(len(merge_layer.inbound_nodes[0].input_tensors), 2)
    self.assertEqual(len(merge_layer.inbound_nodes[0].inbound_layers), 2)
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)
    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
    self.assertEqual(len(model.layers), 6)
    output_shapes = model._compute_output_shape([(None, 32), (None, 32)])
    self.assertListEqual(output_shapes[0].as_list(), [None, 64])
    self.assertListEqual(output_shapes[1].as_list(), [None, 5])
    self.assertListEqual(
        model.compute_mask([a, b], [None, None]), [None, None])
    # we don't check names of first 2 layers (inputs) because
    # ordering of same-level layers is not fixed
    self.assertListEqual([l.name for l in model.layers][2:],
                         ['dense_1', 'merge', 'dense_2', 'dense_3'])
    self.assertListEqual([l.name for l in model._input_layers],
                         ['input_a', 'input_b'])
    self.assertListEqual([l.name for l in model._output_layers],
                         ['dense_2', 'dense_3'])
    # actually run model
    fn = keras.backend.function(model.inputs, model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
    # test get_source_inputs
    self.assertListEqual(keras.engine.topology.get_source_inputs(c), [a, b])
    # serialization / deserialization
    json_config = model.to_json()
    recreated_model = keras.models.model_from_json(json_config)
    recreated_model.compile('rmsprop', 'mse')
    # The recreated model must expose the same layer topology ...
    self.assertListEqual([l.name for l in recreated_model.layers][2:],
                         ['dense_1', 'merge', 'dense_2', 'dense_3'])
    self.assertListEqual([l.name for l in recreated_model._input_layers],
                         ['input_a', 'input_b'])
    self.assertListEqual([l.name for l in recreated_model._output_layers],
                         ['dense_2', 'dense_3'])
    # ... and still be executable with the same output shapes.
    fn = keras.backend.function(recreated_model.inputs,
                                recreated_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
def test_recursion(self):
  """A whole model can be called as a layer on new inputs, its outputs
  can be recombined into a new model, and the result serializes."""
  with self.test_session():
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')
    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)
    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
    e = keras.layers.Input(shape=(32,), name='input_e')
    f = keras.layers.Input(shape=(32,), name='input_f')
    # Calling the model on fresh inputs yields matching shapes.
    g, h = model([e, f])
    self.assertListEqual(g.get_shape().as_list(), c.get_shape().as_list())
    self.assertListEqual(h.get_shape().as_list(), d.get_shape().as_list())
    # test separate manipulation of different layer outputs
    i = keras.layers.Dense(7, name='dense_4')(h)
    final_model = keras.models.Model(
        inputs=[e, f], outputs=[i, g], name='final')
    self.assertEqual(len(final_model.inputs), 2)
    self.assertEqual(len(final_model.outputs), 2)
    self.assertEqual(len(final_model.layers), 4)
    # we don't check names of first 2 layers (inputs) because
    # ordering of same-level layers is not fixed
    self.assertListEqual([layer.name for layer in final_model.layers][2:],
                         ['model', 'dense_4'])
    self.assertListEqual(
        model.compute_mask([e, f], [None, None]), [None, None])
    self.assertListEqual(
        final_model._compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
                                                                  (10, 64)])
    # run recursive model
    fn = keras.backend.function(final_model.inputs, final_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
    # test serialization
    model_config = final_model.get_config()
    recreated_model = keras.models.Model.from_config(model_config)
    fn = keras.backend.function(recreated_model.inputs,
                                recreated_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
def test_multi_input_multi_output_recursion(self):
  """Individual outputs of a reused multi-output model can be mixed
  into a new model; both list- and tensor-style single outputs work
  and the composite model serializes."""
  with self.test_session():
    # test multi-input multi-output
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')
    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)
    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    # Use only the second output of the first application ...
    _, n = model([j, k])
    o = keras.layers.Input(shape=(32,), name='input_o')
    p = keras.layers.Input(shape=(32,), name='input_p')
    # ... and only the first output of the second application.
    q, _ = model([o, p])
    self.assertListEqual(n.get_shape().as_list(), [None, 5])
    self.assertListEqual(q.get_shape().as_list(), [None, 64])
    s = keras.layers.concatenate([n, q], name='merge_nq')
    self.assertListEqual(s.get_shape().as_list(), [None, 64 + 5])
    # test with single output as 1-elem list
    multi_io_model = keras.models.Model([j, k, o, p], [s])
    fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)), np.random.random((10, 32)),
        np.random.random((10, 32)), np.random.random((10, 32))
    ])
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
    # test with single output as tensor
    multi_io_model = keras.models.Model([j, k, o, p], s)
    fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)), np.random.random((10, 32)),
        np.random.random((10, 32)), np.random.random((10, 32))
    ])
    # note that the output of the function will still be a 1-elem list
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
    # test serialization
    model_config = multi_io_model.get_config()
    recreated_model = keras.models.Model.from_config(model_config)
    fn = keras.backend.function(recreated_model.inputs,
                                recreated_model.outputs)
    fn_outputs = fn([
        np.random.random((10, 32)), np.random.random((10, 32)),
        np.random.random((10, 32)), np.random.random((10, 32))
    ])
    # note that the output of the function will still be a 1-elem list
    self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
    # Smoke-test the remaining serialization surfaces of the inner model.
    config = model.get_config()
    keras.models.Model.from_config(config)
    model.summary()
    json_str = model.to_json()
    keras.models.model_from_json(json_str)
    if yaml is not None:
      yaml_str = model.to_yaml()
      keras.models.model_from_yaml(yaml_str)
def test_invalid_graphs(self):
  """Ill-formed graphs (non-Input inputs, disconnected inputs,
  duplicated inputs, garbage outputs) raise; duplicated outputs are
  tolerated."""
  a = keras.layers.Input(shape=(32,), name='input_a')
  b = keras.layers.Input(shape=(32,), name='input_b')
  dense = keras.layers.Dense(16, name='dense_1')
  a_2 = dense(a)
  b_2 = dense(b)
  merged = keras.layers.concatenate([a_2, b_2], name='merge')
  c = keras.layers.Dense(64, name='dense_2')(merged)
  d = keras.layers.Dense(5, name='dense_3')(c)
  model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
  # input is not an Input tensor
  j = keras.layers.Input(shape=(32,), name='input_j')
  j = keras.layers.Dense(32)(j)
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j, k], [m, n])
  # disconnected graph
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j], [m, n])
  # redundant outputs
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  # Listing the same output twice is accepted (no exception expected).
  keras.models.Model([j, k], [m, n, n])
  # redundant inputs
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j, k, j], [m, n])
  # i have not idea what I'm doing: garbage as inputs/outputs
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  with self.assertRaises(Exception):
    keras.models.Model([j, k], [m, n, 0])
def test_raw_tf_compatibility(self):
  """Keras models and layers accept raw TensorFlow placeholders as
  inputs and keep producing the expected static shapes."""
  # test calling layers/models on TF tensors
  a = keras.layers.Input(shape=(32,), name='input_a')
  b = keras.layers.Input(shape=(32,), name='input_b')
  dense = keras.layers.Dense(16, name='dense_1')
  a_2 = dense(a)
  b_2 = dense(b)
  merged = keras.layers.concatenate([a_2, b_2], name='merge')
  c = keras.layers.Dense(64, name='dense_2')(merged)
  d = keras.layers.Dense(5, name='dense_3')(c)
  model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
  j = keras.layers.Input(shape=(32,), name='input_j')
  k = keras.layers.Input(shape=(32,), name='input_k')
  m, n = model([j, k])
  tf_model = keras.models.Model([j, k], [m, n])
  # Feed raw placeholders instead of Keras Input tensors.
  j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
  k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
  m_tf, n_tf = tf_model([j_tf, k_tf])
  self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])
  self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])
  # test merge
  keras.layers.concatenate([j_tf, k_tf], axis=1)
  keras.layers.add([j_tf, k_tf])
  # test tensor input
  x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
  keras.layers.InputLayer(input_tensor=x)
  x = keras.layers.Input(tensor=x)
  keras.layers.Dense(2)(x)
def test_basic_masking(self):
    """A Masking layer should produce an output mask of shape (batch, timesteps)."""
    a = keras.layers.Input(shape=(10, 32), name='input_a')
    b = keras.layers.Masking()(a)
    model = keras.models.Model(a, b)
    # Mask drops the feature axis: (None, 10, 32) input -> (None, 10) mask.
    self.assertEqual(model.output_mask.get_shape().as_list(), [None, 10])
def test_weight_preprocessing(self):
    """preprocess_weights_for_loading must accept Keras-1-format weight arrays.

    Each case supplies a layer, a list of weight arrays in the legacy
    (Keras 1) layout, and the input shape used to build the layer; the
    conversion should run without raising.
    """
    input_dim = 3
    output_dim = 3
    size = 2
    # Case layout: [layer, keras-1-style weights, input shape for build()].
    # NOTE(review): the weight shapes encode the Keras-1 conventions
    # (e.g. Conv2D kernels stored channels-first) — confirm against the
    # legacy-format docs if extending this table.
    cases = [
        [
            (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
            [np.random.random((2, 1)), np.random.random((2, 1))],
            (None, 3, 2),
        ],
        [
            (keras.layers.TimeDistributed(keras.layers.Dense(1))),
            [np.random.random((2, 1)), np.random.random((1,))],
            (None, 3, 2),
        ],
        [
            (keras.layers.Conv1D(output_dim, size, use_bias=False)),
            [np.random.random((output_dim, input_dim, size, 1))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.Conv2D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_last')),
            [np.random.random((size, size, input_dim, output_dim))],
            (None, 4, 4, input_dim),
        ],
        [
            (keras.layers.Conv3D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size, size))],
            (None, input_dim, 4, 4, 4),
        ],
        [
            # GRU in Keras 1 stored 9 separate arrays (3 gates x W/U/bias).
            (keras.layers.GRU(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
        [
            # LSTM in Keras 1 stored 12 separate arrays (4 gates x W/U/bias).
            (keras.layers.LSTM(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
    ]
    for layer, weights, input_shape in cases:
        layer.build(input_shape)
        # Should not raise: converts legacy weights to the current layout.
        _ = keras.engine.topology.preprocess_weights_for_loading(
            layer, weights, original_keras_version='1')
    # Sequential and functional models are handled recursively as well.
    model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
    _ = keras.engine.topology.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')
    x = keras.Input((2,))
    y = keras.layers.Dense(2)(x)
    model = keras.models.Model(x, y)
    _ = keras.engine.topology.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')
def test_layer_sharing_at_heterogenous_depth(self):
    """A model whose shared layers appear at different depths must survive a config round-trip."""
    with self.test_session():
        x_val = np.random.random((10, 5))
        x = keras.Input(shape=(5,))
        a = keras.layers.Dense(5, name='A')
        b = keras.layers.Dense(5, name='B')
        # Layers A and B are each applied twice, at different graph depths.
        output = a(b(a(b(x))))
        m = keras.models.Model(x, output)
        output_val = m.predict(x_val)
        # Rebuild the model from its config and weights; predictions on the
        # same input must match the original model.
        config = m.get_config()
        weights = m.get_weights()
        m2 = keras.models.Model.from_config(config)
        m2.set_weights(weights)
        output_val_2 = m2.predict(x_val)
        self.assertAllClose(output_val, output_val_2, atol=1e-6)
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
    """Like the previous test, but the shared layers feed a concatenate merge."""
    with self.test_session():
        input_shape = (16, 9, 3)
        input_layer = keras.Input(shape=input_shape)
        a = keras.layers.Dense(3, name='dense_A')
        b = keras.layers.Dense(3, name='dense_B')
        c = keras.layers.Dense(3, name='dense_C')
        # Layer A is shared between the two branches at different depths.
        x1 = b(a(input_layer))
        x2 = a(c(input_layer))
        output = keras.layers.concatenate([x1, x2])
        m = keras.models.Model(inputs=input_layer, outputs=output)
        x_val = np.random.random((10, 16, 9, 3))
        output_val = m.predict(x_val)
        # Config round-trip must preserve behavior.
        config = m.get_config()
        weights = m.get_weights()
        m2 = keras.models.Model.from_config(config)
        m2.set_weights(weights)
        output_val_2 = m2.predict(x_val)
        self.assertAllClose(output_val, output_val_2, atol=1e-6)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    test.main()
|
|
import csv
import re
from django import forms
from django.core.urlresolvers import reverse_lazy
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
# Numeric-range pattern, e.g. "[0-9]". Raw string: the original non-raw
# literal relied on '\[' / '\d' being passed through unchanged, which emits
# DeprecationWarnings on Python 3.6+.
EXPANSION_PATTERN = r'\[(\d+-\d+)\]'


def expand_pattern(string):
    """
    Expand a numeric pattern into a list of strings. Examples:
      'ge-0/0/[0-3]' => ['ge-0/0/0', 'ge-0/0/1', 'ge-0/0/2', 'ge-0/0/3']
      'xe-0/[0-3]/[0-7]' => ['xe-0/0/0', 'xe-0/0/1', ... 'xe-0/3/6', 'xe-0/3/7']

    :param string: A string containing at least one "[x-y]" numeric range
    :return: Generator yielding each expanded string
    """
    # Split on the first range only; any further ranges stay in `remnant`
    # and are handled recursively below.
    lead, pattern, remnant = re.split(EXPANSION_PATTERN, string, maxsplit=1)
    low, high = pattern.split('-')
    for i in range(int(low), int(high) + 1):
        if re.search(EXPANSION_PATTERN, remnant):
            # More ranges remain: expand them and prepend our prefix.
            # (Renamed the loop variable; it previously shadowed `string`.)
            for expanded in expand_pattern(remnant):
                yield "{}{}{}".format(lead, i, expanded)
        else:
            yield "{}{}{}".format(lead, i, remnant)
def add_blank_choice(choices):
    """
    Return *choices* with a blank choice prepended.

    :param choices: Any iterable of (value, label) pairs (tuple or list).
        The original implementation concatenated with a tuple directly and
        raised TypeError for list-based choices; coercing via tuple() accepts
        any iterable while remaining backward-compatible for tuples.
    :return: A tuple beginning with the (None, '---------') blank choice
    """
    return ((None, '---------'),) + tuple(choices)
#
# Widgets
#
class SmallTextarea(forms.Textarea):
    """
    Marker subclass of Textarea.

    Carries no behavior of its own; presumably sized down via CSS keyed on
    the class name — confirm against the template/stylesheet.
    """
    pass
class SelectWithDisabled(forms.Select):
    """
    Modified the stock Select widget to accept choices using a dict() for a label. The dict for each option must include
    'label' (string) and 'disabled' (boolean).
    """

    def render_option(self, selected_choices, option_value, option_label):
        """Render one <option>, honoring selection state and dict-style disabled labels."""
        # Determine if option has been selected
        option_value = force_text(option_value)
        if option_value in selected_choices:
            selected_html = mark_safe(' selected="selected"')
            if not self.allow_multiple_selected:
                # Only allow for a single selection.
                selected_choices.remove(option_value)
        else:
            selected_html = ''
        # Determine if option has been disabled. An option whose value matches
        # the widget's 'exempt' attr is never disabled.
        # NOTE(review): force_text(None) yields the string 'None', so absent
        # 'exempt' can only match a literal 'None' value — confirm intended.
        option_disabled = False
        exempt_value = force_text(self.attrs.get('exempt', None))
        if isinstance(option_label, dict):
            option_disabled = option_label['disabled'] if option_value != exempt_value else False
            option_label = option_label['label']
        disabled_html = ' disabled="disabled"' if option_disabled else ''
        return format_html(u'<option value="{}"{}{}>{}</option>',
                           option_value,
                           selected_html,
                           disabled_html,
                           force_text(option_label))
class APISelect(SelectWithDisabled):
    """
    A select widget whose option list is populated client-side via an API call.

    :param api_url: API URL
    :param display_field: (Optional) Field to display for child in selection list. Defaults to `name`.
    :param disabled_indicator: (Optional) Mark option as disabled if this field equates true.
    """

    def __init__(self, api_url, display_field=None, disabled_indicator=None, *args, **kwargs):
        super(APISelect, self).__init__(*args, **kwargs)

        widget_attrs = self.attrs
        widget_attrs['class'] = 'api-select'
        widget_attrs['api-url'] = api_url

        # Emit the optional data attributes only when a value was supplied.
        for attr_name, attr_value in (('display-field', display_field),
                                      ('disabled-indicator', disabled_indicator)):
            if attr_value:
                widget_attrs[attr_name] = attr_value
class Livesearch(forms.TextInput):
    """
    A text widget that carries a few extra bits of data for use in AJAX-powered autocomplete search

    :param query_key: The name of the parameter to query against
    :param query_url: The name of the API URL to query
    :param field_to_update: The name of the "real" form field whose value is being set
    :param obj_label: The field to use as the option label (optional)
    """

    def __init__(self, query_key, query_url, field_to_update, obj_label=None, *args, **kwargs):
        super(Livesearch, self).__init__(*args, **kwargs)

        # Replace (not merge) attrs with the data-* hooks consumed by the
        # autocomplete JavaScript.
        search_attrs = {
            'data-key': query_key,
            'data-source': reverse_lazy(query_url),
            'data-field': field_to_update,
        }
        if obj_label:
            search_attrs['data-label'] = obj_label
        self.attrs = search_attrs
#
# Form fields
#
class CSVDataField(forms.CharField):
    """
    A field for comma-separated values (CSV). Values containing commas should be encased within double quotes. Example:
    '"New York, NY",new-york-ny,Other stuff' => ['New York, NY', 'new-york-ny', 'Other stuff']

    :param csv_form: Companion form class; its field order defines the expected CSV columns.
    """
    # Class-level default; replaced per-instance in __init__.
    csv_form = None

    def __init__(self, csv_form, *args, **kwargs):
        self.csv_form = csv_form
        # Expected column order comes from the companion form's fields.
        self.columns = self.csv_form().fields.keys()
        self.widget = forms.Textarea
        super(CSVDataField, self).__init__(*args, **kwargs)
        # Do not strip whitespace; it may be significant inside quoted values.
        self.strip = False
        if not self.label:
            self.label = 'CSV Data'
        if not self.help_text:
            self.help_text = 'Enter one line per record in CSV format.'

    def to_python(self, value):
        """Parse the raw CSV text into a list of {column: value} dicts.

        Raises ValidationError (with a 1-based line number) when a row has
        too few or too many fields. Blank rows are skipped.
        """
        # Return a list of dictionaries, each representing an individual record
        records = []
        reader = csv.reader(value.splitlines())
        for i, row in enumerate(reader, start=1):
            if row:
                if len(row) < len(self.columns):
                    raise forms.ValidationError("Line {}: Field(s) missing (found {}; expected {})"
                                                .format(i, len(row), len(self.columns)))
                elif len(row) > len(self.columns):
                    raise forms.ValidationError("Line {}: Too many fields (found {}; expected {})"
                                                .format(i, len(row), len(self.columns)))
                record = dict(zip(self.columns, row))
                records.append(record)
        return records
class ExpandableNameField(forms.CharField):
    """
    A CharField which expands numeric ranges into lists of names.
    Example: 'Gi0/[1-3]' => ['Gi0/1', 'Gi0/2', 'Gi0/3']
    """

    def __init__(self, *args, **kwargs):
        super(ExpandableNameField, self).__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Numeric ranges are supported for bulk creation.<br />'\
                             'Example: <code>ge-0/0/[0-47]</code>'

    def to_python(self, value):
        # No range pattern present: the value itself is the only name.
        if not re.search(EXPANSION_PATTERN, value):
            return [value]
        return list(expand_pattern(value))
class CommentField(forms.CharField):
    """
    A textarea with support for GitHub-Flavored Markdown. Exists mostly just to add a standard help_text.
    """
    widget = forms.Textarea
    # TODO: Port GFM syntax cheat sheet to internal documentation
    default_helptext = '<i class="fa fa-info-circle"></i> '\
                       '<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank">'\
                       'GitHub-Flavored Markdown</a> syntax is supported'

    def __init__(self, *args, **kwargs):
        # Comments are optional by default and carry the standard help text
        # unless the caller overrides either keyword.
        kwargs.setdefault('required', False)
        kwargs.setdefault('help_text', self.default_helptext)
        super(CommentField, self).__init__(*args, **kwargs)
class FlexibleModelChoiceField(forms.ModelChoiceField):
    """
    Allow a model to be referenced by either '{ID}' or the field specified by `to_field_name`.
    """

    def to_python(self, value):
        """Resolve *value* to a model instance, or None for empty values.

        Raises ValidationError('invalid_choice') when no matching object
        exists or the value cannot be coerced.
        """
        if value in self.empty_values:
            return None
        try:
            if not self.to_field_name:
                key = 'pk'
            # Raw string: the original non-raw '^\{\d+\}$' triggered
            # invalid-escape-sequence DeprecationWarnings (same runtime bytes).
            elif re.match(r'^\{\d+\}$', value):
                # Curly-brace syntax ("{123}") forces a primary-key lookup.
                key = 'pk'
                value = value.strip('{}')
            else:
                key = self.to_field_name
            value = self.queryset.get(**{key: value})
        except (ValueError, TypeError, self.queryset.model.DoesNotExist):
            raise forms.ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value
class SlugField(forms.SlugField):
    """SlugField with a standard label/help text and a JS hook naming its source field."""

    def __init__(self, slug_source='name', *args, **kwargs):
        # Default label and help text; callers may override either.
        kwargs.setdefault('label', "Slug")
        kwargs.setdefault('help_text', "URL-friendly unique shorthand")
        super(SlugField, self).__init__(*args, **kwargs)
        # Tells the front-end which field to auto-slugify from.
        self.widget.attrs['slug-source'] = slug_source
#
# Forms
#
class BootstrapMixin(forms.BaseForm):
    """
    Form mixin that decorates every widget with the Bootstrap 'form-control'
    CSS class plus HTML5 'required' and 'placeholder' attributes.
    """

    def __init__(self, *args, **kwargs):
        super(BootstrapMixin, self).__init__(*args, **kwargs)

        # Checkboxes/radios are styled differently and don't get form-control.
        # Exact-type comparison (not isinstance) preserved from the original;
        # previously each check instantiated throwaway widgets just to call
        # type() on them.
        exempt_widgets = (forms.CheckboxInput, forms.RadioSelect)

        for field in self.fields.values():
            if type(field.widget) not in exempt_widgets:
                attrs = field.widget.attrs
                # Append to an existing class list rather than clobbering it
                # (replaces the original try/except KeyError).
                if 'class' in attrs:
                    attrs['class'] += ' form-control'
                else:
                    attrs['class'] = 'form-control'
            if field.required:
                field.widget.attrs['required'] = 'required'
            if 'placeholder' not in field.widget.attrs:
                field.widget.attrs['placeholder'] = field.label
class ConfirmationForm(forms.Form, BootstrapMixin):
    """Minimal yes/no confirmation form (a single required checkbox).

    NOTE(review): the mixin is conventionally listed first
    (BootstrapMixin, forms.Form); the MRO still reaches BootstrapMixin's
    __init__ here, but confirm the styling is actually applied.
    """
    confirm = forms.BooleanField(required=True)
class BulkImportForm(forms.Form):
    """Base form for CSV bulk imports.

    Expects a 'csv' CSVDataField (declared by subclasses); validates each
    parsed record against the field's companion form and replaces
    cleaned_data['csv'] with the list of unsaved model instances.
    """

    def clean(self):
        records = self.cleaned_data.get('csv')
        if not records:
            return

        obj_list = []

        # Validate each record through the companion form; collect unsaved
        # objects (commit=False) so the view controls persistence.
        for i, record in enumerate(records, start=1):
            obj_form = self.fields['csv'].csv_form(data=record)
            if obj_form.is_valid():
                obj = obj_form.save(commit=False)
                obj_list.append(obj)
            else:
                # Attach every sub-form error to the 'csv' field, prefixed
                # with the 1-based record number for the user.
                for field, errors in obj_form.errors.items():
                    for e in errors:
                        if field == '__all__':
                            self.add_error('csv', "Record {}: {}".format(i, e))
                        else:
                            self.add_error('csv', "Record {} ({}): {}".format(i, field, e))

        self.cleaned_data['csv'] = obj_list
|
|
#!/usr/bin/env python
import web
import ovsdb
import simplejson as json
import ofctrl
# web.py routing table: (regex, handler-class-name) pairs. All patterns are
# raw strings so '\w' / '\d' don't emit invalid-escape-sequence warnings
# (runtime byte values are unchanged).
urls = (
    '/', 'Index',
    # All Bridges
    '/bridges', 'Bridges',
    '/bridges/add', 'Bridges',
    # A single Bridge
    r'/bridges/(\w+)', 'Bridge',
    r'/bridges/(\w+)/(update|del)', 'Bridge',
    # Controllers
    r'/bridges/(\w+)/controllers', 'Controller',
    r'/bridges/(\w+)/controllers/(update|del|add)', 'Controller',
    # Normal Ports
    r'/bridges/(\w+)/ports', 'Ports',
    r'/bridges/(\w+)/ports/(\w+)/(update|del|add)', 'Ports',
    # Mirrors
    r'/bridges/(\w+)/mirrors', 'Mirror',
    r'/bridges/(\w+)/mirrors/(\w+)/(update|del|add)', 'Mirror',
    # NetFlow
    r'/bridges/(\w+)/netflow', 'NetFlow',
    r'/bridges/(\w+)/netflow/(update|del|add)', 'NetFlow',
    # sFlow
    r'/bridges/(\w+)/sflow', 'sFlow',
    r'/bridges/(\w+)/sflow/(update|del|add)', 'sFlow',
    # Queue
    r'/bridges/(\w+)/queues', 'Queues',
    r'/bridges/(\w+)/queues/add', 'Queues',
    r'/bridges/(\w+)/queues/(\w{8})/(update|del)', 'Queue',
    # Qos
    r'/bridges/(\w+)/qos', 'QoSes',
    r'/bridges/(\w+)/qos/add', 'QoSes',
    r'/bridges/(\w+)/qos/(\w{8})/(update|del)', 'QoS',
    # Flows
    r'/bridges/([\w:.]+)/tables', 'Tables',
    r'/bridges/([\w:.]+)/tables/(\d+)/flows', 'Flows',
    r'/bridges/([\w:.]+)/tables/(\d+)/flows/(update|add|del)', 'Flows',
)
class Index(object):
    """Root handler: redirects to the static front-end."""

    def GET(self):
        # redirect to layout template
        raise web.seeother("/index.html")
class Bridges(object):
    """Collection resource for OVS bridges."""

    def GET(self):
        """
        GET /bridges
        """
        return ovsdb.fast_get_bridges()

    def POST(self):
        """
        POST /bridges/add?name=br0
        """
        # Bridge name arrives as a query/form parameter, not a JSON body.
        getInput = web.input()
        # TODO, elaborate add_bridge
        return ovsdb.add_bridge(str(getInput.name))
class Bridge(object):
    """Resource for a single OVS bridge."""

    def GET(self, name):
        """
        GET /bridges/br0
        """
        return ovsdb.get_bridge(name)

    def POST(self, name, op):
        """
        POST /bridges/br0/update
        POST /bridges/br0/del
        """
        # Raw request body is forwarded to ovsdb unparsed.
        data = web.data()
        if op == "update":
            # TODO, elaborate update_bridge
            return ovsdb.update_bridge(name, data)
        elif op == "del":
            # TODO, elaborate del_bridge
            return ovsdb.del_bridge(name)
class Controller(object):
    """Resource for a bridge's OpenFlow controllers."""

    def GET(self, name):
        """
        GET /bridges/br0/controllers
        """
        return ovsdb.get_controllers(name)

    def POST(self, name, op):
        """
        POST /bridges/br0/controllers/update
        POST /bridges/br0/controllers/add
        POST /bridges/br0/controllers/del
        """
        data = web.data()
        # Dispatch on the operation segment of the URL.
        actions = {
            "update": ovsdb.update_controller,
            "del": ovsdb.del_controller,
            "add": ovsdb.add_controller,
        }
        action = actions.get(op)
        if action is not None:
            return action(name, data)
class Ports(object):
    """Resource for a bridge's ports."""

    def GET(self, brname):
        """
        GET /bridges/br0/Ports
        """
        return ovsdb.get_ports(brname)

    def POST(self, brname, portname, op):
        """
        POST /bridges/br0/ports/eth0/update
        POST /bridges/br0/ports/eth0/add
        POST /bridges/br0/ports/eth0/del
        """
        # NOTE(review): `portname` from the URL is unused; the port identity
        # is presumably carried in the request body — confirm against ovsdb.
        data = web.data()
        if op == "update":
            return ovsdb.update_port(brname, data)
        elif op == "del":
            return ovsdb.del_port(brname, data)
        elif op == "add":
            return ovsdb.add_port(brname, data)
class Mirror(object):
    """Resource for a bridge's port mirrors."""

    def GET(self, brname):
        """
        GET /bridges/br0/mirrors
        """
        return ovsdb.get_mirrors(brname)

    def POST(self, brname, mirrorname, op):
        """
        POST /bridges/br0/mirrors/M1/update
        POST /bridges/br0/mirrors/M1/add
        POST /bridges/br0/mirrors/M1/del
        """
        data = web.data()
        # Dispatch table keyed by the URL's operation segment. The mirror
        # name from the URL is not consumed here (matches original behavior).
        handlers = {
            "update": ovsdb.update_mirror,
            "del": ovsdb.del_mirror,
            "add": ovsdb.add_mirror,
        }
        handler = handlers.get(op)
        if handler is not None:
            return handler(brname, data)
class NetFlow(object):
    """Resource for a bridge's NetFlow configuration."""

    def GET(self, brname):
        """
        GET /bridges/br0/netflow
        """
        return ovsdb.get_netflows(brname)

    def POST(self, brname, op):
        """
        POST /bridges/br0/netflow/update
        POST /bridges/br0/netflow/add
        POST /bridges/br0/netflow/del
        """
        # Raw request body forwarded to ovsdb unparsed.
        data = web.data()
        if op == "update":
            return ovsdb.update_netflow(brname, data)
        elif op == "del":
            return ovsdb.del_netflow(brname, data)
        elif op == "add":
            return ovsdb.add_netflow(brname, data)
class sFlow(object):
    """Resource for a bridge's sFlow configuration."""

    def GET(self, brname):
        """
        GET /bridges/br0/sflow
        """
        return ovsdb.get_sflow(brname)

    def POST(self, brname, op):
        """
        POST /bridges/br0/sflow/update
        POST /bridges/br0/sflow/add
        POST /bridges/br0/sflow/del
        """
        # Raw request body forwarded to ovsdb unparsed.
        data = web.data()
        if op == "update":
            return ovsdb.update_sflow(brname, data)
        elif op == "del":
            return ovsdb.del_sflow(brname, data)
        elif op == "add":
            return ovsdb.add_sflow(brname, data)
class Queues(object):
    """Collection resource for QoS queues."""

    def GET(self, brname):
        """
        GET /bridges/br0/queues
        """
        # NOTE(review): queues are global in ovsdb — `brname` is unused here.
        return ovsdb.get_queues()

    def POST(self, brname):
        """
        POST /bridges/br0/queues/add
        """
        data = web.data()
        return ovsdb.add_queue(data)
class Queue(object):
    """Resource for a single QoS queue, addressed by its 8-char UUID prefix."""

    def GET(self):
        # No GET route maps here; placeholder only.
        pass

    def POST(self, brname, uuid, op):
        """
        POST /bridges/br0/queues/00000000/update
        POST /bridges/br0/queues/00000000/del
        """
        # NOTE(review): brname/uuid from the URL are unused; the target queue
        # is presumably identified in the request body — confirm with ovsdb.
        data = web.data()
        if op == "update":
            return ovsdb.update_queue(data)
        elif op == "del":
            return ovsdb.del_queue(data)
class QoSes(object):
    """Collection resource for QoS records."""

    def GET(self, brname):
        """
        GET /bridges/br0/qos
        """
        # NOTE(review): QoS records are global in ovsdb — `brname` is unused.
        return ovsdb.get_all_qos()

    def POST(self, brname):
        """
        POST /bridges/br0/qos/add
        """
        data = web.data()
        return ovsdb.add_qos(data)
class QoS(object):
    """Resource for a single QoS record, addressed by its 8-char UUID prefix."""

    def GET(self):
        # No GET route maps here; placeholder only.
        pass

    def POST(self, brname, uuid, op):
        """
        POST /bridges/br0/qos/00000000/update
        POST /bridges/br0/qos/00000000/del
        """
        # NOTE(review): brname/uuid from the URL are unused; the target record
        # is presumably identified in the request body — confirm with ovsdb.
        data = web.data()
        if op == "update":
            return ovsdb.update_qos(data)
        elif op == "del":
            return ovsdb.del_qos(data)
class Tables():
    """Read-only resource for a bridge's OpenFlow tables (via ofctrl)."""

    def GET(self, brname):
        """
        GET /bridges/br0/tables
        """
        wrapper = ofctrl.SimpleCtrl(brname)
        return wrapper.get_tables()
class Flows():
    """Resource for the flow entries of one OpenFlow table on a bridge."""

    def GET(self, brname, tid):
        """
        GET /bridges/br0/tables/0/flows
        """
        return ofctrl.SimpleCtrl(brname).get_flows(int(tid))

    def POST(self, brname, tid, op):
        """
        POST /bridges/br0/tables/0/flows/update
        POST /bridges/br0/tables/0/flows/add
        POST /bridges/br0/tables/0/flows/del
        """
        data = web.data()
        ctrl = ofctrl.SimpleCtrl(brname)
        # Map the URL's operation segment to the matching controller call.
        # (The table id is not consumed here, matching original behavior.)
        dispatch = {
            "update": ctrl.mod_flow,
            "del": ctrl.del_flow,
            "add": ctrl.add_flow,
        }
        handler = dispatch.get(op)
        if handler is not None:
            return handler(data)
if __name__ == "__main__":
    # Start the web.py development server with the routes defined above.
    app = web.application(urls, globals())
    app.run()

    # Dead code retained from an earlier revision (unreachable after run()).
    #if name:
    #bridge = json.loads(ovsdb.get_bridge(str(name)))
    #return self.render.bridge(bridge)
|
|
# -*- coding: utf-8 -*-
#
# msoAutoShapeType.py
#
# original source for msoAutoShapeType constants and spec definitions
#
def msdn_msoAutoShapeTypes():
    """
    Return sequence of (ms_name, id, description) tuples representing the
    msoAutoShapeType enumeration as defined in the MS Office API.

    Access with::

        for ms_name, id_, desc in msdn_msoAutoShapeTypes():
            ...

    This is structured as a function simply so code folding will work on it.
    Entries are ordered alphabetically by *ms_name*.

    Fix: the wrapped description for msoShapeLineCallout4BorderandAccentBar
    previously concatenated to "...segments forminga U-shape." (missing
    space at the line break).
    """
    return (
        ('msoShape10PointStar', 149, '10-Point Star'),
        ('msoShape12PointStar', 150, '12-Point Star'),
        ('msoShape16pointStar', 94, '16-point star.'),
        ('msoShape24pointStar', 95, '24-point star.'),
        ('msoShape32pointStar', 96, '32-point star.'),
        ('msoShape4pointStar', 91, '4-point star.'),
        ('msoShape5pointStar', 92, '5-point star.'),
        ('msoShape6PointStar', 147, '6-Point Star'),
        ('msoShape7PointStar', 148, '7-Point Star'),
        ('msoShape8pointStar', 93, '8-point star.'),
        ('msoShapeActionButtonBackorPrevious', 129,
         'Back or Previous button. Supports mouse-click and mouse-over actio'
         'ns.'),
        ('msoShapeActionButtonBeginning', 131,
         'Beginning button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonCustom', 125,
         'Button with no default picture or text. Supports mouse-click and m'
         'ouse-over actions.'),
        ('msoShapeActionButtonDocument', 134,
         'Document button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonEnd', 132,
         'End button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonForwardorNext', 130,
         'Forward or Next button. Supports mouse-click and mouse-over action'
         's.'),
        ('msoShapeActionButtonHelp', 127,
         'Help button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonHome', 126,
         'Home button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonInformation', 128,
         'Information button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonMovie', 136,
         'Movie button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonReturn', 133,
         'Return button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeActionButtonSound', 135,
         'Sound button. Supports mouse-click and mouse-over actions.'),
        ('msoShapeArc', 25, 'Arc.'),
        ('msoShapeBalloon', 137, 'Balloon.'),
        ('msoShapeBentArrow', 41,
         'Block arrow that follows a curved 90-degree angle.'),
        ('msoShapeBentUpArrow', 44,
         'Block arrow that follows a sharp 90-degree angle. Points up by def'
         'ault.'),
        ('msoShapeBevel', 15, 'Bevel.'),
        ('msoShapeBlockArc', 20, 'Block arc.'),
        ('msoShapeCan', 13, 'Can.'),
        ('msoShapeChartPlus', 182, 'Chart Plus'),
        ('msoShapeChartStar', 181, 'Chart Star'),
        ('msoShapeChartX', 180, 'Chart X'),
        ('msoShapeChevron', 52, 'Chevron.'),
        ('msoShapeChord', 161, 'Geometric chord shape'),
        ('msoShapeCircularArrow', 60,
         'Block arrow that follows a curved 180-degree angle.'),
        ('msoShapeCloud', 179, 'Cloud'),
        ('msoShapeCloudCallout', 108, 'Cloud callout.'),
        ('msoShapeCorner', 162, 'Corner'),
        ('msoShapeCornerTabs', 169, 'Corner Tabs'),
        ('msoShapeCross', 11, 'Cross.'),
        ('msoShapeCube', 14, 'Cube.'),
        ('msoShapeCurvedDownArrow', 48, 'Block arrow that curves down.'),
        ('msoShapeCurvedDownRibbon', 100, 'Ribbon banner that curves down.'),
        ('msoShapeCurvedLeftArrow', 46, 'Block arrow that curves left.'),
        ('msoShapeCurvedRightArrow', 45, 'Block arrow that curves right.'),
        ('msoShapeCurvedUpArrow', 47, 'Block arrow that curves up.'),
        ('msoShapeCurvedUpRibbon', 99, 'Ribbon banner that curves up.'),
        ('msoShapeDecagon', 144, 'Decagon'),
        ('msoShapeDiagonalStripe', 141, 'Diagonal Stripe'),
        ('msoShapeDiamond', 4, 'Diamond'),
        ('msoShapeDodecagon', 146, 'Dodecagon'),
        ('msoShapeDonut', 18, 'Donut.'),
        ('msoShapeDoubleBrace', 27, 'Double brace.'),
        ('msoShapeDoubleBracket', 26, 'Double bracket.'),
        ('msoShapeDoubleWave', 104, 'Double wave.'),
        ('msoShapeDownArrow', 36, 'Block arrow that points down.'),
        ('msoShapeDownArrowCallout', 56,
         'Callout with arrow that points down.'),
        ('msoShapeDownRibbon', 98,
         'Ribbon banner with center area below ribbon ends.'),
        ('msoShapeExplosion1', 89, 'Explosion.'),
        ('msoShapeExplosion2', 90, 'Explosion.'),
        ('msoShapeFlowchartAlternateProcess', 62,
         'Alternate process flowchart symbol.'),
        ('msoShapeFlowchartCard', 75, 'Card flowchart symbol.'),
        ('msoShapeFlowchartCollate', 79, 'Collate flowchart symbol.'),
        ('msoShapeFlowchartConnector', 73, 'Connector flowchart symbol.'),
        ('msoShapeFlowchartData', 64, 'Data flowchart symbol.'),
        ('msoShapeFlowchartDecision', 63, 'Decision flowchart symbol.'),
        ('msoShapeFlowchartDelay', 84, 'Delay flowchart symbol.'),
        ('msoShapeFlowchartDirectAccessStorage', 87,
         'Direct access storage flowchart symbol.'),
        ('msoShapeFlowchartDisplay', 88, 'Display flowchart symbol.'),
        ('msoShapeFlowchartDocument', 67, 'Document flowchart symbol.'),
        ('msoShapeFlowchartExtract', 81, 'Extract flowchart symbol.'),
        ('msoShapeFlowchartInternalStorage', 66,
         'Internal storage flowchart symbol.'),
        ('msoShapeFlowchartMagneticDisk', 86,
         'Magnetic disk flowchart symbol.'),
        ('msoShapeFlowchartManualInput', 71,
         'Manual input flowchart symbol.'),
        ('msoShapeFlowchartManualOperation', 72,
         'Manual operation flowchart symbol.'),
        ('msoShapeFlowchartMerge', 82, 'Merge flowchart symbol.'),
        ('msoShapeFlowchartMultidocument', 68,
         'Multi-document flowchart symbol.'),
        ('msoShapeFlowchartOfflineStorage', 139, 'Offline Storage'),
        ('msoShapeFlowchartOffpageConnector', 74,
         'Off-page connector flowchart symbol.'),
        ('msoShapeFlowchartOr', 78, '"Or" flowchart symbol.'),
        ('msoShapeFlowchartPredefinedProcess', 65,
         'Predefined process flowchart symbol.'),
        ('msoShapeFlowchartPreparation', 70,
         'Preparation flowchart symbol.'),
        ('msoShapeFlowchartProcess', 61, 'Process flowchart symbol.'),
        ('msoShapeFlowchartPunchedTape', 76,
         'Punched tape flowchart symbol.'),
        ('msoShapeFlowchartSequentialAccessStorage', 85,
         'Sequential access storage flowchart symbol.'),
        ('msoShapeFlowchartSort', 80, 'Sort flowchart symbol.'),
        ('msoShapeFlowchartStoredData', 83, 'Stored data flowchart symbol.'),
        ('msoShapeFlowchartSummingJunction', 77,
         'Summing junction flowchart symbol.'),
        ('msoShapeFlowchartTerminator', 69, 'Terminator flowchart symbol.'),
        ('msoShapeFoldedCorner', 16, 'Folded corner.'),
        ('msoShapeFrame', 158, 'Frame'),
        ('msoShapeFunnel', 174, 'Funnel'),
        ('msoShapeGear6', 172, 'Gear 6'),
        ('msoShapeGear9', 173, 'Gear 9'),
        ('msoShapeHalfFrame', 159, 'Half Frame'),
        ('msoShapeHeart', 21, 'Heart.'),
        ('msoShapeHeptagon', 145, 'Heptagon'),
        ('msoShapeHexagon', 10, 'Hexagon.'),
        ('msoShapeHorizontalScroll', 102, 'Horizontal scroll.'),
        ('msoShapeIsoscelesTriangle', 7, 'Isosceles triangle.'),
        ('msoShapeLeftArrow', 34, 'Block arrow that points left.'),
        ('msoShapeLeftArrowCallout', 54,
         'Callout with arrow that points left.'),
        ('msoShapeLeftBrace', 31, 'Left brace.'),
        ('msoShapeLeftBracket', 29, 'Left bracket.'),
        ('msoShapeLeftCircularArrow', 176, 'Left Circular Arrow'),
        ('msoShapeLeftRightArrow', 37,
         'Block arrow with arrowheads that point both left and right.'),
        ('msoShapeLeftRightArrowCallout', 57,
         'Callout with arrowheads that point both left and right.'),
        ('msoShapeLeftRightCircularArrow', 177, 'Left Right Circular Arrow'),
        ('msoShapeLeftRightRibbon', 140, 'Left Right Ribbon'),
        ('msoShapeLeftRightUpArrow', 40,
         'Block arrow with arrowheads that point left, right, and up.'),
        ('msoShapeLeftUpArrow', 43,
         'Block arrow with arrowheads that point left and up.'),
        ('msoShapeLightningBolt', 22, 'Lightning bolt.'),
        ('msoShapeLineCallout1', 109,
         'Callout with border and horizontal callout line.'),
        ('msoShapeLineCallout1AccentBar', 113,
         'Callout with horizontal accent bar.'),
        ('msoShapeLineCallout1BorderandAccentBar', 121,
         'Callout with border and horizontal accent bar.'),
        ('msoShapeLineCallout1NoBorder', 117,
         'Callout with horizontal line.'),
        ('msoShapeLineCallout2', 110,
         'Callout with diagonal straight line.'),
        ('msoShapeLineCallout2AccentBar', 114,
         'Callout with diagonal callout line and accent bar.'),
        ('msoShapeLineCallout2BorderandAccentBar', 122,
         'Callout with border, diagonal straight line, and accent bar.'),
        ('msoShapeLineCallout2NoBorder', 118,
         'Callout with no border and diagonal callout line.'),
        ('msoShapeLineCallout3', 111, 'Callout with angled line.'),
        ('msoShapeLineCallout3AccentBar', 115,
         'Callout with angled callout line and accent bar.'),
        ('msoShapeLineCallout3BorderandAccentBar', 123,
         'Callout with border, angled callout line, and accent bar.'),
        ('msoShapeLineCallout3NoBorder', 119,
         'Callout with no border and angled callout line.'),
        ('msoShapeLineCallout4', 112,
         'Callout with callout line segments forming a U-shape.'),
        ('msoShapeLineCallout4AccentBar', 116,
         'Callout with accent bar and callout line segments forming a U-shap'
         'e.'),
        ('msoShapeLineCallout4BorderandAccentBar', 124,
         'Callout with border, accent bar, and callout line segments forming'
         ' a U-shape.'),
        ('msoShapeLineCallout4NoBorder', 120,
         'Callout with no border and callout line segments forming a U-shape'
         '.'),
        ('msoShapeLineInverse', 183, 'Straight Connector'),
        ('msoShapeMathDivide', 166, 'Division'),
        ('msoShapeMathEqual', 167, 'Equal'),
        ('msoShapeMathMinus', 164, 'Minus'),
        ('msoShapeMathMultiply', 165, 'Multiply'),
        ('msoShapeMathNotEqual', 168, 'Not Equal'),
        ('msoShapeMathPlus', 163, 'Plus'),
        ('msoShapeMoon', 24, 'Moon.'),
        ('msoShapeNoSymbol', 19, '"No" symbol.'),
        ('msoShapeNonIsoscelesTrapezoid', 143, 'Non-isosceles Trapezoid'),
        ('msoShapeNotPrimitive', 138, 'Not supported.'),
        ('msoShapeNotchedRightArrow', 50,
         'Notched block arrow that points right.'),
        ('msoShapeOctagon', 6, 'Octagon'),
        ('msoShapeOval', 9, 'Oval'),
        ('msoShapeOvalCallout', 107, 'Oval-shaped callout.'),
        ('msoShapeParallelogram', 2, 'Parallelogram'),
        ('msoShapePentagon', 51, 'Pentagon.'),
        ('msoShapePie', 142, 'Pie'),
        ('msoShapePieWedge', 175, 'Pie'),
        ('msoShapePlaque', 28, 'Plaque.'),
        ('msoShapePlaqueTabs', 171, 'Plaque Tabs'),
        ('msoShapeQuadArrow', 39,
         'Block arrows that point up, down, left, and right.'),
        ('msoShapeQuadArrowCallout', 59,
         'Callout with arrows that point up, down, left, and right.'),
        ('msoShapeRectangle', 1, 'Rectangle'),
        ('msoShapeRectangularCallout', 105, 'Rectangular callout.'),
        ('msoShapeRegularPentagon', 12, 'Pentagon.'),
        ('msoShapeRightArrow', 33, 'Block arrow that points right.'),
        ('msoShapeRightArrowCallout', 53,
         'Callout with arrow that points right.'),
        ('msoShapeRightBrace', 32, 'Right brace.'),
        ('msoShapeRightBracket', 30, 'Right bracket.'),
        ('msoShapeRightTriangle', 8, 'Right triangle.'),
        ('msoShapeRound1Rectangle', 151, 'Round Single Corner Rectangle'),
        ('msoShapeRound2DiagRectangle', 153,
         'Round Diagonal Corner Rectangle'),
        ('msoShapeRound2SameRectangle', 152,
         'Round Same Side Corner Rectangle'),
        ('msoShapeRoundedRectangle', 5, 'Rounded rectangle.'),
        ('msoShapeRoundedRectangularCallout', 106,
         'Rounded rectangle-shaped callout.'),
        ('msoShapeSmileyFace', 17, 'Smiley face.'),
        ('msoShapeSnip1Rectangle', 155, 'Snip Single Corner Rectangle'),
        ('msoShapeSnip2DiagRectangle', 157,
         'Snip Diagonal Corner Rectangle'),
        ('msoShapeSnip2SameRectangle', 156,
         'Snip Same Side Corner Rectangle'),
        ('msoShapeSnipRoundRectangle', 154,
         'Snip and Round Single Corner Rectangle'),
        ('msoShapeSquareTabs', 170, 'Square Tabs'),
        ('msoShapeStripedRightArrow', 49,
         'Block arrow that points right with stripes at the tail.'),
        ('msoShapeSun', 23, 'Sun.'),
        ('msoShapeSwooshArrow', 178, 'Swoosh Arrow'),
        ('msoShapeTear', 160, 'Teardrop'),
        ('msoShapeTrapezoid', 3, 'Trapezoid'),
        ('msoShapeUTurnArrow', 42, 'Block arrow forming a U shape.'),
        ('msoShapeUpArrow', 35, 'Block arrow that points up.'),
        ('msoShapeUpArrowCallout', 55, 'Callout with arrow that points up.'),
        ('msoShapeUpDownArrow', 38, 'Block arrow that points up and down.'),
        ('msoShapeUpDownArrowCallout', 58,
         'Callout with arrows that point up and down.'),
        ('msoShapeUpRibbon', 97,
         'Ribbon banner with center area above ribbon ends.'),
        ('msoShapeVerticalScroll', 101, 'Vertical scroll.'),
        ('msoShapeWave', 103, 'Wave.')
    )
def prst_map():
    """
    Sequence of tuples representing the mapping of names in the
    msoAutoShapeType enumeration to the 'prst' and 'name' values used in the
    XML to specify and identify that auto shape type. These were discovered
    using the VBA editor in PowerPoint for Windows.

    Access with::

        for ms_name, prst, base_name in prst_map():
            ...
    """
    return (
        ('msoShape10PointStar', 'star10', '10-Point Star'),
        ('msoShape12PointStar', 'star12', '12-Point Star'),
        ('msoShape16pointStar', 'star16', '16-Point Star'),
        ('msoShape24pointStar', 'star24', '24-Point Star'),
        ('msoShape32pointStar', 'star32', '32-Point Star'),
        ('msoShape4pointStar', 'star4', '4-Point Star'),
        ('msoShape5pointStar', 'star5', '5-Point Star'),
        ('msoShape6PointStar', 'star6', '6-Point Star'),
        ('msoShape7PointStar', 'star7', '7-Point Star'),
        ('msoShape8pointStar', 'star8', '8-Point Star'),
        ('msoShapeActionButtonBackorPrevious', 'actionButtonBackPrevious',
         'Action Button: Back or Previous'),
        ('msoShapeActionButtonBeginning', 'actionButtonBeginning',
         'Action Button: Beginning'),
        ('msoShapeActionButtonCustom', 'actionButtonBlank',
         'Action Button: Custom'),
        ('msoShapeActionButtonDocument', 'actionButtonDocument',
         'Action Button: Document'),
        ('msoShapeActionButtonEnd', 'actionButtonEnd', 'Action Button: End'),
        ('msoShapeActionButtonForwardorNext', 'actionButtonForwardNext',
         'Action Button: Forward or Next'),
        ('msoShapeActionButtonHelp', 'actionButtonHelp',
         'Action Button: Help'),
        ('msoShapeActionButtonHome', 'actionButtonHome',
         'Action Button: Home'),
        ('msoShapeActionButtonInformation', 'actionButtonInformation',
         'Action Button: Information'),
        ('msoShapeActionButtonMovie', 'actionButtonMovie',
         'Action Button: Movie'),
        ('msoShapeActionButtonReturn', 'actionButtonReturn',
         'Action Button: Return'),
        ('msoShapeActionButtonSound', 'actionButtonSound',
         'Action Button: Sound'),
        ('msoShapeArc', 'arc', 'Arc'),
        ('msoShapeBalloon', 'wedgeRoundRectCallout',
         'Rounded Rectangular Callout'),
        ('msoShapeBentArrow', 'bentArrow', 'Bent Arrow'),
        ('msoShapeBentUpArrow', 'bentUpArrow', 'Bent-Up Arrow'),
        ('msoShapeBevel', 'bevel', 'Bevel'),
        ('msoShapeBlockArc', 'blockArc', 'Block Arc'),
        ('msoShapeCan', 'can', 'Can'),
        ('msoShapeChartPlus', 'chartPlus', 'Chart Plus'),
        ('msoShapeChartStar', 'chartStar', 'Chart Star'),
        ('msoShapeChartX', 'chartX', 'Chart X'),
        ('msoShapeChevron', 'chevron', 'Chevron'),
        ('msoShapeChord', 'chord', 'Chord'),
        ('msoShapeCircularArrow', 'circularArrow', 'Circular Arrow'),
        ('msoShapeCloud', 'cloud', 'Cloud'),
        ('msoShapeCloudCallout', 'cloudCallout', 'Cloud Callout'),
        ('msoShapeCorner', 'corner', 'Corner'),
        ('msoShapeCornerTabs', 'cornerTabs', 'Corner Tabs'),
        ('msoShapeCross', 'plus', 'Cross'),
        ('msoShapeCube', 'cube', 'Cube'),
        ('msoShapeCurvedDownArrow', 'curvedDownArrow', 'Curved Down Arrow'),
        ('msoShapeCurvedDownRibbon', 'ellipseRibbon', 'Curved Down Ribbon'),
        ('msoShapeCurvedLeftArrow', 'curvedLeftArrow', 'Curved Left Arrow'),
        ('msoShapeCurvedRightArrow', 'curvedRightArrow',
         'Curved Right Arrow'),
        ('msoShapeCurvedUpArrow', 'curvedUpArrow', 'Curved Up Arrow'),
        ('msoShapeCurvedUpRibbon', 'ellipseRibbon2', 'Curved Up Ribbon'),
        ('msoShapeDecagon', 'decagon', 'Decagon'),
        ('msoShapeDiagonalStripe', 'diagStripe', 'Diagonal Stripe'),
        ('msoShapeDiamond', 'diamond', 'Diamond'),
        ('msoShapeDodecagon', 'dodecagon', 'Dodecagon'),
        ('msoShapeDonut', 'donut', 'Donut'),
        ('msoShapeDoubleBrace', 'bracePair', 'Double Brace'),
        ('msoShapeDoubleBracket', 'bracketPair', 'Double Bracket'),
        ('msoShapeDoubleWave', 'doubleWave', 'Double Wave'),
        ('msoShapeDownArrow', 'downArrow', 'Down Arrow'),
        ('msoShapeDownArrowCallout', 'downArrowCallout', 'Down Arrow Callout'),
        ('msoShapeDownRibbon', 'ribbon', 'Down Ribbon'),
        ('msoShapeExplosion1', 'irregularSeal1', 'Explosion'),
        ('msoShapeExplosion2', 'irregularSeal2', 'Explosion'),
        ('msoShapeFlowchartAlternateProcess', 'flowChartAlternateProcess',
         'Alternate process'),
        ('msoShapeFlowchartCard', 'flowChartPunchedCard', 'Card'),
        ('msoShapeFlowchartCollate', 'flowChartCollate', 'Collate'),
        ('msoShapeFlowchartConnector', 'flowChartConnector', 'Connector'),
        ('msoShapeFlowchartData', 'flowChartInputOutput', 'Data'),
        ('msoShapeFlowchartDecision', 'flowChartDecision', 'Decision'),
        ('msoShapeFlowchartDelay', 'flowChartDelay', 'Delay'),
        ('msoShapeFlowchartDirectAccessStorage', 'flowChartMagneticDrum',
         'Direct Access Storage'),
        ('msoShapeFlowchartDisplay', 'flowChartDisplay', 'Display'),
        ('msoShapeFlowchartDocument', 'flowChartDocument', 'Document'),
        ('msoShapeFlowchartExtract', 'flowChartExtract', 'Extract'),
        ('msoShapeFlowchartInternalStorage', 'flowChartInternalStorage',
         'Internal Storage'),
        ('msoShapeFlowchartMagneticDisk', 'flowChartMagneticDisk',
         'Magnetic Disk'),
        ('msoShapeFlowchartManualInput', 'flowChartManualInput',
         'Manual Input'),
        ('msoShapeFlowchartManualOperation', 'flowChartManualOperation',
         'Manual Operation'),
        ('msoShapeFlowchartMerge', 'flowChartMerge', 'Merge'),
        ('msoShapeFlowchartMultidocument', 'flowChartMultidocument',
         'Multidocument'),
        ('msoShapeFlowchartOfflineStorage', 'flowChartOfflineStorage',
         'Offline Storage'),
        ('msoShapeFlowchartOffpageConnector', 'flowChartOffpageConnector',
         'Off-page Connector'),
        ('msoShapeFlowchartOr', 'flowChartOr', 'Or'),
        ('msoShapeFlowchartPredefinedProcess', 'flowChartPredefinedProcess',
         'Predefined Process'),
        ('msoShapeFlowchartPreparation', 'flowChartPreparation',
         'Preparation'),
        ('msoShapeFlowchartProcess', 'flowChartProcess', 'Process'),
        ('msoShapeFlowchartPunchedTape', 'flowChartPunchedTape',
         'Punched Tape'),
        ('msoShapeFlowchartSequentialAccessStorage',
         'flowChartMagneticTape', 'Sequential Access Storage'),
        ('msoShapeFlowchartSort', 'flowChartSort', 'Sort'),
        ('msoShapeFlowchartStoredData', 'flowChartOnlineStorage',
         'Stored Data'),
        ('msoShapeFlowchartSummingJunction', 'flowChartSummingJunction',
         'Summing Junction'),
        ('msoShapeFlowchartTerminator', 'flowChartTerminator', 'Terminator'),
        # NOTE(review): 'folderCorner' looks like a typo for the ST_ShapeType
        # value 'foldedCorner' -- verify against ECMA-376 before relying on it.
        ('msoShapeFoldedCorner', 'folderCorner', 'Folded Corner'),
        ('msoShapeFrame', 'frame', 'Frame'),
        ('msoShapeFunnel', 'funnel', 'Funnel'),
        ('msoShapeGear6', 'gear6', 'Gear 6'),
        ('msoShapeGear9', 'gear9', 'Gear 9'),
        ('msoShapeHalfFrame', 'halfFrame', 'Half Frame'),
        ('msoShapeHeart', 'heart', 'Heart'),
        ('msoShapeHeptagon', 'heptagon', 'Heptagon'),
        ('msoShapeHexagon', 'hexagon', 'Hexagon'),
        ('msoShapeHorizontalScroll', 'horizontalScroll',
         'Horizontal Scroll'),
        ('msoShapeIsoscelesTriangle', 'triangle', 'Isosceles Triangle'),
        ('msoShapeLeftArrow', 'leftArrow', 'Left Arrow'),
        ('msoShapeLeftArrowCallout', 'leftArrowCallout',
         'Left Arrow Callout'),
        ('msoShapeLeftBrace', 'leftBrace', 'Left Brace'),
        ('msoShapeLeftBracket', 'leftBracket', 'Left Bracket'),
        ('msoShapeLeftCircularArrow', 'leftCircularArrow',
         'Left Circular Arrow'),
        ('msoShapeLeftRightArrow', 'leftRightArrow', 'Left-Right Arrow'),
        ('msoShapeLeftRightArrowCallout', 'leftRightArrowCallout',
         'Left-Right Arrow Callout'),
        ('msoShapeLeftRightCircularArrow', 'leftRightCircularArrow',
         'Left Right Circular Arrow'),
        ('msoShapeLeftRightRibbon', 'leftRightRibbon', 'Left Right Ribbon'),
        ('msoShapeLeftRightUpArrow', 'leftRightUpArrow',
         'Left-Right-Up Arrow'),
        ('msoShapeLeftUpArrow', 'leftUpArrow', 'Left-Up Arrow'),
        ('msoShapeLightningBolt', 'lightningBolt', 'Lightning Bolt'),
        ('msoShapeLineCallout1', 'borderCallout1', 'Line Callout 1'),
        ('msoShapeLineCallout1AccentBar', 'accentCallout1',
         'Line Callout 1 (Accent Bar)'),
        ('msoShapeLineCallout1BorderandAccentBar', 'accentBorderCallout1',
         'Line Callout 1 (Border and Accent Bar)'),
        ('msoShapeLineCallout1NoBorder', 'callout1',
         'Line Callout 1 (No Border)'),
        ('msoShapeLineCallout2', 'borderCallout2', 'Line Callout 2'),
        ('msoShapeLineCallout2AccentBar', 'accentCallout2',
         'Line Callout 2 (Accent Bar)'),
        ('msoShapeLineCallout2BorderandAccentBar', 'accentBorderCallout2',
         'Line Callout 2 (Border and Accent Bar)'),
        ('msoShapeLineCallout2NoBorder', 'callout2',
         'Line Callout 2 (No Border)'),
        ('msoShapeLineCallout3', 'borderCallout3', 'Line Callout 3'),
        ('msoShapeLineCallout3AccentBar', 'accentCallout3',
         'Line Callout 3 (Accent Bar)'),
        ('msoShapeLineCallout3BorderandAccentBar', 'accentBorderCallout3',
         'Line Callout 3 (Border and Accent Bar)'),
        ('msoShapeLineCallout3NoBorder', 'callout3',
         'Line Callout 3 (No Border)'),
        # NOTE(review): the four LineCallout4 rows reuse the callout3 prst
        # and base names. DrawingML defines no *Callout4 presets, so the
        # prst reuse is plausible, but the duplicated 'Line Callout 3' base
        # names look like copy/paste residue -- confirm against PowerPoint.
        ('msoShapeLineCallout4', 'borderCallout3', 'Line Callout 3'),
        ('msoShapeLineCallout4AccentBar', 'accentCallout3',
         'Line Callout 3 (Accent Bar)'),
        ('msoShapeLineCallout4BorderandAccentBar', 'accentBorderCallout3',
         'Line Callout 3 (Border and Accent Bar)'),
        ('msoShapeLineCallout4NoBorder', 'callout3',
         'Line Callout 3 (No Border)'),
        ('msoShapeLineInverse', 'lineInv', 'Straight Connector'),
        ('msoShapeMathDivide', 'mathDivide', 'Division'),
        ('msoShapeMathEqual', 'mathEqual', 'Equal'),
        ('msoShapeMathMinus', 'mathMinus', 'Minus'),
        ('msoShapeMathMultiply', 'mathMultiply', 'Multiply'),
        ('msoShapeMathNotEqual', 'mathNotEqual', 'Not Equal'),
        ('msoShapeMathPlus', 'mathPlus', 'Plus'),
        ('msoShapeMoon', 'moon', 'Moon'),
        ('msoShapeNoSymbol', 'noSmoking', '"No" symbol'),
        ('msoShapeNonIsoscelesTrapezoid', 'nonIsoscelesTrapezoid',
         'Non-isosceles Trapezoid'),
        ('msoShapeNotchedRightArrow', 'notchedRightArrow',
         'Notched Right Arrow'),
        ('msoShapeOctagon', 'octagon', 'Octagon'),
        ('msoShapeOval', 'ellipse', 'Oval'),
        ('msoShapeOvalCallout', 'wedgeEllipseCallout', 'Oval Callout'),
        ('msoShapeParallelogram', 'parallelogram', 'Parallelogram'),
        ('msoShapePentagon', 'homePlate', 'Pentagon'),
        ('msoShapePie', 'pie', 'Pie'),
        ('msoShapePieWedge', 'pieWedge', 'Pie'),
        ('msoShapePlaque', 'plaque', 'Plaque'),
        ('msoShapePlaqueTabs', 'plaqueTabs', 'Plaque Tabs'),
        ('msoShapeQuadArrow', 'quadArrow', 'Quad Arrow'),
        ('msoShapeQuadArrowCallout', 'quadArrowCallout',
         'Quad Arrow Callout'),
        ('msoShapeRectangle', 'rect', 'Rectangle'),
        ('msoShapeRectangularCallout', 'wedgeRectCallout',
         'Rectangular Callout'),
        ('msoShapeRegularPentagon', 'pentagon', 'Regular Pentagon'),
        ('msoShapeRightArrow', 'rightArrow', 'Right Arrow'),
        ('msoShapeRightArrowCallout', 'rightArrowCallout',
         'Right Arrow Callout'),
        ('msoShapeRightBrace', 'rightBrace', 'Right Brace'),
        ('msoShapeRightBracket', 'rightBracket', 'Right Bracket'),
        ('msoShapeRightTriangle', 'rtTriangle', 'Right Triangle'),
        ('msoShapeRound1Rectangle', 'round1Rect',
         'Round Single Corner Rectangle'),
        ('msoShapeRound2DiagRectangle', 'round2DiagRect',
         'Round Diagonal Corner Rectangle'),
        ('msoShapeRound2SameRectangle', 'round2SameRect',
         'Round Same Side Corner Rectangle'),
        ('msoShapeRoundedRectangle', 'roundRect', 'Rounded Rectangle'),
        ('msoShapeRoundedRectangularCallout', 'wedgeRoundRectCallout',
         'Rounded Rectangular Callout'),
        ('msoShapeSmileyFace', 'smileyFace', 'Smiley Face'),
        ('msoShapeSnip1Rectangle', 'snip1Rect',
         'Snip Single Corner Rectangle'),
        ('msoShapeSnip2DiagRectangle', 'snip2DiagRect',
         'Snip Diagonal Corner Rectangle'),
        ('msoShapeSnip2SameRectangle', 'snip2SameRect',
         'Snip Same Side Corner Rectangle'),
        ('msoShapeSnipRoundRectangle', 'snipRoundRect',
         'Snip and Round Single Corner Rectangle'),
        ('msoShapeSquareTabs', 'squareTabs', 'Square Tabs'),
        ('msoShapeStripedRightArrow', 'stripedRightArrow',
         'Striped Right Arrow'),
        ('msoShapeSun', 'sun', 'Sun'),
        ('msoShapeSwooshArrow', 'swooshArrow', 'Swoosh Arrow'),
        ('msoShapeTear', 'teardrop', 'Teardrop'),
        ('msoShapeTrapezoid', 'trapezoid', 'Trapezoid'),
        ('msoShapeUTurnArrow', 'uturnArrow', 'U-Turn Arrow'),
        ('msoShapeUpArrow', 'upArrow', 'Up Arrow'),
        ('msoShapeUpArrowCallout', 'upArrowCallout', 'Up Arrow Callout'),
        ('msoShapeUpDownArrow', 'upDownArrow', 'Up-Down Arrow'),
        ('msoShapeUpDownArrowCallout', 'upDownArrowCallout',
         'Up-Down Arrow Callout'),
        ('msoShapeUpRibbon', 'ribbon2', 'Up Ribbon'),
        ('msoShapeVerticalScroll', 'verticalScroll', 'Vertical Scroll'),
        ('msoShapeWave', 'wave', 'Wave')
    )
def const_name_map():
    """
    Sequence of tuples representing the mapping of msoAutoShapeType
    enumeration names to the constant names used in python-pptx to identify
    an auto shape type. The mapping is largely coercing the camel case to
    upper snake case, but some names produced by that transformation require
    adjustment to be suitable.

    Access with::

        for ms_name, const_name in const_name_map():
            ...
    """
    return (
        ('msoShape10PointStar', 'STAR_10_POINT'),
        ('msoShape12PointStar', 'STAR_12_POINT'),
        ('msoShape16pointStar', 'STAR_16_POINT'),
        ('msoShape24pointStar', 'STAR_24_POINT'),
        ('msoShape32pointStar', 'STAR_32_POINT'),
        ('msoShape4pointStar', 'STAR_4_POINT'),
        ('msoShape5pointStar', 'STAR_5_POINT'),
        ('msoShape6PointStar', 'STAR_6_POINT'),
        ('msoShape7PointStar', 'STAR_7_POINT'),
        ('msoShape8pointStar', 'STAR_8_POINT'),
        ('msoShapeActionButtonBackorPrevious',
         'ACTION_BUTTON_BACK_OR_PREVIOUS'),
        ('msoShapeActionButtonBeginning', 'ACTION_BUTTON_BEGINNING'),
        ('msoShapeActionButtonCustom', 'ACTION_BUTTON_CUSTOM'),
        ('msoShapeActionButtonDocument', 'ACTION_BUTTON_DOCUMENT'),
        ('msoShapeActionButtonEnd', 'ACTION_BUTTON_END'),
        ('msoShapeActionButtonForwardorNext',
         'ACTION_BUTTON_FORWARD_OR_NEXT'),
        ('msoShapeActionButtonHelp', 'ACTION_BUTTON_HELP'),
        ('msoShapeActionButtonHome', 'ACTION_BUTTON_HOME'),
        ('msoShapeActionButtonInformation', 'ACTION_BUTTON_INFORMATION'),
        ('msoShapeActionButtonMovie', 'ACTION_BUTTON_MOVIE'),
        ('msoShapeActionButtonReturn', 'ACTION_BUTTON_RETURN'),
        ('msoShapeActionButtonSound', 'ACTION_BUTTON_SOUND'),
        ('msoShapeArc', 'ARC'),
        ('msoShapeBalloon', 'BALLOON'),
        ('msoShapeBentArrow', 'BENT_ARROW'),
        ('msoShapeBentUpArrow', 'BENT_UP_ARROW'),
        ('msoShapeBevel', 'BEVEL'),
        ('msoShapeBlockArc', 'BLOCK_ARC'),
        ('msoShapeCan', 'CAN'),
        ('msoShapeChartPlus', 'CHART_PLUS'),
        ('msoShapeChartStar', 'CHART_STAR'),
        ('msoShapeChartX', 'CHART_X'),
        ('msoShapeChevron', 'CHEVRON'),
        ('msoShapeChord', 'CHORD'),
        ('msoShapeCircularArrow', 'CIRCULAR_ARROW'),
        ('msoShapeCloud', 'CLOUD'),
        ('msoShapeCloudCallout', 'CLOUD_CALLOUT'),
        ('msoShapeCorner', 'CORNER'),
        ('msoShapeCornerTabs', 'CORNER_TABS'),
        ('msoShapeCross', 'CROSS'),
        ('msoShapeCube', 'CUBE'),
        ('msoShapeCurvedDownArrow', 'CURVED_DOWN_ARROW'),
        ('msoShapeCurvedDownRibbon', 'CURVED_DOWN_RIBBON'),
        ('msoShapeCurvedLeftArrow', 'CURVED_LEFT_ARROW'),
        ('msoShapeCurvedRightArrow', 'CURVED_RIGHT_ARROW'),
        ('msoShapeCurvedUpArrow', 'CURVED_UP_ARROW'),
        ('msoShapeCurvedUpRibbon', 'CURVED_UP_RIBBON'),
        ('msoShapeDecagon', 'DECAGON'),
        ('msoShapeDiagonalStripe', 'DIAGONAL_STRIPE'),
        ('msoShapeDiamond', 'DIAMOND'),
        ('msoShapeDodecagon', 'DODECAGON'),
        ('msoShapeDonut', 'DONUT'),
        ('msoShapeDoubleBrace', 'DOUBLE_BRACE'),
        ('msoShapeDoubleBracket', 'DOUBLE_BRACKET'),
        ('msoShapeDoubleWave', 'DOUBLE_WAVE'),
        ('msoShapeDownArrow', 'DOWN_ARROW'),
        ('msoShapeDownArrowCallout', 'DOWN_ARROW_CALLOUT'),
        ('msoShapeDownRibbon', 'DOWN_RIBBON'),
        ('msoShapeExplosion1', 'EXPLOSION1'),
        ('msoShapeExplosion2', 'EXPLOSION2'),
        ('msoShapeFlowchartAlternateProcess', 'FLOWCHART_ALTERNATE_PROCESS'),
        ('msoShapeFlowchartCard', 'FLOWCHART_CARD'),
        ('msoShapeFlowchartCollate', 'FLOWCHART_COLLATE'),
        ('msoShapeFlowchartConnector', 'FLOWCHART_CONNECTOR'),
        ('msoShapeFlowchartData', 'FLOWCHART_DATA'),
        ('msoShapeFlowchartDecision', 'FLOWCHART_DECISION'),
        ('msoShapeFlowchartDelay', 'FLOWCHART_DELAY'),
        ('msoShapeFlowchartDirectAccessStorage',
         'FLOWCHART_DIRECT_ACCESS_STORAGE'),
        ('msoShapeFlowchartDisplay', 'FLOWCHART_DISPLAY'),
        ('msoShapeFlowchartDocument', 'FLOWCHART_DOCUMENT'),
        ('msoShapeFlowchartExtract', 'FLOWCHART_EXTRACT'),
        ('msoShapeFlowchartInternalStorage', 'FLOWCHART_INTERNAL_STORAGE'),
        ('msoShapeFlowchartMagneticDisk', 'FLOWCHART_MAGNETIC_DISK'),
        ('msoShapeFlowchartManualInput', 'FLOWCHART_MANUAL_INPUT'),
        ('msoShapeFlowchartManualOperation', 'FLOWCHART_MANUAL_OPERATION'),
        ('msoShapeFlowchartMerge', 'FLOWCHART_MERGE'),
        ('msoShapeFlowchartMultidocument', 'FLOWCHART_MULTIDOCUMENT'),
        ('msoShapeFlowchartOfflineStorage', 'FLOWCHART_OFFLINE_STORAGE'),
        ('msoShapeFlowchartOffpageConnector', 'FLOWCHART_OFFPAGE_CONNECTOR'),
        ('msoShapeFlowchartOr', 'FLOWCHART_OR'),
        ('msoShapeFlowchartPredefinedProcess',
         'FLOWCHART_PREDEFINED_PROCESS'),
        ('msoShapeFlowchartPreparation', 'FLOWCHART_PREPARATION'),
        ('msoShapeFlowchartProcess', 'FLOWCHART_PROCESS'),
        ('msoShapeFlowchartPunchedTape', 'FLOWCHART_PUNCHED_TAPE'),
        ('msoShapeFlowchartSequentialAccessStorage',
         'FLOWCHART_SEQUENTIAL_ACCESS_STORAGE'),
        ('msoShapeFlowchartSort', 'FLOWCHART_SORT'),
        ('msoShapeFlowchartStoredData', 'FLOWCHART_STORED_DATA'),
        ('msoShapeFlowchartSummingJunction', 'FLOWCHART_SUMMING_JUNCTION'),
        ('msoShapeFlowchartTerminator', 'FLOWCHART_TERMINATOR'),
        ('msoShapeFoldedCorner', 'FOLDED_CORNER'),
        ('msoShapeFrame', 'FRAME'),
        ('msoShapeFunnel', 'FUNNEL'),
        ('msoShapeGear6', 'GEAR_6'),
        ('msoShapeGear9', 'GEAR_9'),
        ('msoShapeHalfFrame', 'HALF_FRAME'),
        ('msoShapeHeart', 'HEART'),
        ('msoShapeHeptagon', 'HEPTAGON'),
        ('msoShapeHexagon', 'HEXAGON'),
        ('msoShapeHorizontalScroll', 'HORIZONTAL_SCROLL'),
        ('msoShapeIsoscelesTriangle', 'ISOSCELES_TRIANGLE'),
        ('msoShapeLeftArrow', 'LEFT_ARROW'),
        ('msoShapeLeftArrowCallout', 'LEFT_ARROW_CALLOUT'),
        ('msoShapeLeftBrace', 'LEFT_BRACE'),
        ('msoShapeLeftBracket', 'LEFT_BRACKET'),
        ('msoShapeLeftCircularArrow', 'LEFT_CIRCULAR_ARROW'),
        ('msoShapeLeftRightArrow', 'LEFT_RIGHT_ARROW'),
        ('msoShapeLeftRightArrowCallout', 'LEFT_RIGHT_ARROW_CALLOUT'),
        ('msoShapeLeftRightCircularArrow', 'LEFT_RIGHT_CIRCULAR_ARROW'),
        ('msoShapeLeftRightRibbon', 'LEFT_RIGHT_RIBBON'),
        ('msoShapeLeftRightUpArrow', 'LEFT_RIGHT_UP_ARROW'),
        ('msoShapeLeftUpArrow', 'LEFT_UP_ARROW'),
        ('msoShapeLightningBolt', 'LIGHTNING_BOLT'),
        ('msoShapeLineCallout1', 'LINE_CALLOUT_1'),
        ('msoShapeLineCallout1AccentBar', 'LINE_CALLOUT_1_ACCENT_BAR'),
        ('msoShapeLineCallout1BorderandAccentBar',
         'LINE_CALLOUT_1_BORDER_AND_ACCENT_BAR'),
        ('msoShapeLineCallout1NoBorder', 'LINE_CALLOUT_1_NO_BORDER'),
        ('msoShapeLineCallout2', 'LINE_CALLOUT_2'),
        ('msoShapeLineCallout2AccentBar', 'LINE_CALLOUT_2_ACCENT_BAR'),
        ('msoShapeLineCallout2BorderandAccentBar',
         'LINE_CALLOUT_2_BORDER_AND_ACCENT_BAR'),
        ('msoShapeLineCallout2NoBorder', 'LINE_CALLOUT_2_NO_BORDER'),
        ('msoShapeLineCallout3', 'LINE_CALLOUT_3'),
        ('msoShapeLineCallout3AccentBar', 'LINE_CALLOUT_3_ACCENT_BAR'),
        ('msoShapeLineCallout3BorderandAccentBar',
         'LINE_CALLOUT_3_BORDER_AND_ACCENT_BAR'),
        ('msoShapeLineCallout3NoBorder', 'LINE_CALLOUT_3_NO_BORDER'),
        ('msoShapeLineCallout4', 'LINE_CALLOUT_4'),
        ('msoShapeLineCallout4AccentBar', 'LINE_CALLOUT_4_ACCENT_BAR'),
        ('msoShapeLineCallout4BorderandAccentBar',
         'LINE_CALLOUT_4_BORDER_AND_ACCENT_BAR'),
        ('msoShapeLineCallout4NoBorder', 'LINE_CALLOUT_4_NO_BORDER'),
        ('msoShapeLineInverse', 'LINE_INVERSE'),
        ('msoShapeMathDivide', 'MATH_DIVIDE'),
        ('msoShapeMathEqual', 'MATH_EQUAL'),
        ('msoShapeMathMinus', 'MATH_MINUS'),
        ('msoShapeMathMultiply', 'MATH_MULTIPLY'),
        ('msoShapeMathNotEqual', 'MATH_NOT_EQUAL'),
        ('msoShapeMathPlus', 'MATH_PLUS'),
        ('msoShapeMoon', 'MOON'),
        ('msoShapeNoSymbol', 'NO_SYMBOL'),
        ('msoShapeNonIsoscelesTrapezoid', 'NON_ISOSCELES_TRAPEZOID'),
        ('msoShapeNotchedRightArrow', 'NOTCHED_RIGHT_ARROW'),
        ('msoShapeOctagon', 'OCTAGON'),
        ('msoShapeOval', 'OVAL'),
        ('msoShapeOvalCallout', 'OVAL_CALLOUT'),
        ('msoShapeParallelogram', 'PARALLELOGRAM'),
        ('msoShapePentagon', 'PENTAGON'),
        ('msoShapePie', 'PIE'),
        ('msoShapePieWedge', 'PIE_WEDGE'),
        ('msoShapePlaque', 'PLAQUE'),
        ('msoShapePlaqueTabs', 'PLAQUE_TABS'),
        ('msoShapeQuadArrow', 'QUAD_ARROW'),
        ('msoShapeQuadArrowCallout', 'QUAD_ARROW_CALLOUT'),
        ('msoShapeRectangle', 'RECTANGLE'),
        ('msoShapeRectangularCallout', 'RECTANGULAR_CALLOUT'),
        ('msoShapeRegularPentagon', 'REGULAR_PENTAGON'),
        ('msoShapeRightArrow', 'RIGHT_ARROW'),
        ('msoShapeRightArrowCallout', 'RIGHT_ARROW_CALLOUT'),
        ('msoShapeRightBrace', 'RIGHT_BRACE'),
        ('msoShapeRightBracket', 'RIGHT_BRACKET'),
        ('msoShapeRightTriangle', 'RIGHT_TRIANGLE'),
        ('msoShapeRound1Rectangle', 'ROUND_1_RECTANGLE'),
        ('msoShapeRound2DiagRectangle', 'ROUND_2_DIAG_RECTANGLE'),
        ('msoShapeRound2SameRectangle', 'ROUND_2_SAME_RECTANGLE'),
        ('msoShapeRoundedRectangle', 'ROUNDED_RECTANGLE'),
        ('msoShapeRoundedRectangularCallout', 'ROUNDED_RECTANGULAR_CALLOUT'),
        ('msoShapeSmileyFace', 'SMILEY_FACE'),
        ('msoShapeSnip1Rectangle', 'SNIP_1_RECTANGLE'),
        ('msoShapeSnip2DiagRectangle', 'SNIP_2_DIAG_RECTANGLE'),
        ('msoShapeSnip2SameRectangle', 'SNIP_2_SAME_RECTANGLE'),
        ('msoShapeSnipRoundRectangle', 'SNIP_ROUND_RECTANGLE'),
        ('msoShapeSquareTabs', 'SQUARE_TABS'),
        ('msoShapeStripedRightArrow', 'STRIPED_RIGHT_ARROW'),
        ('msoShapeSun', 'SUN'),
        ('msoShapeSwooshArrow', 'SWOOSH_ARROW'),
        ('msoShapeTear', 'TEAR'),
        ('msoShapeTrapezoid', 'TRAPEZOID'),
        ('msoShapeUTurnArrow', 'U_TURN_ARROW'),
        ('msoShapeUpArrow', 'UP_ARROW'),
        ('msoShapeUpArrowCallout', 'UP_ARROW_CALLOUT'),
        ('msoShapeUpDownArrow', 'UP_DOWN_ARROW'),
        ('msoShapeUpDownArrowCallout', 'UP_DOWN_ARROW_CALLOUT'),
        ('msoShapeUpRibbon', 'UP_RIBBON'),
        ('msoShapeVerticalScroll', 'VERTICAL_SCROLL'),
        ('msoShapeWave', 'WAVE')
    )
|
|
#!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2004/11/26 11:13:06 $
Pearu Peterson
"""
__version__ = "$Revision: 1.16 $"[10:-1]
f2py_version = 'See `f2py -v`'
import copy
from .auxfuncs import (
getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in,
isintent_out, islogicalfunction, ismoduleroutine, isscalar,
issubroutine, issubroutine_wrap, outmess, show
)
def var2fixfortran(vars, a, fa=None, f90mode=None):
    """Build a fixed-form Fortran declaration line for variable *a*.

    Parameters
    ----------
    vars : dict
        Mapping of variable names to their f2py attribute dictionaries.
    a : str
        Name of the variable whose declaration is requested.
    fa : str, optional
        Name to emit in the declaration; defaults to *a*.
    f90mode : bool, optional
        When true, emit Fortran 90 ``(len=...)``/``(kind=...)`` syntax
        instead of the fixed-form ``*<len>`` style.

    Returns
    -------
    str
        The declaration line, or ``''`` when *a* has no usable definition
        (a diagnostic is written via outmess in that case).
    """
    if fa is None:
        fa = a
    if a not in vars:
        show(vars)
        outmess('var2fixfortran: No definition for argument "%s".\n' % a)
        return ''
    var = vars[a]
    if 'typespec' not in var:
        show(var)
        outmess('var2fixfortran: No typespec for argument "%s".\n' % a)
        return ''
    vardef = var['typespec']
    if vardef == 'type' and 'typename' in var:
        vardef = '%s(%s)' % (vardef, var['typename'])

    # Pick whichever selector applies: numeric kinds or character lengths.
    if 'kindselector' in var:
        selector, lk = var['kindselector'], 'kind'
    elif 'charselector' in var:
        selector, lk = var['charselector'], 'len'
    else:
        selector, lk = {}, ''

    if '*' in selector:
        # Star-form length/kind, e.g. character*10 or character*(*).
        star = selector['*']
        if f90mode:
            vardef = ('%s(len=*)' % (vardef,) if star in ['*', ':', '(*)']
                      else '%s(%s=%s)' % (vardef, lk, star))
        else:
            vardef = ('%s*(%s)' % (vardef, star) if star in ['*', ':']
                      else '%s*%s' % (vardef, star))
    elif 'len' in selector:
        vardef = '%s(len=%s' % (vardef, selector['len'])
        # An explicit length may be accompanied by a kind.
        vardef += ',kind=%s)' % selector['kind'] if 'kind' in selector else ')'
    elif 'kind' in selector:
        vardef = '%s(kind=%s)' % (vardef, selector['kind'])

    vardef = '%s %s' % (vardef, fa)
    if 'dimension' in var:
        vardef = '%s(%s)' % (vardef, ','.join(var['dimension']))
    return vardef
def createfuncwrapper(rout, signature=0):
    """Generate Fortran source wrapping function *rout* as a subroutine.

    The generated wrapper receives the function result through its first
    argument (named ``<name>f2pywrap``), which is how f2py calls Fortran
    functions. When *signature* is true, only declarations are emitted
    (no executable call). Note *rout* is modified in place: hidden
    dimension arguments are appended to rout['args'] and rout['vars'].
    """
    assert isfunction(rout)

    # For each assumed-shape dimension (':'), add a hidden integer
    # argument f2py_<arg>_d<i> defaulting to shape(arg, i).
    extra_args = []
    vars = rout['vars']
    for a in rout['args']:
        v = rout['vars'][a]
        for i, d in enumerate(v.get('dimension', [])):
            if d == ':':
                dn = 'f2py_%s_d%s' % (a, i)
                dv = dict(typespec='integer', intent=['hide'])
                dv['='] = 'shape(%s, %s)' % (a, i)
                extra_args.append(dn)
                vars[dn] = dv
                v['dimension'][i] = dn
    rout['args'].extend(extra_args)
    need_interface = bool(extra_args)

    # Accumulate generated source in a one-element list so the nested
    # helper can rebind it.
    ret = ['']

    def add(line, ret=ret):
        # NOTE(review): fixed-form Fortran requires statements to start at
        # column 7 -- verify the spacing inside this format string survived
        # copy/paste of this file.
        ret[0] = '%s\n %s' % (ret[0], line)

    name = rout['name']
    fortranname = getfortranname(rout)
    f90mode = ismoduleroutine(rout)
    newname = '%sf2pywrap' % (name)

    if newname not in vars:
        vars[newname] = vars[name]
        # Result variable replaces the first argument slot.
        args = [newname] + rout['args'][1:]
    else:
        args = [newname] + rout['args']

    # Declaration line for the (renamed) function result.
    l = var2fixfortran(vars, name, newname, f90mode)
    if l[:13] == 'character*(*)':
        # Assumed-length character results cannot be returned through the
        # wrapper; fall back to a fixed length of 10.
        if f90mode:
            l = 'character(len=10)' + l[13:]
        else:
            l = 'character*10' + l[13:]
        charselect = vars[name]['charselector']
        if charselect.get('*', '') == '(*)':
            charselect['*'] = '10'

    sargs = ', '.join(args)
    if f90mode:
        add('subroutine f2pywrap_%s_%s (%s)' %
            (rout['modulename'], name, sargs))
        if not signature:
            add('use %s, only : %s' % (rout['modulename'], fortranname))
    else:
        add('subroutine f2pywrap%s (%s)' % (name, sargs))
        if not need_interface:
            add('external %s' % (fortranname))
            # Declare the external together with the result variable.
            l = l + ', ' + fortranname
    if need_interface:
        # Re-emit 'use' statements from the saved interface (skipping f2py
        # user modules) so kind parameters etc. are in scope.
        for line in rout['saved_interface'].split('\n'):
            if line.lstrip().startswith('use ') and '__user__' not in line:
                add(line)

    # Declare each argument exactly once, in priority order:
    # externals, then scalars, then intent(in), then everything else.
    args = args[1:]
    dumped_args = []
    for a in args:
        if isexternal(vars[a]):
            add('external %s' % (a))
            dumped_args.append(a)
    for a in args:
        if a in dumped_args:
            continue
        if isscalar(vars[a]):
            add(var2fixfortran(vars, a, f90mode=f90mode))
            dumped_args.append(a)
    for a in args:
        if a in dumped_args:
            continue
        if isintent_in(vars[a]):
            add(var2fixfortran(vars, a, f90mode=f90mode))
            dumped_args.append(a)
    for a in args:
        if a in dumped_args:
            continue
        add(var2fixfortran(vars, a, f90mode=f90mode))

    add(l)

    if need_interface:
        if f90mode:
            # f90 module already defines needed interface
            pass
        else:
            add('interface')
            add(rout['saved_interface'].lstrip())
            add('end interface')

    # Call the wrapped function with the original (non-hidden) arguments.
    sargs = ', '.join([a for a in args if a not in extra_args])

    if not signature:
        if islogicalfunction(rout):
            # Normalize any nonstandard logical representation via
            # double negation.
            add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs))
        else:
            add('%s = %s(%s)' % (newname, fortranname, sargs))
    if f90mode:
        add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
    else:
        add('end')
    return ret[0]
def createsubrwrapper(rout, signature=0):
    """Generate Fortran wrapper source for subroutine *rout*.

    Needed mainly for module subroutines and for routines with
    assumed-shape arguments. When *signature* is true, only declarations
    are emitted (no call). Note *rout* is modified in place: hidden
    dimension arguments are appended to rout['args'] and rout['vars'].
    """
    assert issubroutine(rout)

    # For each assumed-shape dimension (':'), add a hidden integer
    # argument f2py_<arg>_d<i> defaulting to shape(arg, i).
    extra_args = []
    vars = rout['vars']
    for a in rout['args']:
        v = rout['vars'][a]
        for i, d in enumerate(v.get('dimension', [])):
            if d == ':':
                dn = 'f2py_%s_d%s' % (a, i)
                dv = dict(typespec='integer', intent=['hide'])
                dv['='] = 'shape(%s, %s)' % (a, i)
                extra_args.append(dn)
                vars[dn] = dv
                v['dimension'][i] = dn
    rout['args'].extend(extra_args)
    need_interface = bool(extra_args)

    # Accumulate generated source in a one-element list so the nested
    # helper can rebind it.
    ret = ['']

    def add(line, ret=ret):
        # NOTE(review): fixed-form Fortran requires statements to start at
        # column 7 -- verify the spacing inside this format string survived
        # copy/paste of this file.
        ret[0] = '%s\n %s' % (ret[0], line)

    name = rout['name']
    fortranname = getfortranname(rout)
    f90mode = ismoduleroutine(rout)

    args = rout['args']

    sargs = ', '.join(args)
    if f90mode:
        add('subroutine f2pywrap_%s_%s (%s)' %
            (rout['modulename'], name, sargs))
        if not signature:
            add('use %s, only : %s' % (rout['modulename'], fortranname))
    else:
        add('subroutine f2pywrap%s (%s)' % (name, sargs))
        if not need_interface:
            add('external %s' % (fortranname))
    if need_interface:
        # Re-emit 'use' statements from the saved interface (skipping f2py
        # user modules) so kind parameters etc. are in scope.
        for line in rout['saved_interface'].split('\n'):
            if line.lstrip().startswith('use ') and '__user__' not in line:
                add(line)

    # Declare each argument exactly once: externals first, then scalars,
    # then everything else.
    dumped_args = []
    for a in args:
        if isexternal(vars[a]):
            add('external %s' % (a))
            dumped_args.append(a)
    for a in args:
        if a in dumped_args:
            continue
        if isscalar(vars[a]):
            add(var2fixfortran(vars, a, f90mode=f90mode))
            dumped_args.append(a)
    for a in args:
        if a in dumped_args:
            continue
        add(var2fixfortran(vars, a, f90mode=f90mode))

    if need_interface:
        if f90mode:
            # f90 module already defines needed interface
            pass
        else:
            add('interface')
            # Replay the saved interface, dropping f2py user-module uses.
            for line in rout['saved_interface'].split('\n'):
                if line.lstrip().startswith('use ') and '__user__' in line:
                    continue
                add(line)
            add('end interface')

    # Call the wrapped subroutine with the original (non-hidden) arguments.
    sargs = ', '.join([a for a in args if a not in extra_args])

    if not signature:
        add('call %s(%s)' % (fortranname, sargs))
    if f90mode:
        add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
    else:
        add('end')
    return ret[0]
def assubr(rout):
    """Return (routine, wrapper_source) with functions recast as subroutines.

    Fortran functions that need wrapping get a shallow copy of *rout*
    whose result variable becomes the first, intent(out) argument, plus
    the generated wrapper source. Wrapped subroutines get a plain wrapper.
    Anything else is returned unchanged with an empty wrapper string.
    """
    if isfunction_wrap(rout):
        fortranname = getfortranname(rout)
        name = rout['name']
        outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % (
            name, fortranname))
        rout = copy.copy(rout)
        fname = name
        # The result variable may be named via a 'result' clause.
        rname = rout['result'] if 'result' in rout else fname
        rout['vars'][fname] = rout['vars'][rname]
        fvar = rout['vars'][fname]
        if not isintent_out(fvar):
            fvar.setdefault('intent', []).append('out')
            # Record the original result name unless an alias already exists.
            if not any(entry.startswith('out=') for entry in fvar['intent']):
                fvar['intent'].append('out=%s' % (rname))
        # The result becomes the leading argument of the wrapper.
        rout['args'][:] = [fname] + rout['args']
        return rout, createfuncwrapper(rout)

    if issubroutine_wrap(rout):
        fortranname = getfortranname(rout)
        name = rout['name']
        outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % (
            name, fortranname))
        rout = copy.copy(rout)
        return rout, createsubrwrapper(rout)

    return rout, ''
|
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Model classes and utility functions for handling
Quotes, Votes and Voters in the Overheard application.
"""
import datetime
import hashlib
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
PAGE_SIZE = 20
DAY_SCALE = 4
class Quote(db.Model):
    """Storage for a single quote and its metadata.

    Properties
      quote: The quote as a string.
      uri: An optional URI that is the source of the quotation.
      rank: A calculated ranking based on the number of votes and when the
        quote was added (zero-padded numeric score + creation_order, so
        string ordering matches score ordering).
      created: When the quote was created, recorded in the number of days
        since the beginning of our local epoch.
      creation_order: Totally unique index on all quotes in order of their
        creation (ISO timestamp + per-user hash, see add_quote).
      votesum: Running sum of the -1/+1 votes cast for this quote.
      creator: The user that added this quote.
    """
    quote = db.StringProperty(required=True, multiline=True)
    uri = db.StringProperty()
    rank = db.StringProperty()
    created = db.IntegerProperty(default=0)
    # Default of a single space sorts before any real creation_order value.
    creation_order = db.StringProperty(default=" ")
    votesum = db.IntegerProperty(default=0)
    creator = db.UserProperty()
class Vote(db.Model):
    """Storage for a single vote by a single user on a single quote.

    Index
      key_name: The email address of the user that voted.
      parent: The quote this is a vote for.

    Properties
      vote: The value of +1 for like, -1 for dislike.
    """
    # 0 means "no vote recorded yet"; set_vote() stores -1, 0 or +1.
    vote = db.IntegerProperty(default=0)
class Voter(db.Model):
    """Storage for metadata about each user, keyed by the user's email.

    Properties
      count: An integer that gets incremented with each quote the user adds.
        Used to build a unique index for quote creation (see _unique_user).
      hasVoted: Has this user ever voted on a quote.
      hasAddedQuote: Has this user ever added a quote.
    """
    count = db.IntegerProperty(default=0)
    hasVoted = db.BooleanProperty(default=False)
    hasAddedQuote = db.BooleanProperty(default=False)
def _get_or_create_voter(user):
    """Return the Voter entity for *user*, keyed by email address.

    A fresh (unsaved) Voter is constructed when none exists yet; the
    caller is responsible for put()-ing it if persistence is wanted.
    """
    key = user.email()
    voter = Voter.get_by_key_name(key)
    return voter if voter is not None else Voter(key_name=key)
def get_progress(user):
    """Return the (hasVoted, hasAddedQuote) flags for the given user."""
    voter = _get_or_create_voter(user)
    return (voter.hasVoted, voter.hasAddedQuote)
def _set_progress_hasVoted(user):
    """Set Voter.hasVoted = True for the given user (transactionally)."""
    def mark_voted():
        voter = _get_or_create_voter(user)
        # Skip the datastore write when the flag is already set.
        if not voter.hasVoted:
            voter.hasVoted = True
            voter.put()
    db.run_in_transaction(mark_voted)
def _unique_user(user):
    """Return a unique, privacy-preserving string for this user's new quote.

    Uniqueness comes from an increasing counter sharded per user; the
    email + counter is hashed so the address never appears in the result.
    """
    def bump_count():
        voter = _get_or_create_voter(user)
        voter.count += 1
        voter.hasAddedQuote = True
        voter.put()
        return voter.count
    count = db.run_in_transaction(bump_count)
    return hashlib.md5("%s|%s" % (user.email(), count)).hexdigest()
def add_quote(text, user, uri=None, _created=None):
    """Add a new quote to the datastore.

    Parameters
      text: The text of the quote.
      user: User who is adding the quote.
      uri: Optional URI pointing to the origin of the quote.
      _created: Allows the caller to override the calculated created
        value, used only for testing.

    Returns
      The id of the quote, or None if the add failed.
    """
    try:
        now = datetime.datetime.now()
        unique_user = _unique_user(user)
        # Days since the local epoch, unless the caller pinned a value.
        created = _created if _created else (now - datetime.datetime(2008, 10, 1)).days
        quote = Quote(
            quote=text,
            created=created,
            creator=user,
            creation_order="%s|%s" % (now.isoformat()[:19], unique_user),
            uri=uri
        )
        quote.put()
        return quote.key().id()
    except db.Error:
        # Best effort: signal failure to the caller rather than raising.
        return None
def del_quote(quote_id, user):
    """Remove a quote.

    User must be the creator of the quote or a site administrator;
    otherwise (or when the quote does not exist) this is a no-op.
    """
    quote = Quote.get_by_id(quote_id)
    if quote is None:
        return
    if users.is_current_user_admin() or quote.creator == user:
        quote.delete()
def get_quote(quote_id):
    """Retrieve a single quote by its datastore id, or None if absent."""
    return Quote.get_by_id(quote_id)
def get_quotes_newest(offset=None):
    """Return up to PAGE_SIZE quotes in newest-first creation order.

    Args:
        offset: The creation_order value to start the page at; this is the
            'extra' value returned from a previous call.

    Returns:
        (quotes, extra) where 'extra' is the offset for the next page, or
        None when there is no further page.
    """
    # Fetch one extra row to detect whether another page exists.
    if offset is None:
        page = Quote.gql('ORDER BY creation_order DESC').fetch(PAGE_SIZE + 1)
    else:
        page = Quote.gql("""WHERE creation_order <= :1
                         ORDER BY creation_order DESC""", offset).fetch(PAGE_SIZE + 1)
    extra = None
    if len(page) > PAGE_SIZE:
        extra = page[-1].creation_order
        page = page[:PAGE_SIZE]
    return page, extra
def set_vote(quote_id, user, newvote):
    """
    Record 'user' casting a 'vote' for a quote with an id of 'quote_id'.
    The 'newvote' is usually an integer in [-1, 0, 1].
    """
    if user is None:
        return
    # NOTE(review): 'email' is computed but never used below; txn() calls
    # user.email() directly each time.
    email = user.email()
    def txn():
        # Runs transactionally: the Vote is a child of the Quote (same
        # entity group), keeping vote and votesum consistent.
        quote = Quote.get_by_id(quote_id)
        vote = Vote.get_by_key_name(key_names = user.email(), parent = quote)
        if vote is None:
            vote = Vote(key_name = user.email(), parent = quote)
        if vote.vote == newvote:
            # No change; skip the writes entirely.
            return
        # Adjust the aggregate: remove the old vote, add the new one.
        quote.votesum = quote.votesum - vote.vote + newvote
        vote.vote = newvote
        # See the docstring of main.py for an explanation of
        # the following formula.
        quote.rank = "%020d|%s" % (
            long(quote.created * DAY_SCALE + quote.votesum),
            quote.creation_order
        )
        db.put([vote, quote])
        # Refresh the memcached copy that voted() reads.
        memcache.set("vote|" + user.email() + "|" + str(quote_id), vote.vote)
    db.run_in_transaction(txn)
    _set_progress_hasVoted(user)
def get_quotes(page=0):
    """Return PAGE_SIZE quotes for *page* (0..19) in rank order.

    Returns (quotes, extra) where 'extra' is the first quote of the next
    page, or None when no further page exists.
    """
    assert page >= 0
    assert page < 20
    # One extra row tells us whether another page follows.
    quotes = Quote.gql('ORDER BY rank DESC').fetch(PAGE_SIZE+1, page*PAGE_SIZE)
    extra = None
    if len(quotes) > PAGE_SIZE:
        if page < 19:
            extra = quotes[-1]
        quotes = quotes[:PAGE_SIZE]
    return quotes, extra
def voted(quote, user):
    """Return *user*'s vote on *quote*, a value in [-1, 0, 1] (0 if no user)."""
    if not user:
        return 0
    cache_key = "vote|" + user.email() + "|" + str(quote.key().id())
    cached = memcache.get(cache_key)
    if cached is not None:
        return cached
    vote = Vote.get_by_key_name(key_names=user.email(), parent=quote)
    value = vote.vote if vote is not None else 0
    memcache.set(cache_key, value)
    return value
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions useful for dealing with hexagonal tilings.
For more information on the concepts employed here, see this informative page
https://www.redblobgames.com/grids/hexagons/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import numpy as np
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def axial_to_cartesian(q, r, size, orientation, aspect_scale=1):
    ''' Map axial *(q,r)* tile coordinates to cartesian *(x,y)* tile-center
    coordinates.

    Useful for positioning other glyphs with cartesian coordinates in
    relation to a hex tiling. Adapted from:
    https://www.redblobgames.com/grids/hexagons/#hex-to-pixel

    Args:
        q (array[float]) : axial q-coordinates
        r (array[float]) : axial r-coordinates
        size (float) : hexagon size (center to top corner for "pointytop",
            center to side corner for "flattop")
        orientation (str) : "pointytop" or "flattop"
        aspect_scale (float, optional) : scale applied in the "cross"
            dimension, to match a plot whose ``aspect_scale != 1``

    Returns:
        (array[float], array[float])
    '''
    sqrt3 = np.sqrt(3)
    if orientation == "pointytop":
        return (size * sqrt3 * (q + r / 2.0) / aspect_scale,
                -size * 3 / 2.0 * r)
    return (size * 3 / 2.0 * q,
            -size * sqrt3 * (r + q / 2.0) * aspect_scale)
def cartesian_to_axial(x, y, size, orientation, aspect_scale=1):
    ''' Map cartesian *(x,y)* points to the axial *(q,r)* coordinates of
    their enclosing hex tiles.

    Adapted from:
    https://www.redblobgames.com/grids/hexagons/#pixel-to-hex

    Args:
        x (array[float]) : x-coordinates to convert
        y (array[float]) : y-coordinates to convert
        size (float) : hexagon size (center to top corner for "pointytop",
            center to side corner for "flattop")
        orientation (str) : "pointytop" or "flattop"
        aspect_scale (float, optional) : scale applied in the "cross"
            dimension, to match a plot whose ``aspect_scale != 1``

    Returns:
        (array[int], array[int])
    '''
    # Rows of the pixel-to-hex conversion matrix for each orientation.
    if orientation == 'flattop':
        m00, m01, m10, m11 = 2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0
    else:
        m00, m01, m10, m11 = np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0
    # Normalize by tile size and undo the aspect scaling / y-flip used by
    # axial_to_cartesian.
    px = x / size * (aspect_scale if orientation == "pointytop" else 1)
    py = -y / size / (aspect_scale if orientation == "flattop" else 1)
    return _round_hex(m00 * px + m01 * py, m10 * px + m11 * py)
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1):
    ''' Perform an equal-weight binning of data points into hexagonal tiles.

    For more sophisticated use cases, e.g. weighted binning or scaling
    individual tiles proportional to some other quantity, consider using
    HoloViews.

    Args:
        x (array[float]) : x-coordinates for binning
        y (array[float]) : y-coordinates for binning
        size (float) : hexagon size (center to top corner for "pointytop",
            center to side corner for "flattop")
        orientation (str, optional) : "pointytop" (default) or "flattop"
        aspect_scale (float, optional) : match a plot's aspect ratio
            scaling so that regular (not "stretched") hexagons are drawn

    Returns:
        DataFrame with columns *q* and *r* (axial tile locations) and
        *counts* (number of points in each tile).

    .. warning::
        Hex binning only functions on linear scales, i.e. not on log plots.
    '''
    pd = import_required('pandas','hexbin requires pandas to be installed')
    # Assign each point to its enclosing tile, then count per tile.
    q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
    frame = pd.DataFrame(dict(r=r, q=q))
    return frame.groupby(['q', 'r']).size().reset_index(name='counts')
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _round_hex(q, r):
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
r (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
q = np.where(cond , -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
from __future__ import print_function, division
try:
import cPickle as pickle
except:
import pickle
from pdb import set_trace
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from collections import Counter
from sklearn import svm
from sklearn import linear_model
import matplotlib.pyplot as plt
import time
import os
import pandas as pd
class MAR(object):
    """Active-learning reading assistant over a citation corpus.

    Holds a tf-idf featurized document set (``self.body`` is a pandas
    DataFrame after loadfile()) and an SVM relevant/irrelevant classifier.
    Documents are labeled incrementally ("yes"/"no"/"undetermined");
    candidates are picked by certainty or uncertainty sampling, and an
    optional logistic-regression curve estimates remaining recall.
    """
    def __init__(self):
        # Learner hyper-parameters.
        self.fea_num = 4000   # number of tf-idf features to keep
        self.step = 10        # candidates returned per query round
        self.enough = 30      # positives needed before aggressive undersampling
        self.kept = 50
        self.atleast = 100    # minimum unlabeled docs sampled as pseudo-negatives

    def create(self, filename):
        """Initialize state for *filename*; load a cached model if one exists.

        Returns the (possibly reloaded) MAR instance; callers must use the
        return value, not the original object.
        """
        self.filename = filename
        self.name = self.filename.split(".")[0]
        self.flag = True
        self.hasLabel = True
        self.record = {"x": [], "pos": []}
        self.body = {}
        self.est = []
        self.est_num = 0
        self.last_pos = 0
        self.last_neg = 0
        try:
            ## if model already exists, load it ##
            # NOTE(review): rebinding ``self`` only rebinds the local name;
            # that is why this method returns self.
            self = self.load()
        except:
            ## otherwise read from file ##
            try:
                self.loadfile()
                self.preprocess()
                self.save()
            except:
                ## cannot find file in workspace ##
                self.flag = False
        self.enable_est = False
        return self

    ### Depreciated
    ### Use previous knowledge, labeled only
    def create_old(self, filename):
        """Legacy loader: append a previously coded CSV to ``self.body``."""
        with open("../workspace/coded/" + str(filename), "r") as csvfile:
            content = [x for x in csv.reader(csvfile, delimiter=',')]
        fields = ["Document Title", "Abstract", "Year", "PDF Link", "code", "time"]
        header = content[0]
        ind0 = header.index("code")
        # Counts from the previous session; later queries subtract these.
        self.last_pos = len([c[ind0] for c in content[1:] if c[ind0] == "yes"])
        self.last_neg = len([c[ind0] for c in content[1:] if c[ind0] == "no"])
        for field in fields:
            ind = header.index(field)
            if field == "time":
                self.body[field].extend([float(c[ind]) for c in content[1:] if c[ind0] != "undetermined"])
            else:
                self.body[field].extend([c[ind] for c in content[1:] if c[ind0] != "undetermined"])
        try:
            ind = header.index("label")
            self.body["label"].extend([c[ind] for c in content[1:] if c[ind0]!="undetermined"])
        except:
            self.body["label"].extend(["unknown"] * (len([c[ind0] for c in content[1:] if c[ind0]!="undetermined"])))
        try:
            ind = header.index("fixed")
            self.body["fixed"].extend([c[ind] for c in content[1:] if c[ind0]!="undetermined"])
        except:
            self.body["fixed"].extend([0] * (len([c[ind0] for c in content[1:] if c[ind0]!="undetermined"])))
        self.preprocess()
        self.save()

    def loadfile(self):
        """Read the raw CSV into ``self.body`` and add any missing columns."""
        self.body = pd.read_csv("../workspace/data/" + str(self.filename),encoding = "ISO-8859-1")
        fields = ["Document Title", "Abstract", "Year", "PDF Link"]
        columns = self.body.columns
        n = len(self.body)
        for field in fields:
            if field not in columns:
                self.body[field] = [""]*n
        if "label" not in columns:
            self.body["label"] = ["unknown"]*n
        if "code" not in columns:
            self.body["code"] = ["undetermined"]*n
        if "time" not in columns:
            self.body["time"] = [0]*n
        if "fixed" not in columns:
            self.body["fixed"] = [0]*n
        self.body = self.body.fillna("")
        return

    def export_feature(self):
        """Dump the sparse feature matrix as 1-based (row, col, value) CSV rows."""
        with open("../workspace/coded/feature_" + str(self.name) + ".csv", "wb") as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',')
            for i in range(self.csr_mat.shape[0]):
                # Walk the CSR row i via its indptr slice.
                for j in range(self.csr_mat.indptr[i],self.csr_mat.indptr[i+1]):
                    csvwriter.writerow([i+1,self.csr_mat.indices[j]+1,self.csr_mat.data[j]])
        return

    def get_numbers(self):
        """Return (pos, neg, total) label counts for this session; refresh pool.

        Side effects: appends to ``self.record`` when new labels arrived, and
        rebuilds ``self.pool`` (undetermined indices) and ``self.labeled``.
        """
        total = len(self.body["code"]) - self.last_pos - self.last_neg
        pos = Counter(self.body["code"])["yes"] - self.last_pos
        neg = Counter(self.body["code"])["no"] - self.last_neg
        try:
            tmp = self.record['x'][-1]
        except:
            tmp = -1
        if int(pos+neg) > tmp:
            self.record['x'].append(int(pos+neg))
            self.record['pos'].append(int(pos))
        self.pool = np.where(np.array(self.body['code']) == "undetermined")[0]
        self.labeled = list(set(range(len(self.body['code']))) - set(self.pool))
        return pos, neg, total

    def export(self):
        """Write coded results to CSV, grouped yes / no / undetermined."""
        fields = ["Document Title", "Abstract", "Year", "PDF Link", "label", "code","time"]
        body = self.body[fields]
        # NOTE(review): sort_values() is not in-place and its result is
        # discarded here; the groups below keep their original row order.
        body.sort_values(by=['time'], ascending=False)
        yes = body.loc[body['code'] == 'yes']
        no = body.loc[body['code'] == 'no']
        und = body.loc[body['code'] == 'undetermined']
        out = pd.concat([yes, no, und], ignore_index=True)
        out.to_csv("../workspace/coded/" + str(self.name) + ".csv",columns=fields,index=False)
        return

    def preprocess(self):
        """Build the vocabulary and the L2-normalized term-frequency matrix."""
        ### Combine title and abstract for training ##################
        content = [str(self.body["Document Title"][index]) + " " + str(self.body["Abstract"][index]) for index in range(len(self.body))]
        #######################################################
        ### Feature selection by tfidf in order to keep vocabulary ###
        tfidfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=True, smooth_idf=False,
                                  sublinear_tf=False,decode_error="ignore")
        tfidf = tfidfer.fit_transform(content)
        # Keep the fea_num highest-weight terms as the vocabulary.
        weight = tfidf.sum(axis=0).tolist()[0]
        kept = np.argsort(weight)[-self.fea_num:]
        self.voc = np.array(list(tfidfer.vocabulary_.keys()))[np.argsort(list(tfidfer.vocabulary_.values()))][kept]
        ##############################################################
        ### Term frequency as feature, L2 normalization ##########
        tfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=u'l2', use_idf=False,
                               vocabulary=self.voc,decode_error="ignore")
        # tfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=False,
        #                        vocabulary=self.voc,decode_error="ignore")
        self.csr_mat = tfer.fit_transform(content)
        ########################################################
        return

    ## save model ##
    def save(self):
        """Pickle this whole object to memory/<name>.pickle."""
        with open("memory/"+str(self.name)+".pickle","wb") as handle:
            pickle.dump(self,handle)

    ## load model ##
    def load(self):
        """Unpickle and return a previously saved MAR object."""
        with open("memory/" + str(self.name) + ".pickle", "rb") as handle:
            tmp = pickle.load(handle)
        return tmp

    def estimate_curve(self, clf, reuse=False, num_neg=0):
        """Estimate total relevant documents via semi-supervised logistic fit.

        Repeatedly fits a logistic regression on *clf*'s decision values,
        samples probable positives from the pool, and stops once the
        positive-count estimate is stable for three rounds.

        Returns:
            (esty, pre): estimated number of positives beyond last_pos, and
            per-document probabilities from the final fit.
        """
        def prob_sample(probs):
            # Deterministic proportional sampling: walk documents in
            # descending probability, emitting one per unit of accumulated
            # probability mass.
            order = np.argsort(probs)[::-1]
            count = 0
            can = []
            sample = []
            for i, x in enumerate(probs[order]):
                count = count + x
                can.append(order[i])
                if count >= 1:
                    # sample.append(np.random.choice(can,1)[0])
                    sample.append(can[0])
                    count -= 1
                    can = []
            return sample
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        # Only this session's labels (drop the first last_pos/last_neg by time).
        poses = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
        negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
        ###############################################
        # prob = clf.predict_proba(self.csr_mat)[:,:1]
        prob1 = clf.decision_function(self.csr_mat)
        prob = np.array([[x] for x in prob1])
        # prob = self.csr_mat
        y = np.array([1 if x == 'yes' else 0 for x in self.body['code']])
        y0 = np.copy(y)
        if len(poses) and reuse:
            all = list(set(poses) | set(negs) | set(self.pool))
        else:
            all = range(len(y))
        pos_num_last = Counter(y0)[1]
        # Stop after `lifes` consecutive rounds without growth.
        lifes = 3
        life = lifes
        while (True):
            C = Counter(y[all])[1]/ num_neg
            es = linear_model.LogisticRegression(penalty='l2', fit_intercept=True, C=C)
            es.fit(prob[all], y[all])
            pos_at = list(es.classes_).index(1)
            pre = es.predict_proba(prob[self.pool])[:, pos_at]
            y = np.copy(y0)
            sample = prob_sample(pre)
            for x in self.pool[sample]:
                y[x] = 1
            pos_num = Counter(y)[1]
            if pos_num == pos_num_last:
                life = life - 1
                if life == 0:
                    break
            else:
                life = lifes
            pos_num_last = pos_num
        esty = pos_num - self.last_pos
        pre = es.predict_proba(prob)[:, pos_at]
        return esty, pre

    ## Train model ##
    def train(self, pne=True, weighting=True):
        """Fit the SVM and return the next query candidates.

        Args:
            pne: Presumed-negative mode — sample unlabeled docs as negatives.
            weighting: Use class_weight='balanced' in the SVM.

        Returns:
            (uncertain_id, uncertain_prob, certain_id, certain_prob, clf).
        """
        clf = svm.SVC(kernel='linear', probability=True, class_weight='balanced') if weighting else svm.SVC(kernel='linear', probability=True)
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        left = poses
        decayed = list(left) + list(negs)
        unlabeled = np.where(np.array(self.body['code']) == "undetermined")[0]
        try:
            unlabeled = np.random.choice(unlabeled,size=np.max((len(decayed),2*len(left),self.atleast)),replace=False)
        except:
            # Fewer unlabeled docs than requested; keep them all.
            pass
        if not pne:
            unlabeled = []
        # Treat sampled unlabeled docs as presumed negatives.
        labels = np.array([x if x!='undetermined' else 'no' for x in self.body['code']])
        all_neg = list(negs)+list(unlabeled)
        sample = list(decayed) + list(unlabeled)
        clf.fit(self.csr_mat[sample], labels[sample])
        ## aggressive undersampling ##
        if len(poses) >= self.enough:
            # Keep only the len(left) most-confidently-negative examples.
            train_dist = clf.decision_function(self.csr_mat[all_neg])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist = -train_dist
            negs_sel = np.argsort(train_dist)[::-1][:len(left)]
            sample = list(left) + list(np.array(all_neg)[negs_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        elif pne:
            # Keep the more-confidently-negative half of the unlabeled sample.
            train_dist = clf.decision_function(self.csr_mat[unlabeled])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist = -train_dist
            unlabel_sel = np.argsort(train_dist)[::-1][:int(len(unlabeled) / 2)]
            sample = list(decayed) + list(np.array(unlabeled)[unlabel_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        uncertain_id, uncertain_prob = self.uncertain(clf)
        certain_id, certain_prob = self.certain(clf)
        if self.enable_est:
            if self.last_pos>0 and len(poses)-self.last_pos>0:
                self.est_num, self.est = self.estimate_curve(clf, reuse=True, num_neg=len(sample)-len(left))
            else:
                self.est_num, self.est = self.estimate_curve(clf, reuse=False, num_neg=len(sample)-len(left))
            return uncertain_id, self.est[uncertain_id], certain_id, self.est[certain_id], clf
        else:
            return uncertain_id, uncertain_prob, certain_id, certain_prob, clf

    ## reuse
    def train_reuse(self, pne=True):
        """Like train(), but only reuses labels added after the imported session."""
        pne = True
        clf = svm.SVC(kernel='linear', probability=True)
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        # Keep only labels newer than the imported (last_pos/last_neg) ones.
        left = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
        negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
        if len(left) == 0:
            # No new positives yet: fall back to random query.
            return [], [], self.random(), []
        decayed = list(left) + list(negs)
        unlabeled = np.where(np.array(self.body['code']) == "undetermined")[0]
        try:
            unlabeled = np.random.choice(unlabeled, size=np.max((len(decayed), self.atleast)), replace=False)
        except:
            pass
        if not pne:
            unlabeled = []
        labels = np.array([x if x != 'undetermined' else 'no' for x in self.body['code']])
        all_neg = list(negs) + list(unlabeled)
        sample = list(decayed) + list(unlabeled)
        clf.fit(self.csr_mat[sample], labels[sample])
        ## aggressive undersampling ##
        if len(poses) >= self.enough:
            train_dist = clf.decision_function(self.csr_mat[all_neg])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist = -train_dist
            negs_sel = np.argsort(train_dist)[::-1][:len(left)]
            sample = list(left) + list(np.array(all_neg)[negs_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        elif pne:
            train_dist = clf.decision_function(self.csr_mat[unlabeled])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist = -train_dist
            unlabel_sel = np.argsort(train_dist)[::-1][:int(len(unlabeled) / 2)]
            sample = list(decayed) + list(np.array(unlabeled)[unlabel_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        uncertain_id, uncertain_prob = self.uncertain(clf)
        certain_id, certain_prob = self.certain(clf)
        if self.enable_est:
            self.est_num, self.est = self.estimate_curve(clf, reuse=False, num_neg=len(sample)-len(left))
            return uncertain_id, self.est[uncertain_id], certain_id, self.est[certain_id], clf
        else:
            return uncertain_id, uncertain_prob, certain_id, certain_prob, clf

    ## Get suspecious codes
    def susp(self, clf):
        """Return labeled docs most likely mislabeled, by classifier disagreement.

        Returns:
            (sel_pos, probs_pos, sel_neg, probs_neg): 'yes'-coded docs sorted
            by ascending P(yes), and 'no'-coded docs sorted by ascending P(no).
        """
        # NOTE(review): thresholds/lengths below are currently unused (the
        # filtering lines are commented out).
        thres_pos = 1
        thres_neg = 0.5
        length_pos = 10
        length_neg = 10
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        # poses = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
        # negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
        # Only docs whose label has not been human-confirmed ("fixed").
        poses = np.array(poses)[np.where(np.array(self.body['fixed'])[poses] == 0)[0]]
        negs = np.array(negs)[np.where(np.array(self.body['fixed'])[negs] == 0)[0]]
        if len(poses) > 0:
            pos_at = list(clf.classes_).index("yes")
            prob_pos = clf.predict_proba(self.csr_mat[poses])[:,pos_at]
            # se_pos = np.argsort(prob_pos)[:length_pos]
            se_pos = np.argsort(prob_pos)
            # se_pos = [s for s in se_pos if prob_pos[s]<thres_pos]
            sel_pos = poses[se_pos]
            probs_pos = prob_pos[se_pos]
        else:
            sel_pos = np.array([])
            probs_pos = np.array([])
        if len(negs) > 0:
            if clf:
                neg_at = list(clf.classes_).index("no")
                prob_neg = clf.predict_proba(self.csr_mat[negs])[:,neg_at]
                # se_neg = np.argsort(prob_neg)[:length_neg]
                se_neg = np.argsort(prob_neg)
                # se_neg = [s for s in se_neg if prob_neg[s]<thres_neg]
                sel_neg = negs[se_neg]
                probs_neg = prob_neg[se_neg]
            else:
                sel_neg = negs
                probs_neg = np.array([])
        else:
            sel_neg = np.array([])
            probs_neg = np.array([])
        return sel_pos, probs_pos, sel_neg, probs_neg

    ## BM25 ##
    def BM25(self, query):
        """Score every document against *query* terms with BM25; store in self.bm."""
        b = 0.75
        k1 = 1.5
        ### Combine title and abstract for training ###########
        content = [str(self.body["Document Title"][index]) + " " + str(self.body["Abstract"][index]) for index in
                   range(len(self.body["Document Title"]))]
        #######################################################
        ### Feature selection by tfidf in order to keep vocabulary ###
        tfidfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=False, smooth_idf=False,
                                  sublinear_tf=False, decode_error="ignore")
        tf = tfidfer.fit_transform(content)
        d_avg = np.mean(np.sum(tf, axis=1))
        score = {}
        for word in query:
            score[word] = []
            # KeyError here if the query term is absent from the corpus.
            id = tfidfer.vocabulary_[word]
            df = sum([1 for wc in tf[:,id] if wc>0])
            idf = np.log((len(content)-df+0.5)/(df+0.5))
            for i in range(len(content)):
                score[word].append(idf*tf[i,id]/(tf[i,id]+k1*((1-b)+b*np.sum(tf[0],axis=1)[0,0]/d_avg)))
        self.bm = np.sum(list(score.values()),axis=0)

    def BM25_get(self):
        """Return the top-step pool documents by BM25 score."""
        ids = self.pool[np.argsort(self.bm[self.pool])[::-1][:self.step]]
        scores = self.bm[ids]
        return ids, scores

    ## Get certain ##
    def certain(self, clf):
        """Return the step pool docs with highest P(yes) (certainty sampling)."""
        pos_at = list(clf.classes_).index("yes")
        if len(self.pool) == 0:
            return [], []
        prob = clf.predict_proba(self.csr_mat[self.pool])[:,pos_at]
        order = np.argsort(prob)[::-1][:self.step]
        return np.array(self.pool)[order], np.array(prob)[order]

    ## Get uncertain ##
    def uncertain(self, clf):
        """Return the step pool docs closest to the decision boundary."""
        pos_at = list(clf.classes_).index("yes")
        if len(self.pool) == 0:
            return [], []
        prob = clf.predict_proba(self.csr_mat[self.pool])[:, pos_at]
        train_dist = clf.decision_function(self.csr_mat[self.pool])
        order = np.argsort(np.abs(train_dist))[:self.step]  ## uncertainty sampling by distance to decision plane
        # order = np.argsort(np.abs(prob-0.5))[:self.step]  ## uncertainty sampling by prediction probability
        return np.array(self.pool)[order], np.array(prob)[order]

    ## Get random ##
    def random(self):
        """Return up to step random documents from the unlabeled pool."""
        return np.random.choice(self.pool,size=np.min((self.step,len(self.pool))),replace=False)

    ## Format ##
    def format(self, id, prob=[]):
        """Return docs *id* as a list of stringified dicts for the UI.

        NOTE(review): mutable default 'prob=[]' is safe here only because
        it is never mutated.
        """
        result = []
        for ind, i in enumerate(id):
            tmp = {key: str(self.body[key][i]) for key in self.body}
            tmp["id"] = str(i)
            if prob != []:
                tmp["prob"] = prob[ind]
            result.append(tmp)
        return result

    ## Code candidate studies ##
    def code(self, id, label):
        """Record the human label for document *id*, timestamping the decision.

        Relabeling with the same value marks the document as "fixed"
        (human-confirmed), which excludes it from susp().
        """
        if self.body['code'][id] == label:
            self.body['fixed'][id] = 1
        self.body["code"][id] = label
        self.body["time"][id] = time.time()

    ## Plot ##
    def plot(self):
        """Render the recall curve (relevant found vs. reviewed) to a PNG.

        Clears ./static/image first; returns the new image's filename.
        """
        font = {'family': 'normal',
                'weight': 'bold',
                'size': 20}
        plt.rc('font', **font)
        paras = {'lines.linewidth': 5, 'legend.fontsize': 20, 'axes.labelsize': 30, 'legend.frameon': False,
                 'figure.autolayout': True, 'figure.figsize': (16, 8)}
        plt.rcParams.update(paras)
        if len(self.labeled) <= 0:
            return
        fig = plt.figure()
        # Cumulative count of "yes" labels in labeling-time order.
        order = np.argsort(np.array(self.body['time'])[self.labeled])
        seq = np.array(self.body['code'])[np.array(self.labeled)[order]]
        counter = 0
        rec = [0]
        for s in seq:
            if s == 'yes':
                counter += 1
            rec.append(counter)
        plt.plot(range(len(rec)), rec)
        plt.ylabel("Relevant Found")
        plt.xlabel("Documents Reviewed")
        name = self.name + "_" + str(int(time.time())) + ".png"
        dir = "./static/image"
        for file in os.listdir(dir):
            os.remove(os.path.join(dir, file))
        plt.savefig("./static/image/" + name)
        plt.close(fig)
        return name

    def get_allpos(self):
        """Return the number of ground-truth positives not already imported."""
        return len([1 for c in self.body["label"] if c=="yes"])-self.last_pos

    ## Restart ##
    def restart(self):
        """Delete the pickled model so the next create() starts fresh."""
        os.remove("./memory/"+self.name+".pickle")

    ## Get missed relevant docs ##
    def get_rest(self):
        """Return ground-truth positives not yet coded "yes", keyed by index."""
        rest = [x for x in range(len(self.body['label'])) if self.body['label'][x]=='yes' and self.body['code'][x]!='yes']
        rests = {}
        # fields = ["Document Title", "Abstract", "Year", "PDF Link"]
        fields = ["Document Title"]
        for r in rest:
            rests[r] = {}
            for f in fields:
                rests[r][f] = self.body[f][r]
        return rests

    def latest_labeled(self):
        """Return labeled doc indices, most recently labeled first."""
        order = np.argsort(np.array(self.body['time'])[self.labeled])[::-1]
        return np.array(self.labeled)[order]
|
|
'''This is the Android implementation of NFC Scanning using the
built in NFC adapter of some android phones.
'''
from kivy.app import App
from kivy.clock import Clock
#Detect which platform we are on
from kivy.utils import platform
if platform != 'android':
raise ImportError
import threading
from . import NFCBase
from jnius import autoclass, cast
from android.runnable import run_on_ui_thread
from android import activity
BUILDVERSION = autoclass('android.os.Build$VERSION').SDK_INT
NfcAdapter = autoclass('android.nfc.NfcAdapter')
PythonActivity = autoclass('org.kivy.android.PythonActivity')
JString = autoclass('java.lang.String')
Charset = autoclass('java.nio.charset.Charset')
locale = autoclass('java.util.Locale')
Intent = autoclass('android.content.Intent')
IntentFilter = autoclass('android.content.IntentFilter')
PendingIntent = autoclass('android.app.PendingIntent')
Ndef = autoclass('android.nfc.tech.Ndef')
NdefRecord = autoclass('android.nfc.NdefRecord')
NdefMessage = autoclass('android.nfc.NdefMessage')
app = None
class ScannerAndroid(NFCBase):
''' This is the class responsible for handling the interface with the
Android NFC adapter. See Module Documentation for details.
'''
name = 'NFCAndroid'
def nfc_init(self):
''' This is where we initialize NFC adapter.
'''
# Initialize NFC
global app
app = App.get_running_app()
# Make sure we are listening to new intent
activity.bind(on_new_intent=self.on_new_intent)
# Configure nfc
self.j_context = context = PythonActivity.mActivity
self.nfc_adapter = NfcAdapter.getDefaultAdapter(context)
# Check if adapter exists
if not self.nfc_adapter:
return False
# specify that we want our activity to remain on top when a new intent
# is fired
self.nfc_pending_intent = PendingIntent.getActivity(context, 0,
Intent(context, context.getClass()).addFlags(
Intent.FLAG_ACTIVITY_SINGLE_TOP), 0)
# Filter for different types of action, by default we enable all.
# These are only for handling different NFC technologies when app is in foreground
self.ndef_detected = IntentFilter(NfcAdapter.ACTION_NDEF_DISCOVERED)
#self.tech_detected = IntentFilter(NfcAdapter.ACTION_TECH_DISCOVERED)
#self.tag_detected = IntentFilter(NfcAdapter.ACTION_TAG_DISCOVERED)
# setup tag discovery for ourt tag type
try:
self.ndef_detected.addCategory(Intent.CATEGORY_DEFAULT)
# setup the foreground dispatch to detect all mime types
self.ndef_detected.addDataType('*/*')
self.ndef_exchange_filters = [self.ndef_detected]
except Exception as err:
raise Exception(repr(err))
return True
def get_ndef_details(self, tag):
''' Get all the details from the tag.
'''
details = {}
try:
#print 'id'
details['uid'] = ':'.join(['{:02x}'.format(bt & 0xff) for bt in tag.getId()])
#print 'technologies'
details['Technologies'] = tech_list = [tech.split('.')[-1] for tech in tag.getTechList()]
#print 'get NDEF tag details'
ndefTag = cast('android.nfc.tech.Ndef', Ndef.get(tag))
#print 'tag size'
details['MaxSize'] = ndefTag.getMaxSize()
#details['usedSize'] = '0'
#print 'is tag writable?'
details['writable'] = ndefTag.isWritable()
#print 'Data format'
# Can be made readonly
# get NDEF message details
ndefMesg = ndefTag.getCachedNdefMessage()
# get size of current records
details['consumed'] = len(ndefMesg.toByteArray())
#print 'tag type'
details['Type'] = ndefTag.getType()
# check if tag is empty
if not ndefMesg:
details['Message'] = None
return details
ndefrecords = ndefMesg.getRecords()
length = len(ndefrecords)
#print 'length', length
# will contain the NDEF record types
recTypes = []
for record in ndefrecords:
recTypes.append({
'type': ''.join(map(unichr, record.getType())),
'payload': ''.join(map(unichr, record.getPayload()))
})
details['recTypes'] = recTypes
except Exception as err:
print(str(err))
return details
def on_new_intent(self, intent):
''' This function is called when the application receives a
new intent, for the ones the application has registered previously,
either in the manifest or in the foreground dispatch setup in the
nfc_init function above.
'''
action_list = (NfcAdapter.ACTION_NDEF_DISCOVERED,)
# get TAG
#tag = cast('android.nfc.Tag', intent.getParcelableExtra(NfcAdapter.EXTRA_TAG))
#details = self.get_ndef_details(tag)
if intent.getAction() not in action_list:
print('unknow action, avoid.')
return
rawmsgs = intent.getParcelableArrayExtra(NfcAdapter.EXTRA_NDEF_MESSAGES)
if not rawmsgs:
return
for message in rawmsgs:
message = cast(NdefMessage, message)
payload = message.getRecords()[0].getPayload()
print('payload: {}'.format(''.join(map(chr, payload))))
def nfc_disable(self):
'''Disable app from handling tags.
'''
self.disable_foreground_dispatch()
def nfc_enable(self):
'''Enable app to handle tags when app in foreground.
'''
self.enable_foreground_dispatch()
def create_AAR(self):
'''Create the record responsible for linking our application to the tag.
'''
return NdefRecord.createApplicationRecord(JString("org.electrum.kivy"))
def create_TNF_EXTERNAL(self, data):
'''Create our actual payload record.
'''
if BUILDVERSION >= 14:
domain = "org.electrum"
stype = "externalType"
extRecord = NdefRecord.createExternal(domain, stype, data)
else:
# Creating the NdefRecord manually:
extRecord = NdefRecord(
NdefRecord.TNF_EXTERNAL_TYPE,
"org.electrum:externalType",
'',
data)
return extRecord
def create_ndef_message(self, *recs):
''' Create the Ndef message that will be written to tag
'''
records = []
for record in recs:
if record:
records.append(record)
return NdefMessage(records)
@run_on_ui_thread
def disable_foreground_dispatch(self):
'''Disable foreground dispatch when app is paused.
'''
self.nfc_adapter.disableForegroundDispatch(self.j_context)
@run_on_ui_thread
def enable_foreground_dispatch(self):
'''Start listening for new tags
'''
self.nfc_adapter.enableForegroundDispatch(self.j_context,
self.nfc_pending_intent, self.ndef_exchange_filters, self.ndef_tech_list)
@run_on_ui_thread
def _nfc_enable_ndef_exchange(self, data):
# Enable p2p exchange
# Create record
ndef_record = NdefRecord(
NdefRecord.TNF_MIME_MEDIA,
'org.electrum.kivy', '', data)
# Create message
ndef_message = NdefMessage([ndef_record])
# Enable ndef push
self.nfc_adapter.enableForegroundNdefPush(self.j_context, ndef_message)
# Enable dispatch
self.nfc_adapter.enableForegroundDispatch(self.j_context,
self.nfc_pending_intent, self.ndef_exchange_filters, [])
@run_on_ui_thread
def _nfc_disable_ndef_exchange(self):
# Disable p2p exchange
self.nfc_adapter.disableForegroundNdefPush(self.j_context)
self.nfc_adapter.disableForegroundDispatch(self.j_context)
def nfc_enable_exchange(self, data):
'''Enable Ndef exchange for p2p
'''
self._nfc_enable_ndef_exchange()
    def nfc_disable_exchange(self):
        ''' Disable Ndef exchange for p2p

        Thin wrapper for the UI-thread helper that tears down both the
        Ndef push advertisement and foreground dispatch.
        '''
        self._nfc_disable_ndef_exchange()
# --- NOTE(review): file-concatenation marker ("|" lines) replaced with a
# comment; a second, unrelated module (GAE endpoints helpers) follows. ---
from __future__ import absolute_import
from functools import wraps
import os
import string
from google.appengine.api import (
namespace_manager,
oauth,
)
from google.appengine.ext import (
db,
ndb,
)
from protorpc import (
messages,
remote,
)
from webapp2_extras import sessions
import endpoints
import webob
import tap
# Google Cloud Endpoints
def get_user_from_endpoints_service(endpoints_service):
  """Load the tap.User attached to the session cookie of a request.

  Rebuilds just enough request state (app + parsed cookies) for
  webapp2_extras' session store to decode the session, then loads the
  user object from that session.
  """
  state = endpoints_service.request_state
  state.app = tap.get_app()
  state.cookies = webob.request.RequestCookies({
      "HTTP_COOKIE": state.headers.get("cookie"),
  })
  store = sessions.SessionStore(state)
  namespace = namespace_manager.get_namespace()
  store.config["secret_key"] = tap.get_namespaced_secret_key(namespace)
  return tap.User.load_from_session(store.get_session())
def get_user_id_from_endpoints_service(raises=True):
  """Return the authenticated caller's user id string, or None.

  Falls back to the OAuth API on dev_appserver, where
  endpoints.get_current_user() yields a user whose user_id() is None
  (http://stackoverflow.com/questions/16661109).

  Args:
    raises: when True, raise endpoints.UnauthorizedException instead of
      returning None if no user can be determined.
  """
  user = endpoints.get_current_user()
  if user is None:
    if raises:
      raise endpoints.UnauthorizedException("Invalid token.")
    return None
  user_id = user.user_id()
  if user_id is not None:
    return user_id
  # dev_appserver work-around: consult the OAuth service directly.
  oauth_user = oauth.get_current_user(os.getenv("OAUTH_LAST_SCOPE"))
  if oauth_user is None or oauth_user.user_id() is None:
    if raises:
      raise endpoints.UnauthorizedException()
    return None
  return oauth_user.user_id()
def get_user_id(_self=None, raises=True):
  """Return the caller's user id, base62-encoded, or None.

  `_self` is accepted (and ignored) so the function can be used where a
  bound-method-style callable is expected.
  """
  raw_id = get_user_id_from_endpoints_service(raises)
  if not raw_id:
    return None
  return tap.base62_encode(int(raw_id))
def get_user_id_or_ip(self=None):
  """Return the caller's numeric user id, or their remote address.

  Anonymous callers fall back to the request's remote address when a
  service instance is supplied; otherwise None.
  """
  raw_id = get_user_id_from_endpoints_service(raises=False)
  if raw_id:
    return int(raw_id)
  if self:
    return self.request_state.remote_address
  return None
def rate_limit(rate, size, key=None, tag=None):
  """Decorator throttling an endpoints method with a token bucket.

  Args:
    rate: replenish rate of the bucket.
    size: bucket capacity (burst size).
    key: optional bucket key, or a callable taking the service instance
      and returning one; non-string keys are coerced with str().
    tag: bucket name prefix; defaults to "<module>.<function>".

  The wrapped call raises endpoints.ForbiddenException when the bucket
  rejects the request.
  """
  def decorator(func):
    bucket_prefix = tag
    if bucket_prefix is None:
      bucket_prefix = ".".join((func.__module__, func.__name__))
    bucket = tap.TokenBucket(rate, size, prefix=bucket_prefix)
    # Decorator order matters: wraps(...) must see the synctasklet-wrapped
    # generator so metadata survives; synctasklet drives the yield below.
    @wraps(func)
    @ndb.synctasklet
    def inner(self, *argv, **kwargv):
      bucket_key = key
      if key is not None:
        if callable(key):
          bucket_key = key(self)
        if not isinstance(bucket_key, basestring):
          bucket_key = str(bucket_key)
      accepted = yield bucket.is_acceptable_async(key=bucket_key)
      if not accepted:
        raise endpoints.ForbiddenException("Too many requests")
      raise ndb.Return(func(self, *argv, **kwargv))
    return inner
  return decorator
class CRUDServiceClass(remote._ServiceClass):
  """Metaclass that prefixes public attributes with "_<ClassName>_".

  Every attribute whose name does not start with an underscore is
  renamed at class-creation time, keeping CRUD helper attributes out of
  protorpc's remote-method namespace.
  """

  @staticmethod
  def __add_prefix(name, dct):
    # "attr" -> "_<name>_attr"; underscore-prefixed names pass through.
    return dict(
        (key if key.startswith("_") else "_{0}_{1}".format(name, key), value)
        for key, value in dct.iteritems())

  def __new__(cls, name, bases, dct):
    prefixed = CRUDServiceClass.__add_prefix(name, dct)
    return super(CRUDServiceClass, cls).__new__(cls, name, bases, prefixed)

  def __init__(cls, name, bases, dct):
    prefixed = CRUDServiceClass.__add_prefix(name, dct)
    super(CRUDServiceClass, cls).__init__(name, bases, prefixed)
class CRUDService(remote.Service):
  """Base protorpc service; subclasses get CRUDServiceClass name prefixing."""
  __metaclass__ = CRUDServiceClass
class ValidationError(endpoints.BadRequestException, messages.ValidationError, db.BadValueError, ValueError):
  """Validation failure catchable as an endpoints 400, a protorpc
  validation error, a datastore BadValueError, or a plain ValueError.
  """
  pass
try:
  from endpoints_proto_datastore.ndb import EndpointsAliasProperty
except (ImportError, IOError):
  # endpoints-proto-datastore is optional; the helpers below are only
  # defined when the package is importable.
  pass
else:
  class EndpointsModelUserAdapter(object):
    # Mixin for EndpointsModel subclasses: exposes the key's string id as
    # an `id` alias property that hydrates the instance from the stored
    # entity when set.
    # Refs:
    # https://github.com/GoogleCloudPlatform/endpoints-proto-datastore/blob/758032a/examples/custom_alias_properties/main.py
    # http://endpoints-proto-datastore.appspot.com/examples/custom_alias_properties.html
    @ndb.synctasklet
    def IdSet(self, value):
      # Setter for the `id` alias property below.
      if not isinstance(value, basestring):
        raise TypeError("ID must be a string.")
      self.key = ndb.Key(self.__class__, value)
      entity = yield self.key.get_async()
      if entity is not None:
        # Entity already exists: copy stored values onto this instance
        # and flag it as loaded from the datastore.
        self._CopyFromEntity(entity)
        self._from_datastore = True
    @EndpointsAliasProperty(setter=IdSet, required=True)
    def id(self):
      # Getter: the string id of the entity's key (None when keyless).
      if self.key is not None:
        return self.key.string_id()
  class EndpointsUserIDProperty(ndb.StringProperty):
    """A custom user property for interacting with user ID tokens.
    Uses the tools provided in the endpoints module to detect the current user.
    In addition, has an optional parameter raise_unauthorized which will return
    a 401 to the endpoints API request if a user can't be detected.
    """
    def __init__(self, *args, **kwargs):
      """Constructor for string property.
      NOTE: Have to pop custom arguments from the keyword argument dictionary
      to avoid corrupting argument order when sent to the superclass.
      Attributes:
        _raise_unauthorized: An optional boolean, defaulting to False. If True,
            the property will return a 401 to the API request if a user can't
            be detected.
      """
      self._raise_unauthorized = kwargs.pop('raise_unauthorized', False)
      super(EndpointsUserIDProperty, self).__init__(*args, **kwargs)
    def _set_value(self, entity, value):
      """Internal helper to set value on model entity.
      If the value to be set is null, will try to retrieve the current user and
      will return a 401 if a user can't be found and raise_unauthorized is True.
      Args:
        entity: An instance of some NDB model.
        value: The value of this property to be set on the instance.
      """
      if value is None:
        # Fill in the authenticated caller's id; raises 401 when configured
        # to and no user is available.
        value = get_user_id_from_endpoints_service(raises=self._raise_unauthorized)
      super(EndpointsUserIDProperty, self)._set_value(entity, value)
    def _fix_up(self, cls, code_name):
      """Internal helper called to register the property with the model class.
      Overrides the _set_attributes method on the model class to interject this
      attribute in to the keywords passed to it. Since the method _set_attributes
      is called by the model class constructor to set values, this -- in congress
      with the custom defined _set_value -- will make sure this property always
      gets set when an instance is created, even if not passed in.
      Args:
        cls: The model class that owns the property.
        code_name: The name of the attribute on the model class corresponding
            to the property.
      """
      original_set_attributes = cls._set_attributes
      def CustomSetAttributes(setattr_self, kwds):
        """Custom _set_attributes which makes sure this property is always set."""
        if self._code_name not in kwds:
          # Force a None so _set_value runs and substitutes the user id.
          kwds[self._code_name] = None
        original_set_attributes(setattr_self, kwds)
      cls._set_attributes = CustomSetAttributes
      super(EndpointsUserIDProperty, self)._fix_up(cls, code_name)
# NOTE(review): trailing dataset-viewer residue ("Subsets and Splits",
# "No community queries yet", ...) removed — it was extraction junk, not
# part of the original source file.