prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Here is a code file:\n```python\nimport re, os, logging, time\nfrom autotest.client.shared import utils, error\nfrom virttest import qemu_monitor, storage, env_process, data_dir, utils_misc\n\n\n@error.context_aware\ndef run_block_mirror(test, params, env):\n \"\"\"\n Test block mirroring fu... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport re, os, logging, time\nfrom autotest.client.shared import utils, error\nfrom virttest import qemu_monitor, storage, env_process, data_dir, utils_misc\n\n\n@error.context_aware\ndef run_block_mirror(test, params, env):\n \"\"\"\n Test bl... | ```python
import re, os, logging, time
from autotest.client.shared import utils, error
from virttest import qemu_monitor, storage, env_process, data_dir, utils_misc
@error.context_aware
def run_block_mirror(test, params, env):
"""
Test block mirroring functionality
Test consists of two subtests:
1) Mirror the guest and switch to the mirrored one
2) Synchronize disk and then do continuous backup
"qemu-img compare" is used to verify disk is mirrored successfully.
"""
image_name = params.get("image_name", "image")
image_format = params.get("image_format", "qcow2")
image_orig = "%s.%s" % (image_name, image_format)
image_mirror = utils_misc.get_path(data_dir.get_data_dir(),
"%s-mirror.%s" % (image_name, image_format))
drive_format = params["drive_format"]
block_mirror_cmd = params.get("block_mirror_cmd", "drive-mirror")
device_id = "None"
qemu_img = params["qemu_img_binary"]
def check_block_jobs_info():
"""
Verify block-jobs status reported by monitor command info block-jobs.
@return: parsed output of info block-jobs
"""
fail = 0
try:
output = vm.monitor.info("block-jobs")
except qemu_monitor.MonitorError, e:
logging.error(e)
fail += 1
return None, None
return (re.match("[\w ]+", str(output)), re.findall("\d+", str(output)))
def run_mirroring(vm, cmd, device, dest, complete = True):
"""
Run block mirroring.
@param vm: Virtual machine object
@param cmd: Command for start mirroring
@param device: Guest device that has to be mirrored
@param dest: Location image has to be mirrored into
@param complete: If True, mirroring will complete (switch to mirror),
If False, finish image synchronization and keep
mirroring running (any changes will be mirrored)
"""
vm.monitor.cmd("%s %s %s" % (cmd, device, dest))
while True:
blkjobout, blkjobstatus = check_block_jobs_info()
if 'mirror' in blkjobout.group(0):
logging.info("[(Completed bytes): %s (Total bytes): %s "
"(Speed limit in bytes/s): %s]", blkjobstatus[-3],
blkjobstatus[-2], blkjobstatus[-1])
if int(blkjobstatus[-3]) != int(blkjobstatus[-2]):
time.sleep(10)
continue
else:
logging.info("Target synchronized with source")
if complete:
logging.info("Start mirroring completing")
vm.monitor.cmd("stop")
vm.monitor.cmd("block_job_complete %s" % device)
time.sleep(5)
else:
break
elif 'No' in blkjobout.group(0):
logging.info("Block job completed")
break
def compare_images(cmd, img1, img2):
"""
Check if images are equal. Raise error.TestFail if images not equal.
@param cmd: qemu-img executable
@param img1: First image to compare
@param img2: Second image to compare
"""
logging.info("Comparing images")
compare_cmd = "%s compare %s %s" % (cmd, img1, img2)
rv = utils.run(compare_cmd, ignore_status=True)
if rv.exit_status == 0:
logging.info("Images are equal")
elif rv.exit_status == 1:
raise error.TestFail("Images differ - test failed")
else:
raise error.TestError("Error during image comparison")
try:
# Setup phase
vm_name = params['main_vm']
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.create()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
img_path = storage.get_image_filename(params, data_dir.get_data_dir())
if 'ide' in drive_format:
device_id = " id0-hd0"
elif 'virtio' in drive_format:
device_id = " virtio0"
else:
raise error.TestNAError("Drive format %s is not supported" %
drive_format)
# Subtest 1 - Complete mirroring
error.context("Testing complete mirroring")
run_mirroring(vm, block_mirror_cmd, device_id, image_mirror)
output = vm.monitor.info("block")
if image_orig in output or image_mirror not in output:
raise error.TestError("Mirrored image not being used by guest")
error.context("Compare fully mirrored images")
compare_images(qemu_img, img_path, image_mirror)
vm.destroy()
# Subtest 2 - Continuous backup
error.context("Testing continuous backup")
vm.create()
session = vm.wait_for_login(timeout=timeout)
run_mirroring(vm, block_mirror_cmd, device_id, image_mirror,False)
if image_orig in output or image_mirror not in output:
raise error.TestError("Mirrored image not used by guest")
for fn in range(0,128):
session.cmd("dd bs=1024 count=1024 if=/dev/urandom of=tmp%d.file"
% fn)
time.sleep(10)
vm.monitor.cmd("stop")
time.sleep(5)
error.context("Compare original and backup images")
compare_images(qemu_img, img_path, image_mirror)
vm.destroy()
finally:
if os.path.isfile(image_mirror):
os.remove(image_mirror)
``` |
[
{
"content": "```python\nimport numpy\nimport xraylib\n\n\"\"\"\ntransfocator_id30b : transfocator for id13b:\n It can:\n\n 1) guess the lens configuration (number of lenses for each type) for a given photon energy\n and target image size. Use transfocator_compute_configuration() fo... | [
{
"content": "<|memory_start|>```python\nimport numpy\nimport xraylib\n\n\"\"\"\ntransfocator_id30b : transfocator for id13b:\n It can:\n\n 1) guess the lens configuration (number of lenses for each type) for a given photon energy\n and target image size. Use transfocator_compute_co... | ```python
import numpy
import xraylib
"""
transfocator_id30b : transfocator for id13b:
It can:
1) guess the lens configuration (number of lenses for each type) for a given photon energy
and target image size. Use transfocator_compute_configuration() for this task
2) for a given transfocator configuration, compute the main optical parameters
(image size, focal distance, focal position and divergence).
Use transfocator_compute_parameters() for this task
3) Performs full ray tracing. Use id30b_ray_tracing() for this task
Note that for the optimization and parameters calculations the transfocator configuration is
given in keywords. For ray tracing calculations many parameters of the transfocator are hard coded
with the values of id30b
See main program for examples.
Dependencies:
Numpy
xraylib (to compute refracion indices)
Shadow (for ray tracing only)
matplotlib (for some plots of ray=tracing)
Side effects:
When running ray tracing some files are created.
MODIFICATION HISTORY:
2015-03-25 srio@esrf.eu, written
"""
__author__ = "Manuel Sanchez del Rio"
__contact__ = "srio@esrf.eu"
__copyright__ = "ESRF, 2015"
def transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=1 ):
"""
Computes the optimum transfocator configuration for a given photon energy and target image size.
All length units are cm
:param photon_energy_ev: the photon energy in eV
:param s_target: the target image size in cm.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenases for each type of lens. nlenses_max = [15,3,1]
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:param:verbose: set to 1 for verbose text output
:return: a list with the number of lenses of each type.
"""
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return None
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \
lens_diameter=lens_diameter,method=2)
focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p)
div_q_target = alpha * lens_diameter / focal_q_target
#corrections for extreme cases
source_demagnified = 2.35*sigmaz*focal_q_target/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2)
nlenses_target = _transfocator_guess_configuration(focal_f_target,deltas=deltas,\
nlenses_max=nlenses_max,radii=nlenses_radii, )
if verbose:
print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target))
print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target))
print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4))
print("transfocator_compute_configuration: nlenses_target: ",nlenses_target)
return nlenses_target
def transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 ):
"""
Computes the parameters of the optical performances of a given transgocator configuration.
returns a l
All length units are cm
:param photon_energy_ev:
:param nlenses_target: a list with the lens configuration, i.e. the number of lenses of each type.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenases for each type of lens. nlenses_max = [15,3,1]
TODO: remove (not used)
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:return: a list with parameters (image_siza, lens_focal_distance,
focal_position from transfocator center, divergence of beam after the transfocator)
"""
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_f = _transfocator_calculate_focal_distance( deltas=deltas,\
nlenses=nlenses_target,radii=nlenses_radii)
focal_q = 1.0 / (1.0/focal_f - 1.0/tf_p)
div_q = alpha * lens_diameter / focal_q
#corrections
source_demagnified = 2.35*sigmaz*focal_q/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target = numpy.sqrt( (div_q*(tf_q-focal_q))**2 + (source_demagnified)**2 )
return (s_target,focal_f,focal_q,div_q)
def transfocator_nlenses_to_slots(nlenses,nlenses_max=None):
"""
converts the transfocator configuration from a list of the number of lenses of each type,
into a list of active (1) or inactive (0) actuators for the slots.
:param nlenses: the list with number of lenses (e.g., [5,2,0]
:param nlenses_max: the maximum number of lenses of each type, usually powers of two minus one.
E.g. [15,3,1]
:return: a list of on (1) and off (0) slots, e.g., [1, 0, 1, 0, 0, 1, 0]
(first type: 1*1+0*2+1*4+0*8=5, second type: 0*1+1*2=2, third type: 0*1=0)
"""
if nlenses_max == None:
nlenses_max = nlenses
ss = []
for i,iopt in enumerate(nlenses):
if iopt > nlenses_max[i]:
print("Error: i:%d, nlenses: %d, nlenses_max: %d"%(i,iopt,nlenses_max[i]))
ncharacters = len("{0:b}".format(nlenses_max[i]))
si = list( ("{0:0%db}"%(ncharacters)).format(int(iopt)) )
si.reverse()
ss += si
on_off = [int(i) for i in ss]
#print("transfocator_nlenses_to_slots: nlenses_max: ",nlenses_max," nlenses: ",nlenses," slots: ",on_off)
return on_off
def _transfocator_calculate_focal_distance(deltas=[0.999998],nlenses=[1],radii=[500e-4]):
inverse_focal_distance = 0.0
for i,nlensesi in enumerate(nlenses):
if nlensesi > 0:
focal_distance_i = radii[i] / (2.*nlensesi*deltas[i])
inverse_focal_distance += 1.0/focal_distance_i
if inverse_focal_distance == 0:
return 99999999999999999999999999.
else:
return 1.0/inverse_focal_distance
def _tansfocator_guess_focal_position( s_target, p=5960., q=3800.0, sigmaz=6.46e-4, \
alpha=0.66, lens_diameter=0.05, method=2):
x = 1e15
if method == 1: # simple sum
AA = 2.35*sigmaz/p
BB = -(s_target + alpha * lens_diameter)
CC = alpha*lens_diameter*q
cc = numpy.roots([AA,BB,CC])
x = cc[1]
return x
if method == 2: # sum in quadrature
AA = ( (2.35*sigmaz)**2)/(p**2)
BB = 0.0
CC = alpha**2 * lens_diameter**2 - s_target**2
DD = - 2.0 * alpha**2 * lens_diameter**2 * q
EE = alpha**2 * lens_diameter**2 * q**2
cc = numpy.roots([AA,BB,CC,DD,EE])
for i,cci in enumerate(cc):
if numpy.imag(cci) == 0:
return numpy.real(cci)
return x
def _transfocator_guess_configuration(focal_f_target,deltas=[0.999998],nlenses_max=[15],radii=[500e-4]):
nn = len(nlenses_max)
ncombinations = (1+nlenses_max[0]) * (1+nlenses_max[1]) * (1+nlenses_max[2])
icombinations = 0
aa = numpy.zeros((3,ncombinations),dtype=int)
bb = numpy.zeros(ncombinations)
for i0 in range(1+nlenses_max[0]):
for i1 in range(1+nlenses_max[1]):
for i2 in range(1+nlenses_max[2]):
aa[0,icombinations] = i0
aa[1,icombinations] = i1
aa[2,icombinations] = i2
bb[icombinations] = focal_f_target - _transfocator_calculate_focal_distance(deltas=deltas,nlenses=[i0,i1,i2],radii=radii)
icombinations += 1
bb1 = numpy.abs(bb)
ibest = bb1.argmin()
return (aa[:,ibest]).tolist()
#
#
#
def id30b_ray_tracing(emittH=4e-9,emittV=1e-11,betaH=35.6,betaV=3.0,number_of_rays=50000,\
density=1.845,symbol="Be",tf_p=1000.0,tf_q=1000.0,lens_diameter=0.05,\
slots_max=None,slots_on_off=None,photon_energy_ev=14000.0,\
slots_lens_thickness=None,slots_steps=None,slots_radii=None,\
s_target=10e-4,focal_f=10.0,focal_q=10.0,div_q=1e-6):
#=======================================================================================================================
# Gaussian undulator source
#=======================================================================================================================
import Shadow
#import Shadow.ShadowPreprocessorsXraylib as sx
sigmaXp = numpy.sqrt(emittH/betaH)
sigmaZp = numpy.sqrt(emittV/betaV)
sigmaX = emittH/sigmaXp
sigmaZ = emittV/sigmaZp
print("\n\nElectron sizes H:%f um, V:%fu m;\nelectron divergences: H:%f urad, V:%f urad"%\
(sigmaX*1e6, sigmaZ*1e6, sigmaXp*1e6, sigmaZp*1e6))
# set Gaussian source
src = Shadow.Source()
src.set_energy_monochromatic(photon_energy_ev)
src.set_gauss(sigmaX*1e2,sigmaZ*1e2,sigmaXp,sigmaZp)
print("\n\nElectron sizes stored H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.apply_gaussian_undulator(undulator_length_in_m=2.8, user_unit_to_m=1e-2, verbose=1)
print("\n\nElectron sizes stored (undulator) H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
print("\n\nSource size in vertical FWHM: %f um\n"%\
(2.35*src.SIGMAZ*1e4))
src.NPOINT = number_of_rays
src.ISTAR1 = 0 # 677543155
src.write("start.00")
# create source
beam = Shadow.Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
#=======================================================================================================================
# complete the (detailed) transfocator description
#=======================================================================================================================
print("\nSetting detailed Transfocator for ID30B")
slots_nlenses = numpy.array(slots_max)*numpy.array(slots_on_off)
slots_empty = (numpy.array(slots_max)-slots_nlenses)
#
####interactive=True, SYMBOL="SiC",DENSITY=3.217,FILE="prerefl.dat",E_MIN=100.0,E_MAX=20000.0,E_STEP=100.0
Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=False,E_MIN=2000.0,E_MAX=55000.0,E_STEP=100.0,\
DENSITY=density,SYMBOL=symbol,FILE="Be2_55.dat" )
nslots = len(slots_max)
prerefl_file = ["Be2_55.dat" for i in range(nslots)]
print("slots_max: ",slots_max)
#print("slots_target: ",slots_target)
print("slots_on_off: ",slots_on_off)
print("slots_steps: ",slots_steps)
print("slots_radii: ",slots_radii)
print("slots_nlenses: ",slots_nlenses)
print("slots_empty: ",slots_empty)
#calculate distances, nlenses and slots_empty
# these are distances p and q with TF length removed
tf_length = numpy.array(slots_steps).sum() #tf length in cm
tf_fs_before = tf_p - 0.5*tf_length #distance from source to center of transfocator
tf_fs_after = tf_q - 0.5*tf_length # distance from center of transfocator to image
# for each slot, these are the empty distances before and after the lenses
tf_p0 = numpy.zeros(nslots)
tf_q0 = numpy.array(slots_steps) - (numpy.array(slots_max) * slots_lens_thickness)
# add now the p q distances
tf_p0[0] += tf_fs_before
tf_q0[-1] += tf_fs_after
print("tf_p0: ",tf_p0)
print("tf_q0: ",tf_q0)
print("tf_length: %f cm"%(tf_length))
# build transfocator
tf = Shadow.CompoundOE(name='TF ID30B')
tf.append_transfocator(tf_p0.tolist(), tf_q0.tolist(), \
nlenses=slots_nlenses.tolist(), radius=slots_radii, slots_empty=slots_empty.tolist(),\
thickness=slots_lens_thickness, prerefl_file=prerefl_file,\
surface_shape=4, convex_to_the_beam=0, diameter=lens_diameter,\
cylinder_angle=0.0,interthickness=50e-4,use_ccc=0)
itmp = input("SHADOW Source complete. Do you want to run SHADOR trace? [1=Yes,0=No]: ")
if str(itmp) != "1":
return
#trace system
tf.dump_systemfile()
beam.traceCompoundOE(tf,write_start_files=0,write_end_files=0,write_star_files=0, write_mirr_files=0)
#write only last result file
beam.write("star_tf.dat")
print("\nFile written to disk: star_tf.dat")
#
# #ideal calculations
#
print("\n\n\n")
print("=============================================== TRANSFOCATOR OUTPUTS ==========================================")
print("\nTHEORETICAL results: ")
print("REMIND-----With these lenses we obtained (analytically): ")
print("REMIND----- focal_f: %f cm"%(focal_f))
print("REMIND----- focal_q: %f cm"%(focal_q))
print("REMIND----- s_target: %f um"%(s_target*1e4))
demagnification_factor = tf_p/focal_q
theoretical_focal_size = src.SIGMAZ*2.35/demagnification_factor
# analyze shadow results
print("\nSHADOW results: ")
st1 = beam.get_standard_deviation(3,ref=0)
st2 = beam.get_standard_deviation(3,ref=1)
print(" stDev*2.35: unweighted: %f um, weighted: %f um "%(st1*2.35*1e4,st2*2.35*1e4))
tk = beam.histo1(3, nbins=75, ref=1, nolost=1, write="HISTO1")
print(" Histogram FWHM: %f um "%(1e4*tk["fwhm"]))
print(" Transmitted intensity: %f (source was: %d) (transmission is %f %%) "%(beam.intensity(nolost=1), src.NPOINT, beam.intensity(nolost=1)/src.NPOINT*100))
#scan around image
xx1 = numpy.linspace(0.0,1.1*tf_fs_after,11) # position from TF exit plane
#xx0 = focal_q - tf_length*0.5
xx0 = focal_q - tf_length*0.5 # position of focus from TF exit plane
xx2 = numpy.linspace(xx0-100.0,xx0+100,21) # position from TF exit plane
xx3 = numpy.array([tf_fs_after])
xx = numpy.concatenate(([-0.5*tf_length],xx1,xx2,[tf_fs_after]))
xx.sort()
f = open("id30b.spec","w")
f.write("#F id30b.spec\n")
f.write("\n#S 1 calculations for id30b transfocator\n")
f.write("#N 8\n")
labels = " %18s %18s %18s %18s %18s %18s %18s %18s"%\
("pos from source","pos from image","[pos from TF]", "pos from TF center", "pos from focus",\
"fwhm shadow(stdev)","fwhm shadow(histo)","fwhm theoretical")
f.write("#L "+labels+"\n")
out = numpy.zeros((8,xx.size))
for i,pos in enumerate(xx):
beam2 = beam.duplicate()
beam2.retrace(-tf_fs_after+pos)
fwhm1 = 2.35*1e4*beam2.get_standard_deviation(3,ref=1,nolost=1)
tk = beam2.histo1(3, nbins=75, ref=1, nolost=1)
fwhm2 = 1e4*tk["fwhm"]
#fwhm_th = 1e4*transfocator_calculate_estimated_size(pos,diameter=diameter,focal_distance=focal_q)
fwhm_th2 = 1e4*numpy.sqrt( (div_q*(pos+0.5*tf_length-focal_q))**2 + theoretical_focal_size**2 )
#fwhm_th2 = 1e4*( numpy.abs(div_q*(pos-focal_q+0.5*tf_length)) + theoretical_focal_size )
out[0,i] = tf_fs_before+tf_length+pos
out[1,i] = -tf_fs_after+pos
out[2,i] = pos
out[3,i] = pos+0.5*tf_length
out[4,i] = pos+0.5*tf_length-focal_q
out[5,i] = fwhm1
out[6,i] = fwhm2
out[7,i] = fwhm_th2
f.write(" %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f \n"%\
(tf_fs_before+tf_length+pos,\
-tf_fs_after+pos,\
pos,\
pos+0.5*tf_length,\
pos+0.5*tf_length-focal_q,\
fwhm1,fwhm2,fwhm_th2))
f.close()
print("File with beam evolution written to disk: id30b.spec")
#
# plots
#
itmp = input("Do you want to plot the intensity distribution and beam evolution? [1=yes,0=No]")
if str(itmp) != "1":
return
import matplotlib.pylab as plt
plt.figure(1)
plt.plot(out[1,:],out[5,:],'blue',label="fwhm shadow(stdev)")
plt.plot(out[1,:],out[6,:],'green',label="fwhm shadow(histo1)")
plt.plot(out[1,:],out[7,:],'red',label="fwhm theoretical")
plt.xlabel("Distance from image plane [cm]")
plt.ylabel("spot size [um] ")
ax = plt.subplot(111)
ax.legend(bbox_to_anchor=(1.1, 1.05))
print("Kill graphic to continue.")
plt.show()
Shadow.ShadowTools.histo1(beam,3,nbins=75,ref=1,nolost=1,calfwhm=1)
input("<Enter> to finish.")
return None
def id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=None):
if nlenses_target == None:
force_nlenses = 0
else:
force_nlenses = 1
#
# define lens setup (general)
#
xrl_symbol = ["Be","Be","Be"]
xrl_density = [1.845,1.845,1.845]
lens_diameter = 0.05
nlenses_max = [15,3,1]
nlenses_radii = [500e-4,1000e-4,1500e-4]
sigmaz=6.46e-4
alpha = 0.55
tf_p = 5960 # position of the TF measured from the center of the transfocator
tf_q = 9760 - tf_p # position of the image plane measured from the center of the transfocator
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return
print("================================== TRANSFOCATOR INPUTS ")
print("Photon energy: %f eV"%(photon_energy_ev))
if force_nlenses:
print("Forced_nlenses: ",nlenses_target)
else:
print("target size: %f cm"%(s_target))
print("materials: ",xrl_symbol)
print("densities: ",xrl_density)
print("Lens diameter: %f cm"%(lens_diameter))
print("nlenses_max:",nlenses_max,"nlenses_radii: ",nlenses_radii)
print("Source size (sigma): %f um, FWHM: %f um"%(1e4*sigmaz,2.35*1e4*sigmaz))
print("Distances: tf_p: %f cm, tf_q: %f cm"%(tf_p,tf_q))
print("alpha: %f"%(alpha))
print("========================================================")
if force_nlenses != 1:
nlenses_target = transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, lens_diameter=lens_diameter, \
sigmaz=sigmaz, alpha=alpha, \
tf_p=tf_p,tf_q=tf_q, verbose=1)
(s_target,focal_f,focal_q,div_q) = \
transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, \
lens_diameter=lens_diameter,\
sigmaz=sigmaz, alpha=alpha,\
tf_p=tf_p,tf_q=tf_q)
slots_max = [ 1, 2, 4, 8, 1, 2, 1] # slots
slots_on_off = transfocator_nlenses_to_slots(nlenses_target,nlenses_max=nlenses_max)
print("=============================== TRANSFOCATOR SET")
#print("deltas: ",deltas)
if force_nlenses != 1:
print("nlenses_target (optimized): ",nlenses_target)
else:
print("nlenses_target (forced): ",nlenses_target)
print("With these lenses we obtain: ")
print(" focal_f: %f cm"%(focal_f))
print(" focal_q: %f cm"%(focal_q))
print(" s_target: %f um"%(s_target*1e4))
print(" slots_max: ",slots_max)
print(" slots_on_off: ",slots_on_off)
print("==================================================")
# for theoretical calculations use the focal position and distances given by the target nlenses
itmp = input("Start SHADOW simulation? [1=yes,0=No]: ")
if str(itmp) != "1":
return
#=======================================================================================================================
# Inputs
#=======================================================================================================================
emittH = 3.9e-9
emittV = 10e-12
betaH = 35.6
betaV = 3.0
number_of_rays = 50000
nslots = len(slots_max)
slots_lens_thickness = [0.3 for i in range(nslots)] #total thickness of a single lens in cm
# for each slot, positional gap of the first lens in cm
slots_steps = [ 4, 4, 1.9, 6.1, 4, 4, slots_lens_thickness[-1]]
slots_radii = [.05, .05, .05, .05, 0.1, 0.1, 0.15] # radii of the lenses in cm
AAA= 333
id30b_ray_tracing(emittH=emittH,emittV=emittV,betaH=betaH,betaV=betaV,number_of_rays=number_of_rays,\
density=xrl_density[0],symbol=xrl_symbol[0],tf_p=tf_p,tf_q=tf_q,lens_diameter=lens_diameter,\
slots_max=slots_max,slots_on_off=slots_on_off,photon_energy_ev=photon_energy_ev,\
slots_lens_thickness=slots_lens_thickness,slots_steps=slots_steps,slots_radii=slots_radii,\
s_target=s_target,focal_f=focal_f,focal_q=focal_q,div_q=div_q)
def main():
# this performs the full simulation: calculates the optimum configuration and do the ray-tracing
itmp = input("Enter: \n 0 = optimization calculation only \n 1 = full simulation (ray tracing) \n?> ")
photon_energy_kev = float(input("Enter photon energy in keV: "))
s_target_um = float(input("Enter target focal dimension in microns: "))
if str(itmp) == "1":
id30b_full_simulation(photon_energy_ev=photon_energy_kev*1e3,s_target=s_target_um*1e-4,nlenses_target=None)
#id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=[3,1,1])
else:
#this performs the calculation of the optimizad configuration
nlenses_optimum = transfocator_compute_configuration(photon_energy_kev*1e3,s_target_um*1e-4,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=0 )
print("Optimum lens configuration is: ",nlenses_optimum)
if nlenses_optimum == None:
return
print("Activate slots: ",transfocator_nlenses_to_slots(nlenses_optimum,nlenses_max=[15,3,1]))
# this calculates the parameters (image size, etc) for a given lens configuration
(size, f, q_f, div) = transfocator_compute_parameters(photon_energy_kev*1e3, nlenses_optimum,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 )
print("For given configuration ",nlenses_optimum," we get: ")
print(" size: %f cm, focal length: %f cm, focal distance: %f cm, divergence: %f rad: "%(size, f, q_f, div))
if __name__ == "__main__":
main()
``` |
[
{
"content": "Recreate the original code text:\n```python\n\"\"\"Create strings representing relationships on GO Terms.\n\n +------- has 'part_of' relationship(s)\n | +-- pointed to by a GO ID with a 'part_of' relationship\n | |\n V ... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n\"\"\"Create strings representing relationships on GO Terms.\n\n +------- has 'part_of' relationship(s)\n | +-- pointed to by a GO ID with a 'part_of' relationship\n | |\n ... | ```python
"""Create strings representing relationships on GO Terms.
+------- has 'part_of' relationship(s)
| +-- pointed to by a GO ID with a 'part_of' relationship
| |
V V
GO:0008150 L00 D00 .... .rdu biological_process
GO:0050896 L01 D01 .... .rdu response to stimulus
GO:0042221 L02 D02 .... p... response to chemical
GO:0032501 L01 D01 .... .rdu multicellular organismal process
GO:0003008 L02 D02 .... .r.. system process
GO:0051606 L02 D02 .... .... detection of stimulus
GO:0050877 L03 D03 .... .rdu nervous system process
GO:0009593 L03 D03 P... .... detection of chemical stimulus
GO:0007600 L04 D04 .... pr.. sensory perception
GO:0050906 L03 D03 P... .... detection of stimulus involved in sensory perception
GO:0050890 L04 D04 .... .... cognition
GO:0050907 L04 D04 P... .... detection of chemical stimulus involved in sensory perception
GO:0007606 L05 D05 .... p... sensory perception of chemical stimulus
GO:0050893 L05 D05 P... .... sensory processing
GO:0050911 L05 D05 P... .... detection of chemical stimulus involved in sensory perception of smell
GO:0007608 L06 D06 .... p... sensory perception of smell
"""
__copyright__ = "Copyright (C) 2010-2019, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
from collections import OrderedDict
from goatools.godag.consts import RELATIONSHIP_LIST
from goatools.godag.consts import RELATIONSHIP_SET
# pylint: disable=too-few-public-methods,bad-whitespace
class RelationshipStr(object):
"""Create strings representing relationships on GO Terms."""
# go-basic.obo: fmt(1.2) rel(2019-02-20) 47,177 GO Terms; optional_attrs(relationship)
# relationship:
# 6,882 part_of
# 3,230 regulates
# 2,804 negatively_regulates
# 2,785 positively_regulates
rel2chr = OrderedDict([
('part_of', 'P'),
('regulates', 'R'),
('negatively_regulates', 'D'),
('positively_regulates', 'U')])
rev2chr = OrderedDict([
('part_of', 'p'),
('regulates', 'r'),
('negatively_regulates', 'd'),
('positively_regulates', 'u')])
def __init__(self, relationships=None):
assert set(self.rel2chr.keys()) == RELATIONSHIP_SET
# Ordered relationships
_rels = relationships if relationships else set()
self.rels = [r for r in RELATIONSHIP_LIST if r in _rels]
def str_relationships(self, goobj):
"""Get a string representing the presence of absence of relationships. Ex: P..."""
rel_cur = goobj.relationship
return "".join([self.rel2chr.get(r, '?') if r in rel_cur else '.' for r in self.rels])
def str_rel_short(self, goobj):
"""Get a string representing the presence of absence of relationships. Ex: P"""
if not goobj.relationship:
return ''
rel_cur = goobj.relationship
return "".join([self.rel2chr.get(r, '?') for r in self.rels if r in rel_cur])
def str_relationships_rev(self, goobj):
"""Get a string representing the presence of absence of relationships. Ex: pr.."""
rel_cur = goobj.relationship_rev
return "".join([self.rev2chr[r] if r in rel_cur else '.' for r in self.rels])
def prt_keys(self, prt, pre):
"""Print the alias for a relationship and its alias."""
prt.write('{PRE}Relationship to parent: {ABC}\n'.format(
PRE=pre, ABC=''.join(self.rel2chr.values())))
for rel, alias in self.rel2chr.items():
prt.write('{PRE} {A} {DESC}\n'.format(PRE=pre, A=alias, DESC=rel))
prt.write('\n{PRE}Relationship to child: {ABC}\n'.format(
PRE=pre, ABC=''.join(self.rev2chr.values())))
for rel, alias in self.rev2chr.items():
prt.write('{PRE} {A} {DESC}\n'.format(PRE=pre, A=alias, DESC=rel))
# Copyright (C) 2010-2019, DV Klopfenstein, H Tang, All rights reserved.
``` |
[
{
"content": "```python\n#!/usr/bin/python\n# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n#\n# Originally written by Mario and Luigi at Google.\n\n\"\"\"A code generator for TPM utility ... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/python\n# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n#\n# Originally written by Mario and Luigi at Google.\n\n\"\"\"A code generator ... | ```python
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Originally written by Mario and Luigi at Google.
"""A code generator for TPM utility functions.
The generator inputs the Trousers header file "tpm.h" (here renamed
"tss_tpm_h") as well as massaged representation of TPM commands from
the TPM specs, and outputs marshalling/unmarshalling functions and
type declarations for the TPM structures and commands.
"""
import re, sys, os
# Global copyright header.
_COPYRIGHT_HEADER = """\
/* Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
"""
# Header file include guards.
_HEADER_FILE_GUARD_HEADER = """
#ifndef %(name)s
#define %(name)s
"""
_HEADER_FILE_GUARD_FOOTER = """
#endif /* %(name)s */
"""
# The tag name for the following structures does not follow the convention.
_STRUCTURE_TAG_EXCEPTIONS = dict(
TPM_SIGN_INFO="TPM_TAG_SIGNINFO",
TPM_CONTEXT_BLOB="TPM_TAG_CONTEXTBLOB",
TPM_DELEGATE_KEY_BLOB="TPM_TAG_DELG_KEY_BLOB")
# A dictionary of commands that are ignored.
IGNORED_COMMANDS = set(["TPM_FieldUpgrade", "TPM_CertifySelfTest"])
# A set of struct declarations that are ignored.
IGNORED_STRUCTS = set([
"TPM_VERSION_BYTE",
"TPM_NV_DATA_SENSITIVE",
"TPM_KEY_HANDLE_LIST"])
# Bytecodes
BC_INT8 = "BC_INT8"
BC_INT16 = "BC_INT16"
BC_INT32 = "BC_INT32"
BC_REF = "BC_REF"
BC_ARRAY = "BC_ARRAY"
BC_RECORD = "BC_RECORD"
BC_FIELD_KIND_NORMAL = "BC_FIELD_KIND_NORMAL"
BC_FIELD_KIND_VARIABLE = "BC_FIELD_KIND_VARIABLE"
# This variable keeps track of bytecode positions
BYTECODE_OFFSET = 0
# Structures whose bytecode offset is required in hand-written C code.
MANUALLY_MARSHALLED_STRUCTS = set([
"TPM_NONCE",
"TPM_NV_DATA_PUBLIC",
"TPM_PUBKEY",
"TPM_RSA_KEY_PARMS",
])
# Variable-length integers (varints) are encoded as 7-bit digits, most
# significant first (big endian, for readability). Each digit is stored in a
# byte. The most significant bit is 1 when there are more digits, 0 otherwise.
# For instance:
#
# 4 -> 0x04
# 257 -> 0x81, 0x01
#
# We can use varints only for known integer values, for instance bytecode
# offsets. A bunch of values are only known at C compilation time.
def IntToByteCode(x):
return IntToByteCode1(x / 128) + [x % 128]
def IntToByteCode1(x):
if x == 0:
return []
else:
return IntToByteCode1(x / 128) + [x % 128 + 128]
def OutputVarInt(x, file):
global BYTECODE_OFFSET
file.write("/* (%04d) varint: %d */" % (BYTECODE_OFFSET, x))
bytes = IntToByteCode(x)
file.write("".join(map(lambda x: " %d," % x, bytes)))
BYTECODE_OFFSET += len(bytes)
file.write("\n")
def OutputByte(byte, file):
global BYTECODE_OFFSET
file.write("/* (%04d) */ %s,\n" % (BYTECODE_OFFSET, str(byte)))
BYTECODE_OFFSET += 1
def OutputTwoBytes(x, file):
global BYTECODE_OFFSET
file.write("/* (%04d) */ TWO_BYTES_INT(%s),\n" % (BYTECODE_OFFSET, str(x)))
BYTECODE_OFFSET += 2
def OutputOffsetOf(field_name, record_name, file):
global BYTECODE_OFFSET
file.write("/* (%04d) */ OFFSETOF_TWO_BYTES(%s, %s),\n" %
(BYTECODE_OFFSET, field_name, record_name))
BYTECODE_OFFSET += 2
# We parse a C header file (MIDL, actually) and produce descriptors for each
# type defined by the header file. Then we use the descriptors to output
# useful code.
#
# (Before we go any further: confusion may arise in the uninitiated from the
# use of Python objects to describe C types. The Python objects have types
# themselves. To reduce the confusion we try to call them "classes" and
# "subclasses" since, luckily, that's what they are. We reserve the words
# "struct", "record", and "type" for the C types.)
#
# Every named type has a descriptor. Each kind of type (struct, typedef, etc)
# has an associated class, which is a subclass of TypeDesc. Other classes are
# used internally to type descriptors, as described below.
#
# There are four kinds of types: built-in types, types defined by typedef,
# types defined by a struct declaration, and pointers.
class TypeDesc(object):
"""Generic type desriptor."""
def __init__(self):
self.bytecode_offset = -1
def OutputByteCodeOffset(self, file):
OutputVarInt(self.bytecode_offset, file)
def OutputByteCodeRef(self, file):
assert self.bytecode_offset >= 0
OutputByte(BC_REF, file)
self.OutputByteCodeOffset(file)
class NamedTypeDesc(TypeDesc):
"""Desriptor for named types."""
def __init__(self, name):
super(NamedTypeDesc, self).__init__()
self.name = name
def Format(self):
return self.name
class BuiltInTypeDesc(NamedTypeDesc):
"""Desriptor for built-in types."""
def __init__(self, name, bytecode):
super(BuiltInTypeDesc, self).__init__(name)
self.bytecode = bytecode
def BaseType(self):
return self
def OutputByteCodeRef(self, file):
OutputByte(self.bytecode, file)
class TypedefDesc(NamedTypeDesc):
"""Types defined by another type (i.e. aliases)."""
def __init__(self, name, definition):
super(TypedefDesc, self).__init__(name)
self.definition = definition
def BaseType(self):
return self.definition.BaseType()
def OutputDeclarations(self, out_file):
# Type declaration only
out_file.write("typedef %s %s;\n" % (self.definition.name, self.name))
def OutputDefinitions(self, out_file):
pass
def OutputByteCode(self, out_file):
pass
def OutputByteCodeRef(self, out_file):
self.definition.OutputByteCodeRef(out_file)
class RecordDesc(NamedTypeDesc):
"""Descriptor for structs (also typedefs of structs, for simplicity)."""
def BaseType(self):
return self
def OutputByteCode(self, out_file):
if self.fields:
bytecode_offset = BYTECODE_OFFSET
out_file.write("/* Record: %s */\n" % self.name)
OutputByte(BC_RECORD, out_file)
OutputByte(len(self.fields), out_file)
for field in self.fields:
field.OutputByteCode(out_file)
self.bytecode_offset = bytecode_offset
def OutputDeclarations(self, out_file):
if self.fields:
self.OutputTypeDeclaration(out_file)
def OutputTypeDeclaration(self, out_file):
out_file.write("\ntypedef struct %s {\n" % self.name)
for field in self.fields:
field.OutputFieldDeclaration(out_file)
out_file.write("} %s;\n\n" % self.name)
def TagName(self):
if self.name in _STRUCTURE_TAG_EXCEPTIONS:
return _STRUCTURE_TAG_EXCEPTIONS[self.name]
else:
return "TPM_TAG_" + self.name[4:]
class FieldDesc(object):
"""A RecordDesc has a list of fields.
Each field is described by either a FieldDesc, a VarFieldDesc (for
variable-length fields), or an ImplicitVarFieldDesc (special case of
variable-length field where the length is defined implicitly by a
payload type).
"""
def __init__(self, record, index):
# RECORD is the containing record descriptor, used to emit code that lets
# the C compiler compute field offsets. INDEX is the position of the field
# in the record, used to find the size field for variable-length fields.
self.record = record
self.index = index
self.size_field = None
def OutputByteCode(self, out_file):
out_file.write("/* Field: %s */\n" % self.name)
OutputByte(BC_FIELD_KIND_NORMAL, out_file)
OutputOffsetOf(self.name, self.record.name, out_file)
self.type.OutputByteCodeRef(out_file)
def OutputFieldDeclaration(self, out_file):
out_file.write(" %s %s;\n" % (self.type.name, self.name))
class VarFieldDesc(FieldDesc):
"""Descriptor for variable-length fields."""
def OutputByteCode(self, out_file):
out_file.write("/* Variable-length field: %s */\n" % self.name)
OutputByte(BC_FIELD_KIND_VARIABLE, out_file)
OutputOffsetOf(self.name, self.record.name, out_file)
OutputByte(self.size_field.index, out_file)
self.type.OutputByteCodeRef(out_file)
def OutputFieldDeclaration(self, out_file):
out_file.write(" %s* %s;\n" % (self.type.name, self.name))
class ImplicitVarFieldDesc(VarFieldDesc):
"""Descriptor for implicit variable-length fields."""
pass
class ArrayFieldDesc(FieldDesc):
"""Descriptor for fixed-length array (e.g. TPM_SaveContext, TPM_NONCE)."""
def OutputFieldDeclaration(self, out_file):
out_file.write(" %s %s[%s];\n" % (self.element_type.name,
self.name,
self.length))
def OutputByteCode(self, out_file):
out_file.write("/* Array field: %s */\n" % self.name)
OutputByte(BC_ARRAY, out_file)
OutputTwoBytes(self.length, out_file)
self.element_type.OutputByteCodeRef(out_file)
class ArgDesc(object):
"""Descriptor for formal argument of a function."""
def __init__(self, argtype, name):
self.type = argtype
self.name = name
class PointerDesc(TypeDesc):
"""Pointer type (used in argument lists)."""
def __init__(self, base_type):
super(PointerDesc, self).__init__()
self.base_type = base_type
def Format(self):
return self.base_type.Format() + "*"
# The symbol table, i.e. a dictionary mapping type names to type descriptors.
# It is initialized here with the predefined types.
TYPES_DICT = dict(
BYTE=BuiltInTypeDesc("uint8_t", "BC_INT8"),
TSS_BOOL=BuiltInTypeDesc("uint8_t", "BC_INT8"),
BOOL=BuiltInTypeDesc("uint8_t", "BC_INT8"),
UINT16=BuiltInTypeDesc("uint16_t", "BC_INT16"),
UINT32=BuiltInTypeDesc("uint32_t", "BC_INT32"),
UINT64=BuiltInTypeDesc("uint64_t", "BC_INT64"),
APSession=BuiltInTypeDesc("APSession", "BC_APSESSION"),
TPM_RESULT=BuiltInTypeDesc("TPM_RESULT", "BC_INT32"),
int=BuiltInTypeDesc("int", "BC_INT32"),
)
class StructureParser(object):
"""Type structure parser.
"Doing It Right (TM)" would be overkill here. To parse the header file
"the right way" we would need a full C parser (MIDL, actually). So instead
we make heavy assumptions on the file format and the types we need to deal
with. Since the input is quite stable (no changes are expected), this
works well enough.
"""
# compile regular expressions
_STRUCT_RE = re.compile("^typedef\s+struct\s+td(\w+)")
_ENDSTRUCT_RE = re.compile("^}")
_TYPEDEF_RE = re.compile("^typedef\s+(\w+)\s+(\w+)")
# "type name" or "type name[...]" or "type *name"
_FIELD_RE = re.compile("^\s+(\w+(?:\s*[*])?)\s*(\w+)((?:[[].*[]])?)")
_SIZEIS_RE = re.compile("^\s+SIZEIS.(\w+)")
def Parse(self, filename):
"""Parses the TPM header file to extract structure information.
Args:
filename: Name of the TPM header file
Returns:
List of extracted type descriptors.
"""
types = []
header_file = open(filename)
for line in header_file:
match = self._STRUCT_RE.search(line)
if match:
name = match.group(1)
if name in IGNORED_STRUCTS:
continue
desc = RecordDesc(name)
TYPES_DICT[name] = desc
self.ParseRecord(header_file, desc)
types.append(desc)
continue
match = self._TYPEDEF_RE.search(line)
if match:
old_name = match.group(1)
new_name = match.group(2)
old_desc = TYPES_DICT[old_name]
desc = TypedefDesc(new_name, old_desc)
TYPES_DICT[new_name] = desc
types.append(desc)
continue
return types
def ParseRecord(self, in_file, record_desc):
"""Parses the body of a TPM struct declaration (all but the first line)."""
fields_list = []
i = 0
size_field_name = None
line = in_file.next() # skip open brace
while True:
line = in_file.next()
match = self._SIZEIS_RE.search(line)
if match:
size_field_name = match.group(1)
continue
match = self._FIELD_RE.search(line)
if match:
type_name = match.group(1) + match.group(3)
field_name = match.group(2)
field_desc = FieldDesc(record_desc, i)
field_desc.name = field_name
field_desc.type = type_name
if size_field_name:
size_field_index = next((f for f in xrange(len(fields_list))
if fields_list[f].name == size_field_name))
field_desc.size_field_index = size_field_index
size_field_name = None
fields_list.append(field_desc)
i = i + 1
continue
match = self._ENDSTRUCT_RE.search(line)
if match:
record_desc.fields = fields_list
return
class StructureGenerator(object):
"""TPM structure types and marshaling code generator."""
def Generate(self, types, filename_h, filename_c):
"""Generates the .c and .h file for the given types."""
# Declarations (.h file)
h = open(filename_h, "w")
h.write(_COPYRIGHT_HEADER)
guard_name = "TRUNKS_%s_" % filename_h.upper().replace(".", "_")
h.write(_HEADER_FILE_GUARD_HEADER % {"name": guard_name})
h.write("""
#include <stdint.h>
#include <string.h>
#include "trunks_tpm.h"
""")
for t in types:
t.OutputDeclarations(h)
h.write(_HEADER_FILE_GUARD_FOOTER % {"name": guard_name})
h.close()
# Bytecodes (.c file)
c = open(filename_c, "w")
c.write(_COPYRIGHT_HEADER)
c.write("""
#include <stdint.h>
#include <string.h>
#include "%s"
#include "trunks_internal.h"
#include "g_tpm_commands.h"
uint8_t StructureByteCodes[] = {
""" % filename_h)
for t in types:
t.OutputByteCode(c)
c.close()
class Command(object):
"""Descriptor for a TPM command."""
def __init__(self, name):
self.name = name
self.has_auth1 = False
self.has_auth2 = False
self.has_ins = False
self.has_outs = False
def OutputDeclarations(self, out_file):
self.request.OutputDeclarations(out_file)
self.response.OutputDeclarations(out_file)
out_file.write("\n")
self.OutputFunctionHeader(out_file, ";\n")
def OutputByteCode(self, out_file):
self.request.OutputByteCode(out_file)
self.response.OutputByteCode(out_file)
def OutputDefinitions(self, out_file):
self.OutputFunctionHeader(out_file, " {")
self.OutputFunctionBody(out_file)
def OutputFunctionHeader(self, out_file, suffix):
"""Outputs the function header for this command."""
out_file.write("""\
/* Sends a %s command to the TPM and reads the response. */
uint32_t Ttl_%s(%s)%s""" % (self.name, self.name, self.ArgList(), suffix))
def ArgList(self):
if self.args:
arg_list = map(lambda a: "%s %s" % (a.type.Format(), a.name), self.args)
return ", ".join(arg_list)
else:
return "void"
def OutputFunctionBody(self, out_file):
"""Outputs the function body for this command."""
body_template = """
%(initializers)s RETURN_ON_FAILURE(TtlRunCommand(%(ordinal)s,
%(rqu_bytecode)s, %(rsp_bytecode)s, in_parameters, out_parameters,
auth1, auth2, buffer, buffer_size));
return TPM_SUCCESS;
}
"""
initializers = ""
if not self.has_ins:
initializers += " void* in_parameters = NULL;\n"
if not self.has_outs:
initializers += " void* out_parameters = NULL;\n"
initializers += " uint8_t buffer[TPM_MAX_COMMAND_LENGTH];\n"
initializers += " int buffer_size = sizeof(buffer);\n"
if not self.has_auth1:
initializers += " void* auth1 = NULL;\n"
if not self.has_auth2:
initializers += " void* auth2 = NULL;\n"
# write function body
out_file.write(body_template % {
"initializers": initializers,
"ordinal": self.ordinal,
"rqu_bytecode": self.request.bytecode_offset,
"rsp_bytecode": self.response.bytecode_offset,
})
class CommandParser(object):
"""Command definition parser.
The text file is extracted from the PDF file containing the TPM
command specification from the Trusted Computing Group. The syntax
of the text file is ad-hoc.
"""
_LINE_SKIP_RE = re.compile("^(\s*$)|(^_COMMENT)")
_TYPE_RE = "(\w+(?:\s*[[].*[]])?)"
_COMMAND_RE = re.compile("^_TPM_COMMAND\s+(\w+)$")
_IN_PARAM_RE = re.compile("^_IN_PARAM\s+%s\s+(\w+)\s+(.*)$" % _TYPE_RE)
_OUT_PARAM_RE = re.compile("^_OUT_PARAM\s+%s\s+(\w+)\s+(.*)$" % _TYPE_RE)
# One line of lookahead
_line = None
def NextLine(self, in_file):
try:
while True:
self._line = in_file.next()
if not self._LINE_SKIP_RE.search(self._line):
break
except StopIteration:
self._line = None
def Parse(self, filename):
"""Parses a text version of the TPM command specification.
Args:
filename: Name of the TPM command specification file.
Returns:
List of extracted command descriptors.
"""
commands = []
f = open(filename)
self.NextLine(f)
while True:
cmd = self.ParseCommand(f)
if not cmd:
return commands
if cmd.name not in IGNORED_COMMANDS:
commands.append(cmd)
def ParseCommand(self, f):
"""Parses a TPM command structure."""
if not self._line:
return None
match = self._COMMAND_RE.search(self._line)
if not match:
print "cannot match command from line: %s\n" % self._line
name = match.group(1)
cmd = Command(name)
self.NextLine(f)
cmd.request = self.ParseCommandRR(f, self._IN_PARAM_RE, name + "_rqu")
cmd.response = self.ParseCommandRR(f, self._OUT_PARAM_RE, name + "_rsp")
assert ((cmd.request.fields and cmd.response.fields) or
cmd.name in IGNORED_COMMANDS)
return cmd
def ParseCommandRR(self, f, regexp, name):
"""Parses a request or response structure."""
fields = []
i = 0
record = RecordDesc(name)
while self._line:
match = regexp.search(self._line)
if not match:
break
field = FieldDesc(record, i)
field.name = match.group(2)
# For now assign the type name, not the descriptor, and resolve later.
# The type resolution also includes transforming a FIELD into a VARFIELD
# or ARRAYFIELD when applicable.
field.type = match.group(1)
field.description = match.group(3)
field.size_field_name = None
fields.append(field)
i = i + 1
self.NextLine(f)
record.fields = fields
return record
class Rewriter(object):
"""TPM type and command rewriter."""
_POINTER_RE = re.compile("^(\w+)\s*[*]$")
_ARRAY_RE = re.compile("^(\w+)\s*[[]\s*[]]$")
_FIXARRAY_RE = re.compile("^(\w+)\s*[[]\s*(\w+)\s*[]]$")
_TAG_RE = re.compile("^\s*(\w+)\s*$")
_ORD_RE = re.compile("^.*((TPM|TSC)_ORD_\w+).*$")
def Rewrite(self, commands):
for command in commands:
self.FixTypes(command.request)
self.FixTypes(command.response)
self.ExtractConstants(command)
self.FixRequestHeaderAndTrailer(command)
self.FixResponseHeaderAndTrailer(command)
self.ComputeCommandArgs(command)
def FixTypes(self, record):
"""Fixes the given command structures.
Args:
record: structure to be fixed.
The following modifications are applied:
1. Replace type names in fields with their type descriptors
2. Change Fields into VarFields as applicable.
3. Change Fields into ArrayFields as applicable.
"""
if not isinstance(record, RecordDesc):
return
new_fields = []
previous_old_field = None
previous_new_field = None
for field in record.fields:
match = Rewriter._POINTER_RE.match(field.type)
if not match:
match = Rewriter._ARRAY_RE.match(field.type)
if match:
new_field = VarFieldDesc(record, field.index)
new_field.name = field.name
new_field.type = TYPES_DICT[match.group(1)]
new_field.size_field = previous_new_field
else:
match = Rewriter._FIXARRAY_RE.match(field.type)
if match:
new_field = ArrayFieldDesc(record, field.index)
element_type = TYPES_DICT[match.group(1)]
new_field.element_type = element_type
new_field.name = field.name
new_field.length = match.group(2)
else:
new_field = field # recycle
new_field.type = TYPES_DICT[field.type]
new_fields.append(new_field)
previous_old_field = field
previous_new_field = new_field
record.fields = new_fields
def ExtractConstants(self, cmd):
"""Extracts the command tag and ordinal."""
match = Rewriter._TAG_RE.search(cmd.request.fields[0].description)
if match:
cmd.tag = match.group(1)
match = Rewriter._ORD_RE.search(cmd.request.fields[2].description)
if match:
cmd.ordinal = match.group(1)
if not cmd.tag or not cmd.ordinal:
print "Could not extract tag or ordinal for command %s" % cmd.name
def FixFields(self, fields, len):
fields = fields[3:len]
for field in fields:
field.index -= 3
return fields
def FixRequestHeaderAndTrailer(self, cmd):
"""Fixes the request header and trailer according to the command type."""
req_params_len = len(cmd.request.fields)
if cmd.tag == "TPM_TAG_RQU_AUTH2_COMMAND":
req_params_len -= 10
cmd.has_auth1 = True
cmd.has_auth2 = True
elif cmd.tag == "TPM_TAG_RQU_AUTH1_COMMAND":
req_params_len -= 5
cmd.has_auth1 = True
# remove first three fields
cmd.request.fields = self.FixFields(cmd.request.fields, req_params_len)
cmd.has_ins = len(cmd.request.fields) > 0
def FixResponseHeaderAndTrailer(self, cmd):
"""Fixes the response header and trailer according to the command type."""
rsp_params_len = len(cmd.response.fields)
if cmd.tag == "TPM_TAG_RQU_AUTH2_COMMAND":
rsp_params_len -= 6
elif cmd.tag == "TPM_TAG_RQU_AUTH1_COMMAND":
rsp_params_len -= 3
cmd.response.fields = self.FixFields(cmd.response.fields, rsp_params_len)
cmd.has_outs = len(cmd.response.fields) > 0
def ComputeCommandArgs(self, cmd):
"""Generates the argument list for the given command."""
cmd.args = []
if cmd.has_ins:
cmd.args.append(ArgDesc(PointerDesc(cmd.request), "in_parameters"))
if cmd.has_outs:
cmd.args.append(ArgDesc(PointerDesc(cmd.response), "out_parameters"))
cmd.args.append(ArgDesc(PointerDesc(TYPES_DICT["BYTE"]), "buffer"))
cmd.args.append(ArgDesc(TYPES_DICT["int"], "buffer_size"))
if cmd.has_auth1:
cmd.args.append(ArgDesc(PointerDesc(TYPES_DICT["APSession"]), "auth1"))
if cmd.has_auth2:
cmd.args.append(ArgDesc(PointerDesc(TYPES_DICT["APSession"]), "auth2"))
class CommandGenerator(object):
"""TPM command types and marshaling code generator."""
def Generate(self, commands, filename_h, filename_b, filename_c):
"""Generates the .c and .h file for the given commands."""
h = open(filename_h, "w")
h.write(_COPYRIGHT_HEADER)
guard_name = "TRUNKS_%s_" % filename_h.upper().replace(".", "_")
h.write(_HEADER_FILE_GUARD_HEADER % {"name": guard_name})
h.write("""
#include "g_tpm_structures.h"
#include "trunks_internal.h"
""")
b = open(filename_b, "a")
b.write("\n\n/* Command Structures (request and response) */\n\n")
c = open(filename_c, "w")
c.write(_COPYRIGHT_HEADER)
c.write("""
#include "%s"
""" % filename_h)
# Output addresses of bytecodes for some struct types.
for name in MANUALLY_MARSHALLED_STRUCTS:
struct = TYPES_DICT[name]
h.write("#define TTL_%s_BYTECODE_OFFSET %d\n" %
(name, struct.bytecode_offset))
h.write("\n")
# Output commands.
for command in commands:
command.OutputDeclarations(h)
command.OutputByteCode(b)
command.OutputDefinitions(c)
h.write(_HEADER_FILE_GUARD_FOOTER % {"name": guard_name})
h.close()
b.write("};\n")
b.close()
c.close()
def Run():
tpm_structures = StructureParser().Parse("tss_tpm_h")
for structure in tpm_structures:
Rewriter().FixTypes(structure)
StructureGenerator().Generate(
tpm_structures, "g_tpm_structures.h", "g_tpm_structures.c")
tpm_commands = CommandParser().Parse("g_tpm_commands_structure.txt")
Rewriter().Rewrite(tpm_commands)
CommandGenerator().Generate(tpm_commands, "g_tpm_commands.h",
"g_tpm_structures.c", "g_tpm_commands.c")
def Test():
print "no tests yet"
exit(1)
# main program
if __name__ == "__main__":
if len(sys.argv) == 1:
Run()
elif len(sys.argv) == 2 and sys.argv[1] == "test":
Test()
else:
sys.stderr.write("usage: %s [test]\n" % sys.argv[0])
exit(1)
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# coding=utf-8\n\n# Copyright 2012 Hewlett-Packard Development Company, L.P.\n# Copyright (c) 2012 NTT DOCOMO, INC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); y... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# coding=utf-8\n\n# Copyright 2012 Hewlett-Packard Development Company, L.P.\n# Copyright (c) 2012 NTT DOCOMO, INC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the... | ```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help='path to baremetal terminal program'),
cfg.StrOpt('terminal_cert_dir',
default=None,
help='path to baremetal terminal SSL cert(PEM)'),
cfg.StrOpt('terminal_pid_dir',
default=paths.state_path_def('baremetal/console'),
help='path to directory stores pidfiles of baremetal_terminal'),
cfg.IntOpt('ipmi_power_retry',
default=5,
help='maximal number of retries for IPMI operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
f.write(password)
return path
def _get_console_pid_path(node_id):
name = "%s.pid" % node_id
path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
return path
def _get_console_pid(node_id):
pid_path = _get_console_pid_path(node_id)
if os.path.exists(pid_path):
with open(pid_path, 'r') as f:
pid_str = f.read()
try:
return int(pid_str)
except ValueError:
LOG.warn(_("pid file %s does not contain any pid"), pid_path)
return None
class IPMI(base.PowerManager):
"""IPMI Power Driver for Baremetal Nova Compute
This PowerManager class provides mechanism for controlling the power state
of physical hardware via IPMI calls. It also provides serial console access
where available.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
if self.node_id == None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to IPMI"))
if self.address == None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to IPMI"))
if self.user == None:
raise exception.InvalidParameterValue(_("User not supplied "
"to IPMI"))
if self.password == None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
'-I',
'lanplus',
'-H',
self.address,
'-U',
self.user,
'-f']
pwfile = _make_password_file(self.password)
try:
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
locals())
return out, err
finally:
bm_utils.unlink_without_raise(pwfile)
def _power_on(self):
"""Turn the power to this node ON."""
def _wait_for_power_on():
"""Called at an interval until the node's power is on."""
if self.is_power_on():
self.state = baremetal_states.ACTIVE
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power on")
except Exception:
LOG.exception(_("IPMI power on failed"))
self.retries = 0
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=0.5).wait()
def _power_off(self):
"""Turn the power to this node OFF."""
def _wait_for_power_off():
"""Called at an interval until the node's power is off."""
if self.is_power_on() is False:
self.state = baremetal_states.DELETED
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
self._exec_ipmitool("power off")
except Exception:
LOG.exception(_("IPMI power off failed"))
self.retries = 0
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=0.5).wait()
def _set_pxe_for_next_boot(self):
try:
self._exec_ipmitool("chassis bootdev pxe")
except Exception:
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
"""Turns the power to node ON.
Sets node next-boot to PXE and turns the power on,
waiting up to ipmi_power_retry/2 seconds for confirmation
that the power is on.
:returns: One of baremetal_states.py, representing the new state.
"""
if self.is_power_on() and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node.
Turns the power off, sets next-boot to PXE, and turns the power on.
Each action waits up to ipmi_power_retry/2 seconds for confirmation
that the power state has changed.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF.
Turns the power off, and waits up to ipmi_power_retry/2 seconds
for confirmation that the power is off.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
return self.state
def is_power_on(self):
"""Check if the power is currently on.
:returns: True if on; False if off; None if unable to determine.
"""
# NOTE(deva): string matching based on
# http://ipmitool.cvs.sourceforge.net/
# viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
res = self._exec_ipmitool("power status")[0]
if res == ("Chassis Power is on\n"):
return True
elif res == ("Chassis Power is off\n"):
return False
return None
def start_console(self):
if not self.port:
return
args = []
args.append(CONF.baremetal.terminal)
if CONF.baremetal.terminal_cert_dir:
args.append("-c")
args.append(CONF.baremetal.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(self.port))
args.append("--background=%s" % _get_console_pid_path(self.node_id))
args.append("-s")
try:
pwfile = _make_password_file(self.password)
ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
" -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': self.address,
'user': self.user,
'pwfile': pwfile,
}
args.append(ipmi_args)
# Run shellinaboxd without pipes. Otherwise utils.execute() waits
# infinitely since shellinaboxd does not close passed fds.
x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
x.append('</dev/null')
x.append('>/dev/null')
x.append('2>&1')
utils.execute(' '.join(x), shell=True)
finally:
bm_utils.unlink_without_raise(pwfile)
def stop_console(self):
console_pid = _get_console_pid(self.node_id)
if console_pid:
# Allow exitcode 99 (RC_UNAUTHORIZED)
utils.execute('kill', '-TERM', str(console_pid),
run_as_root=True,
check_exit_code=[0, 99])
bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
``` |
[
{
"content": "Return the code unaltered:\n```python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nfrom mpi4py import MPI\n\nfrom cplpy import CPL\nfrom draw_grid import draw_grid\n\n#initialise MPI and CPL\ncomm = MPI.COMM_WORLD\nCPL = CPL()\nCFD_COMM = CPL.init(CP... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nfrom mpi4py import MPI\n\nfrom cplpy import CPL\nfrom draw_grid import draw_grid\n\n#initialise MPI and CPL\ncomm = MPI.COMM_WORLD\nCPL = CPL()\nCFD_CO... | ```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
NProcs = np.product(npxyz)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([16, 6, 16], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
print("Non-coherent number of processes in CFD ", nprocs_realm,
" no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
#Plot output
fig, ax = plt.subplots(1,1)
plt.subplots_adjust(bottom=0.25)
axslider = plt.axes([0.25, 0.1, 0.65, 0.03])
freq = 1.
sfreq = Slider(axslider, 'Freq', 0.1, 2.0, valinit=freq)
def update(val):
freq = sfreq.val
global freq
print("CHANGED", freq)
sfreq.on_changed(update)
plt.ion()
plt.show()
# === Plot both grids ===
dx = CPL.get("xl_cfd")/float(CPL.get("ncx"))
dy = CPL.get("yl_cfd")/float(CPL.get("ncy"))
dz = CPL.get("zl_cfd")/float(CPL.get("ncz"))
ioverlap = (CPL.get("icmax_olap")-CPL.get("icmin_olap")+1)
joverlap = (CPL.get("jcmax_olap")-CPL.get("jcmin_olap")+1)
koverlap = (CPL.get("kcmax_olap")-CPL.get("kcmin_olap")+1)
xoverlap = ioverlap*dx
yoverlap = joverlap*dy
zoverlap = koverlap*dz
for time in range(100000):
# recv data to plot
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
recv_array = np.zeros((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, olap_limits)
#Plot CFD and coupler Grid
draw_grid(ax,
nx=CPL.get("ncx"),
ny=CPL.get("ncy"),
nz=CPL.get("ncz"),
px=CPL.get("npx_cfd"),
py=CPL.get("npy_cfd"),
pz=CPL.get("npz_cfd"),
xmin=CPL.get("x_orig_cfd"),
ymin=CPL.get("y_orig_cfd"),
zmin=CPL.get("z_orig_cfd"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=CPL.get("yl_cfd"),
zmax=(CPL.get("kcmax_olap")+1)*dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=CPL.get("npx_md"),
py=CPL.get("npy_md"),
pz=CPL.get("npz_md"),
xmin=CPL.get("x_orig_md"),
ymin=-CPL.get("yl_md")+yoverlap,
zmin=CPL.get("z_orig_md"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=yoverlap,
zmax=(CPL.get("kcmax_olap")+1)*dz,
label='MD')
#Plot x component on grid
x = np.linspace(CPL.get("x_orig_cfd")+.5*dx,xoverlap-.5*dx,ioverlap)
z = np.linspace(CPL.get("z_orig_cfd")+.5*dz,zoverlap-.5*dz,koverlap)
for j in range(joverlap):
ax.plot(x, 0.5*dy*(recv_array[0,:,j,0]+1.+2*j), 's-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print(time, freq)
plt.pause(0.1)
ax.cla()
# send data to update
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = freq*np.ones((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
CPL.send(send_array, olap_limits)
CPL.finalize()
MPI.Finalize()
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n#!/usr/bin/env python\nimport numpy as np\nimport json\nimport rospy\nimport scipy.interpolate\nimport threading\nimport argparse\nfrom std_msgs.msg import Header\nfrom sub8_msgs.msg import Thrust, ThrusterCmd, ThrusterStatus\nfrom sub8_ros_tools import wa... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\nimport numpy as np\nimport json\nimport rospy\nimport scipy.interpolate\nimport threading\nimport argparse\nfrom std_msgs.msg import Header\nfrom sub8_msgs.msg import Thrust, ThrusterCmd, ThrusterStatus\nfrom sub8_ros... | ```python
#!/usr/bin/env python
import numpy as np
import json
import rospy
import scipy.interpolate
import threading
import argparse
from std_msgs.msg import Header
from sub8_msgs.msg import Thrust, ThrusterCmd, ThrusterStatus
from sub8_ros_tools import wait_for_param, thread_lock
from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, FailThrusterResponse
from sub8_thruster_comm import thruster_comm_factory
from sub8_alarm import AlarmBroadcaster
lock = threading.Lock()
class ThrusterDriver(object):
def __init__(self, config_path, bus_layout):
'''Thruster driver, an object for commanding all of the sub's thrusters
- Gather configuration data and make it available to other nodes
- Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters
- Track a thrust_dict, which maps thruster names to the appropriate port
- Given a command message, route that command to the appropriate port/thruster
- Send a thruster status message describing the status of the particular thruster
'''
self.alarm_broadcaster = AlarmBroadcaster()
self.thruster_out_alarm = self.alarm_broadcaster.add_alarm(
name='thruster_out',
action_required=True,
severity=2
)
self.failed_thrusters = []
self.make_fake = rospy.get_param('simulate', False)
if self.make_fake:
rospy.logwarn("Running fake thrusters for simulation, based on parameter '/simulate'")
# Individual thruster configuration data
newtons, thruster_input = self.load_config(config_path)
self.interpolate = scipy.interpolate.interp1d(newtons, thruster_input)
# Bus configuration
self.port_dict = self.load_bus_layout(bus_layout)
thrust_service = rospy.Service('thrusters/thruster_range', ThrusterInfo, self.get_thruster_info)
self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)
self.status_pub = rospy.Publisher('thrusters/thruster_status', ThrusterStatus, queue_size=8)
# This is essentially only for testing
self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster)
def load_config(self, path):
'''Load the configuration data:
- Map force inputs from Newtons to [-1, 1] required by the thruster
'''
try:
_file = file(path)
except IOError, e:
rospy.logerr("Could not find thruster configuration file at {}".format(path))
raise(e)
json_data = json.load(_file)
newtons = json_data['calibration_data']['newtons']
thruster_input = json_data['calibration_data']['thruster_input']
return newtons, thruster_input
def get_thruster_info(self, srv):
'''Get the thruster info for a particular thruster ID
Right now, this is only the min and max thrust data
'''
# Unused right now
query_id = srv.thruster_id
min_thrust = min(self.interpolate.x)
max_thrust = max(self.interpolate.x)
thruster_info = ThrusterInfoResponse(
min_force=min_thrust,
max_force=max_thrust
)
return thruster_info
@thread_lock(lock)
def load_bus_layout(self, layout):
'''Load and handle the thruster bus layout'''
port_dict = {}
for port in layout:
thruster_port = thruster_comm_factory(port, fake=self.make_fake)
# Add the thrusters to the thruster dict
for thruster_name, thruster_info in port['thrusters'].items():
port_dict[thruster_name] = thruster_port
return port_dict
@thread_lock(lock)
def command_thruster(self, name, force):
'''Issue a a force command (in Newtons) to a named thruster
Example names are BLR, FLL, etc
TODO:
Make this still get a thruster status when the thruster is failed
(We could figure out if it has stopped being failed!)
'''
if name in self.failed_thrusters:
return
target_port = self.port_dict[name]
clipped_force = np.clip(force, min(self.interpolate.x), max(self.interpolate.x))
normalized_force = self.interpolate(clipped_force)
# We immediately get thruster_status back
thruster_status = target_port.command_thruster(name, force)
message_contents = [
'rpm',
'bus_voltage',
'bus_current',
'temperature',
'fault',
'response_node_id',
]
message_keyword_args = {key: thruster_status[key] for key in message_contents}
if message_keyword_args['fault'] != 0:
self.alert_thruster_loss(thruster_name, message_keyword_args)
self.status_pub.publish(
ThrusterStatus(
header=Header(stamp=rospy.Time.now()),
name=name,
**message_keyword_args
)
)
def thrust_cb(self, msg):
'''Callback for recieving thrust commands
These messages contain a list of instructions, one for each thruster
'''
for thrust_cmd in msg.thruster_commands:
self.command_thruster(thrust_cmd.name, thrust_cmd.thrust)
def alert_thruster_loss(self, thruster_name, fault_info):
self.thruster_out_alarm.raise_alarm(
problem_description='Thruster {} has failed'.format(thruster_name),
parameters={
'thruster_name': thruster_name,
'fault_info': fault_info
}
)
self.failed_thrusters.append(thruster_name)
def fail_thruster(self, srv):
self.alert_thruster_loss(srv.thruster_name, None)
return FailThrusterResponse()
if __name__ == '__main__':
usage_msg = "Interface to Sub8's VideoRay M5 thrusters"
desc_msg = "Specify a path to the configuration.json file containing the thrust calibration data"
parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
parser.add_argument('--configuration_path', dest='config_path',
help='Designate the absolute path of the calibration/configuration json file')
args = parser.parse_args(rospy.myargv()[1:])
config_path = args.config_path
rospy.init_node('videoray_m5_thruster_driver')
layout_parameter = '/busses'
rospy.loginfo("Thruster Driver waiting for parameter, {}".format(layout_parameter))
busses = wait_for_param(layout_parameter)
if busses is None:
raise(rospy.exceptions.ROSException("Failed to find parameter '{}'".format(layout_parameter)))
thruster_driver = ThrusterDriver(config_path, busses)
rospy.spin()
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport discord\nimport asyncio\nimport random\nimport steam\nfrom steam.steamid import SteamId\nfrom steam.steamprofile import SteamProfile\nfrom steam.steamaccountuniverse import SteamAccountUniverse\nfrom steam.steamaccounttype import SteamAc... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport discord\nimport asyncio\nimport random\nimport steam\nfrom steam.steamid import SteamId\nfrom steam.steamprofile import SteamProfile\nfrom steam.steamaccountuniverse import SteamAccountUniverse\nfrom steam.steamaccounttyp... | ```python
import discord
import asyncio
import random
import steam
from steam.steamid import SteamId
from steam.steamprofile import SteamProfile
from steam.steamaccountuniverse import SteamAccountUniverse
from steam.steamaccounttype import SteamAccountType
from discord.ext import commands
from utils import checks
from mods.cog import Cog
code = "```py\n{0}\n```"
class Verification(Cog):
def __init__(self, bot):
super().__init__(bot)
self.cursor = bot.mysql.cursor
self.escape = bot.escape
self.bot.loop.create_task(self.verification_task())
async def remove_verification(self, server, idk=None):
role = discord.utils.get(server.roles, name='Awaiting Approval')
if role:
try:
await self.bot.delete_role(server, role)
except:
pass
sql = 'DELETE FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
sql = 'DELETE FROM `verification_queue` WHERE server={0}'
sql = sql.format(server.id)
self.cursor.execute(sql)
self.cursor.commit()
if idk is None:
try:
await self.bot.send_message(server.owner, ":warning: One of your server administrators (or you) have enabled approval/verification on user join.\n\nAdministrator permission was taken away from me making the feature unusable, I need Administrator permission to make/add a role to mute on join.\n\n`The system has been automatically disabled, re-enable anytime if you please.`")
except:
pass
@commands.group(pass_context=True, aliases=['onjoinverify', 'approval'], invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification(self, ctx, channel:discord.Channel=None, *, mentions:str=None):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if channel is None:
channel = ctx.message.channel
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
if mentions is None:
sql = "INSERT INTO `verification` (`server`, `channel`) VALUES (%s, %s)"
self.cursor.execute(sql, (ctx.message.server.id, channel.id))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification #<discord_channel>` to change)!".format(channel.mention))
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("invalid mention")
return
sql = "INSERT INTO `verification` (`server`, `channel`, `mentions`) VALUES (%s, %s, %s)"
mention_ids = []
mention_names = []
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
self.cursor.execute(sql, (ctx.message.server.id, channel.id, ' '.join(mention_ids)))
self.cursor.commit()
await self.bot.say(":white_check_mark: Enabled user approval/verification on join, all requests will go to {0} (`verification <#discord_channel>` to change) and mention `{0}`!".format(channel.mention, ', '.join(mention_names)))
permissions = discord.Permissions()
permissions.read_messages = True
try:
await self.bot.create_role(ctx.message.server, name='Awaiting Approval', color=discord.Colour(int("FF0000", 16)), permissions=permissions)
except Exception as e:
print(e)
await self.bot.say(":warning: For some reason I couldn't create the \"Awaiting Approval\" role and users won't be muted, please create it (same name) and disable all the permissions you don't want unapproved-users to have.\nMake sure I have the administrator permission!")
elif channel is None:
sql = 'UPDATE `verification` SET channel={0} WHERE server={1}'
sql = sql.format(channel.id, ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":white_check_mark: Set approval/verification channel to {0}".format(channel.mention))
else:
await self.bot.say(':warning: You are about to disable member verification/approval on join, type `yes` to proceed.')
while True:
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author, channel=ctx.message.channel)
if response is None or response.content != 'yes':
await self.bot.say('**Aborting**')
return
else:
break
await self.remove_verification(ctx.message.server, True)
try:
role = discord.utils.get(ctx.message.server.roles, name='Awaiting Approval')
if role != None:
await self.bot.delete_role(ctx.message.server, role)
except discord.errors.Forbidden:
await self.bot.say("could not remove role, you took my perms away :(")
role2 = discord.utils.get(ctx.message.server.roles, name='Approved')
if role2 != None:
try:
await self.bot.delete_role(ctx.message.server, role2)
except:
pass
await self.bot.say(":negative_squared_cross_mark: **Disabled** user approval on join")
@verification.command(name='mention', aliases=['mentions'], pass_context=True, invoke_without_command=True, no_pm=True)
@checks.admin_or_perm(manage_server=True)
async def verification_mention(self, ctx, *mentions:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(ctx.message.mentions) == 0 and '@everyone' not in mentions and '@here' not in mentions:
await self.bot.say(':no_entry: `Invalid mention(s).`')
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
if len(mentions) == 0:
sql = 'UPDATE `verification` SET mentions=NULL WHERE server={0}'
sql = sql.format(ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
await self.bot.say(":negative_squared_cross_mark: Disabled/Removed mentions on user join for approval")
else:
mention_ids = []
mention_names = []
everyone = False
for mention in mentions:
if mention == '@everyone':
mention_ids.append('@everyone')
elif mention == '@here':
mention_ids.append('@here')
for mention in ctx.message.mentions:
mention_ids.append(mention.id)
mention_names.append(mention.name)
sql = 'SELECT mentions FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
mention_results = self.cursor.execute(sql).fetchall()
update = False
if mention_results[0]['mentions'] != None:
update = True
things = mention_results[0]['mentions'].split()
for x in things:
mention_ids.append(x)
sql = "UPDATE `verification` SET mentions={0} WHERE server={1}"
sql = sql.format(self.escape(' '.join(mention_ids)), ctx.message.server.id)
self.cursor.execute(sql)
self.cursor.commit()
if update:
await self.bot.say(":white_check_mark: Updated mentions to include `{0}` on user join for approval".format(', '.join(mention_names)))
else:
await self.bot.say(":white_check_mark: Set `{0}` to be mentioned on user join for approval".format(', '.join(mention_names)))
@commands.group(pass_context=True, invoke_without_command=True, no_pm=True)
@checks.mod_or_perm(manage_server=True)
async def verify(self, ctx, *users:str):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
if len(users) == 0:
await self.bot.say("pls input users to verify thx")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned **on** (`verification <#discord_channel>` to do so)!!!")
return
role = discord.utils.get(ctx.message.server.roles, name="Awaiting Approval")
count = 0
count2 = 0
discord_user = None
for user in users:
if user.isdigit():
user = int(user)
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND id={1}'
sql = sql.format(ctx.message.server.id, user)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count]['user']))
count += 1
else:
if len(ctx.message.mentions) == 0:
await self.bot.say("If you're not gonna use approval id, atleast mention correctly!")
return
for x in ctx.message.mentions:
if count == len(ctx.message.mentions):
break
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(user))
if len(users) > 1:
continue
else:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(ctx.message.server.id, x.id)
self.cursor.execute(sql)
self.cursor.commit()
discord_user = discord.Server.get_member(ctx.message.server, user_id=str(result[count2]['user']))
count2 += 1
if discord_user is None:
continue
try:
await self.bot.remove_roles(discord_user, role)
except Exception as e:
await self.bot.say(code.format(e))
await self.bot.say(":warning: {0} was removed from the queue however his role could not be removed because I do not have Administrator permissions.\nPlease remove the role manually and give me **Administrator**.".format(user))
return
role = discord.utils.get(ctx.message.server.roles, name='Approved')
if role != None:
try:
await self.bot.add_roles(discord_user, role)
except:
pass
await self.bot.say(":white_check_mark: Removed `{0}` from queue!".format(user))
queue_removed_msg = 'You have been approved/verified for `{0}` and can now message!'.format(ctx.message.server.name)
await self.bot.send_message(discord_user, queue_removed_msg)
@verify.command(name='list', pass_context=True, invoke_without_command=True, no_pm=True)
async def verify_list(self, ctx):
perms = ctx.message.server.me.permissions_in(ctx.message.channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.bot.say(":warning: `I need Administrator permission to make/add a role to mute on join`")
return
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: This server does not have approval/verification turned on (`verification <#discord_channel>` to do so)!!!")
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0}'
sql = sql.format(ctx.message.server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: `There are no users in the verification/approval queue`")
return
users = []
for s in result:
user = discord.Server.get_member(ctx.message.server, user_id=str(s['user']))
if user is None:
continue
users.append('{0}#{1} ({2})'.format(user.name, user.discriminator, str(s['id'])))
await self.bot.say("**{0} Users in Queue**\n`{1}`".format(len(users), ', '.join(users)))
# steam_regex = r"^(http|https|)(\:\/\/|)steamcommunity\.com\/id\/(.*)$"
@verify.command(name='check', pass_context=True, aliases=['steam', 'link'])
async def verify_check(self, ctx, stem:str):
try:
if ctx.message.channel.is_private is False:
await self.bot.say(':no_entry: `Private Message only.`')
return
sql = 'SELECT * FROM `verification_queue` WHERE user={0}'
sql = sql.format(ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(':no_entry: You are not in the verification queue for any server.')
return
server_id = result[0]['server']
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server_id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":no_entry: Server you are in queue for disabled verification.")
return
sql = 'SELECT * FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) != 0:
await self.bot.say(":no_entry: You've already verified your steam account!")
return
sql = 'SELECT id,server FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server_id, ctx.message.author.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await self.bot.say(":warning: `{0}` is not in the verification queue.".format(ctx.message.author))
return
verification_id = str(result[0]['id'])
steamId = None
steamProfile = None
if steamId is None:
steamId = SteamId.fromSteamId("{0}".format(stem))
if steamId is None:
steamId = SteamId.fromSteamId3(stem)
if steamId is None:
steamId = SteamId.fromSteamId64(stem)
if steamId is None:
steamId = SteamId.fromProfileUrl(stem)
if steamId is None:
steamProfile = SteamProfile.fromCustomProfileUrl(stem)
if steamProfile is None:
await self.bot.say("`:no_entry: `Bad Steam ID/64/URL`")
return
steamId = steamProfile.steamId
else:
steamProfile = SteamProfile.fromSteamId(steamId)
if verification_id in steamProfile.displayName:
sql = 'INSERT INTO `verification_steam` (`user`, `server`, `steam`, `id`) VALUES (%s, %s, %s, %s)'
self.cursor.execute(sql, (ctx.message.author.id, server_id, steamId.profileUrl, verification_id))
self.cursor.commit()
await self.bot.say(':white_check_mark: `{0}` steam profile submitted and passed steam name check, awaiting moderator approval.'.format(ctx.message.author))
else:
await self.bot.say(':warning: **{0}** is not in the steam accounts name.'.format(verification_id))
except Exception as e:
await self.bot.say(code.format(e))
async def verification_task(self):
if self.bot.shard_id != 0:
return
while True:
sql = 'SELECT * FROM `verification_steam`'
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
await asyncio.sleep(60)
continue
for s in result:
server = self.bot.manager.get_server(str(s['server']))
if server:
user = server.get_member(str(s['user']))
if user is None:
continue
sql = 'SELECT channel FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
channel = server.get_channel(str(self.cursor.execute(sql).fetchall()[0]['channel']))
msg = '**Steam Account Check**\n`{0} (Verification ID: {1})` has submitted their steam profile and passed the name check.\n`Steam Profile:` {2}'.format(user, s['id'], s['steam'])
await self.bot.send_message(channel, msg)
sql = 'DELETE FROM `verification_steam` WHERE server={0} AND user={1}'
sql = sql.format(server.id, user.id)
self.cursor.execute(sql)
self.cursor.commit()
await asyncio.sleep(60)
async def on_member_join(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
channel = server.get_channel(str(result[0]['channel']))
if channel is None:
raise discord.errors.NotFound
perms = server.me.permissions_in(channel)
if perms.manage_roles is False or perms.manage_channels is False:
if perms.administrator is False:
await self.remove_verification(server)
return
sql = "INSERT INTO `verification_queue` (`user`, `server`, `id`) VALUES (%s, %s, %s)"
rand = random.randint(0, 99999)
self.cursor.execute(sql, (member.id, server.id, rand))
self.cursor.commit()
role = discord.utils.get(server.roles, name='Awaiting Approval')
await self.bot.add_roles(member, role)
for s in server.channels:
perms = member.permissions_in(s)
if perms.read_messages is False:
continue
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = False
await self.bot.edit_channel_permissions(s, role, overwrite)
msg = ''
if result[0]['mentions']:
for x in result[0]['mentions'].split(' '):
if 'everyone' in x or 'here' in x:
msg += '{0} '.format(x)
else:
msg += '<@{0}> '.format(x)
msg += '\n'
msg += ':warning: `{0}` has joined the server and is awaiting approval\n\nRun `verify {1} or mention` to approve, kick user to remove from the queue.'.format(member, rand)
await self.bot.send_message(channel, msg, replace_everyone=False, replace_mentions=False)
join_msg = "You've been placed in the approval queue for `{0}`, please be patient and wait until a staff member approves your join!\n\nIf you'd like to expedite approval (and have a steam account), place **{1}** in your steam name and then run `.verify check <stean_url/id/vanity>`.".format(server.name, rand)
await self.bot.send_message(member, join_msg)
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_remove(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for leaving the server or being kicked.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
async def on_member_ban(self, member):
try:
if member.bot:
return
server = member.server
sql = 'SELECT * FROM `verification` WHERE server={0}'
sql = sql.format(server.id)
result = self.cursor.execute(sql).fetchall()
if len(result) == 0:
return
sql = 'SELECT * FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
result2 = self.cursor.execute(sql).fetchall()
if len(result2) == 0:
return
sql = 'DELETE FROM `verification_queue` WHERE server={0} AND user={1}'
sql = sql.format(server.id, member.id)
self.cursor.execute(sql)
self.cursor.commit()
channel = self.bot.get_channel(id=str(result[0]['channel']))
await self.bot.send_message(channel, ':exclamation: `{0}` has been removed from the approval/verification queue for being banned from the server.'.format(member))
except (discord.errors.Forbidden, discord.errors.InvalidArgument, discord.errors.NotFound):
await self.remove_verification(server)
def setup(bot):
bot.add_cog(Verification(bot))
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n#\n# This file is part of TracForge Project\n#\n# Copyright (C) 2008 TracForge Project\n#\n# See AUTHORS for more informations\n#\n# This program is free software: you can redistribute it and/or modif... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n#\n# This file is part of TracForge Project\n#\n# Copyright (C) 2008 TracForge Project\n#\n# See AUTHORS for more informations\n#\n# This program is free software: you can redistribute... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of TracForge Project
#
# Copyright (C) 2008 TracForge Project
#
# See AUTHORS for more informations
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from tracforge.config import Config
from tracforge.tests import dirname
here = dirname()
class ConfigTestCase(unittest.TestCase):
def setUp(self):
self.pathFile = here+"/forge.ini"
self.config = Config(self.pathFile)
def test_ReadForgeHref(self):
"""Il lit le fichier et remplit la propriete forge_href"""
self.assertEquals('/forge', self.config.forge_href)
def test_ReadTemplatesDir(self):
"""Il lit l'emplacement des templates"""
self.assertEquals('/home/ter', self.config.template_dir)
def test_ReadTrac(self):
"""Il lit les infos du trac"""
self.assertEquals('/var/trac', self.config.trac_dir)
self.assertEquals('/trac', self.config.trac_href)
def test_ReadMediaHref(self):
"""Il lit l'uri de media"""
self.assertEquals('/media', self.config.media_href)
def test_ReadDemo(self):
"""Il lit les informations de la demo"""
self.assertEquals('/demohref', self.config.demo_href)
self.assertEquals('/demodir', self.config.demo_dir)
def test_ReadArchives(self):
"""Il lit les informations de l'archives """
self.assertEquals('/archivehref', self.config.archives_href)
self.assertEquals('/archivedir', self.config.archives_dir)
def suite():
return unittest.makeSuite(ConfigTestCase, 'test')
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n# Output\n#\n# $ ./triton ./examples/callback_after.py ./samples/crackmes/crackme_xor a\n# 0x40056d: push rbp\n# -> #0 = (bvsub (_ bv140735022953896 64) (_ bv8 64)) ; Aligns stack\n# -> #1 = (_ bv140735022953936 64) \n# ->... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n# Output\n#\n# $ ./triton ./examples/callback_after.py ./samples/crackmes/crackme_xor a\n# 0x40056d: push rbp\n# -> #0 = (bvsub (_ bv140735022953896 64) (_ bv8 64)) ; Aligns stack\n# -> #1 = (_ bv140735022953936 64)... | ```python
# Output
#
# $ ./triton ./examples/callback_after.py ./samples/crackmes/crackme_xor a
# 0x40056d: push rbp
# -> #0 = (bvsub (_ bv140735022953896 64) (_ bv8 64)) ; Aligns stack
# -> #1 = (_ bv140735022953936 64)
# -> #2 = (_ bv4195694 64) ; RIP
#
# 0x40056e: mov rbp, rsp
# -> #3 = ((_ extract 63 0) #0)
# -> #4 = (_ bv4195697 64) ; RIP
#
# 0x400571: mov qword ptr [rbp-0x18], rdi
# -> #5 = (_ bv140735022960969 64)
# -> #6 = (_ bv4195701 64) ; RIP
#
# 0x400575: mov dword ptr [rbp-0x4], 0x0
# -> #7 = (_ bv0 32)
# -> #8 = (_ bv4195708 64) ; RIP
#
# 0x40057c: jmp 0x4005bd
# -> #9 = (_ bv4195773 64) ; RIP
#
# 0x4005bd: cmp dword ptr [rbp-0x4], 0x4
# -> #10 = (bvsub #7 ((_ sign_extend 0) (_ bv4 32)))
# -> #11 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #10 (bvxor #7 ((_ sign_extend 0) (_ bv4 32)))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #12 = (ite (bvult #7 ((_ sign_extend 0) (_ bv4 32))) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #13 = (ite (= ((_ extract 31 31) (bvand (bvxor #7 ((_ sign_extend 0) (_ bv4 32))) (bvxor #7 #10))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #14 = (ite (= (parity_flag ((_ extract 7 0) #10)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #15 = (ite (= ((_ extract 31 31) #10) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #16 = (ite (= #10 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #17 = (_ bv4195777 64) ; RIP
#
# 0x40057e: mov eax, dword ptr [rbp-0x4]
# -> #19 = ((_ extract 31 0) #10)
# -> #20 = (_ bv4195713 64) ; RIP
#
# 0x400581: movsxd rdx, eax
# -> #21 = ((_ sign_extend 32) ((_ extract 31 0) #19))
# -> #22 = (_ bv4195716 64) ; RIP
#
# 0x400584: mov rax, qword ptr [rbp-0x18]
# -> #23 = ((_ extract 63 0) #5)
# -> #24 = (_ bv4195720 64) ; RIP
#
# 0x400588: add rax, rdx
# -> #25 = (bvadd ((_ extract 63 0) #23) ((_ extract 63 0) #21))
# -> #26 = (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #25 (bvxor ((_ extract 63 0) #23) ((_ extract 63 0) #21))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #27 = (ite (bvult #25 ((_ extract 63 0) #23)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #28 = (ite (= ((_ extract 63 63) (bvand (bvxor ((_ extract 63 0) #23) (bvnot ((_ extract 63 0) #21))) (bvxor ((_ extract 63 0) #23) #25))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #29 = (ite (= (parity_flag ((_ extract 7 0) #25)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #30 = (ite (= ((_ extract 63 63) #25) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #31 = (ite (= #25 (_ bv0 64)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #32 = (_ bv4195723 64) ; RIP
#
# 0x40058b: movzx eax, byte ptr [rax]
# -> #33 = ((_ zero_extend 24) (_ bv97 8))
# -> #34 = (_ bv4195726 64) ; RIP
#
# 0x40058e: movsx eax, al
# -> #35 = ((_ sign_extend 24) ((_ extract 7 0) #33))
# -> #36 = (_ bv4195729 64) ; RIP
#
# 0x400591: sub eax, 0x1
# -> #37 = (bvsub ((_ extract 31 0) #35) (_ bv1 32))
# -> #38 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #37 (bvxor ((_ extract 31 0) #35) (_ bv1 32))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #39 = (ite (bvult ((_ extract 31 0) #35) (_ bv1 32)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #40 = (ite (= ((_ extract 31 31) (bvand (bvxor ((_ extract 31 0) #35) (_ bv1 32)) (bvxor ((_ extract 31 0) #35) #37))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #41 = (ite (= (parity_flag ((_ extract 7 0) #37)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #42 = (ite (= ((_ extract 31 31) #37) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #43 = (ite (= #37 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #44 = (_ bv4195732 64) ; RIP
#
# 0x400594: xor eax, 0x55
# -> #45 = (bvxor ((_ extract 31 0) #37) (_ bv85 32))
# -> #46 = (_ bv0 1) ; Clears carry flag
# -> #47 = (_ bv0 1) ; Clears overflow flag
# -> #48 = (ite (= (parity_flag ((_ extract 7 0) #45)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #49 = (ite (= ((_ extract 31 31) #45) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #50 = (ite (= #45 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #51 = (_ bv4195735 64) ; RIP
#
# 0x400597: mov ecx, eax
# -> #52 = ((_ extract 31 0) #45)
# -> #53 = (_ bv4195737 64) ; RIP
#
# 0x400599: mov rdx, qword ptr [rip+0x200aa0]
# -> #54 = (_ bv4196036 64)
# -> #55 = (_ bv4195744 64) ; RIP
#
# 0x4005a0: mov eax, dword ptr [rbp-0x4]
# -> #56 = ((_ extract 31 0) #10)
# -> #57 = (_ bv4195747 64) ; RIP
#
# 0x4005a3: cdqe
# -> #58 = ((_ sign_extend 32) ((_ extract 31 0) #56))
# -> #59 = (_ bv4195749 64) ; RIP
#
# 0x4005a5: add rax, rdx
# -> #60 = (bvadd ((_ extract 63 0) #58) ((_ extract 63 0) #54))
# -> #61 = (ite (= (_ bv16 64) (bvand (_ bv16 64) (bvxor #60 (bvxor ((_ extract 63 0) #58) ((_ extract 63 0) #54))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #62 = (ite (bvult #60 ((_ extract 63 0) #58)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #63 = (ite (= ((_ extract 63 63) (bvand (bvxor ((_ extract 63 0) #58) (bvnot ((_ extract 63 0) #54))) (bvxor ((_ extract 63 0) #58) #60))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #64 = (ite (= (parity_flag ((_ extract 7 0) #60)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #65 = (ite (= ((_ extract 63 63) #60) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #66 = (ite (= #60 (_ bv0 64)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #67 = (_ bv4195752 64) ; RIP
#
# 0x4005a8: movzx eax, byte ptr [rax]
# -> #68 = ((_ zero_extend 24) (_ bv49 8))
# -> #69 = (_ bv4195755 64) ; RIP
#
# 0x4005ab: movsx eax, al
# -> #70 = ((_ sign_extend 24) ((_ extract 7 0) #68))
# -> #71 = (_ bv4195758 64) ; RIP
#
# 0x4005ae: cmp ecx, eax
# -> #72 = (bvsub ((_ extract 31 0) #52) ((_ extract 31 0) #70))
# -> #73 = (ite (= (_ bv16 32) (bvand (_ bv16 32) (bvxor #72 (bvxor ((_ extract 31 0) #52) ((_ extract 31 0) #70))))) (_ bv1 1) (_ bv0 1)) ; Adjust flag
# -> #74 = (ite (bvult ((_ extract 31 0) #52) ((_ extract 31 0) #70)) (_ bv1 1) (_ bv0 1)) ; Carry flag
# -> #75 = (ite (= ((_ extract 31 31) (bvand (bvxor ((_ extract 31 0) #52) ((_ extract 31 0) #70)) (bvxor ((_ extract 31 0) #52) #72))) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Overflow flag
# -> #76 = (ite (= (parity_flag ((_ extract 7 0) #72)) (_ bv0 1)) (_ bv1 1) (_ bv0 1)) ; Parity flag
# -> #77 = (ite (= ((_ extract 31 31) #72) (_ bv1 1)) (_ bv1 1) (_ bv0 1)) ; Sign flag
# -> #78 = (ite (= #72 (_ bv0 32)) (_ bv1 1) (_ bv0 1)) ; Zero flag
# -> #79 = (_ bv4195760 64) ; RIP
#
# 0x4005b0: jz 0x4005b9
# -> #80 = (ite (= #78 (_ bv1 1)) (_ bv4195769 64) (_ bv4195762 64)) ; RIP
#
# 0x4005b2: mov eax, 0x1
# -> #81 = (_ bv1 32)
# -> #82 = (_ bv4195767 64) ; RIP
#
# 0x4005b7: jmp 0x4005c8
# -> #83 = (_ bv4195784 64) ; RIP
#
# 0x4005c8: pop rbp
# -> #84 = #1
# -> #85 = (bvadd #0 (_ bv8 64)) ; Aligns stack
# -> #86 = (_ bv4195785 64) ; RIP
#
# loose
# $
from triton import *
# A callback must be a function with one argument. This argument is
# always the Instruction class and contains all information
def my_callback_after(instruction):
print '%#x: %s' %(instruction.address, instruction.assembly)
for se in instruction.symbolicExpressions:
print '\t -> #%d = %s %s' %(se.getId(), se.getAst(), (('; ' + se.getComment()) if se.getComment() is not None else ''))
print
if __name__ == '__main__':
# Start the symbolic analysis from the 'check' function
startAnalysisFromSymbol('check')
# Add a callback.
# BEFORE: Add the callback before the instruction processing
# AFTER: Add the callback after the instruction processing
# FINI: Add the callback at the end of the execution
addCallback(my_callback_after, IDREF.CALLBACK.AFTER)
# Run the instrumentation - Never returns
runProgram()
``` |
[
{
"content": "Here is the code block:\n```python\nimport xbmcaddon\nimport thread, threading\nimport urllib, urllib2\nimport datetime, time\nimport xbmc\nimport logging\nfrom bs4 import BeautifulSoup\nimport requests\nimport html5lib\n\nclass Item:\n\tdef __init__(self, link, title, img):\n\t\tself.link = link\... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nimport xbmcaddon\nimport thread, threading\nimport urllib, urllib2\nimport datetime, time\nimport xbmc\nimport logging\nfrom bs4 import BeautifulSoup\nimport requests\nimport html5lib\n\nclass Item:\n\tdef __init__(self, link, title, img):\n\t\ts... | ```python
import xbmcaddon
import thread, threading
import urllib, urllib2
import datetime, time
import xbmc
import logging
from bs4 import BeautifulSoup
import requests
import html5lib
class Item:
def __init__(self, link, title, img):
self.link = link
self.title = title
self.img = img
def getListVideos(url):
r = requests.get(url+"videos")
html = r.text
#xbmc.log(html.encode("utf-8"))
soup = BeautifulSoup(html)
list_a = soup.findAll('a')
list_links=[]
for a in list_a:
a_href = a.get("href")
a_title = a.get("title")
if (a_href!=None) and (a_href.startswith("/watch?v=")) and (a_title!=None):
a_img = "https://i.ytimg.com/vi/"+a_href[9:]+"/mqdefault.jpg"
list_links.append(Item("https://www.youtube.com" + a_href, a_title, a_img))
return list_links
def getLinkFromKeepVid(link):
r = requests.get("http://keepvid.com/" + '?' + urllib.urlencode({"url":link}))
html = r.text
soup = BeautifulSoup(html, "html5lib")
list_a = soup.findAll('a', attrs = {"class":"l"})
#xbmc.log(list_a.text)
links=[]
for a in list_a:
links.append(a.get("href"))
return links
``` |
[
{
"content": "Repeat the code precisely:\n```python\n\"\"\"\nDjango settings for aplicaciones_informaticas project.\n\nGenerated by 'django-admin startproject' using Django 1.10.3.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"\nDjango settings for aplicaciones_informaticas project.\n\nGenerated by 'django-admin startproject' using Django 1.10.3.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full ... | ```python
"""
Django settings for aplicaciones_informaticas project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r@@6sgb_j9v#=x!u3!j%1jvfs6c02)#948k^sffb-)0i1by4zx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['192.168.5.20', 'ariwainer.com.ar', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'geoposition',
'backend',
'rest_framework',
'corsheaders'
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny',),
'PAGE_SIZE': 10
}
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aplicaciones_informaticas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'aplicaciones_informaticas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import geo_settings
GEOPOSITION_GOOGLE_MAPS_API_KEY = geo_settings.api_key
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = 'localhost:3000'
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n\"\"\"\nORM definitions for mapping tracks data stored in PostgreSQL database\n\"\"\"\n\nfrom sqlalchemy import create_engine, Column, Integer, Float, String\nfrom sqlalchemy.dialects.postgresql import INTERVAL, TIMESTAMP\... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n\"\"\"\nORM definitions for mapping tracks data stored in PostgreSQL database\n\"\"\"\n\nfrom sqlalchemy import create_engine, Column, Integer, Float, String\nfrom sqlalchemy.dialects.postgresql import INTE... | ```python
"""
ORM definitions for mapping tracks data stored in PostgreSQL database
"""
from sqlalchemy import create_engine, Column, Integer, Float, String
from sqlalchemy.dialects.postgresql import INTERVAL, TIMESTAMP
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import URL
from geoalchemy2 import Geometry
from geoalchemy2.functions import ST_AsGeoJSON as st_asgeojson
from settings import PG_DB_CONF
import json
import logging
logger = logging.getLogger(__name__)
engine = create_engine(URL(**PG_DB_CONF))
Base = declarative_base(bind=engine)
Session = scoped_session(sessionmaker(engine))
class Track(Base):
__tablename__ = 'tracks'
ogc_fid = Column(Integer, primary_key=True)
gpx_id = Column(Integer)
name = Column(String)
cmt = Column(String)
desc = Column(String)
src = Column(String)
number = Column(Integer)
wkb_geometry = Column(Geometry(geometry_type='MULTILINESTRING', srid=4326))
class TrackInfo(Base):
__tablename__ = 'trackinfo'
ogc_fid = Column(Integer, primary_key=True)
gpx_id = Column(Integer)
segments = Column(Integer)
length_2d = Column(Float)
length_3d = Column(Float)
moving_time = Column(INTERVAL)
stopped_time = Column(INTERVAL)
max_speed = Column(Float)
uphill = Column(Float)
downhill = Column(Float)
started = Column(TIMESTAMP)
ended = Column(TIMESTAMP)
points = Column(Integer)
start_lon = Column(Float)
start_lat = Column(Float)
end_lon = Column(Float)
end_lat = Column(Float)
#start_geom = Column(Geometry(geometry_type='POINT', srid=4326))
#end_geom = Column(Geometry(geometry_type='POINT', srid=4326))
def track_serializer(instance):
track_dict = {}
track_dict['ID'] = instance.ogc_fid
track_dict['GPX ID'] = instance.gpx_id
track_dict['Name'] = instance.name
track_dict['CMT'] = instance.cmt
track_dict['Description'] = instance.desc
track_dict['Source'] = instance.src
track_dict['Number'] = instance.number
track_dict['GeoJSON'] = json.loads(
Session.scalar(st_asgeojson(instance.wkb_geometry)))
logger.debug("Serialized track: %s", track_dict)
return track_dict
def trackinfo_serializer(instance):
ti_dict = {}
ti_dict['ID'] = instance.ogc_fid
ti_dict['GPX ID'] = instance.gpx_id
ti_dict['Segments'] = instance.segments
ti_dict['2D length'] = instance.length_2d
ti_dict['3D length'] = instance.length_3d
ti_dict['Moving time'] = str(instance.moving_time)
ti_dict['Stopped time'] = str(instance.stopped_time)
ti_dict['Max speed'] = instance.max_speed
ti_dict['Uphill'] = instance.uphill
ti_dict['Downhill'] = instance.downhill
ti_dict['Started at'] = str(instance.started)
ti_dict['Ended at'] = str(instance.ended)
ti_dict['Points'] = instance.points
ti_dict['Start lon'] = instance.start_lon
ti_dict['Start lat'] = instance.start_lat
ti_dict['End lon'] = instance.end_lon
ti_dict['End lat'] = instance.end_lat
#ti_dict['Start point GeoJSON'] = json.loads(
#Session.scalar(st_asgeojson(instance.start_geom)))
#ti_dict['End point GeoJSON'] = json.loads(
#Session.scalar(st_asgeojson(instance.end_geom)))
logger.debug("Serialized trackinfo: %s", ti_dict)
return ti_dict
``` |
[
{
"content": "Here is the source code:\n```python\n# Copyright 2009, Peter A. Bigot\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain a\n# copy of the License at:\n#\n# http://www.apache.org/licen... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n# Copyright 2009, Peter A. Bigot\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain a\n# copy of the License at:\n#\n# http://www.... | ```python
# Copyright 2009, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Classes related to XMLSchema facets.
The definitions herein are from sections U{4.2<http://www.w3.org/TR/xmlschema-2/index.html#rf-facets>}
and U{4.3<http://www.w3.org/TR/xmlschema-2/index.html#rf-facets>} of
U{XML Schema Part 2: Datatypes<http://www.w3.org/TR/xmlschema-2/>}.
Facets are attributes of a datatype that constrain its lexical and
value spaces.
"""
from xml.dom import Node
import pyxb_114
import types
import datatypes
import basis
from pyxb_114.utils import utility
from pyxb_114.utils import domutils
import re
class Facet (pyxb_114.cscRoot):
"""The base class for facets.
This provides association with STDs, a name, and a value for the facet.
"""
_Name = None
@classmethod
def Name (self):
"""The name of a facet is a class constant."""
return self._Name
__baseTypeDefinition = None
def baseTypeDefinition (self):
"""The SimpleTypeDefinition component restricted by this facet.
Note: this is NOT the STD to which the facet belongs, but is
usually that STD's base type. I.e., this jumps us through all
the containing restrictions and extensions to get to the core
type definition."""
return self.__baseTypeDefinition
__ownerTypeDefinition = None
def ownerTypeDefinition (self):
"""The SimpleTypeDefinition component to which this facet belongs.
I.e., the one in which the hasFacet specification was found.
This value is None if the facet is not associated with an
STD."""
return self.__ownerTypeDefinition
# The default valueDatatype to use for instances of this class.
# This is overridden in subclasses that do not use late value
# datatype bindings.
_ValueDatatype = None
# The datatype used for facet values.
__valueDatatype = None
def valueDatatype (self):
"""Get the datatype used to represent values of the facet.
This usually has nothing to do with the owner datatype; for
example, the length facet may apply to any STD but the value
of the facet is an integer. In generated bindings this is
usually set explicitly in the facet constructor; when
processing a schema, it is derived from the value's type
definition.
"""
if self.__valueDatatype is None:
assert self.baseTypeDefinition() is not None
return self.baseTypeDefinition().pythonSupport()
return self.__valueDatatype
__value = None
def _value (self, v): self.__value = v
def value (self): return self.__value
__annotation = None
def annotation (self): return self.__annotation
def __init__ (self, **kw):
"""Create a facet instance, initializing it from the keyword parameters."""
super(Facet, self).__init__(**kw)
# Can't create base class instances
assert Facet != self.__class__
self.setFromKeywords(_reset=True, _constructor=True, **kw)
def _setFromKeywords_vb (self, **kw):
"""Configure values of the facet from a set of keywords.
This method is pre-extended; subclasses should invoke the
parent method after setting their local configuration.
@keyword _reset: If C{False} or missing, existing values will
be retained if they do not appear in the
keywords. If C{True}, members not defined in
the keywords are set to a default.
@keyword base_type_definition:
@keyword owner_type_definition:
@keyword value_datatype:
"""
if not kw.get('_reset', False):
kw.setdefault('base_type_definition', self.__baseTypeDefinition)
kw.setdefault('owner_type_definition', self.__ownerTypeDefinition)
kw.setdefault('value_datatype', self.__valueDatatype)
self.__baseTypeDefinition = kw.get('base_type_definition', None)
self.__ownerTypeDefinition = kw.get('owner_type_definition', None)
self.__valueDatatype = kw.get('value_datatype', self._ValueDatatype)
# Verify that there's enough information that we should be
# able to identify a PST suitable for representing facet
# values.
assert (self.__valueDatatype is not None) or (self.__baseTypeDefinition is not None)
super_fn = getattr(super(Facet, self), '_setFromKeywords_vb', lambda *a,**kw: self)
return super_fn(**kw)
def setFromKeywords (self, **kw):
"""Public entrypoint to the _setFromKeywords_vb call hierarchy."""
return self._setFromKeywords_vb(**kw)
@classmethod
def ClassForFacet (cls, name):
"""Given the name of a facet, return the Facet subclass that represents it."""
assert cls != Facet
if 0 <= name.find(':'):
name = name.split(':', 1)[1]
facet_class = globals().get('%s_%s' % (cls._FacetPrefix, name), None)
if facet_class is None:
raise pyxb_114.LogicError('Unrecognized facet name %s: expect %s' % (name, ','.join([_f._Name for _f in cls.Facets])))
assert facet_class is not None
return facet_class
def _valueString (self):
if isinstance(self, _CollectionFacet_mixin):
return ','.join([ str(_i) for _i in self.items() ])
if (self.valueDatatype() is not None) and (self.value() is not None):
try:
return self.valueDatatype().XsdLiteral(self.value())
except Exception, e:
print 'Stringize facet %s produced %s' % (self.Name(), e)
raise
return str(self.value())
def __str__ (self):
rv = []
rv.append('%s="%s"' % (self.Name(), self._valueString()))
if isinstance(self, _Fixed_mixin) and self.fixed():
rv.append('[fixed]')
return ''.join(rv)
class ConstrainingFacet (Facet):
"""One of the facets defined in section 4.3, which provide
constraints on the lexical space of a type definition."""
# The prefix used for Python classes used for a constraining
# facet. Note that this is not the prefix used when generating a
# Python class member that specifies a constraining instance, even
# if it happens to be the same digraph.
_FacetPrefix = 'CF'
def __init__ (self, **kw):
super(ConstrainingFacet, self).__init__(**kw)
def _validateConstraint_vx (self, value):
raise pyxb_114.LogicError("Facet %s does not implement constraints" % (self.Name(),))
def validateConstraint (self, value):
"""Return True iff the given value satisfies the constraint represented by this facet instance.
The actual test is delegated to the subclasses."""
return self._validateConstraint_vx(value)
def __setFromKeywords(self, **kw):
kwv = kw.get('value', None)
if kwv is not None:
if not isinstance(kwv, self.valueDatatype()):
kwv = self.valueDatatype()(kwv)
self._value(kwv)
def _setFromKeywords_vb (self, **kw):
"""Extend base class.
Additional keywords:
* value
"""
# NB: This uses post-extension because it makes reference to the value_data_type
super_fn = getattr(super(ConstrainingFacet, self), '_setFromKeywords_vb', lambda *a,**kw: self)
rv = super_fn(**kw)
self.__setFromKeywords(**kw)
return rv
class _LateDatatype_mixin (pyxb_114.cscRoot):
"""Marker class to indicate that the facet instance must be told
its datatype when it is constructed.
This is necessary for facets like L{CF_minInclusive} and
L{CF_minExclusive}, for which the value is determined by the base
type definition of the associated STD. In some cases the value
that must be used in the facet cannot be represented in the Python
type used for the facet; see L{LateDatatypeBindsSuperclass}.
"""
_LateDatatypeBindsSuperclass = None
"""The class variable that indicates that the Subclasses must
override this variable with a value of C{True} or C{False}. The
value is C{True} iff the value used for the facet is not within
the value space of the corresponding value datatype; for example,
L{CF_minExclusive}."""
@classmethod
def LateDatatypeBindsSuperclass (cls):
"""Return true if false if the proposed datatype should be
used, or True if the base type definition of the proposed
datatype should be used."""
if cls._LateDatatypeBindsSuperclass is None:
raise pyxb_114.LogicError('Class %s did not set _LateDatatypeBindsSuperclass variable.')
return cls._LateDatatypeBindsSuperclass
@classmethod
def BindingValueDatatype (cls, value_type):
"""Find the datatype for facet values when this facet is bound
to the given value_type.
If the C{value_type} is an STD, the associated Python support
datatype from this value_type scanning up through the base
type hierarchy is used.
"""
import pyxb_114.xmlschema.structures as structures
if isinstance(value_type, structures.SimpleTypeDefinition):
# Back up until we find something that actually has a
# datatype
while not value_type.hasPythonSupport():
value_type = value_type.baseTypeDefinition()
value_type = value_type.pythonSupport()
assert issubclass(value_type, basis.simpleTypeDefinition)
if cls.LateDatatypeBindsSuperclass():
value_type = value_type.XsdSuperType()
return value_type
def bindValueDatatype (self, value_datatype):
self.setFromKeywords(_constructor=True, value_datatype=self.BindingValueDatatype(value_datatype))
class _Fixed_mixin (pyxb_114.cscRoot):
"""Mix-in to a constraining facet that adds support for the 'fixed' property."""
__fixed = None
def fixed (self): return self.__fixed
def __setFromKeywords (self, **kw):
if kw.get('_reset', False):
self.__fixed = None
kwv = kw.get('fixed', None)
if kwv is not None:
self.__fixed = datatypes.boolean(kwv)
def _setFromKeywords_vb (self, **kw):
"""Extend base class.
Additional keywords:
* fixed
"""
self.__setFromKeywords(**kw)
super_fn = getattr(super(_Fixed_mixin, self), '_setFromKeywords_vb', lambda *a,**kw: self)
return super_fn(**kw)
class _CollectionFacet_mixin (pyxb_114.cscRoot):
"""Mix-in to handle facets whose values are collections, not scalars.
For example, the enumeration and pattern facets maintain a list of
enumeration values and patterns, respectively, as their value
space.
Subclasses must define a class variable _CollectionFacet_itemType
which is a reference to a class that is used to construct members
of the collection.
"""
__items = None
def _setFromKeywords_vb (self, **kw):
"""Extend base class.
@keyword _constructor: If C{False} or absent, the object being
set is a member of the collection. If
C{True}, the object being set is the
collection itself.
"""
if kw.get('_reset', False):
self.__items = []
if not kw.get('_constructor', False):
#print self._CollectionFacet_itemType
self.__items.append(self._CollectionFacet_itemType(facet_instance=self, **kw))
super_fn = getattr(super(_CollectionFacet_mixin, self), '_setFromKeywords_vb', lambda *a,**kw: self)
return super_fn(**kw)
def _items (self):
"""The members of the collection, as a reference."""
return self.__items
def items (self):
"""The members of the collection, as a copy."""
return self.__items[:]
def iteritems (self):
"""The members of the collection as an iterator"""
return iter(self.__items)
class CF_length (ConstrainingFacet, _Fixed_mixin):
"""A facet that specifies the length of the lexical representation of a value.
See U{http://www.w3.org/TR/xmlschema-2/#rf-length}
"""
_Name = 'length'
_ValueDatatype = datatypes.nonNegativeInteger
def _validateConstraint_vx (self, value):
value_length = value.xsdValueLength()
return (value_length is None) or (self.value() is None) or (value_length == self.value())
class CF_minLength (ConstrainingFacet, _Fixed_mixin):
"""A facet that constrains the length of the lexical representation of a value.
See U{http://www.w3.org/TR/xmlschema-2/#rf-minLength}
"""
_Name = 'minLength'
_ValueDatatype = datatypes.nonNegativeInteger
def _validateConstraint_vx (self, value):
value_length = value.xsdValueLength()
return (value_length is None) or (self.value() is None) or (value_length >= self.value())
class CF_maxLength (ConstrainingFacet, _Fixed_mixin):
"""A facet that constrains the length of the lexical representation of a value.
See U{http://www.w3.org/TR/xmlschema-2/#rf-minLength}
"""
_Name = 'maxLength'
_ValueDatatype = datatypes.nonNegativeInteger
def _validateConstraint_vx (self, value):
value_length = value.xsdValueLength()
return (value_length is None) or (self.value() is None) or (value_length <= self.value())
import pyxb_114.utils.xmlre
class _PatternElement (utility.PrivateTransient_mixin):
"""This class represents individual patterns that appear within a CF_pattern collection."""
# The compiled regular expression is marked transient because we
# normally do development with Python 2.5, and consequently save
# the pickled namespace archives that go into the distribution
# with that version. Compiled regular expressions in Python 2.5
# include a reference to the re._compile method, which does not
# exist in Python 2.4. As a result, attempts to load a namespace
# which includes types with pattern restrictions fail.
__PrivateTransient = set()
__compiledExpression = None
__PrivateTransient.add('compiledExpression')
__pythonExpression = None
pattern = None
annotation = None
def __init__ (self, pattern=None, value=None, annotation=None, **kw):
if pattern is None:
assert value is not None
pattern = value
assert isinstance(pattern, types.StringTypes)
self.pattern = pattern
self.annotation = annotation
self.__pythonExpression = pyxb_114.utils.xmlre.XMLToPython(pattern)
#print 'Translated pattern %s to %s' % (pattern.encode('ascii', 'xmlcharrefreplace'),
# self.__pythonExpression.encode('ascii', 'xmlcharrefreplace'))
def __str__ (self): return self.pattern
def matches (self, text):
if self.__compiledExpression is None:
self.__compiledExpression = re.compile(self.__pythonExpression)
return self.__compiledExpression.match(text)
class CF_pattern (ConstrainingFacet, _CollectionFacet_mixin):
"""A facet that constrains the lexical representation of a value
to match one of a set of patterns.
See U{http://www.w3.org/TR/xmlschema-2/#rf-pattern}
@note: In PyXB, pattern constraints are ignored for any type with
a Python representation that does not derive from C{basestring}.
This is due to the difficulty in reconstructing the lexical
representation of a non-string type after it has been converted to
its value space.
@todo: On creating new instances of non-string simple types from
string representations, we could apply pattern constraints. That
would mean checking them prior to invoking the Factory method.
"""
_Name = 'pattern'
_CollectionFacet_itemType = _PatternElement
_ValueDatatype = datatypes.string
__patternElements = None
def patternElements (self): return self.__patternElements
def __init__ (self, **kw):
super(CF_pattern, self).__init__(**kw)
self.__patternElements = []
def addPattern (self, **kw):
pattern = self._CollectionFacet_itemType(**kw)
self.__patternElements.append(pattern)
return pattern
def _validateConstraint_vx (self, value):
# If validation is inhibited, or if the facet hasn't had any
# restrictions applied yet, return True.
if 0 == len(self.__patternElements):
return True
if not isinstance(value, basestring):
return True
for pe in self.__patternElements:
if pe.matches(value):
return True
return False
class _EnumerationElement (object):
"""This class represents individual values that appear within a
L{CF_enumeration} collection."""
__value = None
def value (self):
"""The Python value that is used for equality testing
against this enumeration.
This is an instance of L{enumeration.valueDatatype()<CF_enumeration.valueDatatype>},
initialized from the unicodeValue."""
return self.__value
__tag = None
def tag (self):
"""The Python identifier used for the named constant representing
the enumeration value.
This should include any desired prefix, since it must be
unique within its binding class. If C{None}, no enumeration
constant will be generated."""
return self.__tag
def _setTag (self, tag):
"""Set the tag to be used for this enumeration."""
self.__tag = tag
__enumeration = None
def enumeration (self):
"""A reference to the L{CF_enumeration} instance that owns this element."""
return self.__enumeration
__unicodeValue = None
def unicodeValue (self):
"""The unicode string that defines the enumeration value."""
return self.__unicodeValue
def __init__ (self, enumeration=None, unicode_value=None,
description=None, annotation=None, tag=None,
**kw):
# The preferred keyword is "unicode_value", but when being
# generically applied by
# structures.SimpleTypeDefinition.__updateFacets, the unicode
# value comes in through the keyword "value". Similarly for
# "enumeration" and "facet_instance".
if unicode_value is None:
unicode_value = kw['value']
if enumeration is None:
enumeration = kw['facet_instance']
self.__unicodeValue = unicode_value
self.__enumeration = enumeration
self.__description = description
self.__annotation = annotation
self.__tag = tag
assert self.__enumeration is not None
value_datatype = self.enumeration().valueDatatype()
self.__value = value_datatype.Factory(self.unicodeValue(), _validate_constraints=False, _from_xml=True)
if (self.__description is None) and (self.__annotation is not None):
self.__description = str(self.__annotation)
def __str__ (self):
return utility.QuotedEscaped(self.unicodeValue())
class CF_enumeration (ConstrainingFacet, _CollectionFacet_mixin, _LateDatatype_mixin):
"""Capture a constraint that restricts valid values to a fixed set.
A STD that has an enumeration restriction should mix-in
L{pyxb_114.binding.basis.enumeration_mixin}, and should have a class
variable titled C{_CF_enumeration} that is an instance of this
class.
"unicode" refers to the Unicode string by which the value is
represented in XML.
"tag" refers to the Python member reference associated with the
enumeration. The value is derived from the unicode value of the
enumeration element and an optional prefix that identifies the
owning simple type when the tag is promoted to module-level
visibility.
"value" refers to the Python value held in the tag
See U{http://www.w3.org/TR/xmlschema-2/#rf-enumeration}
"""
_Name = 'enumeration'
_CollectionFacet_itemType = _EnumerationElement
_LateDatatypeBindsSuperclass = False
__tagToElement = None
__valueToElement = None
__unicodeToElement = None
# The prefix to be used when making enumeration tags visible at
# the module level. If None, tags are not made visible.
__enumPrefix = None
def __init__ (self, **kw):
super(CF_enumeration, self).__init__(**kw)
self.__enumPrefix = kw.get('enum_prefix', self.__enumPrefix)
self.__tagToElement = { }
self.__valueToElement = { }
self.__unicodeToElement = { }
def enumPrefix (self):
return self.__enumPrefix
def elements (self):
"""@deprecated: Use L{items} or L{iteritems} instead."""
return self.items()
def values (self):
"""Return a list of enumeration values."""
return [ _ee.value() for _ee in self.iteritems() ]
def itervalues (self):
"""Generate the enumeration values."""
for ee in self.iteritems():
yield ee.value()
def addEnumeration (self, **kw):
kw['enumeration'] = self
ee = _EnumerationElement(**kw)
if ee.tag in self.__tagToElement:
raise pyxb_114.IncompleteImplementationError('Duplicate enumeration tags')
self.__tagToElement[ee.tag()] = ee
self.__unicodeToElement[ee.unicodeValue()] = ee
value = ee.value()
# Not just issubclass(self.valueDatatype(), basis.STD_list);
# this may be a union with one of those as a member type.
if isinstance(value, list):
value = ' '.join([ _v.xsdLiteral() for _v in value ])
self.__valueToElement[value] = ee
self._items().append(ee)
return value
def elementForValue (self, value):
"""Return the L{_EnumerationElement} instance that has the given value.
@raise KeyError: the value is not valid for the enumeration."""
return self.__valueToElement[value]
def valueForUnicode (self, ustr):
"""Return the enumeration value corresponding to the given unicode string.
If ustr is not a valid option for this enumeration, return None."""
rv = self.__unicodeToElement.get(ustr, None)
if rv is not None:
rv = rv.value()
return rv
def _validateConstraint_vx (self, value):
# If validation is inhibited, or if the facet hasn't had any
# restrictions applied yet, return True.
if 0 == len(self._items()):
return True
for ee in self.iteritems():
if ee.value() == value:
return True
return False
class _Enumeration_mixin (pyxb_114.cscRoot):
"""Marker class to indicate that the generated binding has enumeration members."""
@classmethod
def valueForUnicode (cls, ustr):
return cls._CF_enumeration.valueForUnicode(ustr)
class _WhiteSpace_enum (datatypes.NMTOKEN, _Enumeration_mixin):
"""The enumeration used to constrain the whiteSpace facet"""
pass
_WhiteSpace_enum._CF_enumeration = CF_enumeration(value_datatype=_WhiteSpace_enum)
_WhiteSpace_enum.preserve = _WhiteSpace_enum._CF_enumeration.addEnumeration(unicode_value=u'preserve', tag='preserve')
_WhiteSpace_enum.replace = _WhiteSpace_enum._CF_enumeration.addEnumeration(unicode_value=u'replace', tag='replace')
_WhiteSpace_enum.collapse = _WhiteSpace_enum._CF_enumeration.addEnumeration(unicode_value=u'collapse', tag='collapse')
# NOTE: For correctness we really need to initialize the facet map for
# WhiteSpace_enum, even though at the moment it isn't necessary. We
# can't right now, because its parent datatypes.NMTOKEN hasn't been
# initialized yet
_WhiteSpace_enum._InitializeFacetMap(_WhiteSpace_enum._CF_enumeration)
class CF_whiteSpace (ConstrainingFacet, _Fixed_mixin):
"""Specify the value-space interpretation of whitespace.
See U{http://www.w3.org/TR/xmlschema-2/#rf-whiteSpace}
"""
_Name = 'whiteSpace'
_ValueDatatype = _WhiteSpace_enum
__TabCRLF_re = re.compile("[\t\n\r]")
__MultiSpace_re = re.compile(" +")
def normalizeString (self, value):
"""Normalize the given string in accordance with the configured whitespace interpretation."""
if self.value() is None:
return value
if self.value() == _WhiteSpace_enum.preserve:
return utility.NormalizeWhitespace(value, preserve=True)
if self.value() == _WhiteSpace_enum.replace:
return utility.NormalizeWhitespace(value, replace=True)
assert self.value() == _WhiteSpace_enum.collapse, 'Unexpected value "%s" for whiteSpace facet' % (self.value(),)
return utility.NormalizeWhitespace(value, collapse=True)
def _validateConstraint_vx (self, value):
"""No validation rules for whitespace facet."""
return True
class CF_minInclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
"""Specify the minimum legal value for the constrained type.
See U{http://www.w3.org/TR/xmlschema-2/#rf-minInclusive}
"""
_Name = 'minInclusive'
_LateDatatypeBindsSuperclass = False
def _validateConstraint_vx (self, value):
return (self.value() is None) or (self.value() <= value)
class CF_maxInclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
"""Specify the maximum legal value for the constrained type.
See U{http://www.w3.org/TR/xmlschema-2/#rf-maxInclusive}
"""
_Name = 'maxInclusive'
_LateDatatypeBindsSuperclass = False
def _validateConstraint_vx (self, value):
return (self.value() is None) or (self.value() >= value)
class CF_minExclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
"""Specify the exclusive lower bound of legal values for the constrained type.
See U{http://www.w3.org/TR/xmlschema-2/#rf-minExclusive}
"""
_Name = 'minExclusive'
_LateDatatypeBindsSuperclass = True
def _validateConstraint_vx (self, value):
return (self.value() is None) or (self.value() < value)
class CF_maxExclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
"""Specify the exclusive upper bound of legal values for the constrained type.
See U{http://www.w3.org/TR/xmlschema-2/#rf-maxExclusive}
"""
_Name = 'maxExclusive'
_LateDatatypeBindsSuperclass = True
def _validateConstraint_vx (self, value):
return (self.value() is None) or (self.value() > value)
class CF_totalDigits (ConstrainingFacet, _Fixed_mixin):
"""Specify the number of digits in the *value* space of the type.
See U{http://www.w3.org/TR/xmlschema-2/#rf-totalDigits}
"""
_Name = 'totalDigits'
_ValueDatatype = datatypes.positiveInteger
def _validateConstraint_vx (self, value):
if self.value() is None:
return True
n = 0
scale = 1
match = False
v = None
while (n <= self.value()) and (not match):
v = long(value * scale)
match = ((value * scale) == v)
if self.value() == n:
break
n += 1
scale *= 10
while n < self.value():
n += 1
scale *= 10
return match and (v is not None) and (abs(v) < scale)
class CF_fractionDigits (ConstrainingFacet, _Fixed_mixin):
"""Specify the number of sub-unit digits in the *value* space of the type.
See U{http://www.w3.org/TR/xmlschema-2/#rf-fractionDigits}
"""
_Name = 'fractionDigits'
_ValueDatatype = datatypes.nonNegativeInteger
def _validateConstraint_vx (self, value):
if self.value() is None:
return True
n = 0
scale = 1
while n <= self.value():
if ((value * scale) == long(value * scale)):
return True
n += 1
scale *= 10
return False
class FundamentalFacet (Facet):
"""A fundamental facet provides information on the value space of the associated type."""
_FacetPrefix = 'FF'
@classmethod
def CreateFromDOM (cls, node, owner_type_definition, base_type_definition=None):
facet_class = cls.ClassForFacet(node.getAttribute('name'))
rv = facet_class(base_type_definition=base_type_definition,
owner_type_definition=owner_type_definition)
rv.updateFromDOM(node)
def updateFromDOM (self, node):
if not node.hasAttribute('name'):
raise pyxb_114.SchemaValidationError('No name attribute in facet')
assert node.getAttribute('name') == self.Name()
self._updateFromDOM(node)
def _updateFromDOM (self, node):
try:
super(FundamentalFacet, self)._updateFromDOM(node)
except AttributeError, e:
pass
if (self.valueDatatype() is not None) and node.hasAttribute('value'):
self._value(self.valueDatatype()(node.getAttribute('value')))
# @todo
self.__annotation = None
return self
class FF_equal (FundamentalFacet):
"""Specifies that the associated type supports a notion of equality.
See U{http://www.w3.org/TR/xmlschema-2/#equal}
"""
_Name = 'equal'
class FF_ordered (FundamentalFacet):
"""Specifies that the associated type supports a notion of order.
See U{http://www.w3.org/TR/xmlschema-2/#rf-ordered}
"""
_LegalValues = ( 'false', 'partial', 'total' )
_Name = 'ordered'
_ValueDatatype = datatypes.string
def __init__ (self, **kw):
# @todo: correct value type definition
super(FF_ordered, self).__init__(**kw)
class FF_bounded (FundamentalFacet):
"""Specifies that the associated type supports a notion of bounds.
See U{http://www.w3.org/TR/xmlschema-2/#rf-bounded}
"""
_Name = 'bounded'
_ValueDatatype = datatypes.boolean
class FF_cardinality (FundamentalFacet):
"""Specifies that the associated type supports a notion of length.
See U{http://www.w3.org/TR/xmlschema-2/#rf-cardinality}
"""
_LegalValues = ( 'finite', 'countably infinite' )
_Name = 'cardinality'
_ValueDatatype = datatypes.string
def __init__ (self, **kw):
# @todo correct value type definition
super(FF_cardinality, self).__init__(value_datatype=datatypes.string, **kw)
class FF_numeric (FundamentalFacet):
"""Specifies that the associated type represents a number.
See U{http://www.w3.org/TR/xmlschema-2/#rf-numeric}
"""
_Name = 'numeric'
_ValueDatatype = datatypes.boolean
# The fixed set of expected facets
ConstrainingFacet.Facets = [
CF_length, CF_minLength, CF_maxLength, CF_pattern, CF_enumeration,
CF_whiteSpace, CF_minInclusive, CF_maxInclusive, CF_minExclusive,
CF_maxExclusive, CF_totalDigits, CF_fractionDigits ]
FundamentalFacet.Facets = [
FF_equal, FF_ordered, FF_bounded, FF_cardinality, FF_numeric ]
Facet.Facets = []
Facet.Facets.extend(ConstrainingFacet.Facets)
Facet.Facets.extend(FundamentalFacet.Facets)
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# -*- coding: utf-8 -*-\n##############################################################################\n# For copyright and license notices, see __openerp__.py file in module root\n# directory\n#################################... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n##############################################################################\n# For copyright and license notices, see __openerp__.py file in module root\n# directory\n#################... | ```python
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
from .server import custom_sudo as sudo
import logging
_logger = logging.getLogger(__name__)
class server_docker_image(models.Model):
""""""
_name = 'infrastructure.server_docker_image'
_description = 'Server Docker Image'
_rec_name = 'docker_image_id'
docker_image_id = fields.Many2one(
'infrastructure.docker_image',
'Docker Image',
required=True,
)
server_id = fields.Many2one(
'infrastructure.server',
'Server',
required=True,
ondelete='cascade',
)
_sql_constraints = [
('image_uniq', 'unique(docker_image_id, server_id)',
'Docker Image Must be Unique per server'),
]
@api.multi
def pull_image(self, context=None, detached=False):
""" Tuvimos que ponerle el context porque desde la vista lo pasa sin
enmascararlo en self"""
self.server_id.get_env()
image = self.docker_image_id
image_name = image.pull_name
# if any tag, pull the first one
if image.tag_ids:
image_name = '%s:%s' % (image_name, image.tag_ids[0].name)
_logger.info("Pulling Image %s" % image_name)
if detached:
sudo('dtach -n `mktemp -u /tmp/dtach.XXXX` docker pull %s' %
image_name)
else:
sudo('docker pull %s' % image_name)
@api.multi
def pull_image_detached(self):
self.pull_image(detached=True)
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# TESTING FILE made.by.a.fox. 12.2.15\n# Updated by acrule 01.21.16\n\n#FEATURE LIST\n# Y connect to db\n# Y write to file\n# Y Write JSON format\n# Accept input date pa... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# TESTING FILE made.by.a.fox. 12.2.15\n# Updated by acrule 01.21.16\n\n#FEATURE LIST\n# Y connect to db\n# Y write to file\n# Y Write JSON format\n# Acce... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# TESTING FILE made.by.a.fox. 12.2.15
# Updated by acrule 01.21.16
#FEATURE LIST
# Y connect to db
# Y write to file
# Y Write JSON format
# Accept input date parameter
#KNOWN ISSUES
# 2. no formatting or conversion of datetime stamps
import re
import os
import sys
import json
import sqlite3 as lite
import collections
import time
import datetime
db_file = os.path.expanduser('~/.traces/traces.sqlite') #looks for db under ~/.traces
con = lite.connect(db_file)
with con:
data = [] #master data container
apps = [] #list of apps
windows = [] # list of windows
urls = []
appevents = [] #list of application events
windowevents = [] #list of window events
urlevents = []
exps = [] #list of experiences
images = [] #list of screenshots
words = [] #list of keywords
cur = con.cursor()
#SQL query strings
appsSQL = "SELECT * FROM app"
windowsSQL = "SELECT * FROM window"
urlSQL = "SELECT * FROM url"
activeappSQL = "SELECT a.id, a.app_id, a.event, a.time as startt, min(b.time) AS endt FROM appevent a, appevent b WHERE a.app_id = b.app_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
activewindowSQL = "SELECT a.id, a.window_id, a.event, a.time as startt, min(b.time) AS endt FROM windowevent a, windowevent b WHERE a.window_id = b.window_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
activeurlSQL = "SELECT a.id, a.url_id, a.app_id, a.window_id, a.event, a.time as startt, min(b.time) AS endt FROM urlevent a, urlevent b WHERE a.url_id = b.url_id AND a.window_id = b.window_id AND a.app_id = b.app_id AND a.event = 'Active' AND b.event in ('Inactive', 'Close') AND a.time < b.time AND a.time IS NOT NULL AND b.time IS NOT NULL GROUP BY startt"
experienceSQL = "SELECT * FROM experience"
wordsSQL = "SELECT * FROM keys"
#GET list of applications
cur.execute(appsSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['time'] = row[1]
a['name'] = row[2]
apps.append(a)
#GET list of windows
cur.execute(windowsSQL)
rows = cur.fetchall()
for row in rows:
w = collections.OrderedDict()
w['id'] = row[0]
w['time'] = row[1]
w['name'] = row[2]
w['app'] = row[3]
windows.append(w)
#GET list of urls
cur.execute(urlSQL)
rows = cur.fetchall()
for row in rows:
u = collections.OrderedDict()
u['id'] = row[0]
u['time'] = row[1]
u['title'] = row[2]
u['url'] = row[3]
u['host'] = row[4]
urls.append(u)
#GET list intervals for primary application
cur.execute(activeappSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['appid'] = row[1]
a['event'] = row[2]
a['start'] = row[3]
a['end'] = row[4]
appevents.append(a)
#GET list intervals for primary window
cur.execute(activewindowSQL)
rows = cur.fetchall()
for row in rows:
w = collections.OrderedDict()
w['id'] = row[0]
w['windowid'] = row[1]
w['appid'] = (item for item in windows if item["id"] == row[1]).next()['app']
w['event'] = row[2]
w['start'] = row[3]
w['end'] = row[4]
windowevents.append(w)
#GET list intervals for urls
cur.execute(activeurlSQL)
rows = cur.fetchall()
for row in rows:
u = collections.OrderedDict()
u['id'] = row[0]
u['urlid'] = row[1]
u['appid'] = row[2]
u['windowid'] = row[3]
u['event'] = row[4]
u['start'] = row[5]
u['end'] = row[6]
urlevents.append(u)
#GET list of experiences
cur.execute(experienceSQL)
rows = cur.fetchall()
for row in rows:
a = collections.OrderedDict()
a['id'] = row[0]
a['text'] = row[2]
exps.append(a)
#GET list of screenshots
image_dir = os.path.expanduser('~/.traces/screenshots') #looks for db under ~/.traces
for y in os.listdir(image_dir):
y_dir = os.path.join(image_dir,y)
if not os.path.isdir(y_dir):
continue
for m in os.listdir(y_dir):
m_dir = os.path.join(y_dir, m)
if not os.path.isdir(m_dir):
continue
for d in os.listdir(m_dir):
d_dir = os.path.join(m_dir, d)
if not os.path.isdir(d_dir):
continue
for h in os.listdir(d_dir):
h_dir = os.path.join(d_dir, h)
if not os.path.isdir(h_dir):
continue
h_images = os.listdir(h_dir)
for image in h_images:
#make sure the file is an image
if image[-4:] == '.jpg':
i = collections.OrderedDict()
image_time = datetime.datetime.strptime(image[0:19], '%y%m%d-%H%M%S%f')
i['time'] = (image_time - datetime.datetime(1970,1,1)).total_seconds() + time.timezone #add timezone offset
i['image'] = os.path.join("screenshots", y, m, d, h, image)
images.append(i)
#GET keywords
cmd_rows = []
newWord = ['Enter','Left','Right','Up','Down','Tab','Escape', ' ']
starttime = 0.0
app = 0
window = 0
s = ''
cur.execute(wordsSQL)
rows = cur.fetchall()
for row in rows:
if 'Cmd' in row[3]:
cmd_rows.append(row)
else:
text = str(row[2])
# if its a char indicating a new word, save our text token
if text in newWord:
# save our data
if len(s) > 0:
k = collections.OrderedDict()
k['time'] = starttime #datetime.datetime.fromtimestamp(starttime).strftime("%H:%M %m/%d/%y")
k['text'] = s #just pass the whole string for now
k['app'] = app
k['window'] = window
words.append(k)
#reset tracking time
starttime = float(row[1])
s = ''
# if its a regular char on the same window, just keep building the string
elif int(row[5]) == window: # and float(row[1]) - time <= 300.0:
if text == 'Backspace':
s = s[:-1]
else:
s += row[2]
#else its a regular char but we switched windows, save the data
else:
if len(s) > 0:
k = collections.OrderedDict()
k['time'] = starttime #datetime.datetime.fromtimestamp(starttime).strftime("%H:%M %m/%d/%y")
k['text'] = s #just pass teh whole string for now
k['app'] = app
k['window'] = window
words.append(k)
#reset tracking variables
window = int(row[5])
app = int(row[4])
starttime = float(row[1])
#write the character to start the next word
if text in newWord or text == 'Backspace':
s = ''
else:
s = row[2]
#ASSEMBLE apps and experince into json
d = collections.OrderedDict()
d['apps']=apps
d['window']=windows
d['url']=urls
d['appevents']=appevents
d['windowevents']=windowevents
d['urlevents']=urlevents
d['exps']=exps
d['images']=images
d['words']=words
data = d
#WRITE file
file = 'extract.json'
z = open(file,'w')
z.writelines(json.dumps(data))
``` |
[
{
"content": "Here is a code snippet:\n```python\nfrom __future__ import absolute_import\nimport os\nfrom celery import Celery\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\n\nif not settings.configured:\n # set the default Django settings module for the 'celery' program.\n os.env... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nfrom __future__ import absolute_import\nimport os\nfrom celery import Celery\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\n\nif not settings.configured:\n # set the default Django settings module for the 'celery' prog... | ```python
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # pragma: no cover
app = Celery('icecreamratings_project')
class CeleryConfig(AppConfig):
name = 'icecreamratings_project.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
from raven import Client
from raven.contrib.celery import register_signal
client = Client(dsn=settings.RAVEN_CONFIG['dsn'])
register_signal(client)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# $Id$\n#\n# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# which is include... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# $Id$\n#\n# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC\n#\n# @@ All Rights Reserved @@\n# This file is part of the RDKit.\n# The contents are covered by the terms of the BSD license\n# ... | ```python
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Functionality for SATIS typing atoms
"""
from rdkit import Chem
_debug = 0
#
# These are SMARTS patterns for the special cases used in
# SATIS typing.
#
aldehydePatt = Chem.MolFromSmarts('[CD2]=[OD1]')
ketonePatt = Chem.MolFromSmarts('[CD3]=[OD1]')
amidePatt = Chem.MolFromSmarts('[CD3](=[OD1])-[#7]')
esterPatt = Chem.MolFromSmarts('C(=[OD1])-O-[#6]')
carboxylatePatt = Chem.MolFromSmarts('C(=[OD1])-[OX1]')
carboxylPatt = Chem.MolFromSmarts('C(=[OD1])-[OX2]')
specialCases = ((carboxylatePatt,97),
(esterPatt,96),
(carboxylPatt,98),
(amidePatt,95),
(ketonePatt,94),
(aldehydePatt,93))
def SATISTypes(mol,neighborsToInclude=4):
""" returns SATIS codes for all atoms in a molecule
The SATIS definition used is from:
J. Chem. Inf. Comput. Sci. _39_ 751-757 (1999)
each SATIS code is a string consisting of _neighborsToInclude_ + 1
2 digit numbers
**Arguments**
- mol: a molecule
- neighborsToInclude (optional): the number of neighbors to include
in the SATIS codes
**Returns**
a list of strings nAtoms long
"""
global specialCases
nAtoms = mol.GetNumAtoms()
atomicNums = [0]*nAtoms
atoms = mol.GetAtoms()
for i in xrange(nAtoms):
atomicNums[i] = atoms[i].GetAtomicNum()
nSpecialCases = len(specialCases)
specialCaseMatches = [None]*nSpecialCases
for i,(patt,idx) in enumerate(specialCases):
if mol.HasSubstructMatch(patt):
specialCaseMatches[i] = mol.GetSubstructMatches(patt)
else:
specialCaseMatches[i] = ()
codes = [None]*nAtoms
for i in range(nAtoms):
code = [99]*(neighborsToInclude+1)
atom = atoms[i]
atomIdx = atom.GetIdx()
code[0] = min(atom.GetAtomicNum(),99)
bonds = atom.GetBonds()
nBonds = len(bonds)
otherIndices = [-1]*nBonds
if _debug: print code[0],
for j in range(nBonds):
otherIndices[j] = bonds[j].GetOtherAtom(atom).GetIdx()
if _debug: print otherIndices[j],
if _debug: print
otherNums = [atomicNums[x] for x in otherIndices] + \
[1]*atom.GetTotalNumHs()
otherNums.sort()
nOthers = len(otherNums)
if nOthers > neighborsToInclude:
otherNums.reverse()
otherNums = otherNums[:neighborsToInclude]
otherNums.reverse()
for j in range(neighborsToInclude):
code[j+1] = min(otherNums[j],99)
else:
for j in range(nOthers):
code[j+1] = min(otherNums[j],99)
if nOthers < neighborsToInclude and code[0] in [6,8]:
found = 0
for j in range(nSpecialCases):
for matchTuple in specialCaseMatches[j]:
if atomIdx in matchTuple:
code[-1] = specialCases[j][1]
found = 1
break
if found:
break
codes[i] = ''.join(['%02d'%(x) for x in code])
return codes
if __name__ == '__main__':
smis = ['CC(=O)NC','CP(F)(Cl)(Br)(O)',
'O=CC(=O)C','C(=O)OCC(=O)O','C(=O)[O-]']
for smi in smis:
print smi
m = Chem.MolFromSmiles(smi)
codes = SATISTypes(m)
print codes
``` |
[
{
"content": "```python\n#!/usr/bin/python3\n\n# @begin:license\n#\n# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; ei... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/python3\n\n# @begin:license\n#\n# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Softwar... | ```python
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisicaa import node_db
SampleTrackDescription = node_db.NodeDescription(
uri='builtin://sample-track',
display_name='Sample Track',
type=node_db.NodeDescription.PROCESSOR,
node_ui=node_db.NodeUIDescription(
type='builtin://sample-track',
),
processor=node_db.ProcessorDescription(
type='builtin://sample-script',
),
builtin_icon='track-type-sample',
ports=[
node_db.PortDescription(
name='out:left',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
node_db.PortDescription(
name='out:right',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
]
)
``` |
[
{
"content": "Replicate the source code:\n```python\nimport hashlib\nfrom pathlib import Path\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nimport sphinx\n\nimport matplotlib as mpl\nfrom matplotlib import cbook\nfrom matplotlib.mathtext import MathTextParser\nmathtext_p... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport hashlib\nfrom pathlib import Path\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nimport sphinx\n\nimport matplotlib as mpl\nfrom matplotlib import cbook\nfrom matplotlib.mathtext import MathTextPa... | ```python
import hashlib
from pathlib import Path
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import sphinx
import matplotlib as mpl
from matplotlib import cbook
from matplotlib.mathtext import MathTextParser
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, MathTextParser._font_type_mapping)
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = {'fontset': fontset_choice}
class MathDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {'fontset': fontset_choice}
def run(self):
latex = ''.join(self.content)
node = latex_math(self.block_text)
node['latex'] = latex
node['fontset'] = self.options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
with mpl.rc_context({'mathtext.fontset': fontset}):
if Path(filename).exists():
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except Exception:
cbook._warn_external(
f"Could not render math expression {latex}")
depth = 0
return depth
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
fontset = node['fontset']
name = 'math-{}'.format(
hashlib.md5((latex + fontset).encode()).hexdigest()[-10:])
destdir = Path(setup.app.builder.outdir, '_images', 'mathmpl')
destdir.mkdir(parents=True, exist_ok=True)
dest = destdir / f'{name}.png'
depth = latex2png(latex, dest, fontset)
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return (f'<img src="{setup.app.builder.imgpath}/mathmpl/{name}.png"'
f' {cls}{style}/>')
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('mathmpl', math_role)
app.add_directive('mathmpl', MathDirective)
if sphinx.version_info < (1, 8):
app.add_role('math', math_role)
app.add_directive('math', MathDirective)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\nfrom message import Message, except_f1\nimport C1, C2\n\nclass B1(Message):\n \"\"\"Classe que implementa B1.\"\"\"\n\n @property\n def sollicitud(self):\n \"\"\"Retorna l'objecte Sollicitud\"\"\"\n return C1.Sollicitud(self.obj.BajaEner... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nfrom message import Message, except_f1\nimport C1, C2\n\nclass B1(Message):\n \"\"\"Classe que implementa B1.\"\"\"\n\n @property\n def sollicitud(self):\n \"\"\"Retorna l'objecte Sollicitud\"\"\"\n return C1.Sollicitud(s... | ```python
# -*- coding: utf-8 -*-
from message import Message, except_f1
import C1, C2
class B1(Message):
"""Classe que implementa B1."""
@property
def sollicitud(self):
"""Retorna l'objecte Sollicitud"""
return C1.Sollicitud(self.obj.BajaEnergia.DatosSolicitud)
@property
def client(self):
"""Retorna l'objecte Client"""
return C1.Client(self.obj.BajaEnergia.Cliente)
@property
def acceptacio(self):
"""Retorna l'objecte Acceptacio"""
obj = getattr(self.obj, self._header, False)
if obj and hasattr(obj, 'DatosAceptacion'):
return C1.Acceptacio(obj.DatosAceptacion)
return False
@property
def activacio(self):
"""Retorna l'objecte Activacio"""
return C1.Activacio(self.obj.NotificacionBajaEnergia)
@property
def anullacio(self):
"""Retorna l'object Anullacio"""
return C1.Anullacio(self.obj.AnulacionSolicitud)
@property
def rebuig(self):
"""Retorna una llista de Rebuig"""
data = []
for i in self.obj.RechazoATRDistribuidoras.Rechazo:
data.append(C1.Rebuig(i))
return data
@property
def rebuig_anullacio(self):
"""Retorna l'objecte Rebuig"""
data = []
for i in self.obj.RechazoDeAnulacion.RechazoAnulacion:
data.append(C1.Rebuig(i))
return data
@property
def contracte(self):
"""Retorna l'objecte Contracte"""
obj = getattr(self.obj, self._header)
try:
idcontrato = C1.Contracte(obj.IdContrato)
except AttributeError:
# Step 04 Acceptacio has the classic structure
idcontrato = C1.Contracte(obj.Contrato)
return idcontrato
@property
def direccio_correspondecia(self):
direccio = False
try:
direccio = DireccioAmbIndicador(self.obj.BajaEnergia.DireccionCorrespondencia)
except AttributeError:
pass
return direccio
@property
def header(self):
return self._header
@property
def punts_mesura(self):
"""Retorna una llista de punts de mesura"""
data = []
obj = getattr(self.obj, self._header)
for i in obj.PuntosDeMedida.PuntoDeMedida:
data.append(C1.PuntMesura(i))
return data
@property
def comentaris(self):
"""Retorna una llista de comentaris"""
data = []
obj = getattr(self.obj, self._header)
if (hasattr(obj, 'Comentarios') and
hasattr(obj.Comentarios, 'Comentario')):
for i in obj.Comentarios.Comentario:
data.append(C2.Comentari(i))
return data
class DireccioAmbIndicador(object):
"""Classe que implementa la direccio"""
def __init__(self, data):
self.direccio = data
@property
def indicador(self):
"""Retorna F/S/O"""
value = ''
try:
value = self.direccio.Indicador.text
except AttributeError:
pass
return value
@property
def direccio_correspondecia(self):
value = False
try:
value = C1.Direccio(self.direccio.Direccion)
except AttributeError:
pass
return value
``` |
[
{
"content": "```python\nimport os\nfrom boxbranding import getImageVersion\n\ndef enumFeeds():\n\tfor fn in os.listdir('/etc/opkg'):\n\t\tif fn.endswith('-feed.conf'):\n\t\t\tfile = open(os.path.join('/etc/opkg', fn))\n\t\t\tfeedfile = file.readlines()\n\t\t\tfile.close()\n\t\t\ttry:\n\t\t\t\tfor feed in feedf... | [
{
"content": "<|memory_start|>```python\nimport os\nfrom boxbranding import getImageVersion\n\ndef enumFeeds():\n\tfor fn in os.listdir('/etc/opkg'):\n\t\tif fn.endswith('-feed.conf'):\n\t\t\tfile = open(os.path.join('/etc/opkg', fn))\n\t\t\tfeedfile = file.readlines()\n\t\t\tfile.close()\n\t\t\ttry:\n\t\t\t\tf... | ```python
import os
from boxbranding import getImageVersion
def enumFeeds():
for fn in os.listdir('/etc/opkg'):
if fn.endswith('-feed.conf'):
file = open(os.path.join('/etc/opkg', fn))
feedfile = file.readlines()
file.close()
try:
for feed in feedfile:
yield feed.split()[1]
except IndexError:
pass
except IOError:
pass
def enumPlugins(filter_start=''):
list_dir = listsDirPath()
for feed in enumFeeds():
package = None
try:
for line in open(os.path.join(list_dir, feed), 'r'):
if line.startswith('Package:'):
package = line.split(":",1)[1].strip()
version = ''
description = ''
if package.startswith(filter_start) and not package.endswith('-dev') and not package.endswith('-staticdev') and not package.endswith('-dbg') and not package.endswith('-doc') and not package.endswith('-src'):
continue
package = None
if package is None:
continue
if line.startswith('Version:'):
version = line.split(":",1)[1].strip()
elif line.startswith('Description:'):
description = line.split(":",1)[1].strip()
elif description and line.startswith(' '):
description += line[:-1]
elif len(line) <= 1:
d = description.split(' ',3)
if len(d) > 3:
# Get rid of annoying "version" and package repeating strings
if d[1] == 'version':
description = d[3]
if description.startswith('gitAUTOINC'):
description = description.split(' ',1)[1]
yield package, version, description.strip()
package = None
except IOError:
pass
def listsDirPath():
try:
for line in open('/etc/opkg/opkg.conf', "r"):
if line.startswith('option'):
line = line.split(' ', 2)
if len(line) > 2 and line[1] == ('lists_dir'):
return line[2].strip()
elif line.startswith('lists_dir'):
return line.replace('\n','').split(' ')[2]
except Exception, ex:
print "[opkg]", ex
return '/var/lib/opkg/lists'
if __name__ == '__main__':
for p in enumPlugins('enigma'):
print p
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n__author__ = 'ivo'\n\nimport logging\nimport argparse\n\nfrom blackswan import config\n\n_log = logging.getLogger(__name__)\n\nclass ModuleBase():\n argparser = None\n\n def __init__(self):\n self.co... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n__author__ = 'ivo'\n\nimport logging\nimport argparse\n\nfrom blackswan import config\n\n_log = logging.getLogger(__name__)\n\nclass ModuleBase():\n argparser = None\n\n def __init__(self):\... | ```python
__author__ = 'ivo'
import logging
import argparse
from blackswan import config
_log = logging.getLogger(__name__)
class ModuleBase():
argparser = None
def __init__(self):
self.config = {}
@classmethod
def register(cls):
cls.argparser = argparse.ArgumentParser(description=cls.description, prog=cls.modname, add_help=False)
cls.argparser.add_argument("-b", "--db", default=config.def_db, help="The blackswan db file. Default: {}".format(config.def_db))
cls.add_args()
config.modules[cls.modname] = cls
_log.debug("Module %s registered", cls.modname)
return
@classmethod
def add_args(cls):
raise NotImplementedError
def work(self):
raise NotImplementedError
def __repr__(self):
return "<{}({})>".format(self.modname, repr(self.config))
def parse_args(self, modargs):
args = self.argparser.parse_args(args=modargs)
self.config.update(**vars(args))
def run(self):
_log.info("Module %s started", self.modname)
self.work()
_log.info("Module %s finished", self.modname)
def configure(self, **kwargs):
self.config.update(kwargs)
_log.info("Module %s configured: \n%s", self.modname, repr(self.config))
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# Copyright 2016 IBM All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# Copyright 2016 IBM All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the Licen... | ```python
# Copyright 2016 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
* Thresholds for identifying meaningful tones returned by the Watson Tone
Analyzer. Current values are
* based on the recommendations made by the Watson Tone Analyzer at
* https://www.ibm.com/watson/developercloud/doc/tone-analyzer/understanding
-tone.shtml
* These thresholds can be adjusted to client/domain requirements.
"""
import json
PRIMARY_EMOTION_SCORE_THRESHOLD = 0.5
WRITING_HIGH_SCORE_THRESHOLD = 0.75
WRITING_NO_SCORE_THRESHOLD = 0.0
SOCIAL_HIGH_SCORE_THRESHOLD = 0.75
SOCIAL_LOW_SCORE_THRESHOLD = 0.25
# Labels for the tone categories returned by the Watson Tone Analyzer
EMOTION_TONE_LABEL = 'emotion_tone'
WRITING_TONE_LABEL = 'writing_tone'
SOCIAL_TONE_LABEL = 'social_tone'
"""
updateUserTone processes the Tone Analyzer payload to pull out the emotion,
writing and social tones, and identify the meaningful tones (i.e.,
those tones that meet the specified thresholds).
The conversationPayload json object is updated to include these tones.
@param conversationPayload json object returned by the Watson Conversation
Service
@param toneAnalyzerPayload json object returned by the Watson Tone Analyzer
Service
@returns conversationPayload where the user object has been updated with tone
information from the toneAnalyzerPayload
"""
def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory):
emotionTone = None
writingTone = None
socialTone = None
# if there is no context in a
if 'context' not in conversationPayload:
conversationPayload['context'] = {};
if 'user' not in conversationPayload['context']:
conversationPayload['context'] = initUser()
# For convenience sake, define a variable for the user object
user = conversationPayload['context']['user'];
# Extract the tones - emotion, writing and social
if toneAnalyzerPayload and toneAnalyzerPayload['document_tone']:
for toneCategory in toneAnalyzerPayload['document_tone'][
'tone_categories']:
if toneCategory['category_id'] == EMOTION_TONE_LABEL:
emotionTone = toneCategory
if toneCategory['category_id'] == WRITING_TONE_LABEL:
writingTone = toneCategory
if toneCategory['category_id'] == SOCIAL_TONE_LABEL:
socialTone = toneCategory
updateEmotionTone(user, emotionTone, maintainHistory)
updateWritingTone(user, writingTone, maintainHistory)
updateSocialTone(user, socialTone, maintainHistory)
conversationPayload['context']['user'] = user
return conversationPayload;
'''
initToneContext initializes a user object containing tone data (from the
Watson Tone Analyzer)
@returns user json object with the emotion, writing and social tones. The
current
tone identifies the tone for a specific conversation turn, and the history
provides the conversation for
all tones up to the current tone for a conversation instance with a user.
'''
def initUser():
return {
'user': {
'tone': {
'emotion': {
'current': None
},
'writing': {
'current': None
},
'social': {
'current': None
}
}
}
}
'''
updateEmotionTone updates the user emotion tone with the primary emotion -
the emotion tone that has
a score greater than or equal to the EMOTION_SCORE_THRESHOLD; otherwise
primary emotion will be 'neutral'
@param user a json object representing user information (tone) to be used in
conversing with the Conversation Service
@param emotionTone a json object containing the emotion tones in the payload
returned by the Tone Analyzer
'''
def updateEmotionTone(user, emotionTone, maintainHistory):
maxScore = 0.0
primaryEmotion = None
primaryEmotionScore = None
for tone in emotionTone['tones']:
if tone['score'] > maxScore:
maxScore = tone['score']
primaryEmotion = tone['tone_name'].lower()
primaryEmotionScore = tone['score']
if maxScore <= PRIMARY_EMOTION_SCORE_THRESHOLD:
primaryEmotion = 'neutral'
primaryEmotionScore = None
# update user emotion tone
user['tone']['emotion']['current'] = primaryEmotion;
if maintainHistory:
if 'history' not in user['tone']['emotion']:
user['tone']['emotion']['history'] = []
user['tone']['emotion']['history'].append({
'tone_name': primaryEmotion,
'score': primaryEmotionScore
})
'''
updateWritingTone updates the user with the writing tones interpreted based
on the specified thresholds
@param: user a json object representing user information (tone) to be used
in conversing with the Conversation Service
@param: writingTone a json object containing the writing tones in the
payload returned by the Tone Analyzer
'''
def updateWritingTone(user, writingTone, maintainHistory):
currentWriting = [];
currentWritingObject = [];
# Process each writing tone and determine if it is high or low
for tone in writingTone['tones']:
if tone['score'] >= WRITING_HIGH_SCORE_THRESHOLD:
currentWriting.append(tone['tone_name'].lower() + '_high')
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely high'
})
elif tone['score'] <= WRITING_NO_SCORE_THRESHOLD:
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'no evidence'
})
else:
currentWritingObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely medium'
})
# update user writing tone
user['tone']['writing']['current'] = currentWriting
if maintainHistory:
if 'history' not in user['tone']['writing']:
user['tone']['writing']['history'] = []
user['tone']['writing']['history'].append(currentWritingObject) # TODO -
# is this the correct location??? AW
"""
updateSocialTone updates the user with the social tones interpreted based on
the specified thresholds
@param user a json object representing user information (tone) to be used in
conversing with the Conversation Service
@param socialTone a json object containing the social tones in the payload
returned by the Tone Analyzer
"""
def updateSocialTone(user, socialTone, maintainHistory):
currentSocial = []
currentSocialObject = []
# Process each social tone and determine if it is high or low
for tone in socialTone['tones']:
if tone['score'] >= SOCIAL_HIGH_SCORE_THRESHOLD:
currentSocial.append(tone['tone_name'].lower() + '_high')
currentSocialObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely high'
})
elif tone['score'] <= SOCIAL_LOW_SCORE_THRESHOLD:
currentSocial.append(tone['tone_name'].lower() + '_low');
currentSocialObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely low'
})
else:
currentSocialObject.append({
'tone_name': tone['tone_name'].lower(),
'score': tone['score'],
'interpretation': 'likely medium'
})
# update user social tone
user['tone']['social']['current'] = currentSocial
if maintainHistory:
if not user['tone']['social']['current']:
user['tone']['social']['current'] = [];
user['tone']['social']['current'].append(currentSocialObject);
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# - Redistributions of so... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# - Redis... | ```python
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '0.2.7'
import os
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nfrom itertools import chain\nfrom glob import glob\n\nimport cookielaw\n\n\nCLASSIFIERS = [\n 'Development Status :: 5 - Production/Stable',\n 'En... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nfrom itertools import chain\nfrom glob import glob\n\nimport cookielaw\n\n\nCLASSIFIERS = [\n 'Development Status :: 5 - Production/S... | ```python
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from itertools import chain
from glob import glob
import cookielaw
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Session',
]
package_data_globs = (
'cookielaw/templates/cookielaw/*.html',
'cookielaw/static/cookielaw/*/*',
'cookielaw/locale/*/*/*'
)
package_data = []
for f in chain(*map(glob, package_data_globs)):
package_data.append(f.split('/', 1)[1])
setup(
author='Piotr Kilczuk',
author_email='piotr@tymaszweb.pl',
name='django-cookie-law',
version='.'.join(str(v) for v in cookielaw.VERSION),
description='Helps your Django project comply with EU cookie law regulations',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://github.com/TyMaszWeb/django-cookie-law',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.2',
'django-classy-tags>=0.3.0',
],
tests_require=[
'selenium==3.0.1',
],
packages=find_packages(),
package_data={'cookielaw': package_data},
include_package_data=False,
zip_safe=False,
test_suite='runtests.main',
)
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n\"\"\"Functions for fetching basic statistics about observers and observations.\"\"\"\n\nfrom ebird.api.utils import call\nfrom ebird.api.validation import (\n clean_area,\n clean_date,\n clean_max_observers,\n clean... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n\"\"\"Functions for fetching basic statistics about observers and observations.\"\"\"\n\nfrom ebird.api.utils import call\nfrom ebird.api.validation import (\n clean_area,\n clean_date,\n clean_max_obser... | ```python
"""Functions for fetching basic statistics about observers and observations."""
from ebird.api.utils import call
from ebird.api.validation import (
clean_area,
clean_date,
clean_max_observers,
clean_rank,
clean_region,
)
TOP_100_URL = "https://ebird.org/ws2.0/product/top100/%s/%s"
TOTALS_URL = "https://ebird.org/ws2.0/product/stats/%s/%s"
def get_top_100(token, region, date, rank="spp", max_results=100):
"""
Get the observers who have seen the most species or submitted the
greatest number of checklists on a given date.
The maps to the end point in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#2d8d3f94-c4b0-42bd-9c8e-71edfa6347ba
:param token: the token needed to access the API.
:param region: the code for the region, eg. US-NV.
:param date: the date, since Jan 1st 1800.
:param rank: order results by species seen (spp) or checklists submitted (cl).
:param max_results: the maximum number of entries to return from
1 to 100. The default value is 100.
:return: the list of observers.
:raises ValueError: if any of the arguments fail the validation checks.
:raises URLError if there is an error with the connection to the
eBird site.
:raises HTTPError if the eBird API returns an error.
"""
url = TOP_100_URL % (clean_region(region), date.strftime("%Y/%m/%d"))
params = {
"maxObservers": clean_max_observers(max_results),
"rankedBy": clean_rank(rank),
}
headers = {
"X-eBirdApiToken": token,
}
return call(url, params, headers)
def get_totals(token, area, date):
"""
Get the number of contributors, checklists submitted and species seen
on a given date.
The maps to the end point in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#4416a7cc-623b-4340-ab01-80c599ede73e
:param token: the token needed to access the API.
:param area: the code for a country subnational1 , subnational2 region
or location
:param date: the date, since Jan 1st 1800.
:return: the totals for the given date
:raises ValueError: if any of the arguments fail the validation checks.
:raises URLError if there is an error with the connection to the
eBird site.
:raises HTTPError if the eBird API returns an error.
"""
url = TOTALS_URL % (clean_area(area), clean_date(date))
headers = {
"X-eBirdApiToken": token,
}
return call(url, {}, headers)
``` |
[
{
"content": "```python\nfrom conans import ConanFile, tools\n\nimport os\n\n\nclass MsgpackConan(ConanFile):\n name = \"msgpack\"\n version = \"2.1.5\"\n license = \"Boost Software License 1.0\"\n url = \"https://github.com/sztomi/conan-msgpack\"\n description = \"The official C++ library for Me... | [
{
"content": "<|memory_start|>```python\nfrom conans import ConanFile, tools\n\nimport os\n\n\nclass MsgpackConan(ConanFile):\n name = \"msgpack\"\n version = \"2.1.5\"\n license = \"Boost Software License 1.0\"\n url = \"https://github.com/sztomi/conan-msgpack\"\n description = \"The official C+... | ```python
from conans import ConanFile, tools
import os
class MsgpackConan(ConanFile):
name = "msgpack"
version = "2.1.5"
license = "Boost Software License 1.0"
url = "https://github.com/sztomi/conan-msgpack"
description = "The official C++ library for MessagePack"
@property
def extracted_dir(self):
return "msgpack-{0}".format(self.version)
def source(self):
archive = "msgpack.tar.gz"
tools.download("https://github.com/msgpack/msgpack-c/releases/download"
"/cpp-{v}/msgpack-{v}.tar.gz".format(v=self.version),
archive)
tools.untargz(archive)
def build(self):
pass # silence warning
def package(self):
self.copy("*.h", dst="include",
src=os.path.join(self.extracted_dir, "include"))
self.copy("*.hpp", dst="include",
src=os.path.join(self.extracted_dir, "include"))
def package_info(self):
self.cpp_info.includedirs.append(os.path.join(self.package_folder, "include"))
``` |
[
{
"content": "Here is a code snippet:\n```python\n#!/usr/bin/env python\r\n# simple test cases for two phase commit extensions to psycopg2\r\n#\r\n# Copyright (C) 2008 Mariano Reingart <mariano@nsis.com.ar>\r\n#\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\r\n# simple test cases for two phase commit extensions to psycopg2\r\n#\r\n# Copyright (C) 2008 Mariano Reingart <mariano@nsis.com.ar>\r\n#\r\n# This program is free software; you can redistribute it and/or modify\r\n# it un... | ```python
#!/usr/bin/env python
# simple test cases for two phase commit extensions to psycopg2
#
# Copyright (C) 2008 Mariano Reingart <mariano@nsis.com.ar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
import unittest
import psycopg2
import tpc
from psycopg2.extensions import ISOLATION_LEVEL_SERIALIZABLE, \
ISOLATION_LEVEL_AUTOCOMMIT,ISOLATION_LEVEL_READ_COMMITTED
from psycopg2 import ProgrammingError
# for compatibility with psycopg2 tests
class tests:
dsn = "dbname='test' user='postgres' password='psql' port=5432"
gid = 'test-gid-1234'
class TwoPhaseTests(unittest.TestCase):
def setUp(self):
self.connect()
self.curs=self.conn.cursor()
self.clean()
# set psycopg2 default isolation level
self.conn.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
##self.curs.execute("CREATE TABLE table1 ( data TEXT )")
def tearDown(self):
self.clean()
def connect(self):
self.conn = psycopg2.connect(tests.dsn,
connection_factory=tpc.TwoPhaseCommitConnection)
self.xid = self.conn.xid(0,gid,'')
def clean(self):
self.assertEqual(self.conn.isolation_level, ISOLATION_LEVEL_READ_COMMITTED)
# clean table 1
self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
# rollback any prepared transaction
err=False
for xid in self.conn.tpc_recover():
print "rolling back xid[1]"
self.curs.execute("ROLLBACK PREPARED %s",(xid[1],))
err=True
if err:
raise RuntimeError("Unhandled prepared TPC transaction")
self.curs.execute("DELETE FROM table1")
def insert(self):
self.curs.execute("INSERT INTO table1 (data) VALUES ('1234')")
def rowcount(self):
self.curs.execute("SELECT * FROM table1 ")
return self.curs.rowcount
def test_one_phase_commit(self):
"Test to commit a one phase transaction"
self.conn.tpc_begin(self.xid)
self.insert()
self.conn.tpc_commit()
self.assertEqual(self.rowcount(), 1)
def test_one_phase_rollback(self):
"Test to rollback a one phase transaction"
self.conn.tpc_begin(self.xid)
self.insert()
self.conn.tpc_rollback()
self.assertEqual(self.rowcount(), 0)
def test_two_phase_commit(self):
"Test to commit a complete two phase transaction"
self.conn.tpc_begin(self.xid)
self.insert()
self.conn.tpc_prepare()
self.conn.tpc_commit()
self.assertEqual(self.rowcount(), 1)
def test_two_phase_rollback(self):
"Test to rollback a complete two phase transaction"
self.conn.tpc_begin(self.xid)
self.conn.tpc_prepare()
self.conn.tpc_rollback()
self.assertEqual(self.rowcount(), 0)
def test_recovered_commit(self):
"Test to commit a recovered transaction"
self.conn.tpc_begin(self.xid)
self.insert()
self.conn.tpc_prepare()
self.connect() # reconnect
self.assertEqual(self.conn.tpc_recover(), [self.xid])
self.conn.tpc_commit(self.xid)
self.assertEqual(self.rowcount(), 1)
def test_recovered_rollback(self):
"Test to rollback a recovered transaction"
self.conn.tpc_begin(self.xid)
self.insert()
self.conn.tpc_prepare()
self.connect() # reconnect
self.assertEqual(self.conn.tpc_recover(), [self.xid])
self.conn.tpc_rollback(self.xid)
self.assertEqual(self.rowcount(), 0)
def test_single_phase_commit(self):
"Test to commit a single phase (normal) transaction"
self.insert()
self.conn.commit()
self.assertEqual(self.rowcount(), 1)
def test_single_phase_rollback(self):
"Test to rollback a single phase (normal) transaction"
self.insert()
self.conn.rollback()
self.assertEqual(self.rowcount(), 0)
def test_dbapi20_tpc(self):
"Test basic dbapi 2.0 conformance"
self.assertEqual(len(self.conn.tpc_recover()),0)
# tpc_commit outside tpc transaction
self.assertRaises(ProgrammingError, self.conn.tpc_commit)
# commit or rollback inside tpc transaction
self.conn.tpc_begin(self.xid)
self.assertRaises(ProgrammingError, self.conn.commit)
self.assertRaises(ProgrammingError, self.conn.rollback)
self.conn.tpc_rollback()
# transaction not prepared
self.assertRaises(ProgrammingError, self.conn.tpc_commit,self.xid)
self.assertRaises(ProgrammingError, self.conn.tpc_rollback,self.xid)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TwoPhaseTests)
#suite.debug()
unittest.TextTestRunner(verbosity=2).run(suite)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nimport Queue\nimport threading\n\nMAX_WAIT_QUEUE_TIMEOUT = 2\n\nclass ArticleInserter(threading.Thread):\n '''Thread which inserts articles into the database\n '''\n\n def __init__(self, queue, build_view):\n threading.... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nimport Queue\nimport threading\n\nMAX_WAIT_QUEUE_TIMEOUT = 2\n\nclass ArticleInserter(threading.Thread):\n '''Thread which inserts articles into the database\n '''\n\n def __init__(self, queue, build_view):\n ... | ```python
import Queue
import threading
MAX_WAIT_QUEUE_TIMEOUT = 2
class ArticleInserter(threading.Thread):
'''Thread which inserts articles into the database
'''
def __init__(self, queue, build_view):
threading.Thread.__init__(self)
'''constructor
@param queue the queue to which the articles and redirects are read
@param build_view the database build view to use to connect to the database
'''
self._queue = queue
self._build_view = build_view
self._end = False
def run(self):
while not self._end:
try:
# fetch item from queue
item = self._queue.get(True, MAX_WAIT_QUEUE_TIMEOUT)
# insert as article or redirect respectively
if item['type'] == 'article':
self._build_view.insert_article(item['id'], item['title'])
else:
self._build_view.insert_redirect(item['title'], item['target'])
# commit and mark as done
self._build_view.commit()
self._build_view.reset_cache()
self._queue.task_done()
except Queue.Empty:
pass
def end(self):
self._end = True
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\nimport abc\nimport sys\n\nfrom uqbar.cli.CLI import CLI\n\n\nclass CLIAggregator(CLI):\n \"\"\"\n Aggregates CLI scripts.\n\n ::\n\n >>> import uqbar.cli\n >>> class ExampleAggregator(uqbar.cli.CLIAggregat... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\nimport abc\nimport sys\n\nfrom uqbar.cli.CLI import CLI\n\n\nclass CLIAggregator(CLI):\n \"\"\"\n Aggregates CLI scripts.\n\n ::\n\n >>> import uqbar.cli\n >>> class ExampleAggregator(uqbar... | ```python
import abc
import sys
from uqbar.cli.CLI import CLI
class CLIAggregator(CLI):
"""
Aggregates CLI scripts.
::
>>> import uqbar.cli
>>> class ExampleAggregator(uqbar.cli.CLIAggregator):
... @property
... def cli_classes(self):
... return []
...
>>> script = ExampleAggregator()
>>> try:
... script('--help')
... except SystemExit:
... pass
...
usage: example-aggregator [-h] [--version] {help,list} ...
<BLANKLINE>
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
<BLANKLINE>
subcommands:
{help,list}
help print subcommand help
list list subcommands
"""
### CLASS VARIABLES ###
__slots__ = ()
### SPECIAL METHODS ###
def __call__(self, arguments=None):
if arguments is None:
arguments = self.argument_parser.parse_known_args()
else:
if isinstance(arguments, str):
arguments = arguments.split()
elif not isinstance(arguments, (list, tuple)):
message = "must be str, list, tuple or none: {!r}."
message = message.format(arguments)
raise ValueError(message)
arguments = self.argument_parser.parse_known_args(arguments)
self._process_args(arguments)
sys.exit(0)
### PRIVATE METHODS ###
def _handle_help_command(self, unknown_args):
aliases = self.cli_aliases
program_names = self.cli_program_names
cli_class = None
if (
len(unknown_args) == 2
and unknown_args[0] in aliases
and unknown_args[1] in aliases[unknown_args[0]]
):
cli_class = aliases[unknown_args[0]][unknown_args[1]]
elif (
len(unknown_args) == 1
and unknown_args[0] in aliases
and not isinstance(aliases[unknown_args[0]], dict)
):
cli_class = aliases[unknown_args[0]]
elif len(unknown_args) == 1 and unknown_args[0] in program_names:
cli_class = program_names[unknown_args[0]]
elif not len(unknown_args):
self(["--help"])
return
if cli_class:
instance = cli_class()
print(instance.formatted_help)
else:
print("Cannot resolve {} to subcommand.".format(unknown_args))
def _handle_list_command(self):
by_scripting_group = {}
for cli_class in self.cli_classes:
instance = cli_class()
scripting_group = getattr(instance, "scripting_group", None)
group = by_scripting_group.setdefault(scripting_group, [])
group.append(instance)
print()
if None in by_scripting_group:
group = by_scripting_group.pop(None)
for instance in sorted(group, key=lambda x: x.alias):
message = "{}: {}".format(instance.alias, instance.short_description)
print(message)
print()
for group, instances in sorted(by_scripting_group.items()):
print("[{}]".format(group))
for instance in sorted(instances, key=lambda x: x.alias):
message = " {}: {}".format(
instance.alias, instance.short_description
)
print(message)
print()
def _process_args(self, arguments):
arguments, unknown_args = arguments
if arguments.subparser_name == "help":
self._handle_help_command(unknown_args)
elif arguments.subparser_name == "list":
self._handle_list_command()
else:
if hasattr(arguments, "subsubparser_name"):
cli_class = self.cli_aliases[arguments.subparser_name][
arguments.subsubparser_name
]
elif getattr(arguments, "subparser_name"):
cli_class = self.cli_aliases[arguments.subparser_name]
elif getattr(arguments, "subparser_name") is None:
self(["--help"])
return
instance = cli_class()
instance(unknown_args)
def _setup_argument_parser(self, parser):
subparsers = parser.add_subparsers(dest="subparser_name", title="subcommands")
subparsers.add_parser("help", add_help=False, help="print subcommand help")
subparsers.add_parser("list", add_help=False, help="list subcommands")
alias_map = self.cli_aliases
for key in sorted(alias_map):
if not isinstance(alias_map[key], dict):
cli_class = alias_map[key]
instance = cli_class()
subparsers.add_parser(
key, add_help=False, help=instance.short_description
)
else:
subkeys = sorted(alias_map[key])
group_subparser = subparsers.add_parser(
key, help="{{{}}} subcommand(s)".format(", ".join(subkeys))
)
group_subparsers = group_subparser.add_subparsers(
dest="subsubparser_name", title="{} subcommands".format(key),
)
for subkey in subkeys:
cli_class = alias_map[key][subkey]
instance = cli_class()
group_subparsers.add_parser(
subkey, add_help=False, help=instance.short_description
)
### PUBLIC PROPERTIES ###
@property
def cli_aliases(self):
"""
Developer script aliases.
"""
scripting_groups = []
aliases = {}
for cli_class in self.cli_classes:
instance = cli_class()
if getattr(instance, "alias", None):
scripting_group = getattr(instance, "scripting_group", None)
if scripting_group:
scripting_groups.append(scripting_group)
entry = (scripting_group, instance.alias)
if (scripting_group,) in aliases:
message = "alias conflict between scripting group"
message += " {!r} and {}"
message = message.format(
scripting_group, aliases[(scripting_group,)].__name__
)
raise Exception(message)
if entry in aliases:
message = "alias conflict between {} and {}"
message = message.format(
aliases[entry].__name__, cli_class.__name__
)
raise Exception(message)
aliases[entry] = cli_class
else:
entry = (instance.alias,)
if entry in scripting_groups:
message = "alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.alias)
raise Exception(message)
if entry in aliases:
message = "alias conflict be {} and {}"
message = message.format(cli_class.__name__, aliases[entry])
raise Exception(message)
aliases[(instance.alias,)] = cli_class
else:
if instance.program_name in scripting_groups:
message = "Alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.program_name)
raise Exception(message)
aliases[(instance.program_name,)] = cli_class
alias_map = {}
for key, value in aliases.items():
if len(key) == 1:
alias_map[key[0]] = value
else:
if key[0] not in alias_map:
alias_map[key[0]] = {}
alias_map[key[0]][key[1]] = value
return alias_map
@abc.abstractproperty
def cli_classes(self):
"""
Developer scripts classes.
"""
return []
@property
def cli_program_names(self):
"""
Developer script program names.
"""
program_names = {}
for cli_class in self.cli_classes:
instance = cli_class()
program_names[instance.program_name] = cli_class
return program_names
``` |
[
{
"content": "```python\nfrom intern.remote.boss import BossRemote\nfrom intern.resource.boss.resource import *\nimport numpy as np\nfrom requests import HTTPError\n\nrmt = BossRemote('neurodata.cfg')\n\nxmax = 8\nymax = 4\nzmax = 5\ntmax = 10\n\nCOLL_NAME = 'gray'\nEXP_NAME = 'timeseries_test'\nCHAN_NAME = 'Ch... | [
{
"content": "<|memory_start|>```python\nfrom intern.remote.boss import BossRemote\nfrom intern.resource.boss.resource import *\nimport numpy as np\nfrom requests import HTTPError\n\nrmt = BossRemote('neurodata.cfg')\n\nxmax = 8\nymax = 4\nzmax = 5\ntmax = 10\n\nCOLL_NAME = 'gray'\nEXP_NAME = 'timeseries_test'\... | ```python
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import numpy as np
from requests import HTTPError
rmt = BossRemote('neurodata.cfg')
xmax = 8
ymax = 4
zmax = 5
tmax = 10
COLL_NAME = 'gray'
EXP_NAME = 'timeseries_test'
CHAN_NAME = 'Ch1'
COORD_FRAME = COLL_NAME + '_' + EXP_NAME
coord = CoordinateFrameResource(
COORD_FRAME, '', 0, xmax, 0, ymax, 0, zmax)
try:
coord_actual = rmt.get_project(coord)
except HTTPError:
coord_actual = rmt.create_project(coord)
# Create or get experiment
chan_setup = ExperimentResource(
EXP_NAME, COLL_NAME, coord_frame=COORD_FRAME,
num_time_samples=tmax, time_step=1)
try:
chan_actual = rmt.get_project(chan_setup)
except HTTPError:
chan_actual = rmt.create_project(chan_setup)
# Create or get a channel to write to
chan_setup = ChannelResource(
CHAN_NAME, COLL_NAME, EXP_NAME, 'image', '', datatype='uint16')
try:
chan_actual = rmt.get_project(chan_setup)
except HTTPError:
chan_actual = rmt.create_project(chan_setup)
x_rng = [0, xmax]
y_rng = [0, ymax]
z_rng = [0, zmax]
t_rng = [0, tmax]
print('Data model setup.')
data = np.random.randint(1, 3000, (tmax, zmax, ymax, xmax))
data = data.astype(np.uint16)
# Upload the cutout to the channel.
rmt.create_cutout(chan_actual, 0, x_rng, y_rng, z_rng, data,
time_range=t_rng)
cutout_data = rmt.get_cutout(
chan_actual, 0, x_rng, y_rng, z_rng, time_range=t_rng)
np.testing.assert_array_equal(data, cutout_data)
print(np.shape(cutout_data))
# (10, 5, 4, 8)
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/python\n\n# Modify the solute geometry and charges in Gromacs .gro and .top files\n\n# Use with 5 arguments:\n# 1 (read): generic system file\n# 2 (read): .top file\n# 3 (read): .gro file\n# 4 (write): modified .top file\n# 5 (write): modified .gro fi... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/python\n\n# Modify the solute geometry and charges in Gromacs .gro and .top files\n\n# Use with 5 arguments:\n# 1 (read): generic system file\n# 2 (read): .top file\n# 3 (read): .gro file\n# 4 (write): modified .top file\n# 5 (write): ... | ```python
#!/usr/bin/python
# Modify the solute geometry and charges in Gromacs .gro and .top files
# Use with 5 arguments:
# 1 (read): generic system file
# 2 (read): .top file
# 3 (read): .gro file
# 4 (write): modified .top file
# 5 (write): modified .gro file
import sys
import re
import math
import copy
#=============================
# Get input arguments
try:
system_input = sys.argv[1]
except IndexError:
sys.exit("Missing input file")
try:
top_input = sys.argv[2]
except IndexError:
sys.exit("Missing input file")
try:
gro_input = sys.argv[3]
except IndexError:
sys.exit("Missing input file")
try:
top_output = sys.argv[4]
except IndexError:
sys.exit("Missing output file")
try:
gro_output = sys.argv[5]
except IndexError:
sys.exit("Missing output file")
#=============================
# Function to replace a word in a string
# (keeping the alignment if possible)
def rep_word ( words, num, new ):
l = len(words[num])
words[num] = new.rjust(l)
#=============================
# Function to displace a molecule, matching an atom with reference
def displace ( mol1, mol2, at ):
disp = {}
disp["x"] = mol1[at]["x"]-mol2[at]["x"]
disp["y"] = mol1[at]["y"]-mol2[at]["y"]
disp["z"] = mol1[at]["z"]-mol2[at]["z"]
old = copy.deepcopy(mol2)
for i in range(len(mol2)):
mol2[i]["x"] = old[i]["x"]+disp["x"]
mol2[i]["y"] = old[i]["y"]+disp["y"]
mol2[i]["z"] = old[i]["z"]+disp["z"]
return
#=============================
# Function to superpose molecules
# see: Acta Chrystallogr. Sec. A 61 (2005), 478
# J. Comput. Chem. 31 (2010), 1561
def superpose ( mol1, mol2 ):
center1 = { "x": 0.0, "y": 0.0, "z": 0.0 }
for i in range(len(mol1)):
center1["x"] += mol1[i]["x"]
center1["y"] += mol1[i]["y"]
center1["z"] += mol1[i]["z"]
center1["x"] = center1["x"]/len(mol1)
center1["y"] = center1["y"]/len(mol1)
center1["z"] = center1["z"]/len(mol1)
for i in range(len(mol1)):
mol1[i]["x"] -= center1["x"]
mol1[i]["y"] -= center1["y"]
mol1[i]["z"] -= center1["z"]
G1 = 0
for i in range(len(mol1)):
G1 += mol1[i]["x"]**2+mol1[i]["y"]**2+mol1[i]["z"]**2
# only use first atoms of mol2 to superpose
center2 = { "x": 0.0, "y": 0.0, "z": 0.0 }
for i in range(len(mol1)):
center2["x"] += mol2[i]["x"]
center2["y"] += mol2[i]["y"]
center2["z"] += mol2[i]["z"]
center2["x"] = center2["x"]/len(mol1)
center2["y"] = center2["y"]/len(mol1)
center2["z"] = center2["z"]/len(mol1)
# but move the whole mol2
for i in range(len(mol2)):
mol2[i]["x"] -= center2["x"]
mol2[i]["y"] -= center2["y"]
mol2[i]["z"] -= center2["z"]
G2 = 0
for i in range(len(mol2)):
G2 += mol2[i]["x"]**2+mol2[i]["y"]**2+mol2[i]["z"]**2
M = {}
for i in ["x", "y", "z"]:
for j in ["x", "y", "z"]:
M[i+j] = 0
for k in range(len(mol1)):
M[i+j] += mol1[k][i] * mol2[k][j]
K = []
K.append( [ M["xx"]+M["yy"]+M["zz"], M["yz"]-M["zy"], M["zx"]-M["xz"], M["xy"]-M["yx"] ] )
K.append( [ M["yz"]-M["zy"], M["xx"]-M["yy"]-M["zz"], M["xy"]+M["yx"], M["xz"]+M["zx"] ] )
K.append( [ M["zx"]-M["xz"], M["xy"]+M["yx"], M["yy"]-M["xx"]-M["zz"], M["yz"]+M["zy"] ] )
K.append( [ M["xy"]-M["yx"], M["xz"]+M["zx"], M["yz"]+M["zy"], M["zz"]-M["xx"]-M["yy"] ] )
coef = []
D = (M["xy"]**2+M["xz"]**2-M["yx"]**2-M["zx"]**2)**2
E = (-M["xx"]**2+M["yy"]**2+M["zz"]**2+M["yz"]**2+M["zy"]**2-2*(M["yy"]*M["zz"]-M["yz"]*M["zy"]))*\
(-M["xx"]**2+M["yy"]**2+M["zz"]**2+M["yz"]**2+M["zy"]**2+2*(M["yy"]*M["zz"]-M["yz"]*M["zy"]))
F = (-(M["xz"]+M["zx"])*(M["yz"]-M["zy"])+(M["xy"]-M["yx"])*(M["xx"]-M["yy"]-M["zz"]))*\
(-(M["xz"]-M["zx"])*(M["yz"]+M["zy"])+(M["xy"]-M["yx"])*(M["xx"]-M["yy"]+M["zz"]))
G = (-(M["xz"]+M["zx"])*(M["yz"]+M["zy"])-(M["xy"]+M["yx"])*(M["xx"]+M["yy"]-M["zz"]))*\
(-(M["xz"]-M["zx"])*(M["yz"]-M["zy"])-(M["xy"]+M["yx"])*(M["xx"]+M["yy"]+M["zz"]))
H = ( (M["xy"]+M["yx"])*(M["yz"]+M["zy"])+(M["xz"]+M["zx"])*(M["xx"]-M["yy"]+M["zz"]))*\
(-(M["xy"]-M["yx"])*(M["yz"]-M["zy"])+(M["xz"]+M["zx"])*(M["xx"]+M["yy"]+M["zz"]))
I = ( (M["xy"]+M["yx"])*(M["yz"]-M["zy"])+(M["xz"]-M["zx"])*(M["xx"]-M["yy"]-M["zz"]))*\
(-(M["xy"]-M["yx"])*(M["yz"]+M["zy"])+(M["xz"]-M["zx"])*(M["xx"]+M["yy"]-M["zz"]))
coef.append( D+E+F+G+H+I )
coef.append( -8.0*( M["xx"]*M["yy"]*M["zz"]+M["xy"]*M["yz"]*M["zx"]+M["xz"]*M["yx"]*M["zy"]
-M["xx"]*M["yz"]*M["zy"]-M["xy"]*M["yx"]*M["zz"]-M["xz"]*M["yy"]*M["zx"] ) )
coef.append( -2.0*( M["xx"]**2+M["xy"]**2+M["xz"]**2+M["yx"]**2+M["yy"]**2+M["yz"]**2+M["zx"]**2+M["zy"]**2+M["zz"]**2 ) )
coef.append( 0.0 )
coef.append( 1.0 )
root_old = 0.0
root = 0.5*(G1+G2)
while (math.fabs(root-root_old) > 1.0e-6):
root_old = root
P = root**4+coef[2]*root**2+coef[1]*root+coef[0]
dP = 4*root**3+2*coef[2]*root+coef[1]
root -= P/dP
for i in range(len(K)):
K[i][i] -= root
for i in range(len(K)):
vect = []
for j in range(len(K)):
adj = copy.deepcopy(K)
del adj[i]
for k in range(len(adj)):
del adj[k][j]
det = adj[0][0]*adj[1][1]*adj[2][2]+adj[0][1]*adj[1][2]*adj[2][0]+adj[0][2]*adj[1][0]*adj[2][1] \
-adj[0][0]*adj[1][2]*adj[2][1]-adj[0][1]*adj[1][0]*adj[2][2]-adj[0][2]*adj[1][1]*adj[2][0]
det *= (-1)**(i+j)
vect.append(det)
norm = math.sqrt(vect[0]**2+vect[1]**2+vect[2]**2+vect[3]**2)
if (norm > 1.0e-6):
vect[0] = -vect[0]/norm
vect[1] = vect[1]/norm
vect[2] = vect[2]/norm
vect[3] = vect[3]/norm
break
M["xx"] =vect[0]**2+vect[1]**2-vect[2]**2-vect[3]**2
M["yy"] =vect[0]**2-vect[1]**2+vect[2]**2-vect[3]**2
M["zz"] =vect[0]**2-vect[1]**2-vect[2]**2+vect[3]**2
M["xy"] =2.0*(vect[1]*vect[2]-vect[0]*vect[3])
M["yx"] =2.0*(vect[1]*vect[2]+vect[0]*vect[3])
M["yz"] =2.0*(vect[2]*vect[3]-vect[0]*vect[1])
M["zy"] =2.0*(vect[2]*vect[3]+vect[0]*vect[1])
M["zx"] =2.0*(vect[1]*vect[3]-vect[0]*vect[2])
M["xz"] =2.0*(vect[1]*vect[3]+vect[0]*vect[2])
old = copy.deepcopy(mol2)
for i in range(len(mol2)):
mol2[i]["x"] = M["xx"]*old[i]["x"]+M["xy"]*old[i]["y"]+M["xz"]*old[i]["z"]+center1["x"]
mol2[i]["y"] = M["yx"]*old[i]["x"]+M["yy"]*old[i]["y"]+M["yz"]*old[i]["z"]+center1["y"]
mol2[i]["z"] = M["zx"]*old[i]["x"]+M["zy"]*old[i]["y"]+M["zz"]*old[i]["z"]+center1["z"]
return
#=============================
# Read the system file
# Skip the file until the solute is found is found
file_system = open(system_input, "r")
for line in file_system:
if (re.match("Solute",line)):
break
# Skip name and number of molecules
file_system.next()
file_system.next()
# Read coordinates and charges
mol = []
num = int(file_system.next())
for i in range(num):
tmp = dict(zip(("x","y","z","q"),file_system.next().split()[4:8]))
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
tmp["q"] = float(tmp["q"])
mol.append(tmp)
file_system.close()
#=============================
# Read the topology file
# and write the modified charges
file_top = open(top_input, "r")
file_top_out = open(top_output, "w")
# Skip to the definition of the first molecule's atoms
for line in file_top:
file_top_out.write(line)
if (re.match("\[\s*atoms\s*\]",line)):
break
# Replace the 7th word (the charge) with the new charge
for i in range(num):
line = file_top.next()
# Skip comment lines
while (re.match("\s*;", line)):
file_top_out.write(line)
line = file_top.next()
words = re.findall("(\s*\S+)",line)
rep_word(words, 6, " "+str(mol[i]["q"]))
file_top_out.write("".join(words)+"\n")
# Copy the rest of the file unchanged
for line in file_top:
file_top_out.write(line)
file_top.close()
file_top_out.close()
#=============================
# Read the coordinates file
# and write the modified coordinates
coord_prec = "11.6"
veloc_prec = "11.7"
format_str = "%%5d%%5s%%5s%%5d%%%sf%%%sf%%%sf%%%sf%%%sf%%%sf\n" % (coord_prec, coord_prec, coord_prec, veloc_prec, veloc_prec, veloc_prec)
file_gro = open(gro_input, "r")
file_gro_out = open(gro_output, "w")
# First read the solute coordinates
file_gro.next()
file_gro.next()
mol_gro = []
for i in range(num):
line = file_gro.next()
dots = [match.start() for match in re.finditer("\.", line[20:])]
width = dots[1]-dots[0]
tmp = dict(zip(("x","y","z"), [line[j:j+width] for j in range(20, len(line), width)]))
tmp["x"] = float(tmp["x"])*10
tmp["y"] = float(tmp["y"])*10
tmp["z"] = float(tmp["z"])*10
mol_gro.append(tmp)
# Modify the input coordinates to fit the original orientation
superpose ( mol_gro, mol )
# Back to the top of the file
file_gro.seek(0)
# Copy title and total number of atoms
file_gro_out.write(file_gro.next())
numtot = int(file_gro.next())
file_gro_out.write("%5d\n" % numtot)
# Read the atom coordinates and velocities
for i in range(numtot):
line = file_gro.next()
dots = [match.start() for match in re.finditer("\.", line[20:])]
width = dots[1]-dots[0]
tmp = dict(zip(("x","y","z","vx","vy","vz"), [line[j:j+width] for j in range(20, len(line), width)]))
tmp["resnum"] = int(line[0:5])
tmp["resname"] = line[5:10]
tmp["atname"] = line[10:15]
tmp["atnum"] = int(line[15:20])
# For the solute, write the new coordinates, in nm
if (i < num):
tmp["x"] = 0.1*mol[i]["x"]
tmp["y"] = 0.1*mol[i]["y"]
tmp["z"] = 0.1*mol[i]["z"]
else:
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
# Write the velocities if present
if "vx" in tmp:
tmp["vx"] = float(tmp["vx"])
tmp["vy"] = float(tmp["vy"])
tmp["vz"] = float(tmp["vz"])
else:
tmp["vx"] = 0.0
tmp["vy"] = 0.0
tmp["vz"] = 0.0
file_gro_out.write(format_str % \
(tmp["resnum"], tmp["resname"], tmp["atname"], tmp["atnum"], tmp["x"], tmp["y"], tmp["z"], tmp["vx"], tmp["vy"], tmp["vz"]))
# Copy the cell tensor
file_gro_out.write(file_gro.next())
file_gro.close()
file_gro_out.close()
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport cv2\nimport time\nfrom colormath.color_diff import delta_e_cie2000\nfrom colormath.color_objects import LabColor, sRGBColor\nfrom colormath.color_conversions import convert_color\n\ndef... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport cv2\nimport time\nfrom colormath.color_diff import delta_e_cie2000\nfrom colormath.color_objects import LabColor, sRGBColor\nfrom colormath.color_conversions import conv... | ```python
#!/usr/bin/env python
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import time
from colormath.color_diff import delta_e_cie2000
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
def nothing(*arg):
pass
def is_allowed_color(cur_int, avg_int, m_val):
b = abs(cur_int[0] - avg_int[0])
g = abs(cur_int[1] - avg_int[1])
r = abs(cur_int[2] - avg_int[2])
if (b > m_val or g > m_val or r > m_val):
return True
else:
return False
def make_gt_val(val, min_val):
if val < min_val:
val = min_val
return val
def make_odd(val):
if val % 2 == 0:
val += 1
return val
def get_avg_bgr(in_img, in_cntrs):
ttlA = 0
sum_roiA_mean = (0, 0, 0)
avg_roiA_mean = (0, 0, 0)
ttlA = len(in_cntrs)
for cnt2 in in_cntrs:
x2, y2, w2, h2 = cv2.boundingRect(cnt2)
roiA = in_img[y:y2+w2, x:x2+h2]
roiA_mean = cv2.mean(roiA)
int_roiA_mean = (int(roiA_mean[0]), int(roiA_mean[1]), int(roiA_mean[2]))
sum_roiA_mean = (int_roiA_mean[0] + sum_roiA_mean[0], int_roiA_mean[1] + sum_roiA_mean[1], int_roiA_mean[2] + sum_roiA_mean[2])
if ttlA > 0:
avg_roiA_mean = (sum_roiA_mean[0]/ttlA, sum_roiA_mean[1]/ttlA, sum_roiA_mean[2]/ttlA)
return avg_roiA_mean
window_nm = 'img_cntrls'
cam_res_w = 640
cam_res_h = 480
cam_fr_rt = 32
cv2.namedWindow(window_nm)
cv2.createTrackbar('blur_size', window_nm, 7 , 21, nothing)
cv2.createTrackbar('canny_min', window_nm, 156, 255, nothing)
cv2.createTrackbar('thresh_min', window_nm, 7 , 255, nothing)
cv2.createTrackbar('min_area', window_nm, 5 , 2000, nothing)
cv2.createTrackbar('max_area', window_nm, 40000 , 90000, nothing)
cv2.createTrackbar('max_delta', window_nm, 20 , 100, nothing)
cv2.createTrackbar('get_avg', window_nm, 0 , 1, nothing)
cv2.createTrackbar('get_mode', window_nm, 0, 7, nothing)
camera = PiCamera()
camera.resolution = (cam_res_w, cam_res_h)
camera.framerate = cam_fr_rt
rawCapture = PiRGBArray(camera, size=(cam_res_w, cam_res_h))
time.sleep(0.2)
avg_roi_mean = (0, 0, 0) #b, g, r
delta_color = 000.0000
for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
#############################################
### GET THE CURRENT FRAME FROM THE CAMERA ###
#############################################
im = frame.array
im_raw = im #keep a copy in case we want to look at it later
####################
### GET SETTINGS ###
####################
s = cv2.getTrackbarPos('get_avg', window_nm)
blur_size = cv2.getTrackbarPos('blur_size',window_nm)
canny_min = cv2.getTrackbarPos('canny_min',window_nm)
thresh_min = cv2.getTrackbarPos('thresh_min',window_nm)
min_area = cv2.getTrackbarPos('min_area',window_nm)
max_area = cv2.getTrackbarPos('max_area',window_nm)
max_delta = cv2.getTrackbarPos('max_delta',window_nm)
mode = cv2.getTrackbarPos('get_mode', window_nm)
############################
### ENSURE CORRECT VALUE ###
############################
blur_size = make_odd(blur_size)
blur_size = make_gt_val(blur_size, 0)
thresh_min = make_odd(thresh_min)
thresh_min = make_gt_val(thresh_min, 0)
########################################################
### START IMAGE PROCESSING TO FIND OBJECTS IN RANGE ###
########################################################
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(imgray, (blur_size, blur_size))
#edged = cv2.Canny(blur, canny_min, 255)
ret3, thresh = cv2.threshold(blur, thresh_min, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
###S = 1 means get an average of the overall RGB picture
if s == 1:
blur_size == 0
thresh_size = 1
min_area = 0
ovr_avg = get_avg_bgr(im, contours)
avg_roi_mean = ovr_avg
print avg_roi_mean
cv2.setTrackbarPos('get_avg', window_nm, 0)
else:
ttl_area = 0
ttl_cntrs = len(contours)
ttl_color = 0
sum_roi_mean = (0, 0, 0)
for cnt in contours:
a = cv2.contourArea(cnt)
### DO WE HAVE SOMETHING IN THE RIGHT SIZE (NO NEED TO PICK UP CARS) ###
if min_area < a < max_area:
ttl_area += 1
x, y, h, w = cv2.boundingRect(cnt)
roi = im[y:y+h, x:x+w]
roi_mean = cv2.mean(roi)
int_roi_mean = (int(roi_mean[0]), int(roi_mean[1]), int(roi_mean[2]))
b, g, r = avg_roi_mean
bckgrnd_lab = convert_color(sRGBColor(r, g, b), LabColor)
contColor_lab = convert_color(sRGBColor(roi_mean[2],roi_mean[1], roi_mean[0]), LabColor)
delta_color = round(delta_e_cie2000(bckgrnd_lab, contColor_lab),1)
if delta_color >= max_delta:
# if is_allowed_color(int_roi_mean, avg_roi_mean, max_dev):
cv2.rectangle(im, (x, y), (x+h, y+w), int_roi_mean, 2)
ttl_color += 1
strLoc = str(x) + ',' + str(y) + ':' + str(delta_color)
cv2.putText(im, strLoc, (x,y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0,0,0), 1)
strTTL = str(ttl_cntrs) + ' - ' + str(ttl_area) + ' - ' + str(ttl_color)
cv2.putText(im, str(strTTL), (20,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2)
cv2.putText(im, str(avg_roi_mean), (20, cam_res_h - 20) ,cv2.FONT_HERSHEY_PLAIN, 2.0, avg_roi_mean, 2)
if mode == 0:
cv2.imshow('imgview', im_raw)
print 'Raw image view'
elif mode == 1:
cv2.imshow('imgview', imgray)
print 'Grayscale view'
elif mode == 2:
cv2.imshow('imgview', blur)
print 'Blur view'
elif mode == 3:
cv2.imshow('imgview', blur)
print 'Blur view'
elif mode == 4:
cv2.imshow('imgview', thresh)
print 'Threshold view'
else:
cv2.imshow('imgview', im)
print 'Contour overlay on raw view'
ch = cv2.waitKey(5)
rawCapture.truncate(0)
if ch == 27:
break
cv2.destroyAllWindows()
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python3\n\n# This file is part of the MicroPython project, http://micropython.org/\n# The MIT License (MIT)\n# Copyright (c) 2019 Damien P. George\n\nimport os\nimport subprocess\nimport sys\nimport argparse\n\nsys.path.append('../tools')\nimpo... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\n# This file is part of the MicroPython project, http://micropython.org/\n# The MIT License (MIT)\n# Copyright (c) 2019 Damien P. George\n\nimport os\nimport subprocess\nimport sys\nimport argparse\n\nsys.path.append('... | ```python
#!/usr/bin/env python3
# This file is part of the MicroPython project, http://micropython.org/
# The MIT License (MIT)
# Copyright (c) 2019 Damien P. George
import os
import subprocess
import sys
import argparse
sys.path.append('../tools')
import pyboard
# Paths for host executables
CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/unix/micropython_coverage')
NATMOD_EXAMPLE_DIR = '../examples/natmod/'
# Supported tests and their corresponding mpy module
TEST_MAPPINGS = {
'btree': 'btree/btree_$(ARCH).mpy',
'framebuf': 'framebuf/framebuf_$(ARCH).mpy',
'uheapq': 'uheapq/uheapq_$(ARCH).mpy',
'urandom': 'urandom/urandom_$(ARCH).mpy',
'ure': 'ure/ure_$(ARCH).mpy',
'uzlib': 'uzlib/uzlib_$(ARCH).mpy',
}
# Code to allow a target MicroPython to import an .mpy from RAM
injected_import_hook_code = """\
import sys, uos, uio
class __File(uio.IOBase):
def __init__(self):
self.off = 0
def ioctl(self, request, arg):
return 0
def readinto(self, buf):
buf[:] = memoryview(__buf)[self.off:self.off + len(buf)]
self.off += len(buf)
return len(buf)
class __FS:
def mount(self, readonly, mkfs):
pass
def chdir(self, path):
pass
def stat(self, path):
if path == '__injected.mpy':
return tuple(0 for _ in range(10))
else:
raise OSError(-2) # ENOENT
def open(self, path, mode):
return __File()
uos.mount(__FS(), '/__remote')
uos.chdir('/__remote')
sys.modules['{}'] = __import__('__injected')
"""
class TargetSubprocess:
def __init__(self, cmd):
self.cmd = cmd
def close(self):
pass
def run_script(self, script):
try:
p = subprocess.run(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script)
return p.stdout, None
except subprocess.CalledProcessError as er:
return b'', er
class TargetPyboard:
def __init__(self, pyb):
self.pyb = pyb
self.pyb.enter_raw_repl()
def close(self):
self.pyb.exit_raw_repl()
self.pyb.close()
def run_script(self, script):
try:
self.pyb.enter_raw_repl()
output = self.pyb.exec_(script)
output = output.replace(b'\r\n', b'\n')
return output, None
except pyboard.PyboardError as er:
return b'', er
def run_tests(target_truth, target, args, stats):
for test_file in args.files:
# Find supported test
for k, v in TEST_MAPPINGS.items():
if test_file.find(k) != -1:
test_module = k
test_mpy = v.replace('$(ARCH)', args.arch)
break
else:
print('---- {} - no matching mpy'.format(test_file))
continue
# Read test script
with open(test_file, 'rb') as f:
test_file_data = f.read()
# Create full test with embedded .mpy
try:
with open(NATMOD_EXAMPLE_DIR + test_mpy, 'rb') as f:
test_script = b'__buf=' + bytes(repr(f.read()), 'ascii') + b'\n'
except OSError:
print('---- {} - mpy file not compiled'.format(test_file))
continue
test_script += bytes(injected_import_hook_code.format(test_module), 'ascii')
test_script += test_file_data
# Run test under MicroPython
result_out, error = target.run_script(test_script)
# Work out result of test
extra = ''
if error is None and result_out == b'SKIP\n':
result = 'SKIP'
elif error is not None:
result = 'FAIL'
extra = ' - ' + str(error)
else:
# Check result against truth
try:
with open(test_file + '.exp', 'rb') as f:
result_exp = f.read()
error = None
except OSError:
result_exp, error = target_truth.run_script(test_file_data)
if error is not None:
result = 'TRUTH FAIL'
elif result_out != result_exp:
result = 'FAIL'
print(result_out)
else:
result = 'pass'
# Accumulate statistics
stats['total'] += 1
if result == 'pass':
stats['pass'] += 1
elif result == 'SKIP':
stats['skip'] += 1
else:
stats['fail'] += 1
# Print result
print('{:4} {}{}'.format(result, test_file, extra))
def main():
cmd_parser = argparse.ArgumentParser(description='Run dynamic-native-module tests under MicroPython')
cmd_parser.add_argument('-p', '--pyboard', action='store_true', help='run tests via pyboard.py')
cmd_parser.add_argument('-d', '--device', default='/dev/ttyACM0', help='the device for pyboard.py')
cmd_parser.add_argument('-a', '--arch', default='x64', help='native architecture of the target')
cmd_parser.add_argument('files', nargs='*', help='input test files')
args = cmd_parser.parse_args()
target_truth = TargetSubprocess([CPYTHON3])
if args.pyboard:
target = TargetPyboard(pyboard.Pyboard(args.device))
else:
target = TargetSubprocess([MICROPYTHON])
stats = {'total': 0, 'pass': 0, 'fail':0, 'skip': 0}
run_tests(target_truth, target, args, stats)
target.close()
target_truth.close()
print('{} tests performed'.format(stats['total']))
print('{} tests passed'.format(stats['pass']))
if stats['fail']:
print('{} tests failed'.format(stats['fail']))
if stats['skip']:
print('{} tests skipped'.format(stats['skip']))
if stats['fail']:
sys.exit(1)
if __name__ == "__main__":
main()
``` |
[
{
"content": "```python\n\"\"\"\n Defines the routing endpoints of the RESTful API for databayes.\n\n Each method corresponds to an API action and returns the status of the action and the output. This\n layer handles communication to the databayes daemon.\n\n IMPORTANT NOTE! - Only one of these ser... | [
{
"content": "<|memory_start|>```python\n\"\"\"\n Defines the routing endpoints of the RESTful API for databayes.\n\n Each method corresponds to an API action and returns the status of the action and the output. This\n layer handles communication to the databayes daemon.\n\n IMPORTANT NOTE! - Only ... | ```python
"""
Defines the routing endpoints of the RESTful API for databayes.
Each method corresponds to an API action and returns the status of the action and the output. This
layer handles communication to the databayes daemon.
IMPORTANT NOTE! - Only one of these server instances should be running to avoid race conditions
"""
from databayes_api import app, log, redisio, config, \
gen_queue_id, exists_queue_item
import json, time
from flask import render_template, redirect, url_for, \
request, escape, flash, g, session, Response
ERR_MSG_BADLY_FORMED_REQUEST = 'Malformed request, try again'
# UTILITY METHODS
def handle_queue_validation():
"""
Method for handling queue validation in the view logic
:return:
"""
qid = str(gen_queue_id())
iterations = 0
while exists_queue_item(qid):
if iterations == config.REDIS_QUEUE_COUNTER_MAX:
return -1 # Indicates failure
qid = str(gen_queue_id())
iterations += 1
return str(qid)
def unpack_query_params(request):
"""
Helper method to fetch query paramaters for command requests
:param request:
:return:
"""
ret = dict()
ret['ok'] = True
ret['types'] = []
ret['fields'] = []
ret['fields1'] = []
ret['fields2'] = []
ret['values1'] = []
ret['values2'] = []
ret['message'] = ''
ret['fields'] = request.args.get('fields').split(',') \
if request.args.get('fields') else []
ret['types'] = request.args.get('types').split(',') \
if request.args.get('fields') else []
ret['fields1'] = request.args.get('fields1').split(',') \
if request.args.get('fields1') else []
ret['fields2'] = request.args.get('fields2').split(',') \
if request.args.get('fields2') else []
ret['values1'] = request.args.get('values1').split(',') \
if request.args.get('values1') else []
ret['values2'] = request.args.get('values2').split(',') \
if request.args.get('values2') else []
if len(ret['fields']) != len(ret['types']) or \
len(ret['fields1']) != len(ret['values1']) or \
len(ret['fields2']) != len(ret['values2']):
ret['ok'] = False
ret['message'] = 'Count of fields and types or values do not match'
return ret
def wait_for_response(qid, poll_frequency=10.0, max_tries=5):
"""
Handles polling a response from the redis queue determined by id. Returns
an empty response if it never arrives.
:param qid: int redis queue id
:param poll_frequency: int millisecond frequency of a poll
:param max_tries: int poll no more times than this
:return: string response written to redis from the daemon
"""
rsp = ""
for i in xrange(max_tries):
rsp = redisio.DataIORedis().read(config.DBY_RSP_QUEUE_PREFIX + qid)
if rsp: # got response, stop polling
break
time.sleep(float(poll_frequency) / 1000.0)
return rsp
# --- VIEW METHODS ---
# ====================
def get_arg_str(fields, values, delimiter):
"""
Synthesizes argument strings for entity attributes for databayes. Length
of fields and values must be equal.
:param fields: list of field names
:param values: list of field values
:param delimeter: str, relevant delimeter
:return: argument string
"""
items = []
for i in xrange(len(fields)):
items.append(str(fields[i]) + str(delimiter) + str(values[i]))
return ",".join(items)
def view_switch(view, args):
"""
General method which implements view logic
:param view: str, view to construct a response for
:param args: view arguments passed along
:return: text response from databayes or error
"""
log.debug('Processing view: "{0}"'.format(view))
log.debug('Processing args: "{0}"'.format(str(args)))
query_param_obj = unpack_query_params(request)
if (not query_param_obj['ok']):
return Response(json.dumps([query_param_obj['message']]),
mimetype='application/json')
# Retrieve a valid queue item
qid = handle_queue_validation()
if qid == -1:
return Response(json.dumps(['Queue is full, try again later.']),
mimetype='application/json')
# Construct command
cmd = ""
if view == 'define_entity':
if 'values' in query_param_obj.keys() and \
'fields' in query_param_obj.keys():
arg_str = get_arg_str(query_param_obj['fields'],
query_param_obj['values'], '_')
else:
arg_str = ""
log.info('Warning: entity has no attributes')
cmd = 'def {0}({1})'.format(args['entity'], arg_str) \
if arg_str else 'def ' + str(args['entity'])
elif view == 'add_relation':
arg_str_1 = get_arg_str(query_param_obj['fields1'], query_param_obj['values1'], '=')
arg_str_2 = get_arg_str(query_param_obj['fields2'], query_param_obj['values2'], '=')
cmd = 'add rel {0}({1}) {2}({3})'.format(args['entity_1'], arg_str_1,
args['entity_2'], arg_str_2)
elif view == 'generate':
pass
elif view == 'list_entity':
cmd = 'lst ent {0}'.format(args['pattern'])
elif view == 'list_relation':
arg_str_1 = get_arg_str(query_param_obj['fields1'], query_param_obj['values1'], '=')
arg_str_2 = get_arg_str(query_param_obj['fields2'], query_param_obj['values2'], '=')
cmd = 'lst rel {0}({1}) {2}({3})'.format(args['entity_1'], arg_str_1,
args['entity_2'], arg_str_2)
elif view == 'remove_entity':
cmd = 'rm ent {0}'.format(args['entity'])
elif view == 'remove_relation':
arg_str_1 = get_arg_str(query_param_obj['fields1'], query_param_obj['values1'], '=')
arg_str_2 = get_arg_str(query_param_obj['fields2'], query_param_obj['values2'], '=')
cmd = 'rm rel {0}({1}) {2}({3})'.format(args['entity_1'], arg_str_1,
args['entity_2'], arg_str_2)
log.info('sending command: "{0}"'.format(cmd))
# Send cmd to databayes daemon
redisio.DataIORedis().connect()
redisio.DataIORedis().write(config.DBY_CMD_QUEUE_PREFIX + qid, cmd)
# check response
rsp = wait_for_response(qid)
if not rsp:
rsp = "Could not find response before max retires expired."
return rsp
def home(entity):
"""
Defines web interface to the tool and help.
"""
# TODO - add content here, primarily an interface to an instance
# run on rackspace host
return Response("Welcome to databayes!",
mimetype='application/json')
def version(entity):
"""
Basic version info for databayes
"""
return Response("databayes v1. 2015. Ryan Faulkner",
mimetype='application/json')
def define_entity(entity):
"""
Handles remote requests to databayes for entity definition
Translation: def e(<f1>_<t1>, <f2>_<t2>, ...) ->
/def/e?fields=f1,f2,...&types=t1,t2,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch('define_entity', {'entity': entity})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def add_relation(entity_1, entity_2):
"""
Handles remote requests to databayes for adding relations
Translation: add rel e1(<f1_1>_<v1_1>,...) e2(<f2_1>_<v2_1>,...) ->
/add/rel/e1/e2?fields1=f1_1,...&types1=t1_1,...
&fields2=f2_1,...&types2=t2_1,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch(
'add_relation', {'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def generate(entity_1, entity_2):
"""
Handles remote requests to databayes for generating samples
Translation: gen e1(<f1_1>_<v1_1>,...) constrain e2(<f2_1>_<v2_1>,...) ->
/gen/e1/e2?fields1=f1_1,...&types1=t1_1,...&fields2=f2_1,...&types2=t2_1,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps(
[view_switch('generate',
{'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def list_entity(pattern):
"""
Handles remote requests to databayes for listing entities
Translation: lst ent regex -> /lst/ent/regex
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch('list_entity', {'pattern': pattern})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def list_relation(entity_1, entity_2):
"""
Handles remote requests to databayes for listing relations
Translation: lst rel regex1 regex2 -> /lst/ent/regex1/regex2
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps(
[view_switch('list_relation',
{'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def remove_entity(entity):
"""
Handles remote requests to databayes for removing entities
Translation: rm ent e -> /rm/ent/e
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps([view_switch('remove_entity', {'entity': entity})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
def remove_relation(entity_1, entity_2):
"""
Handles remote requests to databayes for removing relations
Translation: rm rel e1(<f1_1>_<v1_1>,...) e2(<f2_1>_<v2_1>,...)
-> /rm/rel/e1/e2?fields1=f1_1,...&values1=t1_1,...&fields2=f2_1,
...&values2=t2_1,...
:return: JSON response indicating status of action & output
"""
try:
return Response(
json.dumps(
[view_switch('remove_relation',
{'entity_1': entity_1, 'entity_2': entity_2})]),
mimetype='application/json')
except Exception as e:
log.error(e.message)
return Response(
json.dumps([ERR_MSG_BADLY_FORMED_REQUEST]),
mimetype='application/json')
# Stores view references in structure
view_list = {
home.__name__: home,
version.__name__: version,
define_entity.__name__: define_entity,
add_relation.__name__: add_relation,
generate.__name__: generate,
list_entity.__name__: list_entity,
list_relation.__name__: list_relation,
remove_entity.__name__: remove_entity,
remove_relation.__name__: remove_relation,
}
route_deco = {
home.__name__: app.route('/', methods=['GET']),
version.__name__: app.route('/v', methods=['GET']),
define_entity.__name__: app.route('/def/<entity>', methods=['GET', 'POST']),
add_relation.__name__: app.route('/add/<entity_1>/<entity_2>', methods=['GET', 'POST']),
generate.__name__: app.route('/gen', methods=['GET', 'POST']),
list_entity.__name__: app.route('/lst/ent/<pattern>', methods=['GET', 'POST']),
list_relation.__name__: app.route('/lst/rel/<pattern_1>/<pattern_2>', methods=['GET', 'POST']),
remove_entity.__name__: app.route('/rm/ent/<entity>', methods=['GET', 'POST']),
remove_relation.__name__: app.route('/rm/rel/<entity_1>/<entity_2>', methods=['GET', 'POST']),
}
# Apply decorators to views
def init_views():
for key in route_deco:
log.info('Registering view - {0}'.format(key))
route = route_deco[key]
view_method = view_list[key]
view_list[key] = route(view_method)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n\"\"\"\nThis module contains a collection of commonly encountered HTTP exceptions.\n\nThis allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.\n\"\"\"\n\nfrom err... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n\"\"\"\nThis module contains a collection of commonly encountered HTTP exceptions.\n\nThis allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.\n\"... | ```python
"""
This module contains a collection of commonly encountered HTTP exceptions.
This allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.
"""
from errors import ErrorMessage
__author__ = "Benjamin Schubert <ben.c.schubert@gmail.com>"
class BaseHTTPException(Exception):
"""
This is the base HTTP Exception.
It should not be used as is, as it signifies that the server had an unexpected error.
"""
status_code = 500 # type: int
def __init__(self, payload: ErrorMessage = None, status_code: int = None):
"""
Create a new `BaseHTTPException`.
:param payload: payload to send to explain the error to the user.
:param status_code: HTTP status code to send. If not given, will fallback to `self.status_code`.
"""
super().__init__(self)
if payload is None:
payload = dict()
self.payload = payload
if status_code is not None:
self.status_code = status_code
class ForbiddenException(BaseHTTPException):
def __init__(self):
super().__init__({}, 401)
class BadRequestException(BaseHTTPException):
"""This is an exception to throw to return a 400 BAD REQUEST to the user."""
def __init__(self, payload: ErrorMessage):
"""
Create a new `BadRequestException`.
:param payload: payload to send to explain the error to the user.
"""
super().__init__(payload, 400)
class NotFoundException(BaseHTTPException):
"""This is an exception to throw to return a 404 NOT FOUND to the user."""
def __init__(self):
"""Create a new `NotFoundException`."""
super().__init__(None, 404)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n# Copyright 2014 Objectif Libre\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/li... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2014 Objectif Libre\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://w... | ```python
# -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
"""
Time calculations functions
We're mostly using oslo.utils for time calculations but we're encapsulating it
to ease maintenance in case of library modifications.
"""
import calendar
import datetime
from oslo_utils import timeutils
from dateutil import tz
def dt2ts(orig_dt):
"""Translate a datetime into a timestamp."""
return calendar.timegm(orig_dt.timetuple())
def iso2dt(iso_date):
"""iso8601 format to datetime."""
iso_dt = timeutils.parse_isotime(iso_date)
trans_dt = timeutils.normalize_time(iso_dt)
return trans_dt
def ts2dt(timestamp):
"""timestamp to datetime format."""
if not isinstance(timestamp, float):
timestamp = float(timestamp)
return datetime.datetime.utcfromtimestamp(timestamp)
def ts2iso(timestamp):
"""timestamp to is8601 format."""
if not isinstance(timestamp, float):
timestamp = float(timestamp)
return timeutils.iso8601_from_timestamp(timestamp)
def dt2iso(orig_dt):
"""datetime to is8601 format."""
return timeutils.isotime(orig_dt)
def utcnow():
"""Returns a datetime for the current utc time."""
return timeutils.utcnow()
def utcnow_ts():
"""Returns a timestamp for the current utc time."""
return timeutils.utcnow_ts()
def get_month_days(dt):
return calendar.monthrange(dt.year, dt.month)[1]
def add_days(base_dt, days, stay_on_month=True):
if stay_on_month:
max_days = get_month_days(base_dt)
if days > max_days:
return get_month_end(base_dt)
return base_dt + datetime.timedelta(days=days)
def add_month(dt, stay_on_month=True):
next_month = get_next_month(dt)
return add_days(next_month, dt.day, stay_on_month)
def sub_month(dt, stay_on_month=True):
prev_month = get_last_month(dt)
return add_days(prev_month, dt.day, stay_on_month)
def get_month_start(dt=None):
if not dt:
dt = utcnow()
month_start = datetime.datetime(dt.year, dt.month, 1)
return month_start
def get_month_start_timestamp(dt=None):
return dt2ts(get_month_start(dt))
def get_month_end(dt=None):
month_start = get_month_start(dt)
days_of_month = get_month_days(month_start)
month_end = month_start.replace(day=days_of_month)
return month_end
def get_last_month(dt=None):
if not dt:
dt = utcnow()
month_end = get_month_start(dt) - datetime.timedelta(days=1)
return get_month_start(month_end)
def get_next_month(dt=None):
month_end = get_month_end(dt)
next_month = month_end + datetime.timedelta(days=1)
return next_month
def get_next_month_timestamp(dt=None):
return dt2ts(get_next_month(dt))
def get_billing_month_period(billing_month_start=None,
billing_month=None):
year_month = billing_month.split('-')
year = int(year_month[0])
month = int(year_month[1])
month_days = get_month_days(datetime.datetime(year, month, 1))
billing_month_start = \
month_days if month_days < billing_month_start else billing_month_start
start_dt = datetime.datetime(year, month, billing_month_start)
end_dt = \
get_next_month(start_dt)+datetime.timedelta(days=billing_month_start-1)
return (start_dt, end_dt)
def local2utc(dt=None):
if not dt:
dt = datetime.datetime.now()
from_zone = tz.gettz(datetime.datetime.now(tz.tzlocal()).tzname())
to_zone = tz.gettz('UTC')
local = dt.replace(tzinfo=from_zone)
utc = local.astimezone(to_zone)
return datetime.datetime.strftime(utc, "%Y-%m-%d %H:%M:%S")
def utc2local(dt=None):
if not dt:
dt = utcnow()
from_zone = tz.gettz('UTC')
to_zone = tz.gettz(datetime.datetime.now(tz.tzlocal()).tzname())
utc = dt.replace(tzinfo=from_zone)
local = utc.astimezone(to_zone)
return datetime.datetime.strftime(local, "%Y-%m-%d %H:%M:%S")
def reformat_month_style(billing_month):
'''From 2015-9 to 201509'''
year_month = billing_month.split('-')
year = int(year_month[0])
month = int(year_month[1])
return '%s0%s' % (year, month) if month < 10 else '%s%s' % (year, month)
def access_monthly_billing_table(billing_month_start, billing_month):
begin, end = get_billing_month_period(billing_month_start, billing_month)
return True if datetime.datetime.now() > end else False
def get_past_days(days=1):
dt = utcnow()
from_zone = tz.gettz('UTC')
to_zone = tz.gettz(datetime.datetime.now(tz.tzlocal()).tzname())
utc = dt.replace(tzinfo=from_zone)
local = utc.astimezone(to_zone)
past_days = []
for i in range(days):
date_time = local-datetime.timedelta(days=i+1)
past_days.append('%s-%s-%s' % (date_time.year,
date_time.month, date_time.day))
past_days.reverse()
return past_days
def get_past_months(months=1):
dt = utcnow()
from_zone = tz.gettz('UTC')
to_zone = tz.gettz(datetime.datetime.now(tz.tzlocal()).tzname())
utc = dt.replace(tzinfo=from_zone)
base_dt = utc.astimezone(to_zone)
past_months = []
for i in range(months):
month_end = \
get_month_start(base_dt) - datetime.timedelta(days=1)
past_months.append('%s-%s' % (month_end.year,
month_end.month))
base_dt = month_end
past_months.reverse()
return past_months
def month2str(billing_month):
month_filters = []
year_month = billing_month.split('-')
year = int(year_month[0])
month = int(year_month[1])
for before in range(1,month+1):
str = '%s0%s' % (year, before) if before < 10 else '%s%s' % (year, before)
month_filters.append(str)
return month_filters
def get_billing_month(billing_month_start, dt=None):
billing_day_dt = datetime.datetime.strptime(utc2local(dt), "%Y-%m-%d %H:%M:%S")
billing_month = '%s-%s' % (billing_day_dt.year, billing_day_dt.month)
if billing_day_dt.day < billing_month_start:
billing_month = '%s-%s' % (
get_last_month(
billing_day_dt).year,
get_last_month(
billing_day_dt).month)
return billing_month
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n# -*- coding: utf-8 -*-\n\nfrom fissure_Coude import fissure_Coude\n\nclass fissure_Coude_4(fissure_Coude):\n \"\"\"\n probleme de fissure du Coude : ASCOU09A\n adaptation maillage\n \"\"\"\n\n# ------------... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nfrom fissure_Coude import fissure_Coude\n\nclass fissure_Coude_4(fissure_Coude):\n \"\"\"\n probleme de fissure du Coude : ASCOU09A\n adaptation maillage\n \"\"\"\n... | ```python
# -*- coding: utf-8 -*-
from fissure_Coude import fissure_Coude
class fissure_Coude_4(fissure_Coude):
"""
probleme de fissure du Coude : ASCOU09A
adaptation maillage
"""
# ---------------------------------------------------------------------------
def setParamGeometrieSaine(self):
"""
Paramètres géométriques du tuyau coudé sain:
angleCoude
r_cintr
l_tube_p1
l_tube_p2
epais
de
"""
self.geomParams = dict(angleCoude = 40,
r_cintr = 654,
l_tube_p1 = 1700,
l_tube_p2 = 1700,
epais = 62.5,
de = 912.4)
# ---------------------------------------------------------------------------
def setParamMaillageSain(self):
self.meshParams = dict(n_long_p1 = 16,
n_ep = 5,
n_long_coude = 30,
n_circ_g = 50,
n_circ_d = 20,
n_long_p2 = 12)
# ---------------------------------------------------------------------------
def setParamShapeFissure(self):
"""
paramètres de la fissure
profondeur : 0 < profondeur <= épaisseur
azimut : entre 0 et 360°
alpha : 0 < alpha < angleCoude
longueur : <=2*profondeur ==> ellipse, >2*profondeur = fissure longue
orientation : 0° : longitudinale, 90° : circonférentielle, autre : uniquement fissures elliptiques
externe : True : fissure face externe, False : fissure face interne
"""
print "setParamShapeFissure", self.nomCas
self.shapeFissureParams = dict(nomRep = '.',
nomFicSain = self.nomCas,
nomFicFissure = 'fissure_' + self.nomCas,
profondeur = 10,
azimut = 90,
alpha = 20,
longueur = 240,
orientation = 90,
lgInfluence = 30,
elliptique = False,
convexe = True,
externe = True)
# ---------------------------------------------------------------------------
def setReferencesMaillageFissure(self):
self.referencesMaillageFissure = dict(Entity_Quad_Pyramid = 948,
Entity_Quad_Triangle = 1562,
Entity_Quad_Edge = 1192,
Entity_Quad_Penta = 732,
Entity_Quad_Hexa = 22208,
Entity_Node = 133418,
Entity_Quad_Tetra = 18759,
Entity_Quad_Quadrangle = 11852)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General... | ```python
# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from helpers.command import Command
from helpers.textutils import gen_creffett
@Command(['creffett', 'rage'], ['nick', 'target', 'ignore', 'do_kick', 'botnick', 'name'])
def cmd(send, msg, args):
"""RAGE!!!
Syntax: {command} <text>
"""
if args['name'] == 'creffett':
if not args['nick'].startswith('creffett') and args['nick'] != args['botnick']:
send("You're not creffett!")
args['ignore'](args['nick'])
if args['target'] != 'private':
args['do_kick'](args['target'], args['nick'], 'creffett impersonation')
return
if not msg:
send("Rage about what?")
return
# c.send_raw("MODE %s -c" % CHANNEL)
send(gen_creffett(msg))
# c.send_raw("MODE %s +c" % CHANNEL)
send('</rage>')
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\nclass NumMatrix(object):\n\n def __init__(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n \"\"\"\n m, n = len(matrix), len(matrix[0] if matrix else [])\n self._sum = [[0] * n for i in xrange(m)... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nclass NumMatrix(object):\n\n def __init__(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n \"\"\"\n m, n = len(matrix), len(matrix[0] if matrix else [])\n self._sum = [[0] * n fo... | ```python
class NumMatrix(object):
def __init__(self, matrix):
"""
:type matrix: List[List[int]]
"""
m, n = len(matrix), len(matrix[0] if matrix else [])
self._sum = [[0] * n for i in xrange(m)]
col_sum = [0] * n
for i in xrange(m):
s = 0
for j in xrange(n):
col_sum[j] += matrix[i][j]
s += col_sum[j]
self._sum[i][j] = s
def sumRegion(self, row1, col1, row2, col2):
"""
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
m, n = len(self._sum), len(self._sum[0] if self._sum else [])
if not (0 <= row1 <= row2 < m and 0 <= col1 <= col2 < n):
return 0
s = self._sum[row2][col2]
if row1 > 0:
s -= self._sum[row1-1][col2]
if col1 > 0:
s -= self._sum[row2][col1-1]
if row1 > 0 and col1 > 0:
s += self._sum[row1-1][col1-1]
return s
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
``` |
[
{
"content": "```python\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom pprint import pprint\nimport time\n\nfrom utils import pairwise_distances, batchify\nfrom config import opt, data, loaders\n\nclass SimpleClassifi... | [
{
"content": "<|memory_start|>```python\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom pprint import pprint\nimport time\n\nfrom utils import pairwise_distances, batchify\nfrom config import opt, data, loaders\n\nclas... | ```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from pprint import pprint
import time
from utils import pairwise_distances, batchify
from config import opt, data, loaders
class SimpleClassifier(nn.Module):
def __init__(self):
super(SimpleClassifier, self).__init__()
self.input_size = 64
# TODO params
self.hidden_size = 256
self.output_size = 10
self.relu = nn.ReLU()
self.fc1 = nn.Linear(self.input_size, self.hidden_size)
self.fc3 = nn.Linear(self.hidden_size, self.output_size)
self.reset()
if opt.cuda:
self.cuda()
def reset(self):
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc3.weight)
def forward(self, inp):
if opt.cuda:
inp = inp.cuda()
output = self.fc1(inp)
output = self.relu(output)
output = self.fc3(output)
return output
def train_model(self, train_data, epochs):
optimizer = optim.Adadelta(self.parameters(), 0.1)
criterion = nn.CrossEntropyLoss()
self.train()
size = len(train_data[0])
if size > 0:
for e in range(epochs):
avg_loss = 0
corrects = 0
for i, (features, targets) in enumerate(batchify(train_data)):
features = torch.FloatTensor(features)
targets = torch.LongTensor(targets)
if opt.cuda:
features, targets = features.cuda(), targets.cuda()
output = self.forward(features)
optimizer.zero_grad()
loss = criterion(output, targets)
loss.backward()
optimizer.step()
avg_loss += loss.item()
corrects += (torch.max(output, 1)
[1].view(targets.size()) == targets).sum()
avg_loss = avg_loss / opt.batch_size
accuracy = 100.0 * corrects / size
def predict_prob(self, inp):
with torch.no_grad():
output = self.forward(inp)
output = torch.nn.functional.softmax(output, dim=1)
return output
def validate(self, data):
corrects, avg_loss = 0, 0
with torch.no_grad():
for i, (features, targets) in enumerate(batchify(data)):
features = torch.FloatTensor(features)
targets = torch.LongTensor(targets)
if opt.cuda:
features = features.cuda()
targets = targets.cuda()
logit = self.forward(features)
loss = torch.nn.functional.cross_entropy(logit, targets, size_average=False)
avg_loss += loss.item()
corrects += (torch.max(logit, 1)[1].view(targets.size()) == targets).sum()
size = len(data[0])
avg_loss = avg_loss / size
accuracy = 100.0 * float(corrects) / float(size)
metrics = {
'accuracy': accuracy,
'avg_loss': avg_loss,
'performance': accuracy
}
return metrics
def performance_validate(self, data):
return self.validate(data)
def get_state(self, index):
img = torch.Tensor(data["train"][0][index])
if opt.cuda:
img = img.cuda()
preds = self.forward(img)
state = torch.cat((img, preds)).view(1, -1)
return state
def encode_episode_data(self):
pass
# images = []
# # for i, (features, targets) in enumerate(loaders["train_loader"]):
# all_states = torch.Tensor(data["train"][0])
# for i, (features, targets) in enumerate(batchify(data["train"])):
# features = Variable(torch.FloatTensor(features))
# preds = self.predict_prob(features)
# images.append(preds)
#
# images = torch.cat(images, dim=0)
#
# # data["all_predictions"] = images
# data["all_states"] = torch.cat((all_states, images.cpu()), dim=1)
def query(self, index):
# current_state = data["all_states"][index].view(1, -1)
# all_states = data["all_states"]
# current_all_dist = pairwise_distances(current_state, all_states)
# similar_indices = torch.topk(current_all_dist, opt.selection_radius, 1, largest=False)[1]
# similar_indices = similar_indices.data[0].cpu().numpy()
# for idx in similar_indices:
self.add_index(index)
return [index]
def add_index(self, index):
image = data["train"][0][index]
caption = data["train"][1][index]
data["active"][0].append(image)
data["active"][1].append(caption)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\nfrom datetime import datetime\nimport logging\n\nfrom sqlalchemy import or_\n\nfrom adhocracy import model\nfrom adhocracy.model import Delegation\n\nlog = logging.getLogger(__name__)\n\n# REFACT: Rename: DelegationTraverser? Maybe thats also a new object\... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\nfrom datetime import datetime\nimport logging\n\nfrom sqlalchemy import or_\n\nfrom adhocracy import model\nfrom adhocracy.model import Delegation\n\nlog = logging.getLogger(__name__)\n\n# REFACT: Rename: DelegationTraverser? Maybe thats al... | ```python
from datetime import datetime
import logging
from sqlalchemy import or_
from adhocracy import model
from adhocracy.model import Delegation
log = logging.getLogger(__name__)
# REFACT: Rename: DelegationTraverser? Maybe thats also a new object
# that does the traversing in different ways
# That may become a Strategy object on how the delegation should be traversed
# REFACT: Extract: DelegationUserGraph, DelegationTopicGraph,
# perhaps DelegationGraph as common superclass
# This object should represent a whole delegation graph from different
# points of view
# One Goal would be to be able to load the whole subgraph from the db
# in as few queries as possible
# Optimally just one...
# Maybe there will just be one DelegationGraph with different Strategies
# of how create/ traverse/ filter it attached
# Also it needs to be possible to ask the graph for the status at a
# specific time (all objects have a deleted_at property or it can be
# determined when they are overridden by a later choice)
# Some problems with this class:
# - many of the methods work across multiple nodes and layers of the graph
# - many of the methods take additional parameters to determine when to
# filter this graph for different criteria
# - there are methods that work on the whole graph (some as class-methods)
# - Forward and backward traversal are in the same object
# - it is oblivious as to why the graph is traversed, either to allow
# all agents to place their vote
# or to find out what the delegation wheight of a specific user
# is in a specific context
class DelegationNode(object):
"""
A ``DelegationNode`` describes a part of the voting delegation graph
sorrounding a ``Delegateable`` (i.e. a ``Category``, ``Issue`` or
``Proposal``) and a ``User``.
Right now the delegation graph is a graph of incomming and outgoing
delegations on multiple levels - one level per scope.
Each DelegationNode represents the incomming and outgoing delegations
of one user on one level (scope/delegateable) in this graph.
**TODO:** Developing a good caching strategy for this class would be
useful in order to cache the delegation graph to memcached.
:param user: The ``User`` at the center of this ``DelegationNode``.
:param delegateable: A ``Delegateable``.
"""
def __init__(self, user, delegateable):
self.user = user
self.delegateable = delegateable
def _query_traverse(self, querymod, recurse, at_time=None):
if not at_time: # shouldn't this be if at_time is None: ?
at_time = datetime.utcnow()
query = model.meta.Session.query(Delegation)
query = query.filter(Delegation.scope == self.delegateable)
query = query.filter(Delegation.create_time <= at_time)
query = query.filter(or_(Delegation.revoke_time == None, # noqa
Delegation.revoke_time > at_time))
query = querymod(query)
delegations = query.all()
if recurse:
for parent in self.delegateable.parents:
node = DelegationNode(self.user, parent)
delegations += node._query_traverse(querymod, recurse, at_time)
return delegations
def inbound(self, recurse=True, at_time=None,
is_counting_delegations=False):
"""
Retrieve all inbound delegations (i.e. those that the user has received
from other users in order to vote on their behalf) that apply to the
``Delegateable``.
:param recurse: if ``True``, search will include delegations on parent
``Delegateables`` in breadth-first traversal order.
:param at_time: return the delegation graph at the given time, defaults
to the current time.
"""
delegations = self._query_traverse(
lambda q: q.filter(Delegation.agent == self.user),
recurse, at_time)
delegations = self._filter_out_overriden_delegations(delegations)
if is_counting_delegations:
delegations = self._filter_out_delegations_where_a_more_specific_delegation_exists(delegations)
delegations = self._filter_out_delegations_that_are_overriden_by_direct_votes(delegations)
return delegations
def transitive_inbound(self, recurse=True, at_time=None, _path=None,
is_counting_delegations=False):
"""
Retrieve inbound delegations recursing through the delegation graph
as well as through the category tree.
:param recurse: if ``True``, search will include delegations on parent
``Delegateables`` in breadth-first traversal order.
:param at_time: return the delegation graph at the given time, defaults
to the current time.
:returns: list of ``Delegation``
"""
if _path is None:
_path = []
elif self.user in _path:
return [] # we already visited this node
# circle detection uses this path of visited nodes
_path.append(self.user)
delegations = self.inbound(
recurse=recurse, at_time=at_time,
is_counting_delegations=is_counting_delegations)
for delegation in list(delegations):
ddnode = DelegationNode(delegation.principal, self.delegateable)
additional_delegations = ddnode.transitive_inbound(
recurse=recurse, at_time=at_time, _path=_path,
is_counting_delegations=is_counting_delegations)
for additional_delegation in additional_delegations:
if additional_delegation.principal in _path:
continue # this is a delegation from a node we
# already visited
else:
delegations.append(additional_delegation)
# _path is used as a stack in the recursion - so we need to remove
# what we added in going into the recursion
_path.remove(self.user)
return delegations
def outbound(self, recurse=True, at_time=None, filter=True):
"""
Retrieve all outbound delegations (i.e. those that the user has given
to other users in order allow them to vote on his/her behalf) that
apply to the ``Delegateable``.
:param recurse: if ``True``, search will include delegations on parent
``Delegateables`` in breadth-first traversal order.
:param at_time: return the delegation graph at the given time, defaults
to the current time.
:returns: list of ``Delegation``
"""
delegations = self._query_traverse(
lambda q: q.filter(Delegation.principal == self.user),
recurse, at_time)
if filter:
by_agent = dict()
for delegation in set(delegations):
by_agent[delegation.agent] = (
by_agent.get(delegation.agent, []) + [delegation])
delegations = [self.filter_less_specific_delegations(ds)[0] for
ds in by_agent.values()]
return delegations
# TODO: consider to add a transitive-outbound to know where the vote
# will end up for a specific issue
# REFACT: rename propagate_vote_to_delegators?
def propagate(self, callable, _edge=None, _propagation_path=None):
"""
Propagate a given action along the delegation graph *against*
its direction, i.e. from the agent node towards its principal.
This is the natural direction to propagate actions along this
network since it allows principals to reproduce the actions of
their agents.
Propagation will abort on circular dependencies but has no
recursion depth limit.
:param callable: A callable that is to be called on each node.
It must take three arguments, a ``User``, a
``Delegateable`` and the ``Delegation``
which served as a transitory edge during the
last step of the propagation.
:returns: a list of all results produced by the callable.
"""
if not _propagation_path:
_propagation_path = [self]
elif self in _propagation_path:
return []
else:
_propagation_path.append(self)
result = [callable(self.user, self.delegateable, _edge)]
if not self.delegateable.instance.allow_delegate:
return result
for delegation in self.inbound():
node = DelegationNode(delegation.principal, self.delegateable)
result += node.propagate(callable,
_edge=delegation,
_propagation_path=_propagation_path)
return result
def number_of_delegations(self):
return len(self.transitive_inbound(is_counting_delegations=True))
def __repr__(self):
return "<DelegationNode(%s,%s)>" % (self.user.user_name,
self.delegateable.id)
def __eq__(self, other):
return self.user == other.user and \
self.delegateable == other.delegateable
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def create_delegation(cls, from_user, to_user, scope):
delegation = model.Delegation(from_user, to_user, scope)
# dwt: why do I need to add the delegation to the session here?
# it should just be added via the relation it has to the user and
# either not be in the session at all or automatically via the
# user object
model.meta.Session.add(delegation)
# dwt: Why is the flush here neccessary? - supplies the id
# of course - but is that needed?
model.meta.Session.flush()
return delegation
@classmethod
def filter_less_specific_delegations(cls, delegations):
"""
Given a set of delegations, remove those that are overriden by others.
A delegation is overridden whenever there is another delegation with a
narrower scope that still applies.
:param delegations: The list of delegations that are to be filtered.
They need to all be be from the same principal.
:returns: A filtered list of delegations.
"""
matches = list(delegations)
for d in delegations:
for m in matches:
if m.scope.is_super(d.scope):
matches.remove(m)
return matches
def _filter_out_overriden_delegations(self, delegations):
# return delegations
by_principal = dict()
for delegation in set(delegations):
by_principal[delegation.principal] = by_principal.get(
delegation.principal, []) + [delegation]
return [self.filter_less_specific_delegations(ds)[0] for
ds in by_principal.values()]
def _filter_out_delegations_that_are_overriden_by_direct_votes(
self, delegations):
from adhocracy.lib.democracy.decision import Decision
def is_overriden_by_own_decision(delegation):
if not hasattr(delegation.scope, 'poll'):
return True # scope doesn't have polls -> can't self decide
if delegation.scope.poll is None:
return True # currently no poll in this cope -> can't
# self decide
decision = Decision(delegation.principal, delegation.scope.poll)
return not decision.is_self_decided()
return filter(is_overriden_by_own_decision, delegations)
# REFACT: this method apears to do the same as
# filter_less_specific_delegations (modulo the pre-work
# that happens before it is called)
def _filter_out_delegations_where_a_more_specific_delegation_exists(
self, delegations):
def is_overriden_by_other_delegation(delegation):
node = DelegationNode(delegation.principal, self.delegateable)
outbound_delegations = node.outbound()
if 1 == len(outbound_delegations):
# If this returns false, the data model is invalid!
return outbound_delegations[0].agent == self.user
elif len(outbound_delegations) > 1:
smallest_delegations = [outbound_delegations[0]]
for delegation in outbound_delegations:
scope = smallest_delegations[0].scope
if scope.is_super(delegation.scope):
smallest_delegations = [delegation]
elif scope == delegation.scope:
smallest_delegations.append(delegation)
for delegation in smallest_delegations:
if delegation.agent == self.user:
return True
return False
return filter(is_overriden_by_other_delegation, delegations)
``` |
[
{
"content": "Replicate the source code:\n```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2016 Lenovo\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2016 Lenovo\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the ... | ```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A consolidated manage of neighbor table information management.
# Ultimately, this should use AF_NETLINK, but in the interest of time,
# use ip neigh for the moment
import eventlet.green.subprocess as subprocess
import os
neightable = {}
neightime = 0
import re
_validmac = re.compile('..:..:..:..:..:..')
def update_neigh():
global neightable
global neightime
neightable = {}
if os.name == 'nt':
return
ipn = subprocess.Popen(['ip', 'neigh'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(neighdata, err) = ipn.communicate()
for entry in neighdata.split('\n'):
entry = entry.split(' ')
if len(entry) < 5 or not entry[4]:
continue
if entry[0] in ('192.168.0.100', '192.168.70.100', '192.168.70.125'):
# Note that these addresses are common static ip addresses
# that are hopelessly ambiguous if there are many
# so ignore such entries and move on
# ideally the system network steers clear of this landmine of
# a subnet, but just in case
continue
if not _validmac.match(entry[4]):
continue
neightable[entry[0]] = entry[4]
neightime = os.times()[4]
def refresh_neigh():
global neightime
if os.name == 'nt':
return
if os.times()[4] > (neightime + 30):
update_neigh()
``` |
[
{
"content": "```python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in ... | [
{
"content": "<|memory_start|>```python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law ... | ```python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from translator.hot.syntax.hot_output import HotOutput
TOSCA_TO_HOT_GET_ATTRS = {'ip_address': 'first_address'}
class TranslateOutputs():
'''Translate TOSCA Outputs to Heat Outputs.'''
def __init__(self, outputs):
self.outputs = outputs
def translate(self):
return self._translate_outputs()
def _translate_outputs(self):
hot_outputs = []
for output in self.outputs:
hot_value = {}
if 'get_property' in output.value:
get_parameters = output.value['get_property']
if get_parameters[1] in TOSCA_TO_HOT_GET_ATTRS:
get_parameters[1] = \
TOSCA_TO_HOT_GET_ATTRS[get_parameters[1]]
hot_value['get_attr'] = get_parameters
hot_outputs.append(HotOutput(output.name,
hot_value,
output.description))
else:
hot_outputs.append(HotOutput(output.name,
output.value,
output.description))
return hot_outputs
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n\n#******************************************************************************\n#\n# CS 6421 - Simple Conversion\n# Execution: python dollarsYen.py portnum\n# Author: Tim Stamler\n# Group: Malcolm Goldiner\n#\n#*******************************************... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n#******************************************************************************\n#\n# CS 6421 - Simple Conversion\n# Execution: python dollarsYen.py portnum\n# Author: Tim Stamler\n# Group: Malcolm Goldiner\n#\n#***************************... | ```python
#!/usr/bin/env python
#******************************************************************************
#
# CS 6421 - Simple Conversion
# Execution: python dollarsYen.py portnum
# Author: Tim Stamler
# Group: Malcolm Goldiner
#
#******************************************************************************
import socket
import sys
yenConv = 100
def registerConversion(discServHost, discServPort, host, port):
#report information
discServSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
discServSocket.connect((discServerHost, discServerPort))
discServSocket.send("ADD YEN DOLLARS " + host + " " + port + "\n") #waiting on protocol
print discServSocket.recv()
discServSocket.close()
def unregisterConversion(discServHost, discServPort, host, port):
#report information
discServSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
discServSocket.connect((discServerHost, discServerPort))
discServSocket.send("REMOVE " + host + " " + port + "\n") #waiting on protocol
print discServSocket.recv()
discServSocket.close()
def convert(unit, userInput):
if unit == "dollars":
return float(userInput)*yenConv
elif unit == "yen":
return float(userInput)/yenConv
## Function to process requests
def process(conn):
conn.send("Welcome to the yen/dollars converter!\n")
# read userInput from client
userInput = conn.recv(BUFFER_SIZE)
if not userInput:
print "Error reading message"
sys.exit(1)
inputList = userInput.split(" ")
if inputList[0] == "yen" and inputList[1] != "dollars":
conn.send("Invalid input!\n")
return
if inputList[0] == "dollars" and inputList[1] != "yen":
conn.send("Invalid input!\n")
return
result = convert(inputList[0], inputList[2])
print "Received message: ", userInput
conn.send(str(result) + "\n")
conn.close()
### Main code run when program is started
BUFFER_SIZE = 1024
interface = ""
# if input arguments are wrong, print out usage
if len(sys.argv) != 5:
print >> sys.stderr, "usage: python {0} hostAddress portnum discServHost discServPort\n".format(sys.argv[0])
sys.exit(1)
portnum = int(sys.argv[2])
registerConversion(sys.argv[3], int(sys.argv[4]), sys.argv[1], sys.argv[2])
# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((interface, portnum))
s.listen(5)
cmdInput = ""
while cmdInput != "quit":
# accept connection and print out info of client
conn, addr = s.accept()
print 'Accepted connection from client', addr
cmdInput = raw_input()
process(conn)
unregisterConversion(sys.argv[3], int(sys.argv[4]), sys.argv[1], sys.argv[2])
s.close()
``` |
[
{
"content": "```python\nfrom distutils.core import setup\nfrom setuptools import find_packages\n\nfrom io import open\n\nwith open('README.md', encoding=\"utf-8\") as fp:\n long_description = fp.read()\n\nsetup(\n name='sendwithus',\n version='5.2.2',\n author='sendwithus',\n author_email='us@se... | [
{
"content": "<|memory_start|>```python\nfrom distutils.core import setup\nfrom setuptools import find_packages\n\nfrom io import open\n\nwith open('README.md', encoding=\"utf-8\") as fp:\n long_description = fp.read()\n\nsetup(\n name='sendwithus',\n version='5.2.2',\n author='sendwithus',\n aut... | ```python
from distutils.core import setup
from setuptools import find_packages
from io import open
with open('README.md', encoding="utf-8") as fp:
long_description = fp.read()
setup(
name='sendwithus',
version='5.2.2',
author='sendwithus',
author_email='us@sendwithus.com',
packages=find_packages(),
scripts=[],
url='https://github.com/sendwithus/sendwithus_python',
license='LICENSE.txt',
description='Python API client for sendwithus.com',
long_description=long_description,
long_description_content_type='text/markdown',
test_suite="sendwithus.test",
install_requires=[
"requests >= 2.0.0",
"six >= 1.9.0"
],
extras_require={
"test": [
"pytest >= 3.0.5",
"pytest-xdist >= 1.15.0"
]
},
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: Apache Software License",
"Development Status :: 5 - Production/Stable",
"Topic :: Communications :: Email"
]
)
``` |
[
{
"content": "```python\nimport os,sys,json,random\nclass uploadTest:\n def __init__(self): \n self.source = sys.argv[1]\n self.f = open(\"operations.txt\",'w',1)\n self.counter = 0\n print('running')\n self.mapping = dict()\n ... | [
{
"content": "<|memory_start|>```python\nimport os,sys,json,random\nclass uploadTest:\n def __init__(self): \n self.source = sys.argv[1]\n self.f = open(\"operations.txt\",'w',1)\n self.counter = 0\n print('running')\n self.mappin... | ```python
import os,sys,json,random
class uploadTest:
def __init__(self):
self.source = sys.argv[1]
self.f = open("operations.txt",'w',1)
self.counter = 0
print('running')
self.mapping = dict()
def getParentID(self,filename):
#search the dictionary for the filename
if(filename in self.mapping):
return self.mapping[filename]
else:
return -1
def getNewID(self):
self.counter += 1
return self.counter
def getRandomID(self):
return random.randrange(0,1000,1)
def run(self):
print(self.number_of_operations())
for root,subdirs,files in os.walk(self.source, topdown=True):
#store the root id
title = os.path.basename(root)
identifier = self.getNewID()
pID = self.getParentID(title)
if(pID == -1):
pID = self.getRandomID()
self.f.write(title+','+str(identifier)+','+str(pID)+'\n')
for subdir in subdirs:
subName = os.path.basename(subdir)
self.mapping[subName] = identifier
for fi in files:
filefolder = os.path.basename(fi)
fileID = self.getRandomID()
self.f.write(filefolder+','+str(fileID)+','+str(identifier)+'\n')
self.f.write('\n')
print('complete')
self.f.close()
def number_of_operations(self):
count = 0
for root,subdirs,files in os.walk(self.source, topdown=True):
count+=1
count= count + len(files)
return count
if(__name__ == '__main__'):
var = uploadTest()
var.run()
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2005,2006,2007,2008,2009 Brett Adams <brett@belizebotanic.org>\n# Copyright (c) 2012-2015 Mario Frasca <mario@anche.no>\n# Copyright 2017 Jardín Botánico de Quito\n#\n# This file is part of ghini.desk... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2005,2006,2007,2008,2009 Brett Adams <brett@belizebotanic.org>\n# Copyright (c) 2012-2015 Mario Frasca <mario@anche.no>\n# Copyright 2017 Jardín Botánico de Quito\n#\n# This file is pa... | ```python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2005,2006,2007,2008,2009 Brett Adams <brett@belizebotanic.org>
# Copyright (c) 2012-2015 Mario Frasca <mario@anche.no>
# Copyright 2017 Jardín Botánico de Quito
#
# This file is part of ghini.desktop.
#
# ghini.desktop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ghini.desktop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ghini.desktop. If not, see <http://www.gnu.org/licenses/>.
#
# test.py
#
# Description: test for bauble.utils
import sys
import unittest
from pyparsing import *
from sqlalchemy import *
from nose import SkipTest
import bauble
import bauble.db as db
from bauble.error import check, CheckConditionError
import bauble.utils as utils
from bauble.test import BaubleTestCase
class UtilsGTKTests(unittest.TestCase):
def test_create_message_details_dialog(self):
raise SkipTest('Not Implemented')
details = """these are the lines that i want to test
asdasdadasddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd
dasd
asd
addasdadadad"""
msg = 'msg'
d = utils.create_message_details_dialog(msg, details)
d.run()
def test_create_message_dialog(self):
raise SkipTest('Not Implemented')
msg = 'msg'
#msg = ' this is a longer message to test that the dialog width is correct.....but what if it keeps going'
d = utils.create_message_dialog(msg)
d.run()
def test_search_tree_model(self):
from gi.repository import Gtk
model = Gtk.TreeStore(str)
# the rows that should be found
to_find = []
row = model.append(None, ['1'])
model.append(row, ['1.1'])
to_find.append(model.append(row, ['something']))
model.append(row, ['1.3'])
row = model.append(None, ['2'])
to_find.append(model.append(row, ['something']))
model.append(row, ['2.1'])
to_find.append(model.append(None, ['something']))
root = model.get_iter_first()
results = utils.search_tree_model(model[root], 'something')
self.assertTrue(sorted([model.get_path(r) for r in results]),
sorted(to_find))
class UtilsTests(unittest.TestCase):
def test_xml_safe(self):
class test(object):
def __str__(self):
return repr(self)
def __unicode__(self):
return repr(self)
import re
assert re.match('<.*?>', utils.xml_safe(str(test())))
assert re.match('<.*?>', utils.xml_safe(str(test())))
assert utils.xml_safe('test string') == 'test string'
assert utils.xml_safe('test string') == 'test string'
assert utils.xml_safe('test< string') == 'test< string'
assert utils.xml_safe('test< string') == 'test< string'
def test_range_builder(self):
assert utils.range_builder('1-3') == [1, 2, 3]
assert utils.range_builder('1-3,5-7') == [1, 2, 3, 5, 6 ,7]
assert utils.range_builder('1-3,5') == [1, 2, 3, 5]
assert utils.range_builder('1-3,5,7-9')== [1, 2, 3, 5, 7, 8, 9]
assert utils.range_builder('1,2,3,4') == [1, 2, 3, 4]
assert utils.range_builder('11') == [11]
# bad range strings
assert utils.range_builder('-1') == []
assert utils.range_builder('a-b') == []
#self.assertRaises(ParseException, utils.range_builder, '-1')
self.assertRaises(CheckConditionError, utils.range_builder, '2-1')
#self.assertRaises(ParseException, utils.range_builder, 'a-b')
def test_get_urls(self):
text = 'There a link in here: http://bauble.belizebotanic.org'
urls = utils.get_urls(text)
self.assertTrue(urls == [(None, 'http://bauble.belizebotanic.org')], urls)
text = 'There a link in here: http://bauble.belizebotanic.org '\
'and some text afterwards.'
urls = utils.get_urls(text)
self.assertTrue(urls == [(None, 'http://bauble.belizebotanic.org')], urls)
text = 'There is a link here: http://bauble.belizebotanic.org '\
'and here: https://belizebotanic.org and some text afterwards.'
urls = utils.get_urls(text)
self.assertTrue(urls == [(None, 'http://bauble.belizebotanic.org'),
(None, 'https://belizebotanic.org')], urls)
text = 'There a labeled link in here: '\
'[BBG]http://bauble.belizebotanic.org and some text afterwards.'
urls = utils.get_urls(text)
self.assertTrue(urls == [('BBG', 'http://bauble.belizebotanic.org')],
urls)
class UtilsDBTests(BaubleTestCase):
def test_find_dependent_tables(self):
metadata = MetaData()
metadata.bind = db.engine
# table1 does't depend on any tables
table1 = Table('table1', metadata,
Column('id', Integer, primary_key=True))
# table2 depends on table1
table2 = Table('table2', metadata,
Column('id', Integer, primary_key=True),
Column('table1', Integer, ForeignKey('table1.id')))
# table3 depends on table2
table3 = Table('table3', metadata,
Column('id', Integer, primary_key=True),
Column('table2', Integer, ForeignKey('table2.id')),
Column('table4', Integer, ForeignKey('table4.id'))
)
# table4 depends on table2
table4 = Table('table4', metadata,
Column('id', Integer, primary_key=True),
Column('table2', Integer, ForeignKey('table2.id')))
# tables that depend on table 1 are 3, 4, 2
depends = list(utils.find_dependent_tables(table1, metadata))
print('table1: %s' % [table.name for table in depends])
self.assertTrue(list(depends) == [table2, table4, table3])
# tables that depend on table 2 are 3, 4
depends = list(utils.find_dependent_tables(table2, metadata))
print('table2: %s' % [table.name for table in depends])
self.assertTrue(depends == [table4, table3])
# no tables depend on table 3
depends = list(utils.find_dependent_tables(table3, metadata))
print('table3: %s' % [table.name for table in depends])
self.assertTrue(depends == [])
# table that depend on table 4 are 3
depends = list(utils.find_dependent_tables(table4, metadata))
print('table4: %s' % [table.name for table in depends])
self.assertTrue(depends == [table3])
class ResetSequenceTests(BaubleTestCase):
def setUp(self):
super().setUp()
self.metadata = MetaData()
self.metadata.bind = db.engine
def tearDown(self):
super().tearDown()
self.metadata.drop_all()
@staticmethod
def get_currval(col):
if db.engine.name == 'postgresql':
name = '%s_%s_seq' % (col.table.name, col.name)
stmt = "select currval('%s');" % name
return db.engine.execute(stmt).fetchone()[0]
elif db.engine.name == 'sqlite':
stmt = 'select max(%s) from %s' % (col.name, col.table.name)
return db.engine.execute(stmt).fetchone()[0] + 1
def test_no_col_sequence(self):
# Test utils.reset_sequence on a column without a Sequence()
#
# This only tests that reset_sequence() doesn't fail if there is
# no sequence.
# test that a column without an explicit sequence works
table = Table('test_reset_sequence', self.metadata,
Column('id', Integer, primary_key=True))
self.metadata.create_all()
self.insert = table.insert()#.compile()
db.engine.execute(self.insert, values=[{'id': 1}])
utils.reset_sequence(table.c.id)
def test_empty_col_sequence(self):
# Test utils.reset_sequence on a column without a Sequence()
#
# This only tests that reset_sequence() doesn't fail if there is
# no sequence.
# test that a column without an explicit sequence works
table = Table('test_reset_sequence', self.metadata,
Column('id', Integer, primary_key=True))
self.metadata.create_all()
#self.insert = table.insert()#.compile()
#db.engine.execute(self.insert, values=[{'id': 1}])
utils.reset_sequence(table.c.id)
def test_with_col_sequence(self):
# UPDATE: 10/18/2011 -- we don't use Sequence() explicitly,
# just autoincrement=True on primary_key columns so this test
# probably isn't necessary
table = Table('test_reset_sequence', self.metadata,
Column('id', Integer,
Sequence('test_reset_sequence_id_seq'),
primary_key=True, unique=True))
self.metadata.create_all()
rangemax = 10
for i in range(1, rangemax+1):
table.insert().values(id=i).execute()
utils.reset_sequence(table.c.id)
currval = self.get_currval(table.c.id)
self.assertTrue(currval > rangemax, currval)
from bauble.utils import topological_sort
class TopologicalSortTests(unittest.TestCase):
def test_empty_dependencies(self):
r = topological_sort(['a', 'b', 'c'], [])
self.assertTrue('a' in r)
self.assertTrue('b' in r)
self.assertTrue('c' in r)
def test_full_dependencies(self):
r = topological_sort(['a', 'b', 'c'], [('a', 'b'), ('b', 'c')])
self.assertTrue('a' in r)
self.assertTrue('b' in r)
self.assertTrue('c' in r)
self.assertEqual(r.pop(), 'c')
self.assertEqual(r.pop(), 'b')
self.assertEqual(r.pop(), 'a')
def test_partial_dependencies(self):
r = topological_sort(['b', 'e'], [('a', 'b'), ('b', 'c'), ('b', 'd')])
print(r)
self.assertTrue('e' in r)
r.remove('e')
any = set([r.pop(), r.pop()])
self.assertEqual(any, set(['c', 'd']))
self.assertEqual(r.pop(), 'b')
#self.assertEquals(r, [])
def test_empty_input_full_dependencies(self):
r = topological_sort([], [('a', 'b'), ('b', 'c'), ('b', 'd')])
#self.assertEquals(r, [])
``` |
[
{
"content": "Here is some code:\n```python\n# Copyright (C) 2004-2008 Paul Cochrane\n# \n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at you... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n# Copyright (C) 2004-2008 Paul Cochrane\n# \n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the Lic... | ```python
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Class and functions associated with a pyvisi MeshPlot objects (gnuplot)
"""
# generic imports
from pyvisi.renderers.gnuplot.common import debugMsg
import copy
# module specific imports
from pyvisi.renderers.gnuplot.plot import Plot
__revision__ = '$Revision$'
class MeshPlot(Plot):
"""
Mesh plot
"""
def __init__(self, scene):
"""
Initialisation of MeshPlot class
@param scene: the scene with which to associate the MeshPlot
@type scene: Scene object
"""
debugMsg("Called MeshPlot.__init__()")
Plot.__init__(self, scene)
# grab the renderer
self.renderer = scene.renderer
# set up some of the attributes
self.title = None
self.xlabel = None
self.ylabel = None
self.zlabel = None
# to show contours of the surface on the bottom of the axes, set
# this variable to True
self.contours = False
# now add the object to the scene
scene.add(self)
def setData(self, *dataList, **options):
"""
Sets the data to the given plot object.
@param dataList: list of data objects to plot
@type dataList: tuple
@param options: dictionary of extra options
@type options: dict
"""
debugMsg("Called setData() in MeshPlot()")
self.renderer.runString("# MeshPlot.setData()")
# for the moment, make sure that there are three arrays
if len(dataList) != 3:
raise ValueError, "Must have three arrays as input (at present)"
# do some sanity checks on the data
xData = dataList[0]
yData = dataList[1]
zData = dataList[2]
if len(xData.shape) != 1:
raise ValueError, "x data array is not of the correct shape: %s"\
% xData.shape
if len(yData.shape) != 1:
raise ValueError, "y data array is not of the correct shape: %s"\
% yData.shape
if len(zData.shape) != 2:
raise ValueError, "z data array is not of the correct shape: %s"\
% zData.shape
# pass the data around
## the x data
self.renderer.renderDict['_x'] = copy.deepcopy(xData)
## the y data
self.renderer.renderDict['_y'] = copy.deepcopy(yData)
## the z data
self.renderer.renderDict['_z'] = copy.deepcopy(zData)
self.renderer.runString(\
"_data = Gnuplot.GridData(_z, _x, _y, binary=1)")
return
def render(self):
"""
Does MeshPlot object specific rendering stuff
"""
debugMsg("Called MeshPlot.render()")
self.renderer.runString("# MeshPlot.render()")
self.renderer.runString("_gnuplot('set surface')")
# if a title is set, put it here
if self.title is not None:
evalString = "_gnuplot.title(\'%s\')" % self.title
self.renderer.runString(evalString)
# if an xlabel is set, add it
if self.xlabel is not None:
evalString = "_gnuplot.xlabel(\'%s\')" % self.xlabel
self.renderer.runString(evalString)
# if a ylabel is set add it
if self.ylabel is not None:
evalString = "_gnuplot.ylabel(\'%s\')" % self.ylabel
self.renderer.runString(evalString)
# if a zlabel is set add it
if self.zlabel is not None:
evalString = "_gnuplot('set zlabel \\'%s\\'')" % self.zlabel
self.renderer.runString(evalString)
# sets the appropriate linestyle for mesh plots
evalString = "_gnuplot('set style data lines')"
self.renderer.runString(evalString)
# makes sure that the lines are hidden
evalString = "_gnuplot('set hidden3d')"
self.renderer.runString(evalString)
# if contours is true, set the relevant option
if self.contours:
evalString = "_gnuplot('set contour base')"
self.renderer.runString(evalString)
# set up the evalString to use for plotting
evalString = "_gnuplot.splot(_data)"
self.renderer.runString(evalString)
return
# vim: expandtab shiftwidth=4:
``` |
[
{
"content": "Here is the snippet:\n```python\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Linux platform implementation.\"\"\"\n\nfrom __future__ import division\n\nimport base64\n... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Linux platform implementation.\"\"\"\n\nfrom __future__ import division\n\... | ```python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux platform implementation."""
from __future__ import division
import base64
import errno
import functools
import os
import re
import socket
import struct
import sys
import warnings
from collections import namedtuple, defaultdict
from . import _common
from . import _psposix
from . import _psutil_linux as cext
from . import _psutil_posix as cext_posix
from ._common import isfile_strict, usage_percent
from ._common import NIC_DUPLEX_FULL, NIC_DUPLEX_HALF, NIC_DUPLEX_UNKNOWN
from ._compat import PY3, long
if sys.version_info >= (3, 4):
import enum
else:
enum = None
__extra__all__ = [
# io prio constants
"IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
"IOPRIO_CLASS_IDLE",
# connection status constants
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ]
# --- constants
HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
# RLIMIT_* constants, not guaranteed to be present on all kernels
if HAS_PRLIMIT:
for name in dir(cext):
if name.startswith('RLIM'):
__extra__all__.append(name)
# Number of clock ticks per second
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
BOOT_TIME = None # set later
if PY3:
FS_ENCODING = sys.getfilesystemencoding()
if enum is None:
AF_LINK = socket.AF_PACKET
else:
AddressFamily = enum.IntEnum('AddressFamily',
{'AF_LINK': socket.AF_PACKET})
AF_LINK = AddressFamily.AF_LINK
# ioprio_* constants http://linux.die.net/man/2/ioprio_get
if enum is None:
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
else:
class IOPriority(enum.IntEnum):
IOPRIO_CLASS_NONE = 0
IOPRIO_CLASS_RT = 1
IOPRIO_CLASS_BE = 2
IOPRIO_CLASS_IDLE = 3
globals().update(IOPriority.__members__)
# taken from /fs/proc/array.c
PROC_STATUSES = {
"R": _common.STATUS_RUNNING,
"S": _common.STATUS_SLEEPING,
"D": _common.STATUS_DISK_SLEEP,
"T": _common.STATUS_STOPPED,
"t": _common.STATUS_TRACING_STOP,
"Z": _common.STATUS_ZOMBIE,
"X": _common.STATUS_DEAD,
"x": _common.STATUS_DEAD,
"K": _common.STATUS_WAKE_KILL,
"W": _common.STATUS_WAKING
}
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
TCP_STATUSES = {
"01": _common.CONN_ESTABLISHED,
"02": _common.CONN_SYN_SENT,
"03": _common.CONN_SYN_RECV,
"04": _common.CONN_FIN_WAIT1,
"05": _common.CONN_FIN_WAIT2,
"06": _common.CONN_TIME_WAIT,
"07": _common.CONN_CLOSE,
"08": _common.CONN_CLOSE_WAIT,
"09": _common.CONN_LAST_ACK,
"0A": _common.CONN_LISTEN,
"0B": _common.CONN_CLOSING
}
# set later from __init__.py
NoSuchProcess = None
ZombieProcess = None
AccessDenied = None
TimeoutExpired = None
# --- utils
def open_text(fname):
"""On Python 3 opens a file in text mode by using fs encoding.
On Python 2 this is just an alias for open(name, 'rt').
"""
kw = dict(encoding=FS_ENCODING) if PY3 else dict()
return open(fname, "rt", **kw)
# --- named tuples
def _get_cputimes_fields():
"""Return a namedtuple of variable fields depending on the
CPU times available on this Linux kernel version which may be:
(user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
[guest_nice]]])
"""
with open('/proc/stat', 'rb') as f:
values = f.readline().split()[1:]
fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
vlen = len(values)
if vlen >= 8:
# Linux >= 2.6.11
fields.append('steal')
if vlen >= 9:
# Linux >= 2.6.24
fields.append('guest')
if vlen >= 10:
# Linux >= 3.2.0
fields.append('guest_nice')
return fields
scputimes = namedtuple('scputimes', _get_cputimes_fields())
svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached'])
pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
pmmap_grouped = namedtuple(
'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
'shared_dirty', 'private_clean', 'private_dirty',
'referenced', 'anonymous', 'swap'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# --- system memory
def virtual_memory():
total, free, buffers, shared, _, _ = cext.linux_sysinfo()
cached = active = inactive = None
with open('/proc/meminfo', 'rb') as f:
for line in f:
if line.startswith(b"Cached:"):
cached = int(line.split()[1]) * 1024
elif line.startswith(b"Active:"):
active = int(line.split()[1]) * 1024
elif line.startswith(b"Inactive:"):
inactive = int(line.split()[1]) * 1024
if (cached is not None and
active is not None and
inactive is not None):
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# https://github.com/giampaolo/psutil/issues/313
msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
cached = active = inactive = 0
avail = free + buffers + cached
used = total - free
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free,
active, inactive, buffers, cached)
def swap_memory():
_, _, _, _, total, free = cext.linux_sysinfo()
used = total - free
percent = usage_percent(used, total, _round=1)
# get pgin/pgouts
with open("/proc/vmstat", "rb") as f:
sin = sout = None
for line in f:
# values are expressed in 4 kilo bytes, we want bytes instead
if line.startswith(b'pswpin'):
sin = int(line.split(b' ')[1]) * 4 * 1024
elif line.startswith(b'pswpout'):
sout = int(line.split(b' ')[1]) * 4 * 1024
if sin is not None and sout is not None:
break
else:
# we might get here when dealing with exotic Linux flavors, see:
# https://github.com/giampaolo/psutil/issues/313
msg = "'sin' and 'sout' swap memory stats couldn't " \
"be determined and were set to 0"
warnings.warn(msg, RuntimeWarning)
sin = sout = 0
return _common.sswap(total, used, free, percent, sin, sout)
# --- CPUs
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
with open('/proc/stat', 'rb') as f:
values = f.readline().split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields)
def per_cpu_times():
"""Return a list of namedtuple representing the CPU times
for every CPU available on the system.
"""
cpus = []
with open('/proc/stat', 'rb') as f:
# get rid of the first line which refers to system wide CPU stats
f.readline()
for line in f:
if line.startswith(b'cpu'):
values = line.split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
entry = scputimes(*fields)
cpus.append(entry)
return cpus
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
with open('/proc/cpuinfo', 'rb') as f:
for line in f:
if line.lower().startswith(b'processor'):
num += 1
# unknown format (e.g. amrel/sparc architectures), see:
# https://github.com/giampaolo/psutil/issues/200
# try to parse /proc/stat as a last resort
if num == 0:
search = re.compile('cpu\d')
with open_text('/proc/stat') as f:
for line in f:
line = line.split(' ')[0]
if search.match(line):
num += 1
if num == 0:
# mimic os.cpu_count()
return None
return num
def cpu_count_physical():
"""Return the number of physical cores in the system."""
mapping = {}
current_info = {}
with open('/proc/cpuinfo', 'rb') as f:
for line in f:
line = line.strip().lower()
if not line:
# new section
if (b'physical id' in current_info and
b'cpu cores' in current_info):
mapping[current_info[b'physical id']] = \
current_info[b'cpu cores']
current_info = {}
else:
# ongoing section
if (line.startswith(b'physical id') or
line.startswith(b'cpu cores')):
key, value = line.split(b'\t:', 1)
current_info[key] = int(value)
# mimic os.cpu_count()
return sum(mapping.values()) or None
# --- other system functions
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname == ':0.0' or hostname == ':0':
hostname = 'localhost'
nt = _common.suser(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
global BOOT_TIME
with open('/proc/stat', 'rb') as f:
for line in f:
if line.startswith(b'btime'):
ret = float(line.strip().split()[1])
BOOT_TIME = ret
return ret
raise RuntimeError("line 'btime' not found in /proc/stat")
# --- processes
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir(b'/proc') if x.isdigit()]
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
# --- network
class Connections:
"""A wrapper on top of /proc/net/* files, retrieving per-process
and system-wide open connections (TCP, UDP, UNIX) similarly to
"netstat -an".
Note: in case of UNIX sockets we're only able to determine the
local endpoint/path, not the one it's connected to.
According to [1] it would be possible but not easily.
[1] http://serverfault.com/a/417946
"""
def __init__(self):
tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
unix = ("unix", socket.AF_UNIX, None)
self.tmap = {
"all": (tcp4, tcp6, udp4, udp6, unix),
"tcp": (tcp4, tcp6),
"tcp4": (tcp4,),
"tcp6": (tcp6,),
"udp": (udp4, udp6),
"udp4": (udp4,),
"udp6": (udp6,),
"unix": (unix,),
"inet": (tcp4, tcp6, udp4, udp6),
"inet4": (tcp4, udp4),
"inet6": (tcp6, udp6),
}
def get_proc_inodes(self, pid):
inodes = defaultdict(list)
for fd in os.listdir("/proc/%s/fd" % pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
except OSError as err:
# ENOENT == file which is gone in the meantime;
# os.stat('/proc/%s' % self.pid) will be done later
# to force NSP (if it's the case)
if err.errno in (errno.ENOENT, errno.ESRCH):
continue
elif err.errno == errno.EINVAL:
# not a link
continue
else:
raise
else:
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode].append((pid, int(fd)))
return inodes
def get_all_inodes(self):
inodes = {}
for pid in pids():
try:
inodes.update(self.get_proc_inodes(pid))
except OSError as err:
# os.listdir() is gonna raise a lot of access denied
# exceptions in case of unprivileged user; that's fine
# as we'll just end up returning a connection with PID
# and fd set to None anyway.
# Both netstat -an and lsof does the same so it's
# unlikely we can do any better.
# ENOENT just means a PID disappeared on us.
if err.errno not in (
errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES):
raise
return inodes
def decode_address(self, addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if PY3:
ip = ip.encode('ascii')
if family == socket.AF_INET:
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
# ip = ip.decode('hex')
# return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port)
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith('6') and not os.path.exists(file):
# IPv6 not supported
return
with open_text(file) as f:
f.readline() # skip the first line
for line in f:
try:
_, laddr, raddr, status, _, _, _, _, _, inode = \
line.split()[:10]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (
file, line))
if inode in inodes:
# # We assume inet sockets are unique, so we error
# # out if there are multiple references to the
# # same inode. We won't do this for UNIX sockets.
# if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
# raise ValueError("ambiguos inode with multiple "
# "PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
yield (fd, family, type_, laddr, raddr, status, pid)
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
# see: https://github.com/giampaolo/psutil/issues/675
kw = dict(encoding=FS_ENCODING, errors='replace') if PY3 else dict()
with open(file, 'rt', **kw) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (
file, line))
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
def retrieve(self, kind, pid=None):
if kind not in self.tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in self.tmap])))
if pid is not None:
inodes = self.get_proc_inodes(pid)
if not inodes:
# no connections for this process
return []
else:
inodes = self.get_all_inodes()
ret = set()
for f, family, type_ in self.tmap[kind]:
if family in (socket.AF_INET, socket.AF_INET6):
ls = self.process_inet(
"/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
else:
ls = self.process_unix(
"/proc/net/%s" % f, family, inodes, filter_pid=pid)
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
if pid:
conn = _common.pconn(fd, family, type_, laddr, raddr,
status)
else:
conn = _common.sconn(fd, family, type_, laddr, raddr,
status, bound_pid)
ret.add(conn)
return list(ret)
_connections = Connections()
def net_connections(kind='inet'):
"""Return system-wide open connections."""
return _connections.retrieve(kind)
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
with open_text("/proc/net/dev") as f:
lines = f.readlines()
retdict = {}
for line in lines[2:]:
colon = line.rfind(':')
assert colon > 0, repr(line)
name = line[:colon].strip()
fields = line[colon + 1:].strip().split()
bytes_recv = int(fields[0])
packets_recv = int(fields[1])
errin = int(fields[2])
dropin = int(fields[3])
bytes_sent = int(fields[8])
packets_sent = int(fields[9])
errout = int(fields[10])
dropout = int(fields[11])
retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
errin, errout, dropin, dropout)
return retdict
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN}
names = net_io_counters().keys()
ret = {}
for name in names:
isup, duplex, speed, mtu = cext.net_if_stats(name)
duplex = duplex_map[duplex]
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
net_if_addrs = cext_posix.net_if_addrs
# --- disks
def disk_io_counters():
"""Return disk I/O statistics for every disk installed on the
system as a dict of raw tuples.
"""
# man iostat states that sectors are equivalent with blocks and
# have a size of 512 bytes since 2.4 kernels. This value is
# needed to calculate the amount of disk I/O in bytes.
SECTOR_SIZE = 512
# determine partitions we want to look for
partitions = []
with open_text("/proc/partitions") as f:
lines = f.readlines()[2:]
for line in reversed(lines):
_, _, _, name = line.split()
if name[-1].isdigit():
# we're dealing with a partition (e.g. 'sda1'); 'sda' will
# also be around but we want to omit it
partitions.append(name)
else:
if not partitions or not partitions[-1].startswith(name):
# we're dealing with a disk entity for which no
# partitions have been defined (e.g. 'sda' but
# 'sda1' was not around), see:
# https://github.com/giampaolo/psutil/issues/338
partitions.append(name)
#
retdict = {}
with open_text("/proc/diskstats") as f:
lines = f.readlines()
for line in lines:
# http://www.mjmwired.net/kernel/Documentation/iostats.txt
fields = line.split()
if len(fields) > 7:
_, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
fields[:11]
else:
# from kernel 2.6.0 to 2.6.25
_, _, name, reads, rbytes, writes, wbytes = fields
rtime, wtime = 0, 0
if name in partitions:
rbytes = int(rbytes) * SECTOR_SIZE
wbytes = int(wbytes) * SECTOR_SIZE
reads = int(reads)
writes = int(writes)
rtime = int(rtime)
wtime = int(wtime)
retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
return retdict
def disk_partitions(all=False):
"""Return mounted disk partitions as a list of namedtuples"""
fstypes = set()
with open_text("/proc/filesystems") as f:
for line in f:
line = line.strip()
if not line.startswith("nodev"):
fstypes.add(line.strip())
else:
# ignore all lines starting with "nodev" except "nodev zfs"
fstype = line.split("\t")[1]
if fstype == "zfs":
fstypes.add("zfs")
retlist = []
partitions = cext.disk_partitions()
for partition in partitions:
device, mountpoint, fstype, opts = partition
if device == 'none':
device = ''
if not all:
if device == '' or fstype not in fstypes:
continue
ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
retlist.append(ntuple)
return retlist
disk_usage = _psposix.disk_usage
# --- decorators
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and IOError exceptions
into NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
def wrap_exceptions_w_zombie(fun):
"""Same as above but also handles zombies."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return wrap_exceptions(fun)(self)
except NoSuchProcess:
if not pid_exists(self.pid):
raise
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
return wrapper
class Process(object):
"""Linux process implementation."""
__slots__ = ["pid", "_name", "_ppid"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
@wrap_exceptions
def name(self):
with open_text("/proc/%s/stat" % self.pid) as f:
data = f.read()
# XXX - gets changed later and probably needs refactoring
return data[data.find('(') + 1:data.rfind(')')]
def exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except OSError as err:
if err.errno in (errno.ENOENT, errno.ESRCH):
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s" % self.pid):
return ""
else:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
# readlink() might return paths containing null bytes ('\x00').
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
exe = exe.split('\x00')[0]
if exe.endswith(' (deleted)') and not os.path.exists(exe):
exe = exe[:-10]
return exe
@wrap_exceptions
def cmdline(self):
with open_text("/proc/%s/cmdline" % self.pid) as f:
data = f.read()
if data.endswith('\x00'):
data = data[:-1]
return [x for x in data.split('\x00')]
@wrap_exceptions
def terminal(self):
tmap = _psposix._get_terminal_map()
with open("/proc/%s/stat" % self.pid, 'rb') as f:
tty_nr = int(f.read().split(b' ')[6])
try:
return tmap[tty_nr]
except KeyError:
return None
if os.path.exists('/proc/%s/io' % os.getpid()):
@wrap_exceptions
def io_counters(self):
fname = "/proc/%s/io" % self.pid
with open(fname, 'rb') as f:
rcount = wcount = rbytes = wbytes = None
for line in f:
if rcount is None and line.startswith(b"syscr"):
rcount = int(line.split()[1])
elif wcount is None and line.startswith(b"syscw"):
wcount = int(line.split()[1])
elif rbytes is None and line.startswith(b"read_bytes"):
rbytes = int(line.split()[1])
elif wbytes is None and line.startswith(b"write_bytes"):
wbytes = int(line.split()[1])
for x in (rcount, wcount, rbytes, wbytes):
if x is None:
raise NotImplementedError(
"couldn't read all necessary info from %r" % fname)
return _common.pio(rcount, wcount, rbytes, wbytes)
else:
def io_counters(self):
raise NotImplementedError("couldn't find /proc/%s/io (kernel "
"too old?)" % self.pid)
@wrap_exceptions
def cpu_times(self):
with open("/proc/%s/stat" % self.pid, 'rb') as f:
st = f.read().strip()
# ignore the first two values ("pid (exe)")
st = st[st.find(b')') + 2:]
values = st.split(b' ')
utime = float(values[11]) / CLOCK_TICKS
stime = float(values[12]) / CLOCK_TICKS
return _common.pcputimes(utime, stime)
@wrap_exceptions
def wait(self, timeout=None):
try:
return _psposix.wait_pid(self.pid, timeout)
except _psposix.TimeoutExpired:
# support for private module import
if TimeoutExpired is None:
raise
raise TimeoutExpired(timeout, self.pid, self._name)
@wrap_exceptions
def create_time(self):
with open("/proc/%s/stat" % self.pid, 'rb') as f:
st = f.read().strip()
# ignore the first two values ("pid (exe)")
st = st[st.rfind(b')') + 2:]
values = st.split(b' ')
# According to documentation, starttime is in field 21 and the
# unit is jiffies (clock ticks).
# We first divide it for clock ticks and then add uptime returning
# seconds since the epoch, in UTC.
# Also use cached value if available.
bt = BOOT_TIME or boot_time()
return (float(values[19]) / CLOCK_TICKS) + bt
@wrap_exceptions
def memory_info(self):
with open("/proc/%s/statm" % self.pid, 'rb') as f:
vms, rss = f.readline().split()[:2]
return _common.pmem(int(rss) * PAGESIZE,
int(vms) * PAGESIZE)
@wrap_exceptions
def memory_info_ex(self):
# ============================================================
# | FIELD | DESCRIPTION | AKA | TOP |
# ============================================================
# | rss | resident set size | | RES |
# | vms | total program size | size | VIRT |
# | shared | shared pages (from shared mappings) | | SHR |
# | text | text ('code') | trs | CODE |
# | lib | library (unused in Linux 2.6) | lrs | |
# | data | data + stack | drs | DATA |
# | dirty | dirty pages (unused in Linux 2.6) | dt | |
# ============================================================
with open("/proc/%s/statm" % self.pid, "rb") as f:
vms, rss, shared, text, lib, data, dirty = \
[int(x) * PAGESIZE for x in f.readline().split()[:7]]
return pextmem(rss, vms, shared, text, lib, data, dirty)
if os.path.exists('/proc/%s/smaps' % os.getpid()):
@wrap_exceptions
def memory_maps(self):
"""Return process's mapped memory regions as a list of named tuples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
with open_text("/proc/%s/smaps" % self.pid) as f:
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(':'):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith('VmFlags:'):
# see issue #369
continue
else:
raise ValueError("don't know how to inte"
"rpret line %r" % line)
yield (current_block.pop(), data)
ls = []
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = \
hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
ls.append((
addr, perms, path,
data['Rss:'],
data.get('Size:', 0),
data.get('Pss:', 0),
data.get('Shared_Clean:', 0),
data.get('Shared_Dirty:', 0),
data.get('Private_Clean:', 0),
data.get('Private_Dirty:', 0),
data.get('Referenced:', 0),
data.get('Anonymous:', 0),
data.get('Swap:', 0)
))
return ls
else:
def memory_maps(self):
msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or " \
"CONFIG_MMU kernel configuration option is not enabled" \
% self.pid
raise NotImplementedError(msg)
@wrap_exceptions_w_zombie
def cwd(self):
# readlink() might return paths containing null bytes causing
# problems when used with other fs-related functions (os.*,
# open(), ...)
path = os.readlink("/proc/%s/cwd" % self.pid)
return path.replace('\x00', '')
@wrap_exceptions
def num_ctx_switches(self):
vol = unvol = None
with open("/proc/%s/status" % self.pid, "rb") as f:
for line in f:
if line.startswith(b"voluntary_ctxt_switches"):
vol = int(line.split()[1])
elif line.startswith(b"nonvoluntary_ctxt_switches"):
unvol = int(line.split()[1])
if vol is not None and unvol is not None:
return _common.pctxsw(vol, unvol)
raise NotImplementedError(
"'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
"fields were not found in /proc/%s/status; the kernel is "
"probably older than 2.6.23" % self.pid)
@wrap_exceptions
def num_threads(self):
with open("/proc/%s/status" % self.pid, "rb") as f:
for line in f:
if line.startswith(b"Threads:"):
return int(line.split()[1])
raise NotImplementedError("line not found")
@wrap_exceptions
def threads(self):
thread_ids = os.listdir("/proc/%s/task" % self.pid)
thread_ids.sort()
retlist = []
hit_enoent = False
for thread_id in thread_ids:
fname = "/proc/%s/task/%s/stat" % (self.pid, thread_id)
try:
with open(fname, 'rb') as f:
st = f.read().strip()
except IOError as err:
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
hit_enoent = True
continue
raise
# ignore the first two values ("pid (exe)")
st = st[st.find(b')') + 2:]
values = st.split(b' ')
utime = float(values[11]) / CLOCK_TICKS
stime = float(values[12]) / CLOCK_TICKS
ntuple = _common.pthread(int(thread_id), utime, stime)
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def nice_get(self):
# with open_text('/proc/%s/stat' % self.pid) as f:
# data = f.read()
# return int(data.split()[18])
# Use C implementation
return cext_posix.getpriority(self.pid)
@wrap_exceptions
def nice_set(self, value):
return cext_posix.setpriority(self.pid, value)
@wrap_exceptions
def cpu_affinity_get(self):
return cext.proc_cpu_affinity_get(self.pid)
@wrap_exceptions
def cpu_affinity_set(self, cpus):
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except OSError as err:
if err.errno == errno.EINVAL:
allcpus = tuple(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
raise ValueError("invalid CPU #%i (choose between %s)"
% (cpu, allcpus))
raise
# only starting from kernel 2.6.13
if hasattr(cext, "proc_ioprio_get"):
@wrap_exceptions
def ionice_get(self):
ioclass, value = cext.proc_ioprio_get(self.pid)
if enum is not None:
ioclass = IOPriority(ioclass)
return _common.pionice(ioclass, value)
@wrap_exceptions
def ionice_set(self, ioclass, value):
if value is not None:
if not PY3 and not isinstance(value, (int, long)):
msg = "value argument is not an integer (gor %r)" % value
raise TypeError(msg)
if not 0 <= value <= 7:
raise ValueError(
"value argument range expected is between 0 and 7")
if ioclass in (IOPRIO_CLASS_NONE, None):
if value:
msg = "can't specify value with IOPRIO_CLASS_NONE " \
"(got %r)" % value
raise ValueError(msg)
ioclass = IOPRIO_CLASS_NONE
value = 0
elif ioclass == IOPRIO_CLASS_IDLE:
if value:
msg = "can't specify value with IOPRIO_CLASS_IDLE " \
"(got %r)" % value
raise ValueError(msg)
value = 0
elif ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
if value is None:
# TODO: add comment explaining why this is 4 (?)
value = 4
else:
# otherwise we would get OSError(EVINAL)
raise ValueError("invalid ioclass argument %r" % ioclass)
return cext.proc_ioprio_set(self.pid, ioclass, value)
if HAS_PRLIMIT:
@wrap_exceptions
def rlimit(self, resource, limits=None):
# If pid is 0 prlimit() applies to the calling process and
# we don't want that. We should never get here though as
# PID 0 is not supported on Linux.
if self.pid == 0:
raise ValueError("can't use prlimit() against PID 0 process")
try:
if limits is None:
# get
return cext.linux_prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError(
"second argument must be a (soft, hard) tuple, "
"got %s" % repr(limits))
soft, hard = limits
cext.linux_prlimit(self.pid, resource, soft, hard)
except OSError as err:
if err.errno == errno.ENOSYS and pid_exists(self.pid):
# I saw this happening on Travis:
# https://travis-ci.org/giampaolo/psutil/jobs/51368273
raise ZombieProcess(self.pid, self._name, self._ppid)
else:
raise
@wrap_exceptions
def status(self):
with open("/proc/%s/status" % self.pid, 'rb') as f:
for line in f:
if line.startswith(b"State:"):
letter = line.split()[1]
if PY3:
letter = letter.decode()
# XXX is '?' legit? (we're not supposed to return
# it anyway)
return PROC_STATUSES.get(letter, '?')
@wrap_exceptions
def open_files(self):
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
hit_enoent = False
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
try:
file = os.readlink(file)
except OSError as err:
# ENOENT == file which is gone in the meantime
if err.errno in (errno.ENOENT, errno.ESRCH):
hit_enoent = True
continue
elif err.errno == errno.EINVAL:
# not a link
continue
else:
raise
else:
# If file is not an absolute path there's no way
# to tell whether it's a regular file or not,
# so we skip it. A regular file is always supposed
# to be absolutized though.
if file.startswith('/') and isfile_strict(file):
ntuple = _common.popenfile(file, int(fd))
retlist.append(ntuple)
if hit_enoent:
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return retlist
@wrap_exceptions
def connections(self, kind='inet'):
ret = _connections.retrieve(kind, self.pid)
# raise NSP if the process disappeared on us
os.stat('/proc/%s' % self.pid)
return ret
@wrap_exceptions
def num_fds(self):
return len(os.listdir("/proc/%s/fd" % self.pid))
@wrap_exceptions
def ppid(self):
fpath = "/proc/%s/status" % self.pid
with open(fpath, 'rb') as f:
for line in f:
if line.startswith(b"PPid:"):
# PPid: nnnn
return int(line.split()[1])
raise NotImplementedError("line 'PPid' not found in %s" % fpath)
@wrap_exceptions
def uids(self):
fpath = "/proc/%s/status" % self.pid
with open(fpath, 'rb') as f:
for line in f:
if line.startswith(b'Uid:'):
_, real, effective, saved, fs = line.split()
return _common.puids(int(real), int(effective), int(saved))
raise NotImplementedError("line 'Uid' not found in %s" % fpath)
@wrap_exceptions
def gids(self):
fpath = "/proc/%s/status" % self.pid
with open(fpath, 'rb') as f:
for line in f:
if line.startswith(b'Gid:'):
_, real, effective, saved, fs = line.split()
return _common.pgids(int(real), int(effective), int(saved))
raise NotImplementedError("line 'Gid' not found in %s" % fpath)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nfrom dataclasses import dataclass\nfrom threading import Event\n\nfrom pymuse.utils.stoppablequeue import StoppableQueue\n\n@dataclass\nclass SignalData():\n \"\"\"\n Dataclass for a signal data point. Event_marker attribute is optional\n... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nfrom dataclasses import dataclass\nfrom threading import Event\n\nfrom pymuse.utils.stoppablequeue import StoppableQueue\n\n@dataclass\nclass SignalData():\n \"\"\"\n Dataclass for a signal data point. Event_marker attribu... | ```python
from dataclasses import dataclass
from threading import Event
from pymuse.utils.stoppablequeue import StoppableQueue
@dataclass
class SignalData():
"""
Dataclass for a signal data point. Event_marker attribute is optional
"""
time: float
values: list
event_marker: list = None
class Signal():
"""Represents the accumulated signal that is store in a queue. It tag every sample with a time"""
def __init__(self, length: int, acquisition_frequency: float):
self._shutdown_event = Event()
self._signal_queue: StoppableQueue = StoppableQueue(length, self._shutdown_event)
self._signal_period: float = (1 / acquisition_frequency)
self._data_counter: int = 0
@property
def signal_queue(self) -> StoppableQueue:
return self._signal_queue
def push(self, data_list: list):
time = self._data_counter * self._signal_period
signal_data: SignalData = SignalData(time, data_list)
self._signal_queue.put(signal_data, True, self._signal_period)
self._data_counter += 1
def pop(self, timeout=None) -> SignalData:
return self._signal_queue.get(True, timeout)
def shutdown(self):
self._shutdown_event.set()
``` |
[
{
"content": "```python\nimport os\nimport os.path as op\nimport sys\nimport warnings\n\nimport numpy as np\nfrom nose.tools import assert_true, assert_equal, assert_raises\nfrom numpy.testing import assert_allclose\n\nfrom mne import (read_dipole, read_forward_solution,\n convert_forward_soluti... | [
{
"content": "<|memory_start|>```python\nimport os\nimport os.path as op\nimport sys\nimport warnings\n\nimport numpy as np\nfrom nose.tools import assert_true, assert_equal, assert_raises\nfrom numpy.testing import assert_allclose\n\nfrom mne import (read_dipole, read_forward_solution,\n conver... | ```python
import os
import os.path as op
import sys
import warnings
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_allclose
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events)
from mne.dipole import get_phantom_dipoles
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import (run_tests_if_main, _TempDir, slow_test, requires_mne,
run_subprocess)
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-2-src.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert_equal(orig.name, new.name)
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert_equal(len(dip), n_dipoles)
assert_equal(dip.pos.shape, (n_dipoles, 3))
assert_equal(dip.ori.shape, (n_dipoles, 3))
assert_equal(dip.gof.shape, (n_dipoles,))
assert_equal(dip.amplitude.shape, (n_dipoles,))
@testing.requires_testing_data
def test_io_dipoles():
"""Test IO for .dip files."""
tempdir = _TempDir()
dipole = read_dipole(fname_dip)
print(dipole) # test repr
out_fname = op.join(tempdir, 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@testing.requires_testing_data
def test_dipole_fitting_ctf():
"""Test dipole fitting with CTF data."""
raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True)
events = make_fixed_length_events(raw_ctf, 1)
evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average()
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.))
# XXX Eventually we should do some better checks about accuracy, but
# for now our CTF phantom fitting tutorials will have to do
# (otherwise we need to add that to the testing dataset, which is
# a bit too big)
fit_dipole(evoked, cov, sphere)
@slow_test
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting():
"""Test dipole fitting."""
amp = 10e-9
tempdir = _TempDir()
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
factor = 1.
# XXX weird, inexplicable differenc for 3.5 build we'll assume is due to
# Anaconda bug for now...
if os.getenv('TRAVIS', 'false') == 'true' and \
sys.version[:3] in ('3.5', '2.7'):
factor = 0.8
assert_true((data_rms > factor * resi_rms).all(),
msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), factor))
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
assert_equal(fwd['src'][0]['coord_frame'], 5)
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
out = dip.crop(dip_c.times[0], dip_c.times[-1])
assert_true(dip is out)
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did at least as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
assert_true(dists[0] >= dists[1] * factor, 'dists: %s' % dists)
assert_true(corrs[0] <= corrs[1] / factor, 'corrs: %s' % corrs)
assert_true(gc_dists[0] >= gc_dists[1] * factor,
'gc-dists (ori): %s' % gc_dists)
assert_true(amp_errs[0] >= amp_errs[1] * factor,
'amplitude errors: %s' % amp_errs)
assert_true(gofs[0] <= gofs[1] / factor, 'gof: %s' % gofs)
@testing.requires_testing_data
def test_dipole_fitting_fixed():
"""Test dipole fitting with a fixed position."""
tpeak = 0.073
sphere = make_sphere_model(head_radius=0.1)
evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
evoked.pick_types(meg=True)
t_idx = np.argmin(np.abs(tpeak - evoked.times))
evoked_crop = evoked.copy().crop(tpeak, tpeak)
assert_equal(len(evoked_crop.times), 1)
cov = read_cov(fname_cov)
dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
assert_true(isinstance(dip_seq, Dipole))
assert_equal(len(dip_seq.times), 1)
pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
amp = dip_seq.amplitude[0]
# Fix position, allow orientation to change
dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
assert_true(isinstance(dip_free, Dipole))
assert_allclose(dip_free.times, evoked.times)
assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
dip_free.pos)
assert_allclose(ori, dip_free.ori[t_idx]) # should find same ori
assert_true(np.dot(dip_free.ori, ori).mean() < 0.9) # but few the same
assert_allclose(gof, dip_free.gof[t_idx]) # ... same gof
assert_allclose(amp, dip_free.amplitude[t_idx]) # and same amp
assert_allclose(resid, resid_free[:, [t_idx]])
# Fix position and orientation
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
assert_true(isinstance(dip_fixed, DipoleFixed))
assert_allclose(dip_fixed.times, evoked.times)
assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
assert_allclose(dip_fixed.data[1, t_idx], gof)
assert_allclose(resid, resid_fixed[:, [t_idx]])
_check_roundtrip_fixed(dip_fixed)
# Degenerate conditions
evoked_nan = evoked.copy().crop(0, 0)
evoked_nan.data[0, 0] = None
assert_raises(ValueError, fit_dipole, evoked_nan, cov, sphere)
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
ori=[2, 0, 0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects."""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull."""
subject = 'sample'
raw = read_raw_fif(fname_raw, preload=True)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
# Simulated scal map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
bem = read_bem_solution(fname_bem)
dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
min_dist=min_dist)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
# Constraints are not exact, so bump the minimum slightly
assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth."""
trans = _get_trans(fname_trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
"""Test dipole fitting to sub-mm accuracy."""
evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
evoked.pick_types(meg=True, eeg=False)
evoked.pick_channels([c for c in evoked.ch_names[::4]])
for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
bem = make_sphere_model('auto', rad, evoked.info,
relative_radii=(0.999, 0.998, 0.997, 0.995))
src = read_source_spaces(fname_src)
fwd = make_forward_solution(evoked.info, None, src, bem)
fwd = convert_forward_solution(fwd, force_fixed=True)
vertices = [src[0]['vertno'], src[1]['vertno']]
n_vertices = sum(len(v) for v in vertices)
amp = 10e-9
data = np.eye(n_vertices + 1)[:n_vertices]
data[-1, -1] = 1.
data *= amp
stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
sim = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
cov = make_ad_hoc_cov(evoked.info)
dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
ds = []
for vi in range(n_vertices):
if vi < len(vertices[0]):
hi = 0
vertno = vi
else:
hi = 1
vertno = vi - len(vertices[0])
vertno = src[hi]['vertno'][vertno]
rr = src[hi]['rr'][vertno]
d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
ds.append(d)
# make sure that our median is sub-mm and the large majority are very
# close (we expect some to be off by a bit e.g. because they are
# radial)
assert_true((np.percentile(ds, [50, 90]) < [0.0005, perc_90]).all())
@testing.requires_testing_data
def test_dipole_fixed():
"""Test reading a fixed-position dipole (from Xfit)."""
dip = read_dipole(fname_xfit_dip)
# print the representation of the objet DipoleFixed
print(dip)
_check_roundtrip_fixed(dip)
with warnings.catch_warnings(record=True) as w: # unused fields
dip_txt = read_dipole(fname_xfit_dip_txt)
assert_true(any('extra fields' in str(ww.message) for ww in w))
assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
assert_allclose(dip_txt.amplitude[0], 12.1e-9)
with warnings.catch_warnings(record=True): # unused fields
dip_txt_seq = read_dipole(fname_xfit_seq_txt)
assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip):
"""Helper to test roundtrip IO for fixed dipoles."""
tempdir = _TempDir()
dip.save(op.join(tempdir, 'test-dip.fif.gz'))
dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
assert_allclose(dip_read.data, dip_read.data)
assert_allclose(dip_read.times, dip.times)
assert_equal(dip_read.info['xplotter_layout'], dip.info['xplotter_layout'])
assert_equal(dip_read.ch_names, dip.ch_names)
for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
assert_equal(ch_1['ch_name'], ch_2['ch_name'])
for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
'cal', 'coil_type', 'scanno', 'logno'):
assert_allclose(ch_1[key], ch_2[key], err_msg=key)
def test_get_phantom_dipoles():
"""Test getting phantom dipole locations."""
assert_raises(ValueError, get_phantom_dipoles, 0)
assert_raises(ValueError, get_phantom_dipoles, 'foo')
for kind in ('vectorview', 'otaniemi'):
pos, ori = get_phantom_dipoles(kind)
assert_equal(pos.shape, (32, 3))
assert_equal(ori.shape, (32, 3))
run_tests_if_main(False)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n#!/usr/bin/env python3\n\"\"\"\nGO compiler wrapper (sets GOROOT automatically)\n\"\"\"\n\nimport glob\nimport os\nimport signal\nimport sys\n\nimport command_mod\nimport subtask_mod\n\n\nclass Main:\n \"\"\"\n Main class\n \"\"\"\n\n ... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\"\"\"\nGO compiler wrapper (sets GOROOT automatically)\n\"\"\"\n\nimport glob\nimport os\nimport signal\nimport sys\n\nimport command_mod\nimport subtask_mod\n\n\nclass Main:\n \"\"\"\n Main class\... | ```python
#!/usr/bin/env python3
"""
GO compiler wrapper (sets GOROOT automatically)
"""
import glob
import os
import signal
import sys
import command_mod
import subtask_mod
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
@staticmethod
def run() -> int:
"""
Start program
"""
golang = command_mod.Command(os.path.join('bin', 'go'), errors='stop')
golang.extend_args(sys.argv[1:])
goroot = os.path.dirname(os.path.dirname(golang.get_file()))
if os.path.isdir(os.path.join(goroot, 'pkg')):
os.environ['GOROOT'] = goroot
subtask_mod.Exec(golang.get_cmdline()).run()
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
``` |
[
{
"content": "```python\n\"\"\"\nPerturbations\n-------------\nModule oriented to perform a perturbation of the system in order to carry out\nwith statistical testing of models.\nThe main function of this module is grouping functions which are able to\nchange the system to other statistically probable options i... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nPerturbations\n-------------\nModule oriented to perform a perturbation of the system in order to carry out\nwith statistical testing of models.\nThe main function of this module is grouping functions which are able to\nchange the system to other statistically pr... | ```python
"""
Perturbations
-------------
Module oriented to perform a perturbation of the system in order to carry out
with statistical testing of models.
The main function of this module is grouping functions which are able to
change the system to other statistically probable options in order to explore
the sample space.
TODO
----
-Aggregation perturbation:
--- Discretization perturbed.
--- Fluctuation of features between borders.
- Fluctuation of borders
--- Fluctuation of edge points
--- Fluctuation over sampling points
"""
import numpy as np
###############################################################################
############################ Location perturbation ############################
###############################################################################
class BasePerturbation:
"""General perturbation. It constains default functions for perturbation
objects.
"""
def _initialization(self):
self.locations_p = None
self.features_p = None
self.relations_p = None
self.discretizations_p = None
self.k_perturb = 1
## Ensure correctness
self.assert_correctness()
def assert_correctness(self):
"""Assert the correct Perturbation class."""
assert('_categorytype' in dir(self))
assert('_perturbtype' in dir(self))
def apply2indice(self, i, k):
"""Apply the transformation to the indices.
Parameters
----------
i: int, list or np.ndarray
the indices of the elements `i`.
k: int, list
the perturbation indices.
Returns
-------
i: int, list or np.ndarray
the indices of the elements `i`.
"""
return i
################## Transformations of the main elements ###################
def apply2locs(self, locations):
"""Apply perturbation to locations.
Parameters
----------
locations: np.ndarray or others
the spatial information to be perturbed.
Returns
-------
locations: np.ndarray or others
the spatial information perturbated.
"""
return locations
def apply2features(self, features):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
return features
def apply2relations(self, relations):
"""Apply perturbation to relations.
Parameters
----------
relations: np.ndarray or others
the relations between elements to be perturbated.
Returns
-------
relations: np.ndarray or others
the relations between elements perturbated.
"""
return relations
def apply2discretizations(self, discretization):
"""Apply perturbation to discretization.
Parameters
----------
discretization: np.ndarray or others
the discretization perturbation.
Returns
-------
discretization: np.ndarray or others
the discretization perturbation.
"""
return discretization
######################### Precomputed applications ########################
def apply2features_ind(self, features, i, k):
"""Apply perturbation to features individually for precomputed
applications.
Parameters
----------
features: np.ndarray or others
the element features to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the element features perturbated.
"""
return self.features_p[i, :, k]
def apply2locs_ind(self, locations, i, k):
"""Apply perturbation to locations individually for precomputed
applications.
Parameters
----------
locations: np.ndarray or others
the spatial information to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the spatial information perturbated.
"""
return self.locations_p[i, :, k]
def apply2relations_ind(self, relations, i, k):
"""For precomputed applications. Apply perturbation to relations.
Parameters
----------
relations: np.ndarray or others
the relations between elements to be perturbated.
Returns
-------
relations: np.ndarray or others
the relations between elements perturbated.
"""
return self.relations_p[i, :, k]
##################### Selfcomputation of main elements ####################
def selfcompute_features(self, features):
pass
def selfcompute_locations(self, locations):
pass
def selfcompute_relations(self, relations):
pass
def selfcompute_discretizations(self, discretizations):
pass
################################# Examples ################################
# def selfcompute_locations(self, locations):
# self.locations_p = self.apply2locs(locations)
#
# def selfcompute_features(self, features):
# self.features_p = self.apply2features(features)
###############################################################################
############################## None perturbation ##############################
###############################################################################
class NonePerturbation(BasePerturbation):
"""None perturbation. Default perturbation which not alters the system."""
_categorytype = "general"
_perturbtype = "none"
def __init__(self, k_perturb=1):
"""The none perturbation, null perturbation where anything happens.
Parameters
----------
k_perturb: int (default=1)
the number of perturbations applied.
"""
self._initialization()
self.k_perturb = k_perturb
###############################################################################
############################ Location perturbation ############################
###############################################################################
class JitterLocations(BasePerturbation):
"""Jitter module to perturbe locations of the system in order of testing
methods.
TODO: Fit some model for infering stds.
"""
_categorytype = "location"
_perturbtype = "jitter_coordinate"
def __init__(self, stds=0, k_perturb=1):
"""The jitter locations apply to locations a jittering perturbation.
Parameters
----------
k_perturb: int (default=1)
the number of perturbations applied.
"""
self._initialization()
self._stds = np.array(stds)
self.k_perturb = k_perturb
def apply2locs(self, locations, k=None):
"""Apply perturbation to locations.
Parameters
----------
locations: np.ndarray
the spatial information to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
locations: np.ndarray
the spatial information perturbated.
"""
## Preparation of ks
ks = range(self.k_perturb) if k is None else k
ks = [k] if type(k) == int else ks
locations_p = np.zeros((len(locations), locations.shape[1], len(ks)))
for ik in range(len(ks)):
jitter_d = np.random.random(locations.shape)
locations_pj = np.multiply(self._stds, jitter_d) + locations
locations_p[:, :, ik] = locations_pj
return locations_p
class PermutationPerturbationLocations(BasePerturbation):
"""Reindice perturbation for the whole locations."""
_categorytype = "location"
_perturbtype = "element_permutation"
def __init__(self, reindices):
"""Perturbations by permuting locations.
Parameters
----------
reindices: np.ndarray
the reindices to apply permutation perturbations.
"""
self._initialization()
self._format_reindices(reindices)
def _format_reindices(self, reindices):
"""Format reindices.
Parameters
----------
reindices: np.ndarray or tuple
the reindices to apply permutation perturbations.
"""
if type(reindices) == np.ndarray:
self.k_perturb = reindices.shape[1]
self.reindices = reindices
elif type(reindices) == tuple:
n, k_perturb = reindices
if type(n) == int and type(k_perturb) == int:
self.k_perturb = k_perturb
self.reindices = np.vstack([np.random.permutation(n)
for i in xrange(k_perturb)]).T
def apply2locs(self, locations, k=None):
"""Apply perturbation to locations.
Parameters
----------
locations: np.ndarray
the spatial information to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
locations: np.ndarray
the spatial information perturbated.
"""
## Preparation of ks
ks = range(self.k_perturb) if k is None else k
ks = [k] if type(k) == int else ks
##Be coherent with the input location types
ndim = 1 if '__len__' not in dir(locations[0]) else len(locations[0])
if type(locations) == np.ndarray:
locations_p = np.zeros((len(locations), ndim, len(ks)))
for ik in range(len(ks)):
locations_p[:, :, ik] = locations[self.reindices[:, ks[ik]]]
else:
locations_p = [[[]]*len(locations)]*len(ks)
for ik in range(len(ks)):
for i in range(len(locations)):
locations_p[ik][i] = locations[self.reindices[i, ks[ik]]]
return locations_p
def apply2indice(self, i, k):
"""Apply the transformation to the indices.
Parameters
----------
i: int, list or np.ndarray
the indices of the elements `i`.
k: int, list
the perturbation indices.
Returns
-------
i: int, list or np.ndarray
the indices of the elements `i`.
"""
return self.reindices[i, k]
###############################################################################
########################### Permutation perturbation ##########################
###############################################################################
class PermutationPerturbation(BasePerturbation):
"""Reindice perturbation for the whole features variables."""
_categorytype = "feature"
_perturbtype = "element_permutation"
def __init__(self, reindices):
"""Element perturbation for all permutation perturbation.
Parameters
----------
reindices: np.ndarray or tuple
the reindices to apply permutation perturbations.
"""
self._initialization()
self._format_reindices(reindices)
def _format_reindices(self, reindices):
"""Format reindices for permutation reindices.
Parameters
----------
reindices: np.ndarray or tuple
the reindices to apply permutation perturbations.
"""
if type(reindices) == np.ndarray:
self.k_perturb = reindices.shape[1]
self.reindices = reindices
elif type(reindices) == tuple:
n, k_perturb = reindices
if type(n) == int and type(k_perturb) == int:
self.k_perturb = k_perturb
self.reindices = np.vstack([np.random.permutation(n)
for i in xrange(k_perturb)]).T
def apply2features(self, features, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
## Assert good features
assert len(features) == len(self.reindices)
## Prepare ks
ks = range(self.k_perturb) if k is None else k
ks = [k] if type(k) == int else ks
## Computation of new prturbated features
sh = len(features), features.shape[1], len(ks)
features_p = np.zeros(sh)
for ik in range(len(ks)):
features_p[:, :, ik] = features[self.reindices[:, ks[ik]], :]
return features_p
def apply2features_ind(self, features, i, k):
"""Apply perturbation to features individually for precomputed
applications.
Parameters
----------
features: np.ndarray or others
the element features to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the element features perturbated.
"""
return features[self.reindices[i, k]]
def apply2indice(self, i, k):
"""Apply the transformation to the indices.
Parameters
----------
i: int, list or np.ndarray
the indices of the elements `i`.
k: int, list
the perturbation indices.
Returns
-------
i: int, list or np.ndarray
the indices of the elements `i`.
"""
return self.reindices[i, k]
class PermutationPerturbationGeneration(PermutationPerturbation):
"""Reindice perturbation for the whole features variables."""
def __init__(self, n, m=1, seed=None):
"""Element perturbation for all permutation perturbation.
Parameters
----------
n: int
the size of the sample to create the reindices.
m: int (default=1)
the number of permutations we want to generate.
seed: int (default=Npne)
the seed to initialize and create the same reindices.
"""
self._initialization()
if seed is not None:
np.random.seed(seed)
self._format_reindices((n, m))
class PartialPermutationPerturbationGeneration(PermutationPerturbation):
"""Reindice perturbation for the whole features variables. It can control
the proportion of the whole sample is going to be permuted.
"""
def __init__(self, n, rate_pert=1., m=1, seed=None):
"""Element perturbation for all permutation perturbation.
Parameters
----------
n: int
the size of the sample to create the reindices.
m: int (default=1)
the number of permutations we want to generate.
seed: int (default=Npne)
the seed to initialize and create the same reindices.
"""
self._initialization()
if seed is not None:
np.random.seed(seed)
if rate_pert == 1.:
self._format_reindices((n, m))
else:
n_sample = int(n*rate_pert)
indices = np.random.permutation(n)[:n_sample]
reindices = np.vstack([np.arange(n) for i in xrange(m)]).T
reindices[indices] = np.vstack([np.random.permutation(n_sample)
for i in xrange(m)]).T
self.k_perturb = m
self.reindices = reindices
###############################################################################
############################# Element perturbation ############################
###############################################################################
## TODO:
class MixedFeaturePertubation(BasePerturbation):
"""An individual-column-created perturbation of individual elements."""
_categorytype = "feature"
_perturbtype = "element_mixed"
def __init__(self, perturbations):
"""The MixedFeaturePertubation is the application of different
perturbations to features.
perturbations: list
the list of pst.BasePerturbation objects.
"""
msg = "Perturbations is not a list of individual perturbation methods."
self._initialization()
if type(perturbations) != list:
raise TypeError(msg)
try:
self.typefeats = [p._perturbtype for p in perturbations]
k_perturbs = [p.k_perturb for p in perturbations]
assert all([k == k_perturbs[0] for k in k_perturbs])
self.k_perturb = k_perturbs[0]
self.perturbations = perturbations
except:
raise TypeError(msg)
def apply2features(self, features):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
assert features.shape[1] == len(self.perturbations)
## Apply individual perturbation for each features
features_p, n = [], len(features)
k_pos = list(range(self.k_perturb))
for i in range(len(self.perturbations)):
features_p_k =\
self.perturbations[i].apply2features(features[:, [i]], k_pos)
features_p_k = features_p_k.reshape((n, 1, self.k_perturb))
features_p.append(features_p_k)
features_p = np.concatenate(features_p, axis=1)
return features_p
########################### Individual perturbation ###########################
###############################################################################
class DiscreteIndPerturbation(BasePerturbation):
"""Discrete perturbation of a discrete feature variable."""
_categorytype = "feature"
_perturbtype = "discrete"
def __init__(self, probs):
"""The discrete individual perturbation to a feature variable.
Parameters
----------
probs: np.ndarray
the probabilities to change from a value of a category to another
value.
"""
self._initialization()
if np.all(probs.sum(1) != 1):
raise TypeError("Not correct probs input.")
if probs.shape[0] != probs.shape[1]:
raise IndexError("Probs is noot a square matrix.")
self.probs = probs.cumsum(1)
def apply2features(self, feature, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
## Prepare loop
categories = np.unique(feature)
if len(categories) != len(self.probs):
msg = "Not matching dimension between probs and features."
raise IndexError(msg)
if k is None:
k = list(range(self.k_perturb))
if type(k) == int:
k = [k]
## Compute each change
feature_p = np.zeros((len(feature), len(k)))
for i_k in k:
for i in xrange(len(feature)):
r = np.random.random()
idx = np.where(feature[i] == categories)[0]
idx2 = np.where(self.probs[idx] > r)[0][0]
feature_p[i, i_k] = categories[idx2]
return feature_p
class ContiniousIndPerturbation(BasePerturbation):
"""Continious perturbation for an individual feature variable."""
_categorytype = "feature"
_perturbtype = "continious"
def __init__(self, pstd):
"""The continious individual perturbation to a feature variable.
Parameters
----------
pstd: float
the dispersion measure of the jittering.
"""
self._initialization()
self.pstd = pstd
def apply2features(self, feature, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
if k is None:
k = list(range(self.k_perturb))
if type(k) == int:
k = [k]
feature_p = np.zeros((len(feature), len(k)))
for i_k in k:
jitter_d = np.random.random(len(feature))
feature_p[:, i_k] = np.multiply(self.pstd, jitter_d)
return feature_p
class PermutationIndPerturbation(BasePerturbation):
"""Reindice perturbation for an individual feature variable."""
_categorytype = "feature"
_perturbtype = "permutation_ind"
def __init__(self, reindices=None):
"""Individual feature perturbation.
Parameters
----------
reindices: np.ndarray (default=None)
the reindices to apply permutation perturbations.
"""
self._initialization()
if type(reindices) == np.ndarray:
self.reindices = reindices
self.k_perturb = reindices.shape[1]
else:
raise TypeError("Incorrect reindices.")
def apply2features(self, feature, k=None):
"""Apply perturbation to features.
Parameters
----------
features: np.ndarray or others
the element features collection to be perturbed.
k: int (default=None)
the perturbation indices.
Returns
-------
features: np.ndarray or others
the element features collection perturbated.
"""
if k is None:
k = list(range(self.k_perturb))
if type(k) == int:
k = [k]
feature_p = np.zeros((len(feature), len(k)))
for i_k in k:
feature_p[:, [i_k]] = feature[self.reindices[:, i_k]]
return feature_p
def apply2features_ind(self, feature, i, k):
"""Apply perturbation to features individually for precomputed
applications.
Parameters
----------
features: np.ndarray or others
the element features to be perturbed.
i: int or list
the element indices.
k: int or list
the perturbation indices.
Returns
-------
locations: np.ndarray or others
the element features perturbated.
"""
return feature[self.reindices[i, k]]
###############################################################################
########################### Aggregation perturbation ##########################
###############################################################################
class JitterRelationsPerturbation(BasePerturbation):
"""Jitter module to perturbe relations of the system in order of testing
methods.
"""
_categorytype = "relations"
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-05-14 05:22\nfrom __future__ import unicode_literals\n\nimport uuid\n\nimport django.core.validators\nimport django.db.models.deletion\nimport django.utils.timezone\nimport morango.utils.uuids\ni... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-05-14 05:22\nfrom __future__ import unicode_literals\n\nimport uuid\n\nimport django.core.validators\nimport django.db.models.deletion\nimport django.utils.timezone\nimport morang... | ```python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-14 05:22
from __future__ import unicode_literals
import uuid
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import morango.utils.uuids
import mptt.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DeviceOwner',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters and digits only', max_length=30, validators=[django.core.validators.RegexValidator('^\\w+$', 'Enter a valid username. This value may contain only letters and numbers.')], verbose_name='username')),
('full_name', models.CharField(blank=True, max_length=120, verbose_name='full name')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='date joined')),
('id', morango.utils.uuids.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Collection',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('name', models.CharField(max_length=100)),
('kind', models.CharField(choices=[(b'facility', 'Facility'), (b'classroom', 'Classroom'), (b'learnergroup', 'Learner group')], max_length=20)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityDataset',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('description', models.TextField(blank=True)),
('location', models.CharField(blank=True, max_length=200)),
('learner_can_edit_username', models.BooleanField(default=True)),
('learner_can_edit_name', models.BooleanField(default=True)),
('learner_can_edit_password', models.BooleanField(default=True)),
('learner_can_sign_up', models.BooleanField(default=True)),
('learner_can_delete_account', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityUser',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters and digits only', max_length=30, validators=[django.core.validators.RegexValidator('^\\w+$', 'Enter a valid username. This value may contain only letters and numbers.')], verbose_name='username')),
('full_name', models.CharField(blank=True, max_length=120, verbose_name='full name')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='date joined')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
],
),
migrations.CreateModel(
name='Membership',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('collection', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('kind', models.CharField(choices=[(b'admin', 'Admin'), (b'coach', 'Coach')], max_length=20)),
('collection', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='roles', to='kolibriauth.FacilityUser')),
],
),
migrations.AddField(
model_name='collection',
name='dataset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset'),
),
migrations.AddField(
model_name='collection',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='kolibriauth.Collection'),
),
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.AlterUniqueTogether(
name='role',
unique_together=set([('user', 'collection', 'kind')]),
),
migrations.AlterUniqueTogether(
name='membership',
unique_together=set([('user', 'collection')]),
),
migrations.AddField(
model_name='facilityuser',
name='facility',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Facility'),
),
migrations.AlterUniqueTogether(
name='facilityuser',
unique_together=set([('username', 'facility')]),
),
]
``` |
[
{
"content": "```python\nfrom flask import current_app\nfrom flask import request\nfrom functools import wraps\nfrom werkzeug import LocalProxy\nfrom werkzeug.exceptions import Forbidden\n\n\nclass Allows(object):\n\n def __init__(self, app=None, identity_loader=None,\n throws=Forbidden, on_f... | [
{
"content": "<|memory_start|>```python\nfrom flask import current_app\nfrom flask import request\nfrom functools import wraps\nfrom werkzeug import LocalProxy\nfrom werkzeug.exceptions import Forbidden\n\n\nclass Allows(object):\n\n def __init__(self, app=None, identity_loader=None,\n throws... | ```python
from flask import current_app
from flask import request
from functools import wraps
from werkzeug import LocalProxy
from werkzeug.exceptions import Forbidden
class Allows(object):
def __init__(self, app=None, identity_loader=None,
throws=Forbidden, on_fail=None):
self._identity_loader = identity_loader
self.throws = throws
self.on_fail = _make_callable(on_fail)
if app:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['allows'] = self
def requires(self, *requirements, **opts):
def raiser():
raise opts.get('throws', self.throws)
def fail(*args, **kwargs):
f = _make_callable(opts.get('on_fail', self.on_fail))
res = f(*args, **kwargs)
if res is not None:
return res
raiser()
def decorator(f):
@wraps(f)
def allower(*args, **kwargs):
if self.fulfill(requirements):
return f(*args, **kwargs)
else:
return fail(*args, **kwargs)
return allower
return decorator
def identity_loader(self, f):
"Provides an identity loader for the instance"
self._identity_loader = f
return f
def fulfill(self, requirements, identity=None):
"Runs each requirement until one is not fulfilled"
identity = identity or self._identity_loader()
return all(r(identity, request) for r in requirements)
def __get_allows():
"Internal helper"
try:
return current_app.extensions['allows']
except (AttributeError, KeyError):
raise RuntimeError("Not configured.")
def _make_callable(func_or_value):
if not callable(func_or_value):
return lambda *a, **k: func_or_value
return func_or_value
_allows = LocalProxy(__get_allows, name="allows")
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\n celery.app.builtins\n ~~~~~~~~~~~~~~~~~~~\n\n Built-in tasks that are always available in all\n app instances. E.g. chord, group and xmap.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\n celery.app.builtins\n ~~~~~~~~~~~~~~~~~~~\n\n Built-in tasks that are always available in all\n app instances. E.g. chord, group and xmap.\n\n\"\"\"\nfrom __future__ import absolute_impor... | ```python
# -*- coding: utf-8 -*-
"""
celery.app.builtins
~~~~~~~~~~~~~~~~~~~
Built-in tasks that are always available in all
app instances. E.g. chord, group and xmap.
"""
from __future__ import absolute_import
from __future__ import with_statement
from collections import deque
from itertools import starmap
from celery._state import get_current_worker_task
from celery.utils import uuid
#: global list of functions defining tasks that should be
#: added to all apps.
_shared_tasks = []
def shared_task(constructor):
"""Decorator that specifies that the decorated function is a function
that generates a built-in task.
The function will then be called for every new app instance created
(lazily, so more exactly when the task registry for that app is needed).
"""
_shared_tasks.append(constructor)
return constructor
def load_shared_tasks(app):
"""Loads the built-in tasks for an app instance."""
for constructor in _shared_tasks:
constructor(app)
@shared_task
def add_backend_cleanup_task(app):
"""The backend cleanup task can be used to clean up the default result
backend.
This task is also added do the periodic task schedule so that it is
run every day at midnight, but :program:`celerybeat` must be running
for this to be effective.
Note that not all backends do anything for this, what needs to be
done at cleanup is up to each backend, and some backends
may even clean up in realtime so that a periodic cleanup is not necessary.
"""
@app.task(name='celery.backend_cleanup', _force_evaluate=True)
def backend_cleanup():
app.backend.cleanup()
return backend_cleanup
@shared_task
def add_unlock_chord_task(app):
"""The unlock chord task is used by result backends that doesn't
have native chord support.
It creates a task chain polling the header for completion.
"""
from celery.canvas import subtask
from celery import result as _res
@app.task(name='celery.chord_unlock', max_retries=None,
default_retry_delay=1, ignore_result=True, _force_evaluate=True)
def unlock_chord(group_id, callback, interval=None, propagate=False,
max_retries=None, result=None):
if interval is None:
interval = unlock_chord.default_retry_delay
result = _res.GroupResult(group_id, map(_res.AsyncResult, result))
j = result.join_native if result.supports_native_join else result.join
if result.ready():
subtask(callback).delay(j(propagate=propagate))
else:
return unlock_chord.retry(countdown=interval,
max_retries=max_retries)
return unlock_chord
@shared_task
def add_map_task(app):
from celery.canvas import subtask
@app.task(name='celery.map', _force_evaluate=True)
def xmap(task, it):
task = subtask(task).type
return list(map(task, it))
return xmap
@shared_task
def add_starmap_task(app):
from celery.canvas import subtask
@app.task(name='celery.starmap', _force_evaluate=True)
def xstarmap(task, it):
task = subtask(task).type
return list(starmap(task, it))
return xstarmap
@shared_task
def add_chunk_task(app):
from celery.canvas import chunks as _chunks
@app.task(name='celery.chunks', _force_evaluate=True)
def chunks(task, it, n):
return _chunks.apply_chunks(task, it, n)
return chunks
@shared_task
def add_group_task(app):
_app = app
from celery.canvas import maybe_subtask, subtask
from celery.result import from_serializable
class Group(app.Task):
app = _app
name = 'celery.group'
accept_magic_kwargs = False
def run(self, tasks, result, group_id, partial_args):
app = self.app
result = from_serializable(result)
# any partial args are added to all tasks in the group
taskit = (subtask(task).clone(partial_args)
for i, task in enumerate(tasks))
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
return app.GroupResult(
result.id,
[task.apply(group_id=group_id) for task in taskit],
)
with app.producer_or_acquire() as pub:
[task.apply_async(group_id=group_id, publisher=pub,
add_to_parent=False) for task in taskit]
parent = get_current_worker_task()
if parent:
parent.request.children.append(result)
return result
def prepare(self, options, tasks, args, **kwargs):
AsyncResult = self.AsyncResult
options['group_id'] = group_id = (
options.setdefault('task_id', uuid()))
def prepare_member(task):
task = maybe_subtask(task)
opts = task.options
opts['group_id'] = group_id
try:
tid = opts['task_id']
except KeyError:
tid = opts['task_id'] = uuid()
return task, AsyncResult(tid)
try:
tasks, results = zip(*[prepare_member(task) for task in tasks])
except ValueError: # tasks empty
tasks, results = [], []
return (tasks, self.app.GroupResult(group_id, results),
group_id, args)
def apply_async(self, partial_args=(), kwargs={}, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(partial_args, kwargs, **options)
tasks, result, gid, args = self.prepare(
options, args=partial_args, **kwargs
)
super(Group, self).apply_async((
list(tasks), result.serializable(), gid, args), **options
)
return result
def apply(self, args=(), kwargs={}, **options):
return super(Group, self).apply(
self.prepare(options, args=args, **kwargs),
**options).get()
return Group
@shared_task
def add_chain_task(app):
from celery.canvas import Signature, chord, group, maybe_subtask
_app = app
class Chain(app.Task):
app = _app
name = 'celery.chain'
accept_magic_kwargs = False
def prepare_steps(self, args, tasks):
steps = deque(tasks)
next_step = prev_task = prev_res = None
tasks, results = [], []
i = 0
while steps:
# First task get partial args from chain.
task = maybe_subtask(steps.popleft())
task = task.clone() if i else task.clone(args)
res = task._freeze()
i += 1
if isinstance(task, group):
# automatically upgrade group(..) | s to chord(group, s)
try:
next_step = steps.popleft()
# for chords we freeze by pretending it's a normal
# task instead of a group.
res = Signature._freeze(task)
task = chord(task, body=next_step, task_id=res.task_id)
except IndexError:
pass
if prev_task:
# link previous task to this task.
prev_task.link(task)
# set the results parent attribute.
res.parent = prev_res
results.append(res)
tasks.append(task)
prev_task, prev_res = task, res
return tasks, results
def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
task_id=None, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
options.pop('publisher', None)
tasks, results = self.prepare_steps(args, kwargs['tasks'])
result = results[-1]
if group_id:
tasks[-1].set(group_id=group_id)
if chord:
tasks[-1].set(chord=chord)
if task_id:
tasks[-1].set(task_id=task_id)
result = tasks[-1].type.AsyncResult(task_id)
tasks[0].apply_async()
return result
def apply(self, args=(), kwargs={}, subtask=maybe_subtask, **options):
last, fargs = None, args # fargs passed to first task only
for task in kwargs['tasks']:
res = subtask(task).clone(fargs).apply(last and (last.get(), ))
res.parent, last, fargs = last, res, None
return last
return Chain
@shared_task
def add_chord_task(app):
"""Every chord is executed in a dedicated task, so that the chord
can be used as a subtask, and this generates the task
responsible for that."""
from celery import group
from celery.canvas import maybe_subtask
_app = app
class Chord(app.Task):
app = _app
name = 'celery.chord'
accept_magic_kwargs = False
ignore_result = False
def run(self, header, body, partial_args=(), interval=1,
max_retries=None, propagate=False, eager=False, **kwargs):
group_id = uuid()
AsyncResult = self.app.AsyncResult
prepare_member = self._prepare_member
# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([maybe_subtask(s).clone() for s in tasks])
# - eager applies the group inline
if eager:
return header.apply(args=partial_args, task_id=group_id)
results = [AsyncResult(prepare_member(task, body, group_id))
for task in header.tasks]
# - fallback implementations schedules the chord_unlock task here
app.backend.on_chord_apply(group_id, body,
interval=interval,
max_retries=max_retries,
propagate=propagate,
result=results)
# - call the header group, returning the GroupResult.
# XXX Python 2.5 doesn't allow kwargs after star-args.
return header(*partial_args, **{'task_id': group_id})
def _prepare_member(self, task, body, group_id):
opts = task.options
# d.setdefault would work but generating uuid's are expensive
try:
task_id = opts['task_id']
except KeyError:
task_id = opts['task_id'] = uuid()
opts.update(chord=body, group_id=group_id)
return task_id
def apply_async(self, args=(), kwargs={}, task_id=None, **options):
if self.app.conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, **options)
group_id = options.pop('group_id', None)
chord = options.pop('chord', None)
header = kwargs.pop('header')
body = kwargs.pop('body')
header, body = (list(maybe_subtask(header)),
maybe_subtask(body))
if group_id:
body.set(group_id=group_id)
if chord:
body.set(chord=chord)
callback_id = body.options.setdefault('task_id', task_id or uuid())
parent = super(Chord, self).apply_async((header, body, args),
kwargs, **options)
body_result = self.AsyncResult(callback_id)
body_result.parent = parent
return body_result
def apply(self, args=(), kwargs={}, propagate=True, **options):
body = kwargs['body']
res = super(Chord, self).apply(args, dict(kwargs, eager=True),
**options)
return maybe_subtask(body).apply(
args=(res.get(propagate=propagate).get(), ))
return Chord
``` |
[
{
"content": "Replicate the source code:\n```python\nimport urllib, json\nimport datetime as dt\nimport logging\nlog = logging.getLogger(__name__)\n\n################################################################################\n## REQUIRED parameters:\n#######################################################... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport urllib, json\nimport datetime as dt\nimport logging\nlog = logging.getLogger(__name__)\n\n################################################################################\n## REQUIRED parameters:\n#######################################... | ```python
import urllib, json
import datetime as dt
import logging
log = logging.getLogger(__name__)
################################################################################
## REQUIRED parameters:
################################################################################
## data_url - e.g. "http://api.wunderground.com/api/3a5b82718926c103/conditions/q/MD/College_Park.json"
################################################################################
dataFreq = dt.timedelta(minutes=5)
iconpath='http://icons.wxbug.com/i/c/j/##.gif'
def readData():
'''buffer the data so that we don't do reads too often'''
## is it time to get new data?
ctime = dt.datetime.now()
if ctime - readData.lastRead > dataFreq:
log.debug('downloading new weather data')
readData.lastRead = ctime
response = urllib.urlopen(data_url)
readData.data = json.loads(response.read())
return readData.data
readData.data = None
readData.lastRead = dt.datetime.now() - dt.timedelta(days=3)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/env python3\n\n#\n# Copyright (c) 2015-2017 Nest Labs, Inc.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the Licens... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\n#\n# Copyright (c) 2015-2017 Nest Labs, Inc.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance... | ```python
#!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements Weave class that wraps around standalone Weave code library.
#
from __future__ import absolute_import
import os
import sys
from happy.Utils import *
from WeaveState import WeaveState
class Weave(WeaveState):
def __init__(self):
WeaveState.__init__(self)
self.weave_happy_conf_path = None
self.weave_build_path = None
self.weave_cert_path = None
self.gateway_tun = "weave-tun0"
self.service_tun = "service-tun0"
# default maximum running time for every test case is 1800 Sec
self.max_running_time = 1800
def __check_weave_path(self):
# Pick weave path from configuration
if "weave_path" in list(self.configuration.keys()):
self.weave_happy_conf_path = self.configuration["weave_path"]
emsg = "Found weave path: %s." % (self.weave_happy_conf_path)
self.logger.debug("[localhost] Weave: %s" % (emsg))
# Check if Weave build path is set
if "abs_builddir" in list(os.environ.keys()):
self.weave_build_path = os.environ['abs_builddir']
emsg = "Found weave abs_builddir: %s." % (self.weave_build_path)
self.logger.debug("[localhost] Weave: %s" % (emsg))
if self.weave_build_path is not None:
self.weave_path = self.weave_build_path
else:
self.weave_path = self.weave_happy_conf_path
if self.weave_path is None:
emsg = "Unknown path to Weave directory (repository)."
self.logger.error("[localhost] Weave: %s" % (emsg))
self.logger.info(hyellow("Set weave_path with happy-configuration and try again."))
sys.exit(1)
if not os.path.exists(self.weave_path):
emsg = "Weave path %s does not exist." % (self.weave_path)
self.logger.error("[localhost] Weave: %s" % (emsg))
self.logger.info(hyellow("Set correct weave_path with happy-configuration and try again."))
sys.exit(1)
if self.weave_path[-1] == "/":
self.weave_path = self.weave_path[:-1]
def __get_cmd_path(self, cmd_end):
cmd_path = self.weave_path + "/" + str(cmd_end)
if not os.path.exists(cmd_path):
emsg = "Weave path %s does not exist." % (cmd_path)
self.logger.error("[localhost] Weave: %s" % (emsg))
sys.exit(1)
# return None
else:
return cmd_path
def __setup_weave_cert_path(self):
if "weave_cert_path" in list(self.configuration.keys()):
self.weave_cert_path = self.configuration['weave_cert_path']
self.weave_cert_path = os.path.expandvars(self.weave_cert_path)
self.weave_cert_path.rstrip('/')
elif "abs_top_srcdir" in list(os.environ.keys()):
emsg = "Found weave source path: %s" % os.environ['abs_top_srcdir']
self.logger.debug("[localhost] Weave: %s" % (emsg))
self.weave_cert_path = os.environ['abs_top_srcdir'].rstrip('/') + '/certs/development'
elif "WEAVE_HOME" in list(os.environ.keys()):
emsg = "Found weave source path: %s" % os.environ['WEAVE_HOME']
self.logger.debug("[localhost] Weave: %s" % (emsg))
self.weave_cert_path = '${WEAVE_HOME}/certs/development'
if not (self.weave_cert_path and
os.path.isdir(os.path.expandvars(self.weave_cert_path))):
emsg = "Unable to set up weave_cert_path: unknown path to Weave src directory"
self.logger.debug("[localhost] Weave: %s" % (emsg))
def getWeaveConnectionTunnelPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-connection-tunnel")
return cmd_path
def getWeaveMessageLayerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestWeaveMessageLayer")
return cmd_path
def getWeaveInetLayerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestInetLayer")
return cmd_path
def getWeaveInetLayerDNSPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestInetLayerDNS")
return cmd_path
def getWeaveInetLayerMulticastPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestInetLayerMulticast")
return cmd_path
def getWeavePingPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-ping")
return cmd_path
def getWeaveKeyExportPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-key-export")
return cmd_path
def getWeaveHeartbeatPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-heartbeat")
return cmd_path
def getWeaveWRMPPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestWRMP")
return cmd_path
def getWeaveBDXv0ServerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-bdx-server-v0")
return cmd_path
def getWeaveBDXv0ClientPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-bdx-client-v0")
return cmd_path
def getWeaveBDXServerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-bdx-server-development")
return cmd_path
def getWeaveBDXClientPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-bdx-client-development")
return cmd_path
def getWeaveSWUServerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-swu-server")
return cmd_path
def getWeaveSWUClientPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-swu-client")
return cmd_path
def getWeaveTunnelBorderGatewayPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("mock-weave-bg")
return cmd_path
def getWeaveTunnelTestServerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestWeaveTunnelServer")
return cmd_path
def getWeaveTunnelTestBRPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestWeaveTunnelBR")
return cmd_path
def getWeaveMockDevicePath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("mock-device")
return cmd_path
def getWeaveDeviceDescriptionClientPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-dd-client")
return cmd_path
def getWeaveDeviceMgrPath(self):
relative_path = os.path.join("..", "..", "..", "..", "src", "device-manager", "python")
self.__check_weave_path()
cmd_path = self.__get_cmd_path(relative_path)
return cmd_path
def getWeaveDeviceMgrLibPath(self):
relative_path = os.path.join('..', 'device-manager', 'python')
self.__check_weave_path()
cmd_path = self.__get_cmd_path(relative_path)
return cmd_path
def getWeaveWDMv0ServerPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestDataManagement")
return cmd_path
def getWeaveWdmNextPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("TestWdmNext")
return cmd_path
def getWeaveServiceDirPath(self):
self.__check_weave_path()
cmd_path = self.__get_cmd_path("weave-service-dir")
return cmd_path
def getWeaveCertPath(self):
self.__setup_weave_cert_path()
return self.weave_cert_path
def getBluetoothdPath(self):
self.__check_weave_path()
relative_path = os.path.join('..', '..', 'third_party', 'bluez', 'repo', 'src', 'bluetoothd')
cmd_path = self.__get_cmd_path(relative_path)
return cmd_path
def getBtvirtPath(self):
self.__check_weave_path()
relative_path = os.path.join('..', '..', 'third_party', 'bluez', 'repo', 'emulator', 'btvirt')
cmd_path = self.__get_cmd_path(relative_path)
return cmd_path
``` |
[
{
"content": "Return the code unaltered:\n```python\n\"\"\"remove history constraints\n\nRevision ID: 03bdb0a44083\nRevises: 534d805d5dcf\nCreate Date: 2019-03-21 15:32:02.572675\n\n\"\"\"\nfrom alembic import op\n\nfrom rdr_service.dao.alembic_utils import ReplaceableObject\n\n# revision identifiers, used by A... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n\"\"\"remove history constraints\n\nRevision ID: 03bdb0a44083\nRevises: 534d805d5dcf\nCreate Date: 2019-03-21 15:32:02.572675\n\n\"\"\"\nfrom alembic import op\n\nfrom rdr_service.dao.alembic_utils import ReplaceableObject\n\n# revision identi... | ```python
"""remove history constraints
Revision ID: 03bdb0a44083
Revises: 534d805d5dcf
Create Date: 2019-03-21 15:32:02.572675
"""
from alembic import op
from rdr_service.dao.alembic_utils import ReplaceableObject
# revision identifiers, used by Alembic.
revision = "03bdb0a44083"
down_revision = "534d805d5dcf"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
sp_drop_index_if_exists = ReplaceableObject(
"sp_drop_index_if_exists",
"""
(
IN in_table VARCHAR(255),
IN in_index VARCHAR(255)
)
BEGIN
# Drop the index only if it exists.
IF ((SELECT COUNT(*)
FROM information_schema.statistics
WHERE table_schema = DATABASE() AND
table_name = in_table AND
index_name = in_index) > 0) THEN
SET @sql = CONCAT('DROP INDEX ', in_index, ' ON ', in_table);
PREPARE stmt from @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END IF;
END
""",
)
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
op.create_sp(sp_drop_index_if_exists)
op.execute("""call sp_drop_index_if_exists('biobank_dv_order_history', 'uidx_partic_id_order_id')""")
op.execute("""call sp_drop_index_if_exists('biobank_dv_order_history', 'biobank_order_id')""")
op.execute("""call sp_drop_index_if_exists('biobank_dv_order_history', 'biobank_state_id')""")
op.execute("""call sp_drop_index_if_exists('biobank_dv_order_history', 'state_id')""")
def downgrade_rdr():
op.drop_sp(sp_drop_index_if_exists)
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
``` |
[
{
"content": "```python\n\"\"\"\nCopyright (c) 2013, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain ... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nCopyright (c) 2013, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source c... | ```python
"""
Copyright (c) 2013, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Listeners are subscribers that wish to be notified any time
a new topic is added to the hub.
"""
from persistent import Persistent
from repoze.folder import Folder
import requests
from zope.interface import Interface, implements
from .topic import Topics
from ..utils import is_valid_url
import logging
logger = logging.getLogger(__name__)
class Listeners(Folder):
"""Folder to hold listeners"""
title = "Listeners"
class IListener(Interface):
"""Marker interface for listeners"""
pass
class Listener(Persistent):
implements(IListener)
def __init__(self, callback_url):
if not is_valid_url(callback_url):
raise ValueError(
'Malformed URL: %s'
)
self.callback_url = callback_url
self.topics = Topics()
def notify(self, topic):
headers, data = topic.get_request_data()
logger.debug('Notify listener: %s' % self.callback_url)
response = requests.get(self.callback_url, data=data, headers=headers)
return response
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# Copyright (c) 2014-present PlatformIO <contact@platformio.org>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may ob... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# Copyright (c) 2014-present PlatformIO <contact@platformio.org>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the Licens... | ```python
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import re
import time
from platformio import fs
from platformio.compat import IS_MACOS, IS_WINDOWS
from platformio.debug.exception import DebugInvalidOptionsError
from platformio.debug.helpers import escape_gdbmi_stream, is_gdbmi_mode
from platformio.debug.process.base import DebugBaseProcess
from platformio.proc import where_is_program
class DebugServerProcess(DebugBaseProcess):
STD_BUFFER_SIZE = 1024
def __init__(self, debug_config):
super(DebugServerProcess, self).__init__()
self.debug_config = debug_config
self._ready = False
self._std_buffer = {"out": b"", "err": b""}
async def run(self): # pylint: disable=too-many-branches
server = self.debug_config.server
if not server:
return None
server_executable = server["executable"]
if not server_executable:
return None
if server["cwd"]:
server_executable = os.path.join(server["cwd"], server_executable)
if (
IS_WINDOWS
and not server_executable.endswith(".exe")
and os.path.isfile(server_executable + ".exe")
):
server_executable = server_executable + ".exe"
if not os.path.isfile(server_executable):
server_executable = where_is_program(server_executable)
if not os.path.isfile(server_executable):
raise DebugInvalidOptionsError(
"Could not launch Debug Server '%s'. Please check that it "
"is installed and is included in a system PATH\n"
"See https://docs.platformio.org/page/plus/debugging.html"
% server_executable
)
openocd_pipe_allowed = all(
[
not self.debug_config.env_options.get("debug_port"),
"gdb" in self.debug_config.client_executable_path,
"openocd" in server_executable,
]
)
if openocd_pipe_allowed:
args = []
if server["cwd"]:
args.extend(["-s", server["cwd"]])
args.extend(
["-c", "gdb_port pipe; tcl_port disabled; telnet_port disabled"]
)
args.extend(server["arguments"])
str_args = " ".join(
[arg if arg.startswith("-") else '"%s"' % arg for arg in args]
)
return fs.to_unix_path('| "%s" %s' % (server_executable, str_args))
env = os.environ.copy()
# prepend server "lib" folder to LD path
if (
not IS_WINDOWS
and server["cwd"]
and os.path.isdir(os.path.join(server["cwd"], "lib"))
):
ld_key = "DYLD_LIBRARY_PATH" if IS_MACOS else "LD_LIBRARY_PATH"
env[ld_key] = os.path.join(server["cwd"], "lib")
if os.environ.get(ld_key):
env[ld_key] = "%s:%s" % (env[ld_key], os.environ.get(ld_key))
# prepend BIN to PATH
if server["cwd"] and os.path.isdir(os.path.join(server["cwd"], "bin")):
env["PATH"] = "%s%s%s" % (
os.path.join(server["cwd"], "bin"),
os.pathsep,
os.environ.get("PATH", os.environ.get("Path", "")),
)
await self.spawn(
*([server_executable] + server["arguments"]), cwd=server["cwd"], env=env
)
await self._wait_until_ready()
return self.debug_config.port
async def _wait_until_ready(self):
ready_pattern = self.debug_config.server_ready_pattern
timeout = 60 if ready_pattern else 10
elapsed = 0
delay = 0.5
auto_ready_delay = 0.5
while not self._ready and self.is_running() and elapsed < timeout:
await asyncio.sleep(delay)
if not ready_pattern:
self._ready = self._last_activity < (time.time() - auto_ready_delay)
elapsed += delay
def _check_ready_by_pattern(self, data):
if self._ready:
return self._ready
ready_pattern = self.debug_config.server_ready_pattern
if ready_pattern:
if ready_pattern.startswith("^"):
self._ready = re.match(
ready_pattern,
data.decode("utf-8", "ignore"),
)
else:
self._ready = ready_pattern.encode() in data
return self._ready
def stdout_data_received(self, data):
super(DebugServerProcess, self).stdout_data_received(
escape_gdbmi_stream("@", data) if is_gdbmi_mode() else data
)
self._std_buffer["out"] += data
self._check_ready_by_pattern(self._std_buffer["out"])
self._std_buffer["out"] = self._std_buffer["out"][-1 * self.STD_BUFFER_SIZE :]
def stderr_data_received(self, data):
super(DebugServerProcess, self).stderr_data_received(data)
self._std_buffer["err"] += data
self._check_ready_by_pattern(self._std_buffer["err"])
self._std_buffer["err"] = self._std_buffer["err"][-1 * self.STD_BUFFER_SIZE :]
``` |
[
{
"content": "Return the code unaltered:\n```python\nimport tensorflow as tf\n\n\nclass TextCNN(object):\n \"\"\"\n A CNN for text classification.\n Uses an embedding layer, followed by a convolutional, kmax-pooling, convolutional, maxpooling and softmax layer.\n \"\"\"\n\n def __init__(\n ... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nimport tensorflow as tf\n\n\nclass TextCNN(object):\n \"\"\"\n A CNN for text classification.\n Uses an embedding layer, followed by a convolutional, kmax-pooling, convolutional, maxpooling and softmax layer.\n \"\"\"\n\n def __... | ```python
import tensorflow as tf
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, kmax-pooling, convolutional, maxpooling and softmax layer.
"""
def __init__(
self, sequence_length, num_classes, vocab_size,
embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Variables from paper
assumed_value_K = 15
num_filters_2 = num_filters
filter_size_2 = 4
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(l2_reg_lambda)
# Embedding layer
with tf.device('/gpu:0'), tf.name_scope("embedding"):
W_emb = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
name="W_emb")
self.embedded_chars = tf.nn.embedding_lookup(W_emb, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + kmaxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-kmaxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W1 = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W1")
b1 = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b1")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W1,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b1), name="relu")
# Main addition to the original implementation is adding K max pooling
# Optimally we would add a custom op for this
t = tf.transpose(h, perm=[0, 3, 2, 1])
d = tf.shape(t)[-1]
_, indices = tf.nn.top_k(t, k=assumed_value_K, sorted=False, name="k_max")
# Create one - hot boolean tensor
one_hot = tf.one_hot(indices, d, on_value=True, off_value=False, dtype=tf.bool)
# Reduce it to shape t
temporary = tf.reduce_sum(tf.cast(one_hot, tf.float32), 3)
result_flat = tf.boolean_mask(t, tf.not_equal(tf.cast(temporary, tf.bool), False))
result = tf.reshape(result_flat, tf.shape(indices))
kmax_pooled = tf.transpose(result, perm=[0, 3, 2, 1])
pooled_outputs.append(kmax_pooled)
# Combine all the pooled features
self.h_pool = tf.concat(pooled_outputs, 3)
# Add dropout
with tf.name_scope("dropout1"):
self.h1_drop = tf.nn.dropout(self.h_pool, self.dropout_keep_prob)
# Convolutional layer capturing sequential features
with tf.name_scope("conv-maxpool"):
num_filters_total = num_filters * len(filter_sizes)
filter_shape = [filter_size_2, 1, num_filters_total, num_filters_2]
W2 = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W2')
b2 = tf.Variable(tf.constant(0.1, shape=[num_filters_2]), name="b2")
conv2 = tf.nn.conv2d(
self.h1_drop,
W2,
strides=[1, 1, 1, 1],
padding='VALID',
name='conv2')
self.h2 = tf.nn.relu(tf.nn.bias_add(conv2, b2), name="relu")
max_pool = tf.nn.max_pool(
self.h2,
ksize=[1, assumed_value_K - filter_size_2 + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name='max-pool')
# Add dropout
with tf.name_scope("dropout2"):
self.h2_drop = tf.nn.dropout(max_pool, self.dropout_keep_prob)
# Add last layer
self.h2_flat = tf.reshape(self.h2_drop, [-1, num_filters_2])
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_2, num_classes], # to fix
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b1")
l2_loss += tf.nn.l2_loss(W_emb)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h2_flat, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
``` |
[
{
"content": "```python\nfrom Square import Square\nimport random\n\nclass Game(object):\n \"\"\"docstring for Game\"\"\"\n def __init__(self, x_dim, y_dim, num_mines):\n super(Game, self).__init__()\n self.x = x_dim\n self.y = y_dim\n self.mines = num_mines\n # TODO: Validation\n ... | [
{
"content": "<|memory_start|>```python\nfrom Square import Square\nimport random\n\nclass Game(object):\n \"\"\"docstring for Game\"\"\"\n def __init__(self, x_dim, y_dim, num_mines):\n super(Game, self).__init__()\n self.x = x_dim\n self.y = y_dim\n self.mines = num_mines\n # TODO... | ```python
from Square import Square
import random
class Game(object):
"""docstring for Game"""
def __init__(self, x_dim, y_dim, num_mines):
super(Game, self).__init__()
self.x = x_dim
self.y = y_dim
self.mines = num_mines
# TODO: Validation
self.populate()
def populate(self):
self.grid = [[Square() for i in range(self.x)] for i in range(self.y)]
# plot the mines
count = 0
while count != self.mines:
mine_x = random.randint(0, self.x-1)
mine_y = random.randint(0, self.y-1)
if self.grid[mine_x][mine_y].mine == False: # If this square isn't already a mine, make it one
self.grid[mine_x][mine_y].mine = True
count = count + 1
# update adjacent mine counts
for i in range(0,self.x):
for j in range(0,self.y):
if self.grid[i][j].mine == False: # Only calculate number of adjacent mines for mineless squares
count = 0
# left
if self.is_mine(i-1, j):
count = count + 1
# up-left
if self.is_mine(i-1, j-1):
count = count + 1
# up
if self.is_mine(i, j-1):
count = count + 1
# up-right
if self.is_mine(i+1, j-1):
count = count + 1
# right
if self.is_mine(i+1, j):
count = count + 1
# down-right
if self.is_mine(i+1, j+1):
count = count + 1
# down
if self.is_mine(i, j+1):
count = count + 1
# down-left
if self.is_mine(i-1, j+1):
count = count + 1
self.grid[i][j].adj_mines = count
def is_mine(self, x, y):
if x in range(0, self.x) and y in range(0, self.y):
return self.grid[x][y].mine
else:
return False
def print_status(self):
for square in self.grid:
print(square)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nfrom setuptools import setup, find_packages\n\n\nsetup(\n version='4.2.0',\n name='vcdriver',\n description='A vcenter driver based on pyvmomi, fabric and pywinrm',\n url='https://github.com/Lantero/vcdriver',\n author='Carlos Ru... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nfrom setuptools import setup, find_packages\n\n\nsetup(\n version='4.2.0',\n name='vcdriver',\n description='A vcenter driver based on pyvmomi, fabric and pywinrm',\n url='https://github.com/Lantero/vcdriver',\n a... | ```python
from setuptools import setup, find_packages
setup(
version='4.2.0',
name='vcdriver',
description='A vcenter driver based on pyvmomi, fabric and pywinrm',
url='https://github.com/Lantero/vcdriver',
author='Carlos Ruiz Lantero',
author_email='carlos.ruiz.lantero@gmail.com',
license='MIT',
install_requires=['colorama', 'Fabric3', 'pyvmomi', 'pywinrm', 'six'],
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
],
)
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENS... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org... | ```python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testable usage examples for Google BigQuery API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.bigquery.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import operator
import time
import six
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery.client import Client
ORIGINAL_FRIENDLY_NAME = 'Original friendly name'
ORIGINAL_DESCRIPTION = 'Original description'
LOCALLY_CHANGED_FRIENDLY_NAME = 'Locally-changed friendly name'
LOCALLY_CHANGED_DESCRIPTION = 'Locally-changed description'
PATCHED_FRIENDLY_NAME = 'Patched friendly name'
PATCHED_DESCRIPTION = 'Patched description'
UPDATED_FRIENDLY_NAME = 'Updated friendly name'
UPDATED_DESCRIPTION = 'Updated description'
SCHEMA = [
SchemaField('full_name', 'STRING', mode='required'),
SchemaField('age', 'INTEGER', mode='required'),
]
QUERY = (
'SELECT name FROM [bigquery-public-data:usa_names.usa_1910_2013] '
'WHERE state = "TX"')
def snippet(func):
"""Mark ``func`` as a snippet example function."""
func._snippet = True
return func
def _millis():
return time.time() * 1000
class _CloseOnDelete(object):
def __init__(self, wrapped):
self._wrapped = wrapped
def delete(self):
self._wrapped.close()
@snippet
def client_list_datasets(client, _):
"""List datasets for a project."""
def do_something_with(_):
pass
# [START client_list_datasets]
for dataset in client.list_datasets(): # API request(s)
do_something_with(dataset)
# [END client_list_datasets]
@snippet
def dataset_create(client, to_delete):
"""Create a dataset."""
DATASET_NAME = 'dataset_create_%d' % (_millis(),)
# [START dataset_create]
dataset = client.dataset(DATASET_NAME)
dataset.create() # API request
# [END dataset_create]
to_delete.append(dataset)
@snippet
def dataset_exists(client, to_delete):
"""Test existence of a dataset."""
DATASET_NAME = 'dataset_exists_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
to_delete.append(dataset)
# [START dataset_exists]
assert not dataset.exists() # API request
dataset.create() # API request
assert dataset.exists() # API request
# [END dataset_exists]
@snippet
def dataset_reload(client, to_delete):
"""Reload a dataset's metadata."""
DATASET_NAME = 'dataset_reload_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.description = ORIGINAL_DESCRIPTION
dataset.create()
to_delete.append(dataset)
# [START dataset_reload]
assert dataset.description == ORIGINAL_DESCRIPTION
dataset.description = LOCALLY_CHANGED_DESCRIPTION
assert dataset.description == LOCALLY_CHANGED_DESCRIPTION
dataset.reload() # API request
assert dataset.description == ORIGINAL_DESCRIPTION
# [END dataset_reload]
@snippet
def dataset_patch(client, to_delete):
"""Patch a dataset's metadata."""
DATASET_NAME = 'dataset_patch_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.description = ORIGINAL_DESCRIPTION
dataset.create()
to_delete.append(dataset)
# [START dataset_patch]
ONE_DAY_MS = 24 * 60 * 60 * 1000
assert dataset.description == ORIGINAL_DESCRIPTION
dataset.patch(
description=PATCHED_DESCRIPTION,
default_table_expiration_ms=ONE_DAY_MS
) # API request
assert dataset.description == PATCHED_DESCRIPTION
assert dataset.default_table_expiration_ms == ONE_DAY_MS
# [END dataset_patch]
@snippet
def dataset_update(client, to_delete):
"""Update a dataset's metadata."""
DATASET_NAME = 'dataset_update_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.description = ORIGINAL_DESCRIPTION
dataset.create()
to_delete.append(dataset)
dataset.reload()
# [START dataset_update]
from google.cloud.bigquery import AccessGrant
assert dataset.description == ORIGINAL_DESCRIPTION
assert dataset.default_table_expiration_ms is None
grant = AccessGrant(
role='READER', entity_type='domain', entity_id='example.com')
assert grant not in dataset.access_grants
ONE_DAY_MS = 24 * 60 * 60 * 1000
dataset.description = UPDATED_DESCRIPTION
dataset.default_table_expiration_ms = ONE_DAY_MS
grants = list(dataset.access_grants)
grants.append(grant)
dataset.access_grants = grants
dataset.update() # API request
assert dataset.description == UPDATED_DESCRIPTION
assert dataset.default_table_expiration_ms == ONE_DAY_MS
assert grant in dataset.access_grants
# [END dataset_update]
@snippet
def dataset_delete(client, _):
"""Delete a dataset."""
DATASET_NAME = 'dataset_delete_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
# [START dataset_delete]
assert dataset.exists() # API request
dataset.delete()
assert not dataset.exists() # API request
# [END dataset_delete]
@snippet
def dataset_list_tables(client, to_delete):
"""List tables within a dataset."""
DATASET_NAME = 'dataset_list_tables_dataset_%d' % (_millis(),)
TABLE_NAME = 'dataset_list_tables_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
# [START dataset_list_tables]
tables = list(dataset.list_tables()) # API request(s)
assert len(tables) == 0
table = dataset.table(TABLE_NAME)
table.view_query = QUERY
table.create() # API request
tables = list(dataset.list_tables()) # API request(s)
assert len(tables) == 1
assert tables[0].name == TABLE_NAME
# [END dataset_list_tables]
to_delete.insert(0, table)
@snippet
def table_create(client, to_delete):
"""Create a table."""
DATASET_NAME = 'table_create_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_create_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
# [START table_create]
table = dataset.table(TABLE_NAME, SCHEMA)
table.create() # API request
# [END table_create]
to_delete.insert(0, table)
@snippet
def table_exists(client, to_delete):
"""Test existence of a table."""
DATASET_NAME = 'table_exists_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_exists_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
# [START table_exists]
table = dataset.table(TABLE_NAME, SCHEMA)
assert not table.exists() # API request
table.create() # API request
assert table.exists() # API request
# [END table_exists]
to_delete.insert(0, table)
@snippet
def table_reload(client, to_delete):
"""Reload a table's metadata."""
DATASET_NAME = 'table_reload_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_reload_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
table = dataset.table(TABLE_NAME, SCHEMA)
table.friendly_name = ORIGINAL_FRIENDLY_NAME
table.description = ORIGINAL_DESCRIPTION
table.create()
to_delete.insert(0, table)
# [START table_reload]
assert table.friendly_name == ORIGINAL_FRIENDLY_NAME
assert table.description == ORIGINAL_DESCRIPTION
table.friendly_name = LOCALLY_CHANGED_FRIENDLY_NAME
table.description = LOCALLY_CHANGED_DESCRIPTION
table.reload() # API request
assert table.friendly_name == ORIGINAL_FRIENDLY_NAME
assert table.description == ORIGINAL_DESCRIPTION
# [END table_reload]
@snippet
def table_patch(client, to_delete):
"""Patch a table's metadata."""
DATASET_NAME = 'table_patch_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_patch_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.description = ORIGINAL_DESCRIPTION
dataset.create()
to_delete.append(dataset)
table = dataset.table(TABLE_NAME, SCHEMA)
table.friendly_name = ORIGINAL_FRIENDLY_NAME
table.description = ORIGINAL_DESCRIPTION
table.create()
to_delete.insert(0, table)
# [START table_patch]
assert table.friendly_name == ORIGINAL_FRIENDLY_NAME
assert table.description == ORIGINAL_DESCRIPTION
table.patch(
friendly_name=PATCHED_FRIENDLY_NAME,
description=PATCHED_DESCRIPTION,
) # API request
assert table.friendly_name == PATCHED_FRIENDLY_NAME
assert table.description == PATCHED_DESCRIPTION
# [END table_patch]
@snippet
def table_update(client, to_delete):
"""Update a table's metadata."""
DATASET_NAME = 'table_update_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_update_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.description = ORIGINAL_DESCRIPTION
dataset.create()
to_delete.append(dataset)
table = dataset.table(TABLE_NAME, SCHEMA)
table.friendly_name = ORIGINAL_FRIENDLY_NAME
table.description = ORIGINAL_DESCRIPTION
table.create()
to_delete.insert(0, table)
# [START table_update]
assert table.friendly_name == ORIGINAL_FRIENDLY_NAME
assert table.description == ORIGINAL_DESCRIPTION
NEW_SCHEMA = table.schema[:]
NEW_SCHEMA.append(SchemaField('phone', 'string'))
table.friendly_name = UPDATED_FRIENDLY_NAME
table.description = UPDATED_DESCRIPTION
table.schema = NEW_SCHEMA
table.update() # API request
assert table.friendly_name == UPDATED_FRIENDLY_NAME
assert table.description == UPDATED_DESCRIPTION
assert table.schema == NEW_SCHEMA
# [END table_update]
def _warm_up_inserted_table_data(table):
# Allow for 90 seconds of "warm up" before rows visible. See:
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
rows = ()
counter = 18
while len(rows) == 0 and counter > 0:
counter -= 1
iterator = table.fetch_data()
page = six.next(iterator.pages)
rows = list(page)
if len(rows) == 0:
time.sleep(5)
@snippet
def table_insert_fetch_data(client, to_delete):
"""Insert / fetch table data."""
DATASET_NAME = 'table_insert_fetch_data_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_insert_fetch_data_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
table = dataset.table(TABLE_NAME, SCHEMA)
table.create()
to_delete.insert(0, table)
# [START table_insert_data]
ROWS_TO_INSERT = [
(u'Phred Phlyntstone', 32),
(u'Wylma Phlyntstone', 29),
]
table.insert_data(ROWS_TO_INSERT)
# [END table_insert_data]
_warm_up_inserted_table_data(table)
found_rows = []
def do_something(row):
found_rows.append(row)
# [START table_fetch_data]
for row in table.fetch_data():
do_something(row)
# [END table_fetch_data]
assert len(found_rows) == len(ROWS_TO_INSERT)
by_age = operator.itemgetter(1)
found_rows = reversed(sorted(found_rows, key=by_age))
for found, to_insert in zip(found_rows, ROWS_TO_INSERT):
assert found == to_insert
@snippet
def table_upload_from_file(client, to_delete):
"""Upload table data from a CSV file."""
import csv
import tempfile
DATASET_NAME = 'table_upload_from_file_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_upload_from_file_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
table = dataset.table(TABLE_NAME, SCHEMA)
table.create()
to_delete.insert(0, table)
csv_file = tempfile.NamedTemporaryFile(suffix='.csv')
to_delete.append(_CloseOnDelete(csv_file))
# [START table_upload_from_file]
writer = csv.writer(csv_file)
writer.writerow((b'full_name', b'age'))
writer.writerow((b'Phred Phlyntstone', b'32'))
writer.writerow((b'Wylma Phlyntstone', b'29'))
csv_file.flush()
with open(csv_file.name, 'rb') as readable:
table.upload_from_file(
readable, source_format='CSV', skip_leading_rows=1)
# [END table_upload_from_file]
_warm_up_inserted_table_data(table)
iterator = table.fetch_data()
page = six.next(iterator.pages)
rows = list(page)
total = iterator.total_rows
token = iterator.next_page_token
assert len(rows) == total == 2
assert token is None
assert rows[0] == (u'Phred Phlyntstone', 32)
assert rows[1] == (u'Wylma Phlyntstone', 29)
@snippet
def table_delete(client, to_delete):
"""Delete a table."""
DATASET_NAME = 'table_delete_dataset_%d' % (_millis(),)
TABLE_NAME = 'table_create_table_%d' % (_millis(),)
dataset = client.dataset(DATASET_NAME)
dataset.create()
to_delete.append(dataset)
table = dataset.table(TABLE_NAME, SCHEMA)
table.create()
# [START table_delete]
assert table.exists() # API request
table.delete() # API request
assert not table.exists() # API request
# [END table_delete]
@snippet
def client_list_jobs(client, _):
"""List jobs for a project."""
def do_something_with(_):
pass
# [START client_list_jobs]
job_iterator = client.list_jobs()
for job in job_iterator: # API request(s)
do_something_with(job)
# [END client_list_jobs]
@snippet
def client_run_sync_query(client, _):
"""Run a synchronous query."""
LIMIT = 100
LIMITED = '%s LIMIT %d' % (QUERY, LIMIT)
TIMEOUT_MS = 1000
# [START client_run_sync_query]
query = client.run_sync_query(LIMITED)
query.timeout_ms = TIMEOUT_MS
query.run() # API request
assert query.complete
assert len(query.rows) == LIMIT
assert [field.name for field in query.schema] == ['name']
# [END client_run_sync_query]
@snippet
def client_run_sync_query_w_param(client, _):
"""Run a synchronous query using a query parameter"""
QUERY_W_PARAM = (
'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
'WHERE state = @state')
LIMIT = 100
LIMITED = '%s LIMIT %d' % (QUERY_W_PARAM, LIMIT)
TIMEOUT_MS = 1000
# [START client_run_sync_query_w_param]
from google.cloud.bigquery import ScalarQueryParameter
param = ScalarQueryParameter('state', 'STRING', 'TX')
query = client.run_sync_query(LIMITED, query_parameters=[param])
query.use_legacy_sql = False
query.timeout_ms = TIMEOUT_MS
query.run() # API request
assert query.complete
assert len(query.rows) == LIMIT
assert [field.name for field in query.schema] == ['name']
# [END client_run_sync_query_w_param]
@snippet
def client_run_sync_query_paged(client, _):
"""Run a synchronous query with paged results."""
TIMEOUT_MS = 1000
PAGE_SIZE = 100
LIMIT = 1000
LIMITED = '%s LIMIT %d' % (QUERY, LIMIT)
all_rows = []
def do_something_with(rows):
all_rows.extend(rows)
# [START client_run_sync_query_paged]
query = client.run_sync_query(LIMITED)
query.timeout_ms = TIMEOUT_MS
query.max_results = PAGE_SIZE
query.run() # API request
assert query.complete
assert query.page_token is not None
assert len(query.rows) == PAGE_SIZE
assert [field.name for field in query.schema] == ['name']
rows = query.rows
token = query.page_token
while True:
do_something_with(rows)
if token is None:
break
rows, total_count, token = query.fetch_data(
page_token=token) # API request
# [END client_run_sync_query_paged]
assert total_count == LIMIT
assert len(all_rows) == LIMIT
@snippet
def client_run_sync_query_timeout(client, _):
"""Run a synchronous query w/ timeout"""
TIMEOUT_MS = 10
all_rows = []
def do_something_with(rows):
all_rows.extend(rows)
# [START client_run_sync_query_timeout]
query = client.run_sync_query(QUERY)
query.timeout_ms = TIMEOUT_MS
query.use_query_cache = False
query.run() # API request
assert not query.complete
job = query.job
job.reload() # API rquest
retry_count = 0
while retry_count < 10 and job.state != u'DONE':
time.sleep(1.5**retry_count) # exponential backoff
retry_count += 1
job.reload() # API request
assert job.state == u'DONE'
rows, total_count, token = query.fetch_data() # API request
while True:
do_something_with(rows)
if token is None:
break
rows, total_count, token = query.fetch_data(
page_token=token) # API request
# [END client_run_sync_query_timeout]
assert len(all_rows) == total_count
def _find_examples():
funcs = [obj for obj in globals().values()
if getattr(obj, '_snippet', False)]
for func in sorted(funcs, key=lambda f: f.func_code.co_firstlineno):
yield func
def main():
client = Client()
for example in _find_examples():
to_delete = []
print('%-30s: %s' % (
example.func_name, example.func_doc))
try:
example(client, to_delete)
except AssertionError as e:
print(' FAIL: %s' % (e,))
except Exception as e: # pylint: disable=broad-except
print(' ERROR: %r' % (e,))
for item in to_delete:
item.delete()
if __name__ == '__main__':
main()
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# encoding: utf-8\n\"\"\"\nRun Fiji is just ImageJ macros headless with python.\n\"\"\"\nimport pydebug, subprocess, os, re\nfrom tempfile import mkstemp\nimport fijibin\n\n# debug with DEBUG=fijibin python script.py\n... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# encoding: utf-8\n\"\"\"\nRun Fiji is just ImageJ macros headless with python.\n\"\"\"\nimport pydebug, subprocess, os, re\nfrom tempfile import mkstemp\nimport fijibin\n\n# debug with DEBUG=fijibin py... | ```python
# encoding: utf-8
"""
Run Fiji is just ImageJ macros headless with python.
"""
import pydebug, subprocess, os, re
from tempfile import mkstemp
import fijibin
# debug with DEBUG=fijibin python script.py
debug = pydebug.debug('fijibin')
##
# Running macros
##
def run(macro, output_files=[], force_close=True):
"""
Runs Fiji with the suplied macro. Output of Fiji can be viewed by
setting environment variable `DEBUG=fijibin`.
Parameters
----------
macro : string or list of strings
IJM-macro(s) to run. If list of strings, it will be joined with
a space, so all statements should end with ``;``.
output_files : list
Files to check if exists after macro has been run. Files specified that
do not exist after macro is done will print a warning message.
force_close : bool
Will add ``eval("script", "System.exit(42);");`` to end of macro. Exit
code 42 is used to overcome that errors in macro efficiently will exit
Fiji with error code 0. In other words, if this line in the macro is
reached, the macro has most probably finished without errors. This
is the default behaviour.
One should also note that Fiji doesn't terminate right away if
``System.exit()`` is left out, and it may take several minutes for
Fiji to close.
Returns
-------
int
Files from output_files which exists after running macro.
"""
if type(macro) == list:
macro = ' '.join(macro)
if len(macro) == 0:
print('fijibin.macro.run got empty macro, not starting fiji')
return _exists(output_files)
if force_close:
# make sure fiji halts immediately when done
# hack: use error code 42 to check if macro has run sucessfully
macro = macro + 'eval("script", "System.exit(42);");'
# escape backslashes (windows file names)
# not \ \ not \ g1 \\ g2
macro = re.sub(r"([^\\])\\([^\\])", r"\1\\\\\2", macro)
debug('macro {}'.format(macro))
# avoid verbose output of Fiji when DEBUG environment variable set
env = os.environ.copy()
debugging = False
if 'DEBUG' in env:
if env['DEBUG'] == 'fijibin' or env['DEBUG'] == '*':
debugging = True
del env['DEBUG']
fptr, temp_filename = mkstemp(suffix='.ijm')
m = os.fdopen(fptr, 'w')
m.write(macro)
m.flush() # make sure macro is written before running Fiji
m.close()
cmd = [fijibin.BIN, '--headless', '-macro', temp_filename]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, err = proc.communicate()
for line in out.decode('latin1', errors='ignore').splitlines():
debug('stdout:' + line)
for line in err.decode('latin1', errors='ignore').splitlines():
debug('stderr:' + line)
if force_close and proc.returncode != 42:
print('fijibin ERROR: Fiji did not successfully ' +
'run macro {}'.format(temp_filename))
if not debugging:
print('fijibin Try running script with ' +
'`DEBUG=fijibin python your_script.py`')
else:
# only delete if everything is ok
os.remove(temp_filename)
# return output_files which exists
return _exists(output_files)
##
# Collection of macros
##
def stitch(folder, filenames, x_size, y_size, output_filename,
x_start=0, y_start=0, overlap=10):
"""
Creates a Fiji Grid/Collection stitching macro. Parameters are the same as
in the plugin and are described in further detail here:
http://fiji.sc/Image_Stitching#Grid.2FCollection_Stitching.
**Default stitch parameters:**
* Filename defined positions
* Compute overlap
* Subpixel accurancy
* Save computation time (but use more RAM)
* Fusion method: Linear blending
* Regression threshold: 0.30
* Max/avg displacement threshold: 2.50
* Absolute displacement threshold: 3.50
Parameters
----------
folder : string
Path to folder with images or folders with images.
Example: */path/to/slide--S00/chamber--U01--V02/*
filenames : string
Filenames of images.
Example: *field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif*
x_size : int
Size of grid, number of images in x direction.
y_size : int
Size of grid, number of images in y direction.
output_filename : string
Where to store fused image. Should be `.png`.
x_start : int
Which x position grid start with.
y_start : int
Which y position grid start with.
overlap : number
Tile overlap in percent. Fiji will find the optimal overlap, but a
precise overlap assumption will decrase computation time.
Returns
-------
string
IJM-macro.
"""
macro = []
macro.append('run("Grid/Collection stitching",')
macro.append('"type=[Filename defined position]')
macro.append('order=[Defined by filename ]')
macro.append('grid_size_x={}'.format(x_size))
macro.append('grid_size_y={}'.format(y_size))
macro.append('tile_overlap={}'.format(overlap))
macro.append('first_file_index_x={}'.format(x_start))
macro.append('first_file_index_y={}'.format(y_start))
macro.append('directory=[{}]'.format(folder))
macro.append('file_names=[{}]'.format(filenames))
macro.append('output_textfile_name=TileConfiguration.txt')
macro.append('fusion_method=[Linear Blending]')
macro.append('regression_threshold=0.20')
macro.append('max/avg_displacement_threshold=2.50')
macro.append('absolute_displacement_threshold=3.50')
macro.append('compute_overlap')
macro.append('subpixel_accuracy')
macro.append('computation_parameters=[Save computation time (but use more RAM)]')
# use display, such that we can specify output filename
# this is 'Fused and display' for previous stitching version!!
macro.append('image_output=[Fuse and display]");')
# save to png
macro.append('selectWindow("Fused");')
macro.append('saveAs("PNG", "{}");'.format(output_filename))
macro.append('close();')
return ' '.join(macro)
##
# Helper functions
##
def _exists(filenames):
"""Check if every filename exists. If not, print an error
message and remove the item from the list.
Parameters
----------
filenames : list
List of filenames to check for existence.
Returns
-------
list
Filtered list of filenames that exists.
"""
exists = []
for filename in filenames:
if os.path.isfile(filename):
exists.append(filename)
else:
print('fijibin ERROR missing output file {}'.format(filename))
return exists
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nfrom rest_framework.relations import HyperlinkedRelatedField\n\nfrom drf_queryfields import QueryFieldsMixin\n\nfrom api.models import *\nfrom rest_framework import serializers\n\nfrom api.utils.custom_serializers import TimeStampField\n\n\ncla... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nfrom rest_framework.relations import HyperlinkedRelatedField\n\nfrom drf_queryfields import QueryFieldsMixin\n\nfrom api.models import *\nfrom rest_framework import serializers\n\nfrom api.utils.custom_serializers import TimeSta... | ```python
from rest_framework.relations import HyperlinkedRelatedField
from drf_queryfields import QueryFieldsMixin
from api.models import *
from rest_framework import serializers
from api.utils.custom_serializers import TimeStampField
class AgencySerializer(QueryFieldsMixin, serializers.HyperlinkedModelSerializer):
type = serializers.ReadOnlyField(read_only=True, source="agency_type.name")
orbiters = serializers.ReadOnlyField(read_only=True, source="spacecraft")
class Meta:
model = Agency
fields = ('id', 'url', 'name', 'featured', 'launchers', 'orbiters', 'description', 'image_url', 'nation_url',
'administrator', 'founding_year', 'logo_url', 'launch_library_url', 'country_code', 'abbrev',
'info_url', 'wiki_url', 'type')
class LauncherSerializer(QueryFieldsMixin, serializers.HyperlinkedModelSerializer):
agency = serializers.ReadOnlyField(read_only=True, source="manufacturer.name")
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
class Meta:
model = LauncherConfig
fields = ('id', 'url', 'name', 'agency')
extra_kwargs = {
'url': {'lookup_field': 'launch_library_id'},
}
class LauncherDetailSerializer(QueryFieldsMixin, serializers.ModelSerializer):
agency = AgencySerializer(many=False, read_only=True, source='manufacturer')
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
def get_rep(self, obj):
rep = obj.rep
serializer_context = {'request': self.context.get('request'),
'id': obj.id}
serializer = AgencySerializer(rep, context=serializer_context)
return serializer.data
class Meta:
model = LauncherConfig
fields = ('id', 'url', 'name', 'description', 'family', 'full_name', 'agency',
'variant', 'alias', 'min_stage', 'max_stage', 'length', 'diameter',
'launch_mass', 'leo_capacity', 'gto_capacity', 'to_thrust',
'apogee', 'vehicle_range', 'image_url', 'info_url', 'wiki_url',)
extra_kwargs = {
'url': {'lookup_field': 'launch_library_id'},
}
class LauncherDetailSerializerForAgency(QueryFieldsMixin, serializers.ModelSerializer):
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
def get_rep(self, obj):
rep = obj.rep
serializer_context = {'request': self.context.get('request'),
'id': obj.id}
serializer = AgencySerializer(rep, context=serializer_context)
return serializer.data
class Meta:
model = LauncherConfig
fields = ('id', 'url', 'name', 'description', 'family', 'full_name',
'variant', 'alias', 'min_stage', 'max_stage', 'length', 'diameter',
'launch_mass', 'leo_capacity', 'gto_capacity', 'to_thrust',
'apogee', 'vehicle_range', 'image_url', 'info_url', 'wiki_url',)
extra_kwargs = {
'url': {'lookup_field': 'launch_library_id'},
}
class OrbiterDetailSerializer(QueryFieldsMixin, serializers.HyperlinkedModelSerializer):
agency = serializers.ReadOnlyField(read_only=True, source="manufacturer.name")
class Meta:
model = SpacecraftConfiguration
fields = ('id', 'url', 'name', 'agency', 'history', 'details', 'image_url',
'nation_url', 'wiki_link', 'capability')
class AgencyDetailedSerializer(QueryFieldsMixin, serializers.HyperlinkedModelSerializer):
launcher_list = LauncherDetailSerializerForAgency(many=True, read_only=True)
orbiter_list = OrbiterDetailSerializer(many=True, read_only=True, source='spacecraft_list')
type = serializers.ReadOnlyField(read_only=True, source="agency_type.name")
orbiters = serializers.ReadOnlyField(read_only=True, source="spacecraft")
class Meta:
model = Agency
fields = ('id', 'url', 'name', 'featured', 'launchers', 'orbiters', 'description', 'image_url', 'nation_url',
'administrator', 'founding_year', 'logo_url', 'launch_library_url', 'country_code', 'abbrev',
'launcher_list', 'orbiter_list', 'type')
class OrbiterSerializer(QueryFieldsMixin, serializers.HyperlinkedModelSerializer):
class Meta:
model = SpacecraftConfiguration
fields = ('id', 'url', 'name')
class EventsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Events
fields = ('id', 'url', 'name', 'description', 'location', 'feature_image', 'date')
# class RocketSerializer(serializers.ModelSerializer):
#
# class Meta:
# model = Rocket
# fields = ('id', 'name', 'imageURL', 'family_name')
class PadSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
class Meta:
model = Pad
fields = ('id', 'agency_id', 'name', 'info_url', 'wiki_url', 'map_url', 'latitude', 'longitude')
class LocationSerializer(serializers.ModelSerializer):
pads = PadSerializer(many=True, read_only=True)
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
class Meta:
model = Location
fields = ('id', 'name', 'country_code', 'pads')
class LSPSerializer(serializers.ModelSerializer):
type = serializers.StringRelatedField(many=False, source='mission_type')
class Meta:
model = Agency
fields = ('id', 'name', 'country_code', 'abbrev', 'type', 'info_url', 'wiki_url')
class MissionSerializer(serializers.ModelSerializer):
type = serializers.PrimaryKeyRelatedField(read_only=True, many=False, source='mission_type')
type_name = serializers.StringRelatedField(many=False, source='mission_type')
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
class Meta:
model = Mission
fields = ('id', 'name', 'description', 'type', 'type_name')
class LaunchListSerializer(serializers.HyperlinkedModelSerializer):
location = LocationSerializer(many=False, read_only=True, source='pad.location')
launcher = LauncherSerializer(many=False, read_only=True, source='rocket.configuration')
lsp = LSPSerializer(many=False, read_only=True, source='rocket.configuration.manufacturer')
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
mission = MissionSerializer(many=False, read_only=True)
status = serializers.IntegerField(
read_only=True,
source='status.id'
)
class Meta:
depth = 3
model = Launch
fields = ('id', 'url', 'name', 'status', 'net', 'window_end', 'window_start', 'inhold', 'tbdtime', 'tbddate',
'launcher', 'mission', 'lsp', 'location')
extra_kwargs = {
'url': {'lookup_field': 'launch_library_id'},
}
class LaunchSerializer(serializers.HyperlinkedModelSerializer):
location = LocationSerializer(many=False, read_only=True, source='pad.location')
pad = PadSerializer(many=False, read_only=True)
launcher = LauncherSerializer(many=False, read_only=True, source='rocket.configuration')
lsp = LSPSerializer(many=False, read_only=True, source='launch_service_provider')
mission = MissionSerializer(many=False, read_only=True)
status = serializers.IntegerField(
read_only=True,
source='status.id'
)
infoURLs = serializers.ReadOnlyField()
vidURLs = serializers.ReadOnlyField()
netstamp = TimeStampField(source='net')
wsstamp = TimeStampField(source='window_start')
westamp = TimeStampField(source='window_end')
isonet = serializers.DateTimeField(format="%Y%m%dT%H%M%SZ", input_formats=None, source='net')
isostart = serializers.DateTimeField(format="%Y%m%dT%H%M%SZ", input_formats=None, source='window_start')
isoend = serializers.DateTimeField(format="%Y%m%dT%H%M%SZ", input_formats=None, source='window_end')
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
class Meta:
depth = 3
model = Launch
fields = ('id', 'url', 'name', 'img_url', 'status', 'netstamp', 'wsstamp', 'westamp', 'net', 'window_end',
'window_start', 'isonet', 'isostart', 'isoend', 'inhold', 'tbdtime', 'tbddate', 'probability',
'holdreason', 'failreason', 'hashtag', 'launcher', 'mission', 'lsp', 'location', 'pad', 'infoURLs',
'vidURLs')
extra_kwargs = {
'url': {'lookup_field': 'launch_library_id'},
}
class InfoURLSerializer(serializers.ModelSerializer):
class Meta:
model = InfoURLs
fields = ('info_url',)
class LaunchDetailedSerializer(serializers.HyperlinkedModelSerializer):
location = LocationSerializer(many=False, read_only=True, source='pad.location')
pad = PadSerializer(many=False, read_only=True)
launcher = LauncherDetailSerializerForAgency(many=False, read_only=True, source='rocket.configuration')
lsp = AgencySerializer(many=False, read_only=True, source='launch_service_provider')
mission = MissionSerializer(many=False, read_only=True)
infoURLs = serializers.StringRelatedField(read_only=True, many=True, source='info_urls')
vidURLs = serializers.StringRelatedField(read_only=True, many=True, source='vid_urls')
status = serializers.IntegerField(
read_only=True,
source='status.id'
)
netstamp = TimeStampField(source='net')
wsstamp = TimeStampField(source='window_start')
westamp = TimeStampField(source='window_end')
isonet = serializers.DateTimeField(format="%Y%m%dT%H%M%SZ", input_formats=None, source='net')
isostart = serializers.DateTimeField(format="%Y%m%dT%H%M%SZ", input_formats=None, source='window_start')
isoend = serializers.DateTimeField(format="%Y%m%dT%H%M%SZ", input_formats=None, source='window_end')
id = serializers.ReadOnlyField(read_only=True, source="launch_library_id")
class Meta:
depth = 3
model = Launch
fields = ('id', 'url', 'name', 'img_url', 'status', 'netstamp', 'wsstamp', 'westamp', 'net', 'window_end',
'window_start', 'isonet', 'isostart', 'isoend', 'inhold', 'tbdtime', 'tbddate', 'probability',
'holdreason', 'failreason', 'hashtag', 'launcher', 'mission', 'lsp', 'location', 'pad', 'infoURLs',
'vidURLs')
extra_kwargs = {
'url': {'lookup_field': 'launch_library_id'},
}
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nimport requests\nfrom ml import svm\nimport json\nimport NLProcessor as nlp\nimport lxml.html\nfrom requests import get\nfrom goose import Goose\n\ndef getSuggestions(query):\n url = 'https://api.cognitive.microsoft.com/bing/v5.0/su... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nimport requests\nfrom ml import svm\nimport json\nimport NLProcessor as nlp\nimport lxml.html\nfrom requests import get\nfrom goose import Goose\n\ndef getSuggestions(query):\n url = 'https://api.cognitive.microsoft.... | ```python
import requests
from ml import svm
import json
import NLProcessor as nlp
import lxml.html
from requests import get
from goose import Goose
def getSuggestions(query):
url = 'https://api.cognitive.microsoft.com/bing/v5.0/suggestions/?q=' + query
headers = {'Ocp-Apim-Subscription-Key':'854e8088bb8347418e6f934b996487af'}
r = requests.get(url, headers = headers)
results = []
suggestions = r.json()['suggestionGroups']
max = 3
for suggestion in suggestions:
s = suggestion['searchSuggestions']
for term in s:
if max == 0:
break
max-=1
results.append(str(term['query'].encode("ascii", "ignore")))
return results
def manualSearch(query):
url = 'https://api.cognitive.microsoft.com/bing/v5.0/news/search?q=' + query
# query string parameters
payload = {'q': query, 'freshness':'Week'}
# custom headers
headers = {'Ocp-Apim-Subscription-Key': '22207001cbdc4c2487ad91d1cec1bdf2'}
r = requests.get(url, params=payload, headers=headers)
links = []
descriptions = []
print(r.json())
try:
listOfArticles = r.json()['value']
except:
return []
max = 5
for article in listOfArticles:
if('clusteredArticles' in article):
information = article['clusteredArticles']
else:
information = article
thisList = []
if max == 0:
break
max-=1
if(type(information) is dict):
links.append(information['url'])
descriptions.append(str(information['description'].encode("ascii", "ignore")))
fin = []
rating = 0.0
i = 0
for link in links:
thisDict = {}
rating = svm.compute(link)
thisDict['id'] = str(i+1)
thisDict['description'] = descriptions[i]
thisDict['url'] = link
thisDict['score'] = str(rating)
fin.append(thisDict)
i = i + 1
return json.dumps(fin)
def processURL(url):
toReturn = {}
score = svm.compute(url)
t = lxml.html.parse(url)
title = t.find(".//title").text
response = get(url)
extractor = Goose()
article = extractor.extract(raw_html=response.content)
file = article.cleaned_text
keywords = nlp.generateEntity(file)
toReturn['title'] = title
toReturn['score'] = score
toReturn['keywords'] = keywords
toReturn['url'] = url
return json.dumps(toReturn)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#! /usr/bin/env python3\n# \n# Copy from old style database to new style\n#\nimport mysql.connector\nfrom mysql.connector.errors import Error\nimport MyLogger\nimport MyDB\nimport argparse\n\ndef loadDatatypes(tgt) -> dict:\n a = {}\n tgt.execute('SE... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#! /usr/bin/env python3\n# \n# Copy from old style database to new style\n#\nimport mysql.connector\nfrom mysql.connector.errors import Error\nimport MyLogger\nimport MyDB\nimport argparse\n\ndef loadDatatypes(tgt) -> dict:\n a = {}\n ... | ```python
#! /usr/bin/env python3
#
# Copy from old style database to new style
#
import mysql.connector
from mysql.connector.errors import Error
import MyLogger
import MyDB
import argparse
def loadDatatypes(tgt) -> dict:
a = {}
tgt.execute('SELECT name,id FROM datatype;')
for row in tgt: a[row[0]] = row[1];
return a
def loadSourceNames(tgt) -> dict:
a = {}
tgt.execute('SELECT id,name FROM source;')
for row in tgt: a[row[1]] = row[0]
return a
def loadname2url(cur) -> dict:
a = {}
cur.execute('SELECT url,name FROM url2name;')
for row in cur: a[row[1]] = row[0]
return a
def chompData(args, logger, tgt):
oldDBname = 'levels_data' # Old database name
# data type to id
typ2id = loadDatatypes(tgt)
name2src = loadSourceNames(tgt)
# Open up the old data database
src = mysql.connector.connect(host=args.dbhost, user=args.dbuser, passwd=args.dbpasswd,
db=oldDBname)
cur = src.cursor()
name2url = loadname2url(cur)
# Grab the existing data tables, flow_name
# items = set()
cur.execute('SHOW TABLES;')
tables = cur.fetchall()
logger.info('There are %s tables', len(tables))
for row in tables:
tbl = row[0]
parts = tbl.split('_')
if len(parts) == 1:
logger.info('Skipping table %s', tbl)
continue
dt = parts[0]
name = '_'.join(parts[1:])
if name not in name2src:
logger.warning('Do not know name %s for table %s', name, tbl)
sql = 'INSERT INTO source (url,name) VALUES(%s,(SELECT id FROM url WHERE url=%s));'
tgt.execute(sql, (name, name2url[name]))
name2src[name] = tgt.lastrowid
srcID = name2src[name]
logger.info('Copying data dt=%s name=%s src=%s', dt, name, srcID)
sql = 'INSERT IGNORE INTO data (src, dataType, value, t)' \
+ 'SELECT {} AS src,{} AS datatype, value, time AS t FROM {}.{}'.format(
srcID, typ2id[dt], oldDBname, tbl)
tgt.execute(sql)
src.close()
parser = argparse.ArgumentParser()
MyDB.DB.addArgs(parser) # Database related arguments
MyLogger.addArgs(parser)
args = parser.parse_args()
logger = MyLogger.mkLogger(args, __name__)
tgt = MyDB.DB(args, logger)
curTgt = tgt.cursor()
curTgt.execute('START TRANSACTION;')
chompData(args, logger, curTgt)
curTgt.execute('COMMIT;')
tgt.close()
``` |
[
{
"content": "```python\nfrom __future__ import division, print_function, absolute_import\n\nimport tflearn\n\n# Residual blocks\n# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18\nn = 5\n\n# Data loading\nfrom tflearn.datasets import cifar10\n(X, Y), (testX, testY) = cifar10.load_data()\nY = tflearn.data_util... | [
{
"content": "<|memory_start|>```python\nfrom __future__ import division, print_function, absolute_import\n\nimport tflearn\n\n# Residual blocks\n# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18\nn = 5\n\n# Data loading\nfrom tflearn.datasets import cifar10\n(X, Y), (testX, testY) = cifar10.load_data()\nY = t... | ```python
from __future__ import division, print_function, absolute_import
import tflearn
# Residual blocks
# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
n = 5
# Data loading
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data()
Y = tflearn.data_utils.to_categorical(Y, 10)
testY = tflearn.data_utils.to_categorical(testY, 10)
# Real-time data preprocessing
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center(per_channel=True)
# Real-time data augmentation
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_crop([32, 32], padding=4)
# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3],
data_preprocessing=img_prep,
data_augmentation=img_aug)
net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
max_checkpoints=10, tensorboard_verbose=0,
clip_gradients=0.)
model.fit(X, Y, n_epoch=200, validation_set=(testX, testY),
snapshot_epoch=False, snapshot_step=500,
show_metric=True, batch_size=128, shuffle=True,
run_id='resnet_cifar10')
``` |
[
{
"content": "Here is a code file:\n```python\n\"\"\"\nMonitoring (and logging) for ETL steps.\n\nThis module provides a context for the ETL that allows to monitor\nthe start time of an ETL step along with its successful or\nunsuccessful completion. Events for start, finish or failure\nmay be emitted to a pers... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"\nMonitoring (and logging) for ETL steps.\n\nThis module provides a context for the ETL that allows to monitor\nthe start time of an ETL step along with its successful or\nunsuccessful completion. Events for start, finish or failure\nmay be e... | ```python
"""
Monitoring (and logging) for ETL steps.
This module provides a context for the ETL that allows to monitor
the start time of an ETL step along with its successful or
unsuccessful completion. Events for start, finish or failure
may be emitted to a persistence layer.
"""
import http.server
import itertools
import logging
import os
import queue
import random
import socketserver
import sys
import threading
import time
import traceback
import urllib.parse
import uuid
from calendar import timegm
from collections import Counter, OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from decimal import Decimal
from http import HTTPStatus
from operator import itemgetter
from typing import Dict, Iterable, List, Optional, Union
import boto3
import botocore.exceptions
import funcy as fy
import simplejson as json
from boto3.dynamodb.types import TypeDeserializer
from tqdm import tqdm
import etl.assets
import etl.config
import etl.text
from etl.errors import ETLRuntimeError
from etl.json_encoder import FancyJsonEncoder
from etl.timer import Timer, elapsed_seconds, utc_now
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
STEP_START = "start"
STEP_FINISH = "finish"
STEP_FAIL = "fail"
_DUMMY_TARGET = "#.dummy"
def trace_key():
"""
Return a "trace key" suitable to track program execution.
It's most likely unique between invocations.
"""
# We will never make a 32-bit operating system.
return uuid.uuid4().hex[:16].upper()
class MetaMonitor(type):
"""
Metaclass to implement read-only attributes of our ETL's Monitor.
If you need to find out the current trace key, call Monitor.etl_id.
If you want to know the "environment" (selected by using --prefix or the user's login),
then use Monitor.environment.
If you want to know the runtime environment (EMR, instance, step), use Monitor.cluster_info.
Behind the scenes, some properties actually do a lazy evaluation.
"""
@property
def etl_id(cls):
if cls._trace_key is None:
cls._trace_key = trace_key()
return cls._trace_key
@property
def environment(cls):
if cls._environment is None:
raise ValueError("value of 'environment' is None")
return cls._environment
@environment.setter
def environment(cls, value):
cls._environment = value
@property
def cluster_info(cls):
if cls._cluster_info is None:
job_flow = "/mnt/var/lib/info/job-flow.json"
if os.path.exists(job_flow):
with open(job_flow) as f:
data = json.load(f)
cluster_info = {"cluster_id": data["jobFlowId"], "instance_id": data["masterInstanceId"]}
parent_dir, current_dir = os.path.split(os.getcwd())
if parent_dir == "/mnt/var/lib/hadoop/steps":
cluster_info["step_id"] = current_dir
else:
cluster_info = {}
cls._cluster_info = cluster_info
return cls._cluster_info
class Monitor(metaclass=MetaMonitor):
"""
Context manager to monitor ETL steps for some target table.
Monitor instances have these properties which will be stored in the event payload:
environment: a description of the source folder (aka prefix)
etl_id: a UUID for each ETL run (All monitors of the same ETL run with the same 'etl_id'.)
target: name of table or view in the data warehouse
step: command that is running, like 'dump', or 'load'
The payloads will have at least the properties of the Monitor instance and:
event: one of ('start', 'finish', 'fail')
timestamp: UTC timestamp
In case of errors, they are added as an array 'errors'. It is also possible to send
some extra information into monitor payloads. Anything extra must be of type list,
dict, str, or int (or bad things will happen).
Example usage of attributes:
>>> id_ = Monitor.etl_id
>>> isinstance(id_, str)
True
>>> Monitor.etl_id == id_
True
>>> Monitor.environment
Traceback (most recent call last):
...
ValueError: value of 'environment' is None
>>> Monitor.environment = 'saturn'
>>> Monitor.environment
'saturn'
Example use of a monitor instance (with dry_run=True to avoid persistence calls during testing):
>>> m = Monitor('schema.table', 'frobnicate', dry_run=True)
>>> payload = MonitorPayload(m, 'test', utc_now())
>>> payload.step
'frobnicate'
>>> payload.event
'test'
Normally, you would leave the creation of the payload to the context manager:
>>> with Monitor('schema.table', 'frobnicate', dry_run=True):
... pass
"""
# See MetaMonitor class for getters and setters
_trace_key = None
_environment = None
_cluster_info = None
def __init__(self, target: str, step: str, dry_run: bool = False, **kwargs) -> None:
self._monitor_id = trace_key()
self._target = target
self._step = step
self._dry_run = dry_run
# Create a deep copy so that changes made later by the caller don't alter our payload.
self._extra = deepcopy(dict(**kwargs))
self._index = self._extra.get("index")
# Read-only properties (in order of cardinality)
@property
def environment(self):
return Monitor.environment
@property
def cluster_info(self):
return Monitor.cluster_info
@property
def etl_id(self):
return Monitor.etl_id
@property
def target(self):
return self._target
@property
def step(self):
return self._step
@property
def monitor_id(self):
return self._monitor_id
def __enter__(self):
if self._index:
logger.info(
"Starting %s step for '%s' (%d/%d)",
self.step,
self.target,
self._index["current"],
self._index["final"],
)
else:
logger.info("Starting %s step for '%s'", self.step, self.target)
self._start_time = utc_now()
payload = MonitorPayload(self, STEP_START, self._start_time, extra=self._extra)
payload.emit(dry_run=self._dry_run)
return self
def __exit__(self, exc_type, exc_value, tb):
self._end_time = utc_now()
seconds = elapsed_seconds(self._start_time, self._end_time)
if exc_type is None:
event = STEP_FINISH
errors = None
logger.info("Finished %s step for '%s' (%0.2fs)", self._step, self._target, seconds)
else:
event = STEP_FAIL
errors = [
{
"code": (exc_type.__module__ + "." + exc_type.__qualname__).upper(),
"message": traceback.format_exception_only(exc_type, exc_value)[0].strip(),
}
]
logger.warning("Failed %s step for '%s' (%0.2fs)", self._step, self._target, seconds)
payload = MonitorPayload(
self, event, self._end_time, elapsed=seconds, errors=errors, extra=self._extra
)
payload.emit(dry_run=self._dry_run)
def add_extra(self, key, value):
if key in self._extra:
raise KeyError("duplicate key in 'extra' payload")
self._extra[key] = value
@classmethod
def marker_payload(cls, step: str):
monitor = cls(_DUMMY_TARGET, step)
return MonitorPayload(monitor, STEP_FINISH, utc_now(), elapsed=0, extra={"is_marker": True})
class InsertTraceKey(logging.Filter):
"""Called as a logging filter: insert the ETL id as the trace key into the logging record."""
def filter(self, record):
record.trace_key = Monitor.etl_id
return True
class PayloadDispatcher:
def store(self, payload):
"""Send payload to persistence layer."""
raise NotImplementedError("PayloadDispatcher failed to implement store method")
class MonitorPayload:
"""
Simple class to encapsulate data for Monitor events which knows how to morph into JSON etc.
You should consider all attributes to be read-only with the possible exception of 'errors'
that may be set to a list of objects (in JSON-terminology) with 'code' and 'message' fields.
(Which is to say: do not modify the payload object!)
"""
# Append instances with a 'store' method here (skipping writing a metaclass this time)
dispatchers: List[PayloadDispatcher] = []
def __init__(self, monitor, event, timestamp, elapsed=None, errors=None, extra=None):
# Basic info
self.environment = monitor.environment
self.etl_id = monitor.etl_id
self.target = monitor.target
self.step = monitor.step
self.monitor_id = monitor.monitor_id
self.event = event
self.timestamp = timestamp
# Premium info (when available)
self.cluster_info = monitor.cluster_info
self.elapsed = elapsed
self.errors = errors
self.extra = extra
def emit(self, dry_run=False):
payload = vars(self)
# Delete entries that are often not present:
for key in ["cluster_info", "elapsed", "extra", "errors"]:
if not payload[key]:
del payload[key]
compact_text = json.dumps(payload, sort_keys=True, separators=(",", ":"), cls=FancyJsonEncoder)
if dry_run:
logger.debug("Dry-run: payload = %s", compact_text)
return
logger.debug("Monitor payload = %s", compact_text)
for d in MonitorPayload.dispatchers:
d.store(payload)
class DynamoDBStorage(PayloadDispatcher):
"""
Store ETL events in a DynamoDB table.
Note the table is created if it doesn't already exist when a payload needs to be stored.
"""
@staticmethod
def factory() -> "DynamoDBStorage":
table_name = "{}-{}".format(etl.config.get_config_value("resource_prefix"), "events")
return DynamoDBStorage(
table_name,
etl.config.get_config_value("etl_events.read_capacity"),
etl.config.get_config_value("etl_events.write_capacity"),
)
def __init__(self, table_name, read_capacity, write_capacity):
self.table_name = table_name
self.initial_read_capacity = read_capacity
self.initial_write_capacity = write_capacity
# Avoid default sessions and have one table reference per thread
self._thread_local_table = threading.local()
def get_table(self, create_if_not_exists=True):
"""Get table reference from DynamoDB or create it (within a new session)."""
session = boto3.session.Session()
logger.debug(f"Started new boto3 session in region '{session.region_name}'")
dynamodb = session.resource("dynamodb")
try:
table = dynamodb.Table(self.table_name)
status = table.table_status
logger.info(f"Found existing events table '{self.table_name}' in DynamoDB (status: {status})")
except botocore.exceptions.ClientError as exc:
# Check whether this is just a ResourceNotFoundException (sadly a 400, not a 404)
if exc.response["ResponseMetadata"]["HTTPStatusCode"] != 400:
raise
# Nullify assignment and start over
table = None
status = None
if not (status == "ACTIVE" or create_if_not_exists):
raise ETLRuntimeError("DynamoDB table '%s' does not exist or is not active" % self.table_name)
if table is None:
logger.info(f"Creating DynamoDB table: '{self.table_name}'")
table = dynamodb.create_table(
TableName=self.table_name,
KeySchema=[
{"AttributeName": "target", "KeyType": "HASH"},
{"AttributeName": "timestamp", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "target", "AttributeType": "S"},
{"AttributeName": "timestamp", "AttributeType": "N"},
],
ProvisionedThroughput={
"ReadCapacityUnits": self.initial_read_capacity,
"WriteCapacityUnits": self.initial_write_capacity,
},
)
status = table.table_status
if status != "ACTIVE":
logger.info(f"Waiting for events table '{self.table_name}' to become active")
table.wait_until_exists()
logger.debug(
f"Finished creating or updating events table '{self.table_name}' (arn={table.table_arn})"
)
return table
def store(self, payload: dict, _retry: bool = True):
"""
Actually send the payload to the DynamoDB table.
If this is the first call at all, then get a reference to the table, or even create the
table as necessary.
This method will try to store the payload a second time if there's an error in the first
attempt.
"""
try:
table = getattr(self._thread_local_table, "table", None)
if not table:
table = self.get_table()
self._thread_local_table.table = table
item = dict(payload)
# Cast timestamp (and elapsed seconds) into Decimal since DynamoDB cannot handle float.
# But decimals maybe finicky when instantiated from float so we make sure to fix the
# number of decimals.
item["timestamp"] = Decimal("%.6f" % item["timestamp"].timestamp())
if "elapsed" in item:
item["elapsed"] = Decimal("%.6f" % item["elapsed"])
table.put_item(Item=item)
except botocore.exceptions.ClientError:
# Something bad happened while talking to the service ... just try one more time.
if _retry:
logger.warning("Trying to store payload a second time after this mishap:", exc_info=True)
self._thread_local_table.table = None
delay = random.uniform(3, 10)
logger.debug("Snoozing for %.1fs", delay)
time.sleep(delay)
self.store(payload, _retry=False)
else:
raise
class _ThreadingSimpleServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
pass
class MemoryStorage(PayloadDispatcher):
"""
Store ETL events in memory and make the events accessible via HTTP.
When the ETL is running for extract, load, or unload, connect to port 8086.
When the ETL is running on a host other than your local computer, say in EC2, then use
port forwarding, to send requests from your host to an address seen on the other host:
ssh -L 8086:localhost:8086 <hostname>
The output should pass validator at https://validator.w3.org/#validate_by_input+with_options
"""
SERVER_HOST = "" # meaning: all that we can bind to locally
SERVER_PORT = 8086
def __init__(self):
self.queue = queue.Queue()
self.events = OrderedDict()
self.start_server()
def store(self, payload: dict):
self.queue.put(payload)
def _drain_queue(self):
try:
while True:
payload = self.queue.get_nowait()
if not payload.get("extra", {}).get("is_marker", False):
# Overwrite earlier events by later ones
key = payload["target"], payload["step"]
self.events[key] = payload
except queue.Empty:
pass
def get_indices(self):
self._drain_queue()
indices = {}
counter = Counter()
for payload in self.events.values():
index = dict(payload.get("extra", {}).get("index", {}))
name = index.setdefault("name", "N/A")
if name not in indices:
indices[name] = index
elif index["current"] > indices[name]["current"]:
indices[name].update(index)
if payload["event"] != STEP_START:
counter[name] += 1
indices[name]["counter"] = counter[name]
indices_as_list = [indices[name] for name in sorted(indices)]
return etl.assets.Content(json=indices_as_list)
def get_events(self, event_id: Optional[str]):
self._drain_queue()
if event_id is None:
events_as_list = sorted(
(self.events[key] for key in self.events),
key=lambda p: (2 if p["event"] == STEP_START else 1, p["timestamp"]),
reverse=True,
)
else:
events_as_list = [event for event in self.events.values() if event["monitor_id"] == event_id]
return etl.assets.Content(json=events_as_list)
def create_handler(self):
"""Return a handler that serves our storage content, used as factory method."""
storage = self
http_logger = logging.getLogger("arthur_http")
class MonitorHTTPHandler(http.server.BaseHTTPRequestHandler):
server_version = "MonitorHTTPServer/1.0"
log_error = http_logger.error
log_message = http_logger.info
def do_GET(self):
"""
Serve a GET (or HEAD) request.
We serve assets or JSON via the API.
If the command is HEAD (and not GET), only the header is sent. Duh.
"""
parts = urllib.parse.urlparse(self.path.rstrip("/"))
path = (parts.path or "/index.html").lstrip("/")
if path == "api/etl-id":
result = etl.assets.Content(json={"id": Monitor.etl_id})
elif path == "api/indices":
result = storage.get_indices()
elif path.startswith("api/events"):
segment = path.replace("api/events", "").strip("/")
result = storage.get_events(segment or None)
elif path == "api/command-line":
result = etl.assets.Content(json={"args": " ".join(sys.argv)})
elif etl.assets.asset_exists(path):
result = etl.assets.get_asset(path)
else:
# self.send_response(HTTPStatus.NOT_FOUND)
self.send_response(HTTPStatus.MOVED_PERMANENTLY)
new_parts = (parts.scheme, parts.netloc, "/", None, None)
new_url = urllib.parse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", result.content_type)
self.send_header("Content-Length", result.content_length)
if result.content_encoding is not None:
self.send_header("Content-Encoding", result.content_encoding)
self.send_header("Last-Modified", result.last_modified)
if result.cache_control is not None:
self.send_header("Cache-Control", result.cache_control)
self.end_headers()
if self.command == "GET":
self.wfile.write(result.content)
do_HEAD = do_GET
return MonitorHTTPHandler
def start_server(self):
"""Start background daemon to serve our events."""
handler_class = self.create_handler()
class BackgroundServer(threading.Thread):
def run(self):
logger.info("Starting background server for monitor on port %d", MemoryStorage.SERVER_PORT)
try:
httpd = _ThreadingSimpleServer(
(MemoryStorage.SERVER_HOST, MemoryStorage.SERVER_PORT), handler_class
)
httpd.serve_forever()
except Exception as exc:
logger.info("Background server stopped: %s", str(exc))
try:
thread = BackgroundServer(daemon=True)
thread.start()
except RuntimeError:
logger.warning("Failed to start monitor server:", exc_info=True)
def start_monitors(environment):
Monitor.environment = environment
memory = MemoryStorage()
MonitorPayload.dispatchers.append(memory)
if etl.config.get_config_value("etl_events.enabled"):
ddb = DynamoDBStorage.factory()
MonitorPayload.dispatchers.append(ddb)
else:
logger.warning("Writing events to a DynamoDB table is disabled in settings.")
def _format_output_column(key: str, value: str) -> str:
if value is None:
return "---"
elif key == "timestamp":
# Make timestamp readable by turning epoch seconds into a date.
return datetime.utcfromtimestamp(float(value)).replace(microsecond=0).isoformat()
elif key == "elapsed":
# Reduce number of decimals to 2.
return "{:6.2f}".format(float(value))
elif key == "rowcount":
return "{:9d}".format(int(value))
else:
return value
def _query_for_etls(step=None, hours_ago=0, days_ago=0) -> List[dict]:
"""Search for ETLs by looking for the "marker" event at the start of an ETL command."""
start_time = datetime.utcnow() - timedelta(days=days_ago, hours=hours_ago)
epoch_seconds = timegm(start_time.utctimetuple())
attribute_values = {
":marker": _DUMMY_TARGET,
":epoch_seconds": epoch_seconds,
":finish_event": STEP_FINISH,
}
if step is not None:
attribute_values[":step"] = step
filter_exp = "event = :finish_event"
if step is not None:
filter_exp += " and step = :step"
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
response = table.query(
ConsistentRead=True,
ExpressionAttributeNames={"#timestamp": "timestamp"}, # "timestamp" is a reserved word.
ExpressionAttributeValues=attribute_values,
KeyConditionExpression="target = :marker and #timestamp > :epoch_seconds",
FilterExpression=filter_exp,
ProjectionExpression="etl_id, step, #timestamp",
ReturnConsumedCapacity="TOTAL",
)
if "LastEvaluatedKey" in response:
logger.warning("This is is a partial result! Last evaluated key: '%s'", response["LastEvaluatedKey"])
logger.info(
"Query result: count = %d, scanned count = %d, consumed capacity = %f",
response["Count"],
response["ScannedCount"],
response["ConsumedCapacity"]["CapacityUnits"],
)
return response["Items"]
def query_for_etl_ids(hours_ago=0, days_ago=0) -> None:
"""Show recent ETLs with their step and execution start."""
etl_info = _query_for_etls(hours_ago=hours_ago, days_ago=days_ago)
keys = ["etl_id", "step", "timestamp"]
rows = [[_format_output_column(key, info[key]) for key in keys] for info in etl_info]
rows.sort(key=itemgetter(keys.index("timestamp")))
print(etl.text.format_lines(rows, header_row=keys))
def scan_etl_events(etl_id, selected_columns: Optional[Iterable[str]] = None) -> None:
"""
Scan for all events belonging to a specific ETL.
If a list of columns is provided, then the output is limited to those columns.
But note that the target (schema.table) and the event are always present.
"""
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
available_columns = ["target", "step", "event", "timestamp", "elapsed", "rowcount"]
if selected_columns is None:
selected_columns = available_columns
# We will always select "target" and "event" to have a meaningful output.
columns = list(fy.filter(frozenset(selected_columns).union(["target", "event"]), available_columns))
keys = ["extra.rowcount" if column == "rowcount" else column for column in columns]
# We need to scan here since the events are stored by "target" and not by "etl_id".
# TODO Try to find all the "known" relations and query on them with a filter on the etl_id.
client = boto3.client("dynamodb")
paginator = client.get_paginator("scan")
response_iterator = paginator.paginate(
TableName=table.name,
ConsistentRead=False,
ExpressionAttributeNames={"#timestamp": "timestamp"},
ExpressionAttributeValues={
":etl_id": {"S": etl_id},
":marker": {"S": _DUMMY_TARGET},
":start_event": {"S": STEP_START},
},
FilterExpression="etl_id = :etl_id and target <> :marker and event <> :start_event",
ProjectionExpression="target, step, event, #timestamp, elapsed, extra.rowcount",
ReturnConsumedCapacity="TOTAL",
# PaginationConfig={
# "PageSize": 100
# }
)
logger.info("Scanning events table '%s' for elapsed times", table.name)
consumed_capacity = 0.0
scanned_count = 0
rows: List[List[str]] = []
deserialize = TypeDeserializer().deserialize
for response in response_iterator:
consumed_capacity += response["ConsumedCapacity"]["CapacityUnits"]
scanned_count += response["ScannedCount"]
# We need to turn something like "'event': {'S': 'finish'}" into "'event': 'finish'".
deserialized = [
{key: deserialize(value) for key, value in item.items()} for item in response["Items"]
]
# Lookup "elapsed" or "extra.rowcount" (the latter as ["extra", "rowcount"]).
items = [{key: fy.get_in(item, key.split(".")) for key in keys} for item in deserialized]
# Scope down to selected keys and format the columns.
rows.extend([_format_output_column(key, item[key]) for key in keys] for item in items)
logger.info("Scan result: scanned count = %d, consumed capacity = %f", scanned_count, consumed_capacity)
if "timestamp" in keys:
rows.sort(key=itemgetter(keys.index("timestamp")))
else:
rows.sort(key=itemgetter(keys.index("target")))
print(etl.text.format_lines(rows, header_row=columns))
class EventsQuery:
def __init__(self, step: Optional[str] = None) -> None:
self._keys = ["target", "step", "event", "timestamp", "elapsed", "extra.rowcount"]
values = {
":target": None, # will be set when called
":epoch_seconds": None, # will be set when called
":start_event": STEP_START,
}
# Only look for finish or fail events
filter_exp = "event <> :start_event"
if step is not None:
values[":step"] = step
filter_exp += " and step = :step"
base_query = {
"ConsistentRead": False,
"ExpressionAttributeNames": {"#timestamp": "timestamp"},
"ExpressionAttributeValues": values,
"KeyConditionExpression": "target = :target and #timestamp > :epoch_seconds",
"FilterExpression": filter_exp,
"ProjectionExpression": "target, step, event, #timestamp, elapsed, extra.rowcount",
}
self._base_query = base_query
@property
def keys(self):
return self._keys[:]
def __call__(self, table, target, epoch_seconds):
query = deepcopy(self._base_query)
query["ExpressionAttributeValues"][":target"] = target
query["ExpressionAttributeValues"][":epoch_seconds"] = epoch_seconds
response = table.query(**query)
events = [{key: fy.get_in(item, key.split(".")) for key in self.keys} for item in response["Items"]]
# Return latest event or None
if events:
events.sort(key=itemgetter("timestamp"))
return events[-1]
return None
class BackgroundQueriesRunner(threading.Thread):
"""
An instance of this thread will repeatedly try to run queries on a DynamoDB table.
Every time a query returns a result, this result is sent to a queue and the query will no
longer be tried.
"""
def __init__(
self, targets, query, consumer_queue, start_time, update_interval, idle_time_out, **kwargs
) -> None:
super().__init__(**kwargs)
self.targets = list(targets)
self.query = query
self.queue = consumer_queue
self.start_time = start_time
self.update_interval = update_interval
self.idle_time_out = idle_time_out
def run(self):
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
targets = self.targets
start_time = self.start_time
idle = Timer()
while targets:
logger.debug(
"Waiting for events for %d target(s), start time = '%s'",
len(targets),
datetime.utcfromtimestamp(start_time).isoformat(),
)
new_start_time = datetime.utcnow() - timedelta(seconds=1) # avoid rounding errors
query_loop = Timer()
retired = set()
for target in targets:
latest_event = self.query(table, target, start_time)
if latest_event:
self.queue.put(latest_event)
retired.add(latest_event["target"])
targets = [t for t in targets if t not in retired]
start_time = timegm(new_start_time.utctimetuple())
if self.update_interval is None or not targets:
break
if retired:
idle = Timer()
elif self.idle_time_out and idle.elapsed > self.idle_time_out:
logger.info(
"Idle time-out: Waited for %d seconds but no events arrived, " "%d target(s) remaining",
self.idle_time_out,
len(targets),
)
break
if query_loop.elapsed < self.update_interval:
time.sleep(self.update_interval - query_loop.elapsed)
logger.info(
"Found events for %d out of %d target(s)", len(self.targets) - len(targets), len(self.targets)
)
self.queue.put(None)
def recently_extracted_targets(source_relations, start_time):
"""
Query the events table for "extract" events on the provided source_relations after start_time.
Waits for up to an hour, sleeping for 30s between checks.
Return the set of targets (ie, relation.identifier or event["target"]) with successful extracts.
"""
targets = [relation.identifier for relation in source_relations]
query = EventsQuery("extract")
consumer_queue = queue.Queue() # type: ignore
start_as_epoch = timegm(start_time.utctimetuple())
timeout = 60 * 60
extract_querying_thread = BackgroundQueriesRunner(
targets, query, consumer_queue, start_as_epoch, update_interval=30, idle_time_out=timeout, daemon=True
)
extract_querying_thread.start()
extracted_targets = set()
while True:
try:
event = consumer_queue.get(timeout=timeout)
if event is None:
break
if event["event"] == STEP_FINISH:
extracted_targets.add(event["target"])
except queue.Empty:
break
return extracted_targets
def summarize_events(relations, step: Optional[str] = None) -> None:
"""Summarize latest ETL step for the given relations by showing elapsed time and row count."""
etl_info = _query_for_etls(step=step, days_ago=7)
if not len(etl_info):
logger.warning("Found no ETLs within the last 7 days")
return
latest_etl = sorted(etl_info, key=itemgetter("timestamp"))[-1]
latest_start = latest_etl["timestamp"]
logger.info("Latest ETL: %s", latest_etl)
ddb = DynamoDBStorage.factory()
table = ddb.get_table(create_if_not_exists=False)
query = EventsQuery(step)
events = []
schema_events: Dict[str, Dict[str, Union[str, Decimal]]] = {}
for relation in tqdm(
desc="Querying for events", disable=None, iterable=relations, leave=False, unit="table"
):
event = query(table, relation.identifier, latest_start)
if event:
# Make the column for row counts easier to read by dropping "extra.".
event["rowcount"] = event.pop("extra.rowcount")
events.append(dict(event, kind=relation.kind))
schema = relation.target_table_name.schema
if schema not in schema_events:
schema_events[schema] = {
"target": schema,
"kind": "---",
"step": event["step"],
"timestamp": Decimal(0),
"event": "complete",
"elapsed": Decimal(0),
"rowcount": Decimal(0),
}
if event["timestamp"] > schema_events[schema]["timestamp"]:
schema_events[schema]["timestamp"] = event["timestamp"]
schema_events[schema]["elapsed"] += event["elapsed"]
schema_events[schema]["rowcount"] += event["rowcount"] if event["rowcount"] else 0
# Add pseudo events to show schemas are done.
events.extend(schema_events.values())
keys = ["target", "kind", "step", "timestamp", "event", "elapsed", "rowcount"]
rows = [[_format_output_column(key, info[key]) for key in keys] for info in events]
rows.sort(key=itemgetter(keys.index("timestamp")))
print(etl.text.format_lines(rows, header_row=keys))
def tail_events(
relations, start_time, update_interval=None, idle_time_out=None, step: Optional[str] = None
) -> None:
"""Tail the events table and show latest finish or fail events coming in."""
targets = [relation.identifier for relation in relations]
query = EventsQuery(step)
consumer_queue = queue.Queue() # type: ignore
epoch_seconds = timegm(start_time.utctimetuple())
thread = BackgroundQueriesRunner(
targets, query, consumer_queue, epoch_seconds, update_interval, idle_time_out, daemon=True
)
thread.start()
events = []
n_printed = 0
done = False
while not done:
progress = Timer()
while progress.elapsed < 10:
try:
event = consumer_queue.get(timeout=10)
if event is None:
done = True
break
event["timestamp"] = datetime.utcfromtimestamp(event["timestamp"]).isoformat()
events.append(event)
except queue.Empty:
break
# Keep printing tail of table that accumulates the events.
if len(events) > n_printed:
lines = etl.text.format_lines(
[[event[header] for header in query.keys] for event in events], header_row=query.keys
).split("\n")
if n_printed:
print("\n".join(lines[n_printed + 2 : -1])) # skip header and final "(x rows)" line
else:
print("\n".join(lines[:-1])) # only skip the "(x rows)" line
n_printed = len(lines) - 3 # header, separator, final = 3 extra rows
if done:
print(lines[-1])
def test_run():
Monitor.environment = "test" # type: ignore
memory = MemoryStorage()
MonitorPayload.dispatchers.append(memory)
schema_names = ["auburn", "burgundy", "cardinal", "flame", "fuchsia"]
table_names = ["apple", "banana", "cantaloupe", "durian", "fig"]
index = {"current": 0, "final": len(schema_names) * len(table_names)}
host = MemoryStorage.SERVER_HOST if MemoryStorage.SERVER_HOST else "localhost"
print("Creating events ... follow along at http://{}:{}/".format(host, MemoryStorage.SERVER_PORT))
with Monitor("color.fruit", "test", index={"current": 1, "final": 1, "name": "outer"}):
for i, names in enumerate(itertools.product(schema_names, table_names)):
try:
with Monitor(".".join(names), "test", index=dict(index, current=i + 1)):
time.sleep(random.uniform(0.5, 2.0))
# Create an error on one "table" so that highlighting of errors can be tested:
if i == 9:
raise RuntimeError("An error occurred!")
except RuntimeError:
pass
input("Press return (or Ctrl-c) to stop server\n")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# This allows to test the HTTP server. When running inside a Docker container, make sure
# that port 8086 is exposed. (bin/run_arthur.sh -w).
# Invoke using "python -m etl.monitor" inside the Docker container and follow along
# with "open http://localhost:8086" from your host.
test_run()
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\nfrom __future__ import unicode_literals\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom djblets.webapi.errors import DOES_NOT_EXIST\n\nfrom reviewboard.webapi.resources import resources\nfrom reviewboard.webapi.resources.base_... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nfrom __future__ import unicode_literals\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom djblets.webapi.errors import DOES_NOT_EXIST\n\nfrom reviewboard.webapi.resources import resources\nfrom reviewboard.webapi... | ```python
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from djblets.webapi.errors import DOES_NOT_EXIST
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_original_file import \
BaseOriginalFileResource
class OriginalFileResource(BaseOriginalFileResource):
"""Provides the unpatched file corresponding to a file diff."""
name = 'original_file'
def get_filediff(self, request, *args, **kwargs):
"""Returns the FileDiff, or an error, for the given parameters."""
review_request_resource = resources.review_request
try:
review_request = review_request_resource.get_object(
request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not review_request_resource.has_access_permissions(request,
review_request):
return self._no_access_error(request.user)
try:
return resources.filediff.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
original_file_resource = OriginalFileResource()
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/bin/python\nfrom __future__ import print_function\nfrom binascii import hexlify\nimport hashlib\nimport base64\nfrom getpass import getpass\nimport sys\nimport argparse\ncharset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\"\ncham... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/bin/python\nfrom __future__ import print_function\nfrom binascii import hexlify\nimport hashlib\nimport base64\nfrom getpass import getpass\nimport sys\nimport argparse\ncharset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234... | ```python
#!/bin/python
from __future__ import print_function
from binascii import hexlify
import hashlib
import base64
from getpass import getpass
import sys
import argparse
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
chameleon_charset = "ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz123456789?!#@&$="
numbers = "123456789"
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
punct = "?!#@&$"
def hashify(item):
m = hashlib.sha1()
m.update(item)
return m.digest()
def generate(secretpassword, sitename):
chained = "{0}:{1}".format(secretpassword,sitename.lower())
secret_hash = hashify(chained)
base64_secret = base64.b64encode(secret_hash)[:10]
encoded_secret = change_encoding(base64_secret)
pwd = ensurenumberandletter(encoded_secret)
return pwd
def change_encoding(s):
encoded = ""
for character in s:
index = charset.index(character)
encoded = encoded + chameleon_charset[index]
return encoded
def ensurenumberandletter(s):
hasnumber = False
hasletter = False
haspunct = False
for character in s:
if character in numbers:
hasnumber = True
if character in letters:
hasletter = True
if character in punct:
haspunct = True
if not hasnumber:
s = "1" + s[1:]
if not hasletter:
s = s[:1] + "a" + s[2:]
if not haspunct:
s = s[:2] + "@" + s[3:]
return s
def copy_passwd_to_clipboard(passwd):
try:
import pyperclip
pyperclip.copy(passwd)
except ImportError:
print("cannot copy to clipboard because the pyperclip package is not installed.")
def main(args):
print("generating password for site: {0}".format(args.sitename))
master_passwd = getpass("enter the master password: ")
generated_passwd = generate(master_passwd, args.sitename)
print("generated password: {}".format(generated_passwd))
if args.copy:
copy_passwd_to_clipboard(generated_passwd)
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser()
parser.add_argument("-n","--sitename", help="the sitename to generated password to", type=str, required=True)
parser.add_argument("-c","--copy", help="copy to clipboard", action="store_true", default=False)
args = parser.parse_args()
main(args)
except KeyboardInterrupt:
print("\nbye!")
pass
``` |
[
{
"content": "Here is a code file:\n```python\nfrom PyQt4 import QtGui, QtCore\n\n\nclass MainWindow(QtGui.QDialog):\n def __init__(self):\n QtGui.QDialog.__init__(self)\n\n self.setWindowTitle(\"PacaBackUp\")\n\n # Vertical layout\n self.layout = QtGui.QVBoxLayout(self)\n\n ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom PyQt4 import QtGui, QtCore\n\n\nclass MainWindow(QtGui.QDialog):\n def __init__(self):\n QtGui.QDialog.__init__(self)\n\n self.setWindowTitle(\"PacaBackUp\")\n\n # Vertical layout\n self.layout = QtGui.QVBoxLayout... | ```python
from PyQt4 import QtGui, QtCore
class MainWindow(QtGui.QDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
self.setWindowTitle("PacaBackUp")
# Vertical layout
self.layout = QtGui.QVBoxLayout(self)
# Create widgets
self.main_menu_bar = QtGui.QMenuBar()
self.save_menu = QtGui.QMenu("Save Settings")
self.label = QtGui.QLabel("AWS Keys")
self.key_id = QtGui.QLineEdit("Access Key ID")
self.secret_key = QtGui.QLineEdit("Secret Access Key")
self.bucket = QtGui.QLineEdit("Bucket Name")
self.local_folder = QtGui.QLineEdit("Local Folder")
self.label2 = QtGui.QLabel("Shall I begin?")
self.button = QtGui.QPushButton("Engage")
# Add widgets to the layout
self.layout.addWidget(self.main_menu_bar)
self.layout.addWidget(self.save_menu)
self.layout.addWidget(self.label)
self.layout.addWidget(self.key_id)
self.layout.addWidget(self.secret_key)
self.layout.addWidget(self.bucket)
self.layout.addWidget(self.local_folder)
self.layout.addWidget(self.label2)
self.layout.addWidget(self.button)
# Connect self.key_id with self.sayHello(name) when text is changed
#self.connect(self.key_id, QtCore.SIGNAL("textChanged(const QString&)"), self.sayHello)
# Button event
self.connect(self.button, QtCore.SIGNAL("clicked()"), self.buttonClicked)
#self.button.clicked(self.buttonClicked())
#Clear AWS Settings
#self.connect(self.key_id, QtCore.SIGNAL("focus()"), self.clearSettings)
#def sayHello(self, name):
# Set the output text
#self.secret_key.setText("Hello " + name + "!")
def buttonClicked(self):
sender = self.sender()
self.secret_key.setText("Button Pushed!!")
#def clearSettings(self):
``` |
[
{
"content": "Repeat the code precisely:\n```python\n#!/usr/bin/python\n'''\nCreates text file of Cocoa superclasses in given filename or in\n./cocoa_indexes/classes.txt by default.\n'''\nimport os, re\nfrom cocoa_definitions import write_file, find\nfrom commands import getoutput\n\ndef find_headers(frameworks... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n#!/usr/bin/python\n'''\nCreates text file of Cocoa superclasses in given filename or in\n./cocoa_indexes/classes.txt by default.\n'''\nimport os, re\nfrom cocoa_definitions import write_file, find\nfrom commands import getoutput\n\ndef find_he... | ```python
#!/usr/bin/python
'''
Creates text file of Cocoa superclasses in given filename or in
./cocoa_indexes/classes.txt by default.
'''
import os, re
from cocoa_definitions import write_file, find
from commands import getoutput
def find_headers(frameworks):
'''Returns a dictionary of the headers for each given framework.'''
headers_and_frameworks = {}
for framework in frameworks:
headers_and_frameworks[framework] = \
' '.join(find('/System/Library/Frameworks/%s.framework'
% framework, '.h'))
return headers_and_frameworks
def get_classes(header_files_and_frameworks):
'''Returns list of Cocoa Protocols classes & their framework.'''
classes = {}
for framework, files in header_files_and_frameworks:
for line in getoutput(r"grep -ho '@\(interface\|protocol\) [A-Z]\w\+' "
+ files).split("\n"):
cocoa_class = re.search(r'[A-Z]\w+', line)
if cocoa_class and not classes.has_key(cocoa_class.group(0)):
classes[cocoa_class.group(0)] = framework
classes = classes.items()
classes.sort()
return classes
def get_superclasses(classes_and_frameworks):
'''
Given a list of Cocoa classes & their frameworks, returns a list of their
superclasses in the form: "class\|superclass\|superclass\|...".
'''
args = ''
for classname, framework in classes_and_frameworks:
args += classname + ' ' + framework + ' '
return getoutput('./superclasses ' + args).split("\n")
def output_file(fname=None):
'''Output text file of Cocoa classes to given filename.'''
if fname is None:
fname = './cocoa_indexes/classes.txt'
if not os.path.isdir(os.path.dirname(fname)):
os.mkdir(os.path.dirname(fname))
frameworks = ('Foundation', 'AppKit', 'AddressBook', 'CoreData',
'PreferencePanes', 'QTKit', 'ScreenSaver', 'SyncServices',
'WebKit')
headers_and_frameworks = find_headers(frameworks).items()
superclasses = get_superclasses(get_classes(headers_and_frameworks))
write_file(fname, superclasses)
if __name__ == '__main__':
from sys import argv
output_file(argv[1] if len(argv) > 1 else None)
``` |
[
{
"content": "Here is the source code:\n```python\nimport codecs\nimport htmlentitydefs\nimport json\nimport logging\nimport nltk # Uses NLTK Version 2.0b9\nimport os\nimport re\nimport unicodedata\n\nclass positive_match:\n def __init__(self, match, offset):\n self.match = match\n self.offset ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport codecs\nimport htmlentitydefs\nimport json\nimport logging\nimport nltk # Uses NLTK Version 2.0b9\nimport os\nimport re\nimport unicodedata\n\nclass positive_match:\n def __init__(self, match, offset):\n self.match = match\n ... | ```python
import codecs
import htmlentitydefs
import json
import logging
import nltk # Uses NLTK Version 2.0b9
import os
import re
import unicodedata
class positive_match:
def __init__(self, match, offset):
self.match = match
self.offset = offset
def getMatch(self):
return self.match
def getOffset(self):
return self.offset
def printMatch(self):
log = logging.getLogger('classify')
log.debug("match = %s ;; offset = %s" % (self.match, self.offset))
class text:
def __init__(self, gdbm_files, filter_file, path, category):
self.category = category
self.filter_file = filter_file
self.gdbm_files = gdbm_files
self.path = path
return
def processUnicodeText(self, tokens):
log = logging.getLogger('classify')
log.debug("text.processUnicodeText()")
log.debug("tokens = %s" % tokens)
symbols = [".", "\&", "'", "-", "/", ","] # Punctuation that will be removed individually from each token
punctuation = {0x2018:0x27, 0x2019:0x27, 0x201C:0x22, 0x201D:0x22, 0x2014:0x2D} # Unicode to ASCII equivalent
matches = [] # All matches found in the document
# Takes a list of tokenized words and adds them into a hash with the key = token and value = location of token in text (offset)
for index in range(len(tokens)):
token_possibilities = []
log.debug("unmodifed token = %s ;; index = %s" % (tokens[index], index))
# Converts Unicode Punctuation to ASCII equivalent - ADD ENTRIES AS NECESSARY
token = tokens[index].translate(punctuation).encode('ascii', 'ignore')
log.debug("token translate = %s" % token)
token_possibilities.append(token)
# Converts Unicode to ASCII equivalent - If no equivalent is found, it ignores the unicode
token1 = unicodedata.normalize('NFKD', tokens[index]).encode('ascii', 'ignore')
log.debug("token normalize = %s" % token1)
if token != token1:
log.debug("token != token1")
token_possibilities.append(token1)
log.debug("token possibilities = %s" % token_possibilities)
for token in token_possibilities:
potential_match = []
offset_match = []
token = re.sub("[^\&/\w\d.',-]", "", token) # Removes all characters that aren't words, digits, ', ".", "-", "/", "&", or ","
token = token.lower()
log.debug("token = %s ;; index = %s" % (token, index))
if token == "":
log.debug("token is empty string")
continue
# If the chosen category is "geography", we optimize it so that it looks for the inital word to have their first letter upper-cased.
# This helps to reduce the number of false positives found.
# Case: City of Industry ;; (London)
if self.category == "geography" and tokens[index][0].isupper() == False:
if len(tokens[index]) > 1 and tokens[index][1].isupper() == False:
continue
# Peeks at the next 4 words to the current key's location and appends each word one at a time to see if it forms a word that
# is found in a related category dbm file
for offset in range(5):
if index + offset >= len(tokens):
break
single_word_possibilities = [] # Possible variants for a given word
# Gets word from text without any modifications to it
word = tokens[index + offset].lower()
word1 = word.translate(punctuation).encode('ascii', 'ignore')
log.debug("word 1 translate = %s" % word1)
if word1 != "":
single_word_possibilities.append(word1)
word2 = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore')
log.debug("word 2 normalize = %s" % word2)
if word1 != word2:
log.debug("word1 != word2")
single_word_possibilities.append(word2)
offset_match.append(index + offset)
log.debug("word = %s ;; offset = %s" % (word, index + offset))
possible_words = single_word_possibilities[:] # Copies list
for word in single_word_possibilities:
# Removes all symbols except ".", ', "/", "-", and "," from the word in question
new_word = re.sub("[^\&/\w\d.',-]", "", word)
if new_word != word:
log.debug("[new_word != word] = %s" % new_word)
possible_words.append(new_word)
# Checks if the word has any punctuation specified. If it does, it removes each one of the punctutation individually and
# adds the newly created word back to the single_word_possiblities list for re-evalualtion.
if re.search("[\&/.',-]", new_word):
for element in symbols:
regular_expression = "[%s]" % element
if re.search(regular_expression, new_word):
new_words = re.split(regular_expression, new_word)
log.debug("new words = %s ;; re = %s" % (new_words, regular_expression))
for w in new_words:
new_word1 = w.rstrip().lstrip()
if new_word1 == "":
log.debug("new word is empty string")
continue
elif len(new_word1) < 2:
log.debug("new word has less than 2 characters = %s" % new_word1)
continue
element_seen = 0
for e in possible_words:
if new_word1 == e:
element_seen = 1
break
if element_seen == 0:
possible_words.append(new_word1)
single_word_possibilities.append(new_word1)
single_word_possibilities = possible_words[:]
log.debug("potential match - before = %s" % potential_match)
if not potential_match:
for word in single_word_possibilities:
potential_match.append(word)
elif single_word_possibilities:
tmp = []
for phrase in potential_match:
for word in single_word_possibilities:
potential_word = phrase + " " + word
tmp.append(potential_word)
potential_match = tmp
log.debug("potential match - after = %s" % potential_match)
# Iterates through all of the related category dbm files and sees if the potential match is found in any of them
# gdbm_files contains a list of gdbm_file objects that contain [path, gdbm_obj]
for gdbm_obj in self.gdbm_files:
for phrase in potential_match:
if phrase in gdbm_obj[1]:
log.debug("phrase matches = %s" % phrase)
log.debug("match offset = %s" % offset_match)
# Ignore matches that are just numbers
if phrase.isdigit():
log.debug("phrase match are digits = %s" % phrase)
continue
# If the chosen category is "geography," ignore matches that are found in the filter dbm file
if self.category == "geography" and phrase in self.filter_file:
log.debug("phrase match is in filter dbm = %s" % phrase)
continue
match_offset = offset_match[:] # Makes copy of offset_match
match_found = positive_match(phrase, match_offset)
matches.append(match_found)
# Eliminates duplicates found in the all matches by making sure that no two matches have the same offset
matches = sorted(matches, key=lambda positive_match: positive_match.offset)
all_matches = []
for match in matches:
found = 0
if not all_matches:
all_matches.append(match)
continue
match_offset = match.getOffset()
log.debug("match offset = %s" % match_offset)
for element in all_matches:
element_offset = element.getOffset()
for index in element_offset:
if match_offset[0] == index:
# The case where the offset is found in the previous stored entry and the current match has MORE words than the previous match
# (Ex) chicago and offset = [923] versus chicago bears and offset = [923, 924]
if len(match_offset) > len(element_offset):
found = 1
# The case where the offset is found in the previous stored entry and the current match has LESS words than the previous match
# (Ex) baltimore ravens and offset = [880, 881] versus ravens and offset = [881]
elif len(match_offset) < len(element_offset):
found = 2
# The case where the offset is found in previous stored entry and current match has the SAME number of words as the previous match
# (Ex) dallas and offset = [24] versus dallas and offset = [24]
elif len(match_offset) == len(element_offset) and match.getMatch() == element.getMatch():
found = 2
if found == 0: # The offsets have not been seen yet
all_matches.append(match)
elif found == 1:
all_matches[-1] = match
elif found == 2:
continue
return all_matches
# Processes an html file. Assumes the html file contains html entities that need to be escaped and converted to unicode.
# Function escapes html entities to unicode, tokenizes the entire text, and sends it for processing.
def processUnicodeString(self, string):
log = logging.getLogger('classify')
log.debug("text.processUnicodeString()")
# Html entities consist of the format &...; What we want is the ... portion. That is why we separated into a group in the RE.
string_unicode = re.sub("&(#?\\w+);", self.substituteEntity, string)
log.debug("string unicode = %s" % string_unicode)
token = nltk.tokenize.WhitespaceTokenizer().tokenize(string_unicode)
#token = nltk.wordpunct_tokenize(string_unicode)
matches = self.processUnicodeText(token)
return matches
# Processes a text file. Assumes that text file contains unescaped unicode literals.
# Function decodes text into unicode, tokenizes the entire text, and sends it for processing.
def processUTFString(self, string):
log = logging.getLogger('classify')
log.debug("text.processUTFString()")
log.debug("string = %s" % string)
string_utf = string.decode("utf-8")
log.debug("string utf = %s" % string_utf)
token = nltk.tokenize.WhitespaceTokenizer().tokenize(string_utf)
#token = nltk.wordpunct_tokenize(string_ascii)
matches = self.processUnicodeText(token)
return matches
# Function escapes all html entities and converts it to unicode
def substituteEntity(self, match):
log = logging.getLogger('classify')
name = match.group(1)
if name in htmlentitydefs.name2codepoint:
return unichr(htmlentitydefs.name2codepoint[name])
elif name.startswith("#"):
try:
return unichr(int(name[1:]))
except:
pass
log.debug("Cannot replace html entities with corresponding UTF-8 characters")
return '?'
``` |
[
{
"content": "Here is the code content:\n```python\nimport asyncio\n\nimport pytest\nimport pytest_asyncio.plugin # noqa F401\n\nfrom pypeman import nodes, msgstore, channels\nfrom pypeman.channels import BaseChannel\nfrom pypeman.remoteadmin import RemoteAdminClient, RemoteAdminServer\nfrom pypeman.test impor... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nimport asyncio\n\nimport pytest\nimport pytest_asyncio.plugin # noqa F401\n\nfrom pypeman import nodes, msgstore, channels\nfrom pypeman.channels import BaseChannel\nfrom pypeman.remoteadmin import RemoteAdminClient, RemoteAdminServer\nfrom py... | ```python
import asyncio
import pytest
import pytest_asyncio.plugin # noqa F401
from pypeman import nodes, msgstore, channels
from pypeman.channels import BaseChannel
from pypeman.remoteadmin import RemoteAdminClient, RemoteAdminServer
from pypeman.test import TearDownProjectTestCase as TestCase
from pypeman.tests.common import generate_msg
class TestNode(nodes.BaseNode):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Used to test if node is processed during test
def process(self, msg):
print("Process %s" % self.name)
return msg
class RemoteAdminTests(TestCase):
@pytest.fixture(autouse=True)
def initfixture(self, unused_tcp_port):
self.tcp_port = unused_tcp_port
def clean_loop(self):
# Useful to execute future callbacks
pending = asyncio.Task.all_tasks(loop=self.loop)
if pending:
self.loop.run_until_complete(asyncio.gather(*pending))
def start_channels(self):
# Start channels
for chan in channels.all:
self.loop.run_until_complete(chan.start())
def setUp(self):
# Create class event loop used for tests to avoid failing
# previous tests to impact next test ? (Not sure)
self.loop = asyncio.new_event_loop()
self.loop.set_debug(True)
# Remove thread event loop to be sure we are not using
# another event loop somewhere
asyncio.set_event_loop(None)
# Avoid calling already tested channels
channels.all.clear()
def tearDown(self):
super().tearDown()
self.clean_loop()
def test_remote_admin_list(self):
""" Channel remote listing working """
port = self.tcp_port # port used for rmt admin
store_factory = msgstore.MemoryMessageStoreFactory()
chan = BaseChannel(name="test_remote050", loop=self.loop, message_store_factory=store_factory)
n = TestNode()
n2 = TestNode(name="sub")
n3 = TestNode(name="sub1")
n4 = TestNode(name="sub2")
msg = generate_msg(with_context=True)
msg2 = generate_msg(timestamp=(1982, 11, 27, 12, 35))
msg3 = generate_msg(timestamp=(1982, 11, 28, 12, 35))
msg4 = generate_msg(timestamp=(1982, 11, 28, 14, 35))
idref_msg3 = msg3.uuid
chan.add(n)
sub = chan.fork(name="subchannel")
sub.append(n2, n3, n4)
# Launch channel processing
self.start_channels()
self.loop.run_until_complete(chan.handle(msg))
self.loop.run_until_complete(chan.handle(msg2))
self.loop.run_until_complete(chan.handle(msg3))
self.loop.run_until_complete(chan.handle(msg4))
server = RemoteAdminServer(loop=self.loop, port=port)
self.loop.run_until_complete(server.start())
client = RemoteAdminClient(loop=self.loop, url="ws://localhost:%d" % port)
client.init()
# List channels
chans = client.channels()
print(chans)
self.assertEqual(chans[0]['name'], 'test_remote050', "Channel listing not working")
self.assertEqual(
chans[0]['subchannels'][0]['name'],
'test_remote050.subchannel',
"Subchannel listing not working")
# Stop channel
result = client.stop('test_remote050')
self.assertEqual(chan.status, BaseChannel.STOPPED, "Stopping channel doesn't work")
# Start channel
result = client.start('test_remote050')
self.assertEqual(chan.status, BaseChannel.WAITING, "Starting channel doesn't work")
# Search message
msg_list = client.list_msg(channel='test_remote050', start=2, count=5, order_by='-timestamp')
print(msg_list)
self.assertEqual(msg_list['total'], 4, 'List channel messages broken')
self.assertEqual(msg_list['messages'][0]['id'], idref_msg3, 'List channel messages broken')
# Replay message
result = client.replay_msg('test_remote050', [idref_msg3])
msg_list = client.list_msg(channel='test_remote050', start=0, count=5, order_by='-timestamp')
self.assertEqual(msg_list['total'], 5, 'List channel messages broken')
self.assertEqual(msg_list['messages'][0]['id'], result[0].uuid, 'Replay messages broken')
# Push message
result = client.push_msg(channel='test_remote050', text="Yaaay")
msg_list = client.list_msg(channel='test_remote050', start=0, count=5, order_by='-timestamp')
self.assertEqual(msg_list['total'], 6, 'Push message broken')
self.assertEqual(msg_list['messages'][0]['id'], result.uuid, 'Push message broken')
``` |
[
{
"content": "Repeat the code precisely:\n```python\nfrom unittest import TestCase\n\nfrom lie2me import Form, fields\nfrom lie2me.exceptions import BadValidation\n\n\nclass FormTestCase(TestCase):\n\n def test_form_without_fields_is_always_valid(self):\n form = Form({'foo': 'bar'})\n form.subm... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nfrom unittest import TestCase\n\nfrom lie2me import Form, fields\nfrom lie2me.exceptions import BadValidation\n\n\nclass FormTestCase(TestCase):\n\n def test_form_without_fields_is_always_valid(self):\n form = Form({'foo': 'bar'})\n ... | ```python
from unittest import TestCase
from lie2me import Form, fields
from lie2me.exceptions import BadValidation
class FormTestCase(TestCase):
def test_form_without_fields_is_always_valid(self):
form = Form({'foo': 'bar'})
form.submit()
self.assertEqual(form.errors, {})
def test_before_submission_form_valid_attribute_is_none(self):
form = Form()
self.assertEqual(form.valid, None)
def test_form_data_is_accessible_and_unchanged_before_validation(self):
form = SignupForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
self.assertEqual(form.data, {
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
def test_form_validation_against_valid_data(self):
form = SignupForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
form.submit()
self.assertEqual(form.valid, True)
self.assertEqual(form.errors, {})
def test_successful_validation_replaces_form_data_with_new_data(self):
form = SignupForm({
'name': ' John Doe ',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
form.submit()
self.assertEqual(form.data, {
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
'observations': None,
})
def test_unsuccessful_validation_does_not_replace_form_data_with_new_data(self):
form = SignupForm({
'name': ' John Doe ',
})
form.submit()
self.assertEqual(form.data['name'], ' John Doe ')
def test_form_validation_against_invalid_data(self):
form = SignupForm({
'name': 'a' * 201,
'email': 'john.doe@domain',
'password': '123',
'password2': '1234',
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'name': 'Must have no more than 200 characters.',
'email': 'Invalid email.',
'password2': 'Password confirmation does not match.',
})
def test_form_has_no_errors_before_calling_validate_even_if_data_is_invalid(self):
form = SignupForm({
'name': 'a' * 201,
'email': 'john.doe@domain',
'password': '12',
'password2': '123',
})
self.assertEqual(form.errors, {})
def test_form_without_errors_returning_none_in_validation_method_raises_exception(self):
form = BadValidationForm()
with self.assertRaises(BadValidation) as context:
form.submit()
self.assertEqual(str(context.exception), 'Form validation did not return any data.')
def test_nested_form_empty_data(self):
form = ProfileForm()
self.assertEqual(form.data, {'address': {}})
def test_nested_form_validated_data(self):
form = ProfileForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'address': {
'street': 'Nowhere Street',
'number': 42,
}
})
form.submit()
self.assertEqual(form.valid, True)
self.assertEqual(form.data, {
'name': 'John Doe',
'email': 'john.doe@domain.com',
'address': {
'street': 'Nowhere Street',
'number': 42,
'complement': None,
}
})
def test_nested_form_errors(self):
form = ProfileForm({
'name': 'a' * 201,
'email': 'john.doe@domain',
'address': {
'street': 'a' * 201,
'number': -1,
}
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'name': 'Must have no more than 200 characters.',
'email': 'Invalid email.',
'address': {
'street': 'Must have no more than 200 characters.',
'number': 'Must not be lower than 0.',
}
})
def test_nested_form_with_error_only_in_nested_form(self):
form = ProfileForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'address': {
'street': 'Nowhere Street',
'number': -1,
}
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'address': {
'number': 'Must not be lower than 0.',
}
})
def test_invalid_data_object_gets_replaced_by_no_data(self):
form = ProfileForm([1, 2, 3])
self.assertEqual(form.data, {
'address': {}
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'name': 'This is required.',
'email': 'This is required.',
'address': {
'street': 'This is required.',
'number': 'This is required.',
},
})
def test_weird_values_as_data_do_not_cause_exceptions(self):
form = ProfileForm()
form.submit()
form = ProfileForm(None)
form.submit()
form = ProfileForm(42)
form.submit()
form = ProfileForm([])
form.submit()
form = ProfileForm([1, 2, 3])
form.submit()
form = ProfileForm({1, 2, 3})
form.submit()
form = ProfileForm(object())
form.submit()
class SignupForm(Form):
name = fields.Text(max=200)
email = fields.Email()
password = fields.Text(min=3, trim=False)
password2 = fields.Text(trim=False)
observations = fields.Text(required=False)
_ignored_field = fields.Text()
def validate(self, data):
if 'password' in data and 'password2' in data:
if data['password'] != data['password2']:
self.error('password2', 'Password confirmation does not match.')
return data
class BadValidationForm(Form):
name = fields.Text(required=False)
def validate(self, data):
pass
class AddressForm(Form):
street = fields.Text(max=200)
number = fields.Integer(min=0)
complement = fields.Text(required=False)
class ProfileForm(Form):
name = fields.Text(max=200)
email = fields.Email()
address = AddressForm
``` |
[
{
"content": "Write the code verbatim:\n```python\nfrom ..model_tools import string_to_list, get_property\nfrom .channel_filters import filter_options, in_compare, term_match_check\n\n\ntry:\n from urllib.parse import quote_plus, unquote_plus\nexcept ImportError:\n from urllib import quote_plus, unquote_p... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nfrom ..model_tools import string_to_list, get_property\nfrom .channel_filters import filter_options, in_compare, term_match_check\n\n\ntry:\n from urllib.parse import quote_plus, unquote_plus\nexcept ImportError:\n from urllib import quote... | ```python
from ..model_tools import string_to_list, get_property
from .channel_filters import filter_options, in_compare, term_match_check
try:
from urllib.parse import quote_plus, unquote_plus
except ImportError:
from urllib import quote_plus, unquote_plus
def make_safe(val):
"""
Make strings in filters save.
i.e 'foo bar' becomes 'foo+bar'
"""
if not isinstance(val, str):
return val
return quote_plus(val)
def remove_channel_filter(channel):
"""
Remove filters from channel strings
i.e foo_contains becomes foo
"""
if '__' not in channel:
return channel
chan, channel_filter = channel.rsplit('__', 1)
if filter_options.get(channel_filter):
return chan
return channel
def get_channel_filter(channel):
if '__' not in channel:
return filter_options['eq']
chan, channel_filter_name = channel.rsplit('__', 1)
channel_filter = filter_options.get(channel_filter_name)
if not channel_filter:
return filter_options['eq']
return channel_filter
def get_property_and_value_from_channel(channel):
"""
Get a list of tuples with properties and channels.
i.e foo|bar__name__contains:baz returns a list: [('bar__name__contains', 'baz')]
"""
filters = filter(None, str(channel).split('|')[1:])
if not filters:
return None
properties = []
for channel_filter, val in [tuple(f.split(':', 1)) for f in filters]:
filter_option = filter_options.get(channel_filter.split('__')[-1])
if filter_option == in_compare:
val = string_to_list(val)
properties.append((channel_filter, val))
return properties
def channel_match_check(channel, data):
terms = filter(None, channel.split('|')[1:])
option = None
for term in terms:
key, val = term.split(':')
if '__' in key and key.split('__')[-1] in filter_options.keys():
option = key.rsplit('__', 1)[-1]
if key not in data:
return False
if not term_match_check(data[key], val, option):
return False
return True
def properties_match_channel_by_object(obj, channel_properties):
result = True
for prop, val in channel_properties:
if not has_val(obj, prop, val) and not has_related_value(obj, prop, val):
return False
return result
def properties_match_channel_by_dict(dict, channel_properties):
result = True
for prop, val in channel_properties:
if prop not in dict:
return False
val_type = type(val)
if not val_type(dict[prop]) == val:
return False
return result
def get_value(obj, prop):
data = {}
val = get_property(obj, prop)
if val:
data[prop] = val
return data
def has_val(obj, prop, val):
obj_val = get_property(obj, remove_channel_filter(prop))
if not obj_val:
return False
channel_filter = get_channel_filter(prop)
return channel_filter(val, obj_val)
def has_related_value(obj, field, channel_val):
if '__' not in field:
filter_by_val = channel_val
property_name = field
else:
property_name, filter_by_val = field.split('__', 1)
attr = getattr(obj, property_name)
if hasattr(attr, 'all'):
return getattr(obj, property_name).filter(**{filter_by_val: channel_val}).exists()
else:
filter_query = {'pk': obj.pk}
filter_query[field] = channel_val
return obj.__class__.objects.filter(**filter_query).exists()
``` |
[
{
"content": "Here is a code snippet:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;\n\ncrashdump_use_jinja2 = False\n\ndef _(msg):\n return msg\n\ndef tag_a(name, title=None, href=None, alt=None):\n from xml.etree.Elem... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;\n\ncrashdump_use_jinja2 = False\n\ndef _(msg):\n return msg\n\ndef tag_a(name, title=None, href=None, alt=None):\n fro... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
crashdump_use_jinja2 = False
def _(msg):
return msg
def tag_a(name, title=None, href=None, alt=None):
from xml.etree.ElementTree import Element, tostring
a = Element('a')
a.text = name
if href:
a.set('href', href)
if title:
a.set('title', title)
if alt:
a.set('alt', alt)
return tostring(a, encoding="utf8", method='html').decode()
def _hex_format(number, prefix='0x', width=None, bits=None):
if isinstance(number, str):
try:
number = int(number)
except ValueError:
number = None
if number is None:
return '(none)'
if bits is not None:
if bits == 32:
number = number & 0xffffffff
if width is None:
width = 8
elif bits == 64:
number = number & 0xffffffffffffffff
if width is None:
width = 16
if width is None:
if number > 2**48:
width = 16
elif number > 2**40:
width = 12
elif number > 2**32:
width = 10
elif number > 2**24:
width = 8
elif number > 2**16:
width = 6
elif number > 2**8:
width = 4
else:
width = 2
fmt = '%%0%ix' % width
return prefix + fmt % number
def hex_format(number, prefix='0x', width=None, bits=None):
if isinstance(number, list):
nums = []
for n in number:
nums.append(_hex_format(n, prefix, width, bits))
return ','.join(nums)
else:
return _hex_format(number, prefix, width, bits)
def hex_format_bits(number, bits):
return hex_format(number, bits=bits)
def addr_format(number, prefix='0x', bits=64):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=bits)
else:
return hex_format(number, prefix, bits=bits)
def addr_format_64(number, prefix='0x'):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=64)
else:
return hex_format(number, prefix, bits=64)
def addr_format_32(number, prefix='0x'):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=32)
else:
return hex_format(number, prefix, bits=32)
def addr_format_bits(number, bits=64):
return addr_format(number, bits=bits)
def exception_code(platform_type, code, name):
if platform_type is None:
return 'Platform unknown'
elif platform_type == 'Linux':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Unix_signal')
elif platform_type == 'Windows NT':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Windows_NT')
elif platform_type == 'Windows':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Microsoft_Windows')
else:
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))
def format_bool_yesno(val):
if isinstance(val, str) or isinstance(val, unicode):
try:
val = bool(val)
except ValueError:
val = None
if val is None:
return '(none)'
elif val == True:
return _('yes')
elif val == False:
return _('no')
else:
return _('neither')
def format_source_line(source, line, line_offset=None, source_url=None):
if source is None:
return _('unknown')
else:
title = str(source) + ':' + str(line)
if line_offset is not None:
title += '+' + hex_format(line_offset)
if source_url is not None:
href = source_url
else:
href='file:///' + str(source)
return tag_a(title, href=href)
def format_function_plus_offset(function, funcoff=None):
if function is None:
return _('unknown')
else:
if funcoff:
return str(function) + '+' + hex_format(funcoff)
else:
return str(function)
def str_or_unknown(str):
if str is None:
return _('unknown')
else:
return str
def format_cpu_type(cputype):
cputype = cputype.lower()
if cputype == 'amd64':
href='http://en.wikipedia.org/wiki/X86-64'
title = 'x86-64 (also known as x64, x86_64 and AMD64)'
elif cputype == 'x86':
href='http://en.wikipedia.org/wiki/X86'
title = 'x86 (also known as i386)'
elif cputype == 'mips':
href='http://en.wikipedia.org/wiki/MIPS_instruction_set'
title = 'MIPS instruction set'
elif cputype == 'alpha':
href='http://en.wikipedia.org/wiki/DEC_Alpha'
title = 'Alpha, originally known as Alpha AXP'
elif cputype == 'alpha64':
href='http://en.wikipedia.org/wiki/DEC_Alpha'
title = 'Alpha64, originally known as Alpha AXP'
elif cputype == 'powerpc':
href='http://en.wikipedia.org/wiki/PowerPC'
title = 'PowerPC'
elif cputype == 'powerpc64':
href='http://en.wikipedia.org/wiki/Ppc64'
title = 'PowerPC64 or ppc64'
elif cputype == 'arm':
href='http://en.wikipedia.org/wiki/ARM_architecture'
title = 'ARM'
elif cputype == 'arm64':
href='http://en.wikipedia.org/wiki/ARM_architecture#64-bit'
title = 'ARM 64-bit'
elif cputype == 'sparc':
href='http://en.wikipedia.org/wiki/SPARC'
title = 'SPARC ("scalable processor architecture")'
elif cputype == 'ia64':
href='http://en.wikipedia.org/wiki/Itanium'
title = 'Intel Itanium architecture (IA-64)'
elif cputype == 'msil':
href='http://en.wikipedia.org/wiki/Common_Intermediate_Language'
title = 'Microsoft Intermediate Language (MSIL)'
elif cputype == 'x64 wow':
href='http://en.wikipedia.org/wiki/WoW64'
title = 'Microsoft WoW64'
else:
href = 'http://en.wikipedia.org/wiki/Central_processing_unit'
title = 'Unknown:%s' % cputype
return tag_a(title, title=cputype, href=href)
def format_cpu_vendor(vendor):
if vendor == 'AuthenticAMD':
title = 'AMD'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
elif vendor == 'GenuineIntel':
title = 'Intel'
href = 'http://en.wikipedia.org/wiki/Intel'
elif vendor == 'Microsoft Hv':
title = 'Microsoft Hyper-V'
href = 'http://en.wikipedia.org/wiki/Hyper-V'
elif vendor == 'VMwareVMware':
title = 'VMware'
href = 'http://en.wikipedia.org/wiki/VMware'
elif vendor == 'KVMKVMKVMKVM':
title = 'KVM'
href = 'http://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine'
elif vendor == 'XenVMMXenVMM':
title = 'Xen'
href = 'http://en.wikipedia.org/wiki/Xen'
else:
title = vendor
href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'
return tag_a(title, title=vendor, href=href)
def format_cpu_name(vendor, name):
# http://en.wikipedia.org/wiki/CPUID
# http://www.sandpile.org/x86/cpuid.htm
if vendor == 'AuthenticAMD':
if name is None:
title = 'Unknown AMD CPU'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
elif name.startswith('AMD Ryzen'):
href = 'https://en.wikipedia.org/wiki/Ryzen'
title = 'AMD Ryzen'
elif name.startswith('AMD FX'):
href = 'http://en.wikipedia.org/wiki/List_of_AMD_FX_microprocessors'
title = 'AMD FX-series'
elif name.startswith('AMD Phenom'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Phenom_microprocessors'
title = 'AMD Phenom family'
elif name.startswith('AMD Opteron'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Opteron_microprocessors'
title = 'AMD Opteron family'
elif name.startswith('AMD Sempron'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Sempron_microprocessors'
title = 'AMD Sempron family'
elif name.startswith('AMD Turion'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Turion_microprocessors'
title = 'AMD Turion family'
elif name.startswith('AMD A'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_accelerated_processing_unit_microprocessors'
title = 'AMD APU series'
else:
title = 'Unknown AMD CPU'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
title = title + ' (%s)' % name
elif vendor == 'GenuineIntel':
if name is None:
title = 'Unknown Intel CPU'
href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'
elif name.startswith('Intel(R) Core(TM) i3'):
title = 'Intel Core i3 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i5'):
title = 'Intel Core i5 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i7'):
title = 'Intel Core i7 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i9'):
title = 'Intel Core i9 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM)'):
title = 'Unknown Intel Core series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Xeon(R)') or name.startswith('Intel(R) Xeon(TM)'):
title = 'Intel Xeon series'
href = 'http://en.wikipedia.org/wiki/Xeon'
else:
title = 'Unknown Intel CPU'
href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'
title = title + ' (%s)' % name
else:
title = name
href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'
return tag_a(name, title=title, href=href)
def format_distribution_id(distro_id):
if distro_id == 'Debian':
name = 'Debian'
href = 'http://www.debian.org'
elif distro_id == 'Ubuntu':
name = 'Ubuntu'
href = 'http://www.ubuntu.com'
else:
name = distro_id
href = 'http://distrowatch.com/' + distro_id
return tag_a(name, title=distro_id, href=href)
def format_distribution_codename(distro_id, distro_codename):
if distro_id == 'Debian':
name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())
href = 'http://www.debian.org/%s%s' % (distro_id.capitalize(), distro_codename.capitalize())
elif distro_id == 'Ubuntu':
name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())
href = 'http://ubuntuguide.org/wiki/%s_%s' % (distro_id.capitalize(), distro_codename.capitalize())
else:
name = distro_id
href = 'http://distrowatch.com/' + distro_id
return tag_a(name, title=distro_id, href=href)
def format_seconds(s):
if s is None:
return 'None'
elif s >= 3600:
hr = int(float(s) / 3600.0)
from math import fmod
m = fmod(float(s), 3600.0) / 60.0
return '%ihr %0.1fmin' % (hr, m)
elif s >= 60:
m = float(s) / 60.0
return '%0.1fmin' % m
elif s >= 1:
return '%0.1fs' % s
else:
return '%0.1fms' % ( s * 1000.0 )
def format_milliseconds(ms):
if ms is None:
return 'None'
elif ms > 1000:
s = float(ms) / 1000.0
return format_seconds(s)
else:
return '%ims' % ms
def format_trust_level(tl):
if tl == 0 or tl is None:
return 'Unknown'
elif tl == 1:
return 'Stack scan'
elif tl == 2:
return 'CFI scan'
elif tl == 3:
return 'FP'
elif tl == 4:
return 'CFI'
elif tl == 5:
return 'External'
elif tl == 6:
return 'IP'
else:
return 'unknown(%i)' % tl
_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def format_size(nbytes):
if isinstance(nbytes, str):
try:
nbytes = int(nbytes)
except ValueError:
nbytes = None
if nbytes == 0: return '0 B'
elif nbytes is None: return 'None'
i = 0
while nbytes >= 1024 and i < len(_suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, _suffixes[i])
def format_memory_usagetype(usage):
if usage == 0 or usage is None:
return 'Unknown'
elif usage == 1:
return 'Stack'
elif usage == 2:
return 'TEB'
elif usage == 3:
return 'PEB'
elif usage == 4:
return 'Process Parameters'
elif usage == 5:
return 'Environment'
elif usage == 6:
return 'IP'
elif usage == 7:
return 'Process Heap Handles'
elif usage == 8:
return 'Process Heap'
elif usage == 9:
return 'TLS'
elif usage == 10:
return 'Thread info block'
else:
return 'unknown(%i)' % usage
def format_gl_extension_name(ext):
khronos_extension_base_url = 'https://www.khronos.org/registry/OpenGL/extensions'
unknown_extension_url = 'https://www.khronos.org/opengl/wiki/OpenGL_Extension'
title = ext
name = ext
href = unknown_extension_url
vendor = None
ext_name = None
if ext.startswith('GL_'):
vendor_end = ext.index('_', 3)
if vendor_end > 0:
vendor = ext[3:vendor_end]
ext_name = ext[3:]
elif ext.startswith('GLX_') or ext.startswith('WGL_'):
vendor_end = ext.index('_', 4)
if vendor_end > 0:
vendor = ext[4:vendor_end]
ext_name = ext
if vendor and ext_name:
href = khronos_extension_base_url + '/%s/%s.txt' % (vendor, ext_name)
return tag_a(name, title=title, href=href)
def format_version_number(num):
if isinstance(num, str) or isinstance(num, unicode):
try:
num = int(num)
except ValueError:
num = None
if num is None: return 'None'
m, n, o, p = (num >> 48) & 0xffff, (num >> 32) & 0xffff, (num >> 16) & 0xffff, (num >> 0) & 0xffff
return '%i.%i.%i.%i' % (m, n, o, p)
def format_platform_type(platform_type):
if platform_type is None:
return _('Platform unknown')
elif platform_type == 'Linux':
return tag_a('Linux', href='https://en.wikipedia.org/wiki/Linux')
elif platform_type == 'Windows NT':
return tag_a('Windows NT',href='https://en.wikipedia.org/wiki/Windows_NT')
elif platform_type == 'Windows':
return tag_a('Windows', href='https://en.wikipedia.org/wiki/Microsoft_Windows')
else:
return tag_a(platform_type, href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))
def _get_version_from_string(number_str):
elems = number_str.split('.')
major = 0
minor = 0
patch = 0
build = 0
if len(elems) >= 1:
major = int(elems[0])
if len(elems) >= 2:
minor = int(elems[1])
if len(elems) >= 3:
patch = int(elems[2])
if len(elems) >= 4:
build = int(elems[3])
return major, minor, patch, build
def _get_version_from_numbers(os_version_number, os_build_number):
print('_get_version_from_numbers %s, %s' % (os_version_number, os_build_number))
if isinstance(os_version_number, int):
major = os_version_number >> 48 & 0xffff
minor = os_version_number >> 32 & 0xffff
patch = os_version_number >> 16 & 0xffff
build = os_version_number & 0xffff
if build == 0 and os_build_number:
build = int(os_build_number) if os_build_number is not None else 0
else:
major, minor, patch, build = _get_version_from_string(os_version_number)
#print('%x, %s -> %i.%i.%i.%i' % (os_version_number, os_build_number, major, minor, patch, build))
return major, minor, patch, build
def get_os_version_number(platform_type, os_version_number, os_build_number):
if platform_type is None or os_version_number is None:
return 0
if platform_type == 'Linux':
major, minor, patch, build = _get_version_from_string(os_version_number)
elif platform_type == 'Windows NT':
major, minor, patch, build = _get_version_from_string(os_version_number)
if major >= 10:
build = patch
patch = 0
else:
major = 0
minor = 0
patch = 0
build = 0
ret = (major << 48) | (minor << 32) | (patch << 16) | build
print('ver in %s -> %x' % (os_version_number, ret))
return ret
def get_os_build_number(platform_type, os_version_number, os_build_number):
if platform_type is None or os_version_number is None:
return 0
if platform_type == 'Linux':
build = 0
elif platform_type == 'Windows NT':
major, minor, patch, build = _get_version_from_string(os_version_number)
if major >= 10:
build = patch
else:
build = 0
print('build in %s -> %x' % (os_version_number, build))
return build
def os_version_info(platform_type, os_version_number, os_build_number):
ret = {'text': 'unknown' }
if platform_type is None or os_version_number is None:
return ret
major, minor, patch, build = _get_version_from_numbers(os_version_number, os_build_number)
if platform_type == 'Linux':
ret['text'] = 'Linux %i.%i.%i.%i' % (major, minor, patch, build)
ret['href'] = 'https://en.wikipedia.org/wiki/Linux'
elif platform_type == 'Windows NT':
productName = 'Windows %i.%i' % (major, minor)
marketingName = None
if (major < 6):
productName = "Windows XP"
ret['short'] = 'WinXP'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_XP'
elif (major == 6 and minor == 0):
productName = "Windows Vista"
ret['short'] = 'WinVista'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_Vista'
elif (major == 6 and minor == 1):
productName = "Windows 7"
ret['short'] = 'Win7'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_7'
elif (major == 6 and minor == 2):
productName = "Windows 8"
ret['short'] = 'Win8'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'
elif (major == 6 and minor == 3):
productName = "Windows 8.1"
ret['short'] = 'Win8.1'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'
elif (major == 10):
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_10'
# See https://en.wikipedia.org/wiki/Windows_10_version_history
if build <= 10240:
ret['short'] = 'Win10'
productName = "Windows 10"
marketingName = ''
elif(build <= 10586):
ret['short'] = 'Win10/1511'
productName = "Windows 10 Version 1511"
marketingName = "November Update"
elif (build <= 14393):
ret['short'] = 'Win10/1607'
productName = "Windows 10 Version 1607"
marketingName = "Anniversary Update"
elif (build <= 15063):
ret['short'] = 'Win10/1703'
productName = "Windows 10 Version 1703"
marketingName = "Creators Update"
elif (build <= 16299):
ret['short'] = 'Win10/1709'
productName = "Windows 10 Version 1709"
marketingName = "Fall Creators Update"
elif (build <= 17134):
ret['short'] = 'Win10/1803'
productName = "Windows 10 Version 1803"
marketingName = "April 2018 Update"
elif (build <= 18204):
ret['short'] = 'Win10/1809'
productName = "Windows 10 Version 1809"
marketingName = "October 2018 Update"
elif (build <= 18362):
ret['short'] = 'Win10/1903'
productName = "Windows 10 Version 1903"
marketingName = "May 2019 Update"
elif (build <= 18363):
ret['short'] = 'Win10/1909'
productName = "Windows 10 Version 1909"
marketingName = "November 2019 Update"
elif (build <= 19041):
ret['short'] = 'Win10/2004'
productName = "Windows 10 Version 2004"
marketingName = "May 2020 Update"
elif (build <= 19042):
ret['short'] = 'Win10/1903'
productName = "Windows 10 Version 20H2"
marketingName = '' # TBA
else:
ret['short'] = 'Win10/TBA'
productName = 'Windows 10 Build %i' % build
if marketingName:
ret['text'] = '%s (%s)' % (productName, marketingName)
else:
ret['text'] = productName
ret['full'] = ret['text'] + ' %i.%i.%i.%i' % (major, minor, patch, build)
elif platform_type == 'Windows':
ret['text'] = 'Windows %i.%i' % (major, minor)
ret['href'] = 'https://en.wikipedia.org/wiki/Microsoft_Windows'
return ret
def format_os_version(platform_type, os_version_number, os_build_number):
info = os_version_info(platform_type, os_version_number, os_build_number)
if 'href' in info:
return tag_a(info.get('text'), href=info.get('href'))
else:
return info.get('text')
def format_os_version_short(platform_type, os_version_number, os_build_number):
info = os_version_info(platform_type, os_version_number, os_build_number)
if 'short' in info:
return info.get('short')
else:
return info.get('text')
def language_from_qlocale_language_enum(num):
_codes = {
0: 'Any language',
31: 'English',
42: 'German',
}
if num in _codes:
return _codes[num]
else:
return str(num)
# See https://doc.qt.io/qt-5/qlocale.html#Country-enum
def country_from_qlocale_country_enum(num):
_codes = {
0: 'Any country',
82: 'Germany',
224: 'United Kingdom',
225: 'United States',
}
if num in _codes:
return _codes[num]
else:
return str(num)
# https://doc.qt.io/qt-5/qlocale.html#Script-enum
def script_from_qlocale_script_enum(num):
_codes = {
0: 'Any script',
1: 'Arabic',
2: 'Cyrillic',
16: 'Greek',
7: 'Latin',
}
if num in _codes:
return _codes[num]
else:
return str(num)
def thread_extra_info(thread):
if thread is None:
return _('N/A')
elif thread.main_thread:
return '*@' if thread.exception else '@'
elif thread.rpc_thread:
return '*[RPC]' if thread.exception else '[RPC]'
elif thread.exception:
return '*'
else:
return ''
def format_thread(thread):
if thread is None:
return _('N/A')
else:
if thread.main_thread:
ret = _('Main thread')
elif thread.rpc_thread:
ret = _('RPC thread')
else:
ret = _('Thread')
ret = ret + ' ' + hex_format(thread.id)
if thread.name:
ret = ret + ' ' + thread.name
if thread.exception:
ret = ret + ' ' + _('with exception')
return ret
def format_stack_frame(frame):
if frame is None:
return _('N/A')
else:
if frame.function is None:
offset = frame.addr - frame.module_base
if frame.module:
return frame.module + '+' + hex_format(offset)
else:
return frame.addr
else:
return format_function_plus_offset(frame.function, frame.funcoff)
``` |
[
{
"content": "Write the code verbatim:\n```python\n#!/usr/bin/env python\n\ndef configuration(parent_package='', top_path=None):\n import os.path as op\n from numpy.distutils.misc_util import Configuration\n\n from sfepy import Config\n\n site_config = Config()\n os_flag = {'posix' : 0, 'windows'... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n#!/usr/bin/env python\n\ndef configuration(parent_package='', top_path=None):\n import os.path as op\n from numpy.distutils.misc_util import Configuration\n\n from sfepy import Config\n\n site_config = Config()\n os_flag = {'posix... | ```python
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
import os.path as op
from numpy.distutils.misc_util import Configuration
from sfepy import Config
site_config = Config()
os_flag = {'posix' : 0, 'windows' : 1}
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
defines = [('__SDIR__', "'\"%s\"'" % auto_dir),
('SFEPY_PLATFORM', os_flag[site_config.system()])]
if '-DDEBUG_FMF' in site_config.debug_flags():
defines.append(('DEBUG_FMF', None))
common_path = '../../common/extmods'
fem_src = ['fmfield.c', 'geommech.c', 'common_python.c']
fem_src = [op.join(common_path, ii) for ii in fem_src]
src = ['igac.pyx', 'nurbs.c']
config.add_extension('igac',
sources=src + fem_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, common_path],
define_macros=defines)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; val... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values hav... | ```python
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# Support markdown
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../pyrate")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrate'
copyright = u'2016, Julia Schaumeier, Sam Macbeth, Will Usher'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from pyrate import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrate-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'pyrate Documentation',
u'Will Usher', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
``` |
[
{
"content": "Repeat the code exactly:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom builtins import *\n\nfrom collections import OrderedDict, defaultdict, ChainMap\nfrom intervalt... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom builtins import *\n\nfrom collections import OrderedDict, defaultdict, ChainMap... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import OrderedDict, defaultdict, ChainMap
from intervaltree import Interval, IntervalTree
import numpy as np
from txttk.report import Report
from txttk.corpus import Annotation
"""
The measurements of features
"""
def concept_measurements(candidate, godata):
"""
Measure the ceoncept features: GOID, STATID, NAMESPACE
from the given candidate
"""
statement = candidate.statement
statid = statement.statid
goid, sep, sulfix = statid.partition('%')
namespace = godata[goid].namespace
measurements = OrderedDict([('GOID', goid),
('STATID', statid),
('NAMESPACE', namespace)])
return measurements
def evidence_measurements(candidate):
"""
Measure the evidence features: LENGTH, TEXT and
TEXT[:3], TEXT[-3:] from the given candidate
"""
evidences = candidate.evidences
sentence_text = candidate.sentence.text
offset = candidate.sentence.offset
starts = [e.start for e in evidences]
ends = [e.end for e in evidences]
raw_start = min(starts) - offset
raw_end = max(ends) - offset
length = raw_end - raw_start
text = sentence_text[raw_start:raw_end].lower()
boostscore = {'boost':1, 'boost2': 100}
boostlevel = max([boostscore.get(term.ref, 0) for term in candidate.statement.terms()])
measurements = OrderedDict([('LENGTH', length),
('TEXT=' + text, True),
('TEXT[:3]=' + text[:3], True),
('TEXT[-3:]=' + text[-3:], True),
('BOOST', boostlevel)])
return measurements
def bias_measurements(candidate):
"""
Measure the bias features: OMIT, SATURATION from the
given candidate
"""
measurements = OrderedDict()
statement = candidate.statement
evidences = candidate.evidences
terms_in_evidences = set([e.term for e in evidences])
for term in statement.terms():
if term in terms_in_evidences:
continue
key = 'OMIT=' + term.lemma
measurements[key] = True
measurements['SATURATION'] = len(evidences) / len(statement.evidences)
return measurements
def all_measurements(candidate, godata):
"""
Return all the measurements from the given candidate
"""
measurements = OrderedDict()
measurements.update(concept_measurements(candidate, godata))
measurements.update(evidence_measurements(candidate))
measurements.update(bias_measurements(candidate))
return measurements
def bulk_measurements(candidates, godata):
result = []
for candidate in candidates:
result.append(all_measurements(candidate, godata))
return result
class LabelMarker(object):
"""
Handeling the labels from given goldstandard
"""
def __init__(self, goldstandard):
self.goldstandard = goldstandard
forest = defaultdict(IntervalTree)
for pmid, goid, start, end, text in goldstandard:
t = forest[pmid]
t[start:end] = (goid, text)
self.forest = dict(forest)
def mark(self, candidate):
pmid = candidate.sentence.docid
statid = candidate.statement.statid
evidences = candidate.evidences
goid = statid.partition('%')[0]
starts = [e.start for e in evidences]
ends = [e.end for e in evidences]
start = min(starts)
end = max(ends)
span = (start, end)
gold_goids = {iv.data[0] for iv in self.forest[pmid][slice(*span)]}
if goid in gold_goids:
return 1
return 0
def markall(self, candidates):
labels = []
for candidate in candidates:
labels.append(self.mark(candidate))
return labels
def process(self, candidates):
return np.array(self.markall(candidates))
def recover(candidates, y):
result = Annotation()
for candidate, label in zip(candidates, y):
if label == 0:
continue
pmid = candidate.sentence.docid
statid = candidate.statement.statid
goid = statid.partition('%')[0]
start = min([e.start for e in candidate.evidences])
end = max([e.end for e in candidate.evidences])
raw_start = start - candidate.sentence.offset
raw_end = end - candidate.sentence.offset
text = candidate.sentence.text[raw_start:raw_end]
result.add((pmid, goid, start, end, text))
return result
def evaluate(system, goldstandard, message):
slim_system = {i[:4] for i in system}
slim_goldstandard = {i[:4] for i in goldstandard}
slim2gold = ChainMap({i[:4]: i for i in goldstandard},
{i[:4]: i for i in system})
slim_tp = slim_system & slim_goldstandard
slim_fp = slim_system - slim_goldstandard
slim_fn = slim_goldstandard - slim_system
tp = {slim2gold[i] for i in slim_tp}
fp = {slim2gold[i] for i in slim_fp}
fn = {slim2gold[i] for i in slim_fn}
return Report(tp, fp, fn, message)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# importing libraries:\nimport maya.cmds as cmds\n\nfrom Library import dpUtils as utils\nimport dpBaseClass as Base\nimport dpLayoutClass as Layout\n\n\n# global variables to this module: \nCLASS_NAME = \"FkLine\"\... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# importing libraries:\nimport maya.cmds as cmds\n\nfrom Library import dpUtils as utils\nimport dpBaseClass as Base\nimport dpLayoutClass as Layout\n\n\n# global variables to this module: \nCLASS_NA... | ```python
# importing libraries:
import maya.cmds as cmds
from Library import dpUtils as utils
import dpBaseClass as Base
import dpLayoutClass as Layout
# global variables to this module:
CLASS_NAME = "FkLine"
TITLE = "m001_fkLine"
DESCRIPTION = "m002_fkLineDesc"
ICON = "/Icons/dp_fkLine.png"
class FkLine(Base.StartClass, Layout.LayoutClass):
def __init__(self, *args, **kwargs):
#Add the needed parameter to the kwargs dict to be able to maintain the parameter order
kwargs["CLASS_NAME"] = CLASS_NAME
kwargs["TITLE"] = TITLE
kwargs["DESCRIPTION"] = DESCRIPTION
kwargs["ICON"] = ICON
Base.StartClass.__init__(self, *args, **kwargs)
def createModuleLayout(self, *args):
Base.StartClass.createModuleLayout(self)
Layout.LayoutClass.basicModuleLayout(self)
def createGuide(self, *args):
Base.StartClass.createGuide(self)
# Custom GUIDE:
cmds.addAttr(self.moduleGrp, longName="nJoints", attributeType='long')
cmds.setAttr(self.moduleGrp+".nJoints", 1)
cmds.addAttr(self.moduleGrp, longName="flip", attributeType='bool')
cmds.setAttr(self.moduleGrp+".flip", 0)
cmds.setAttr(self.moduleGrp+".moduleNamespace", self.moduleGrp[:self.moduleGrp.rfind(":")], type='string')
cmds.addAttr(self.moduleGrp, longName="articulation", attributeType='bool')
cmds.setAttr(self.moduleGrp+".articulation", 0)
self.cvJointLoc = self.ctrls.cvJointLoc(ctrlName=self.guideName+"_JointLoc1", r=0.3, d=1, guide=True)
self.jGuide1 = cmds.joint(name=self.guideName+"_JGuide1", radius=0.001)
cmds.setAttr(self.jGuide1+".template", 1)
cmds.parent(self.jGuide1, self.moduleGrp, relative=True)
self.cvEndJoint = self.ctrls.cvLocator(ctrlName=self.guideName+"_JointEnd", r=0.1, d=1, guide=True)
cmds.parent(self.cvEndJoint, self.cvJointLoc)
cmds.setAttr(self.cvEndJoint+".tz", 1.3)
self.jGuideEnd = cmds.joint(name=self.guideName+"_JGuideEnd", radius=0.001)
cmds.setAttr(self.jGuideEnd+".template", 1)
cmds.transformLimits(self.cvEndJoint, tz=(0.01, 1), etz=(True, False))
self.ctrls.setLockHide([self.cvEndJoint], ['tx', 'ty', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])
cmds.parent(self.cvJointLoc, self.moduleGrp)
cmds.parent(self.jGuideEnd, self.jGuide1)
cmds.parentConstraint(self.cvJointLoc, self.jGuide1, maintainOffset=False, name=self.jGuide1+"_PaC")
cmds.parentConstraint(self.cvEndJoint, self.jGuideEnd, maintainOffset=False, name=self.jGuideEnd+"_PaC")
def changeJointNumber(self, enteredNJoints, *args):
""" Edit the number of joints in the guide.
"""
utils.useDefaultRenderLayer()
# get the number of joints entered by user:
if enteredNJoints == 0:
try:
self.enteredNJoints = cmds.intField(self.nJointsIF, query=True, value=True)
except:
return
else:
self.enteredNJoints = enteredNJoints
# get the number of joints existing:
self.currentNJoints = cmds.getAttr(self.moduleGrp+".nJoints")
# start analisys the difference between values:
if self.enteredNJoints != self.currentNJoints:
# unparent temporarely the Ends:
self.cvEndJoint = self.guideName+"_JointEnd"
cmds.parent(self.cvEndJoint, world=True)
self.jGuideEnd = (self.guideName+"_JGuideEnd")
cmds.parent(self.jGuideEnd, world=True)
# verify if the nJoints is greather or less than the current
if self.enteredNJoints > self.currentNJoints:
for n in range(self.currentNJoints+1, self.enteredNJoints+1):
# create another N cvJointLoc:
self.cvJointLoc = self.ctrls.cvJointLoc(ctrlName=self.guideName+"_JointLoc"+str(n), r=0.3, d=1, guide=True)
# set its nJoint value as n:
cmds.setAttr(self.cvJointLoc+".nJoint", n)
# parent it to the lastGuide:
cmds.parent(self.cvJointLoc, self.guideName+"_JointLoc"+str(n-1), relative=True)
cmds.setAttr(self.cvJointLoc+".translateZ", 2)
# create a joint to use like an arrowLine:
self.jGuide = cmds.joint(name=self.guideName+"_JGuide"+str(n), radius=0.001)
cmds.setAttr(self.jGuide+".template", 1)
#Prevent a intermidiate node to be added
cmds.parent(self.jGuide, self.guideName+"_JGuide"+str(n-1), relative=True)
#Do not maintain offset and ensure cv will be at the same place than the joint
cmds.parentConstraint(self.cvJointLoc, self.jGuide, maintainOffset=False, name=self.jGuide+"_PaC")
cmds.scaleConstraint(self.cvJointLoc, self.jGuide, maintainOffset=False, name=self.jGuide+"_ScC")
elif self.enteredNJoints < self.currentNJoints:
# re-define cvEndJoint:
self.cvJointLoc = self.guideName+"_JointLoc"+str(self.enteredNJoints)
self.cvEndJoint = self.guideName+"_JointEnd"
self.jGuide = self.guideName+"_JGuide"+str(self.enteredNJoints)
# re-parent the children guides:
childrenGuideBellowList = utils.getGuideChildrenList(self.cvJointLoc)
if childrenGuideBellowList:
for childGuide in childrenGuideBellowList:
cmds.parent(childGuide, self.cvJointLoc)
# delete difference of nJoints:
cmds.delete(self.guideName+"_JointLoc"+str(self.enteredNJoints+1))
cmds.delete(self.guideName+"_JGuide"+str(self.enteredNJoints+1))
# re-parent cvEndJoint:
pTempParent = cmds.listRelatives(self.cvEndJoint, p=True)
cmds.parent(self.cvEndJoint, self.cvJointLoc)
#Ensure to remove temp parent from the unparenting done on the end joint
if pTempParent:
cmds.delete(pTempParent)
cmds.setAttr(self.cvEndJoint+".tz", 1.3)
pTempParent = cmds.listRelatives(self.jGuideEnd, p=True)
cmds.parent(self.jGuideEnd, self.jGuide, relative=True)
if pTempParent:
cmds.delete(pTempParent)
cmds.setAttr(self.moduleGrp+".nJoints", self.enteredNJoints)
self.currentNJoints = self.enteredNJoints
# re-build the preview mirror:
Layout.LayoutClass.createPreviewMirror(self)
cmds.select(self.moduleGrp)
def rigModule(self, *args):
Base.StartClass.rigModule(self)
# verify if the guide exists:
if cmds.objExists(self.moduleGrp):
try:
hideJoints = cmds.checkBox('hideJointsCB', query=True, value=True)
except:
hideJoints = 1
# articulation joint:
self.addArticJoint = self.getArticulation()
# start as no having mirror:
sideList = [""]
# analisys the mirror module:
self.mirrorAxis = cmds.getAttr(self.moduleGrp+".mirrorAxis")
if self.mirrorAxis != 'off':
# get rigs names:
self.mirrorNames = cmds.getAttr(self.moduleGrp+".mirrorName")
# get first and last letters to use as side initials (prefix):
sideList = [ self.mirrorNames[0]+'_', self.mirrorNames[len(self.mirrorNames)-1]+'_' ]
for s, side in enumerate(sideList):
duplicated = cmds.duplicate(self.moduleGrp, name=side+self.userGuideName+'_Guide_Base')[0]
allGuideList = cmds.listRelatives(duplicated, allDescendents=True)
for item in allGuideList:
cmds.rename(item, side+self.userGuideName+"_"+item)
self.mirrorGrp = cmds.group(name="Guide_Base_Grp", empty=True)
cmds.parent(side+self.userGuideName+'_Guide_Base', self.mirrorGrp, absolute=True)
# re-rename grp:
cmds.rename(self.mirrorGrp, side+self.userGuideName+'_'+self.mirrorGrp)
# do a group mirror with negative scaling:
if s == 1:
if cmds.getAttr(self.moduleGrp+".flip") == 0:
for axis in self.mirrorAxis:
gotValue = cmds.getAttr(side+self.userGuideName+"_Guide_Base.translate"+axis)
flipedValue = gotValue*(-2)
cmds.setAttr(side+self.userGuideName+'_'+self.mirrorGrp+'.translate'+axis, flipedValue)
else:
for axis in self.mirrorAxis:
cmds.setAttr(side+self.userGuideName+'_'+self.mirrorGrp+'.scale'+axis, -1)
# joint labelling:
jointLabelAdd = 1
else: # if not mirror:
duplicated = cmds.duplicate(self.moduleGrp, name=self.userGuideName+'_Guide_Base')[0]
allGuideList = cmds.listRelatives(duplicated, allDescendents=True)
for item in allGuideList:
cmds.rename(item, self.userGuideName+"_"+item)
self.mirrorGrp = cmds.group(self.userGuideName+'_Guide_Base', name="Guide_Base_Grp", relative=True)
#for Maya2012: self.userGuideName+'_'+self.moduleGrp+"_Grp"
# re-rename grp:
cmds.rename(self.mirrorGrp, self.userGuideName+'_'+self.mirrorGrp)
# joint labelling:
jointLabelAdd = 0
# store the number of this guide by module type
dpAR_count = utils.findModuleLastNumber(CLASS_NAME, "dpAR_type") + 1
# run for all sides
for s, side in enumerate(sideList):
self.base = side+self.userGuideName+'_Guide_Base'
self.ctrlZeroGrp = side+self.userGuideName+"_00_Ctrl_Zero_0_Grp"
self.skinJointList = []
# get the number of joints to be created:
self.nJoints = cmds.getAttr(self.base+".nJoints")
for n in range(0, self.nJoints):
cmds.select(clear=True)
# declare guide:
self.guide = side+self.userGuideName+"_Guide_JointLoc"+str(n+1)
self.cvEndJoint = side+self.userGuideName+"_Guide_JointEnd"
self.radiusGuide = side+self.userGuideName+"_Guide_Base_RadiusCtrl"
# create a joint:
self.jnt = cmds.joint(name=side+self.userGuideName+"_%02d_Jnt"%(n), scaleCompensate=False)
cmds.addAttr(self.jnt, longName='dpAR_joint', attributeType='float', keyable=False)
# joint labelling:
utils.setJointLabel(self.jnt, s+jointLabelAdd, 18, self.userGuideName+"_%02d"%(n))
self.skinJointList.append(self.jnt)
# create a control:
self.jntCtrl = self.ctrls.cvControl("id_007_FkLine", side+self.userGuideName+"_%02d_Ctrl"%(n), r=self.ctrlRadius, d=self.curveDegree)
# position and orientation of joint and control:
cmds.delete(cmds.parentConstraint(self.guide, self.jnt, maintainOffset=False))
cmds.delete(cmds.parentConstraint(self.guide, self.jntCtrl, maintainOffset=False))
# zeroOut controls:
self.zeroOutCtrlGrp = utils.zeroOut([self.jntCtrl])[0]
# hide visibility attribute:
cmds.setAttr(self.jntCtrl+'.visibility', keyable=False)
# fixing flip mirror:
if s == 1:
if cmds.getAttr(self.moduleGrp+".flip") == 1:
cmds.setAttr(self.zeroOutCtrlGrp+".scaleX", -1)
cmds.setAttr(self.zeroOutCtrlGrp+".scaleY", -1)
cmds.setAttr(self.zeroOutCtrlGrp+".scaleZ", -1)
cmds.addAttr(self.jntCtrl, longName='scaleCompensate', attributeType="bool", keyable=False)
cmds.setAttr(self.jntCtrl+".scaleCompensate", 1, channelBox=True)
cmds.connectAttr(self.jntCtrl+".scaleCompensate", self.jnt+".segmentScaleCompensate", force=True)
if n == 0:
utils.originedFrom(objName=self.jntCtrl, attrString=self.base+";"+self.guide+";"+self.radiusGuide)
self.ctrlZeroGrp = self.zeroOutCtrlGrp
elif n == self.nJoints-1:
utils.originedFrom(objName=self.jntCtrl, attrString=self.guide+";"+self.cvEndJoint)
else:
utils.originedFrom(objName=self.jntCtrl, attrString=self.guide)
# grouping:
if n > 0:
# parent joints as a simple chain (line)
self.fatherJnt = side+self.userGuideName+"_%02d_Jnt"%(n-1)
cmds.parent(self.jnt, self.fatherJnt, absolute=True)
# parent zeroCtrl Group to the before jntCtrl:
self.fatherCtrl = side+self.userGuideName+"_%02d_Ctrl"%(n-1)
cmds.parent(self.zeroOutCtrlGrp, self.fatherCtrl, absolute=True)
# control drives joint:
cmds.parentConstraint(self.jntCtrl, self.jnt, maintainOffset=False, name=self.jnt+"_PaC")
cmds.scaleConstraint(self.jntCtrl, self.jnt, maintainOffset=True, name=self.jnt+"_ScC")
# add articulationJoint:
if n > 0:
if self.addArticJoint:
artJntList = utils.articulationJoint(self.fatherJnt, self.jnt) #could call to create corrective joints. See parameters to implement it, please.
utils.setJointLabel(artJntList[0], s+jointLabelAdd, 18, self.userGuideName+"_%02d_Jar"%(n))
cmds.select(self.jnt)
# end chain:
if n == self.nJoints-1:
# create end joint:
self.endJoint = cmds.joint(name=side+self.userGuideName+"_JEnd", radius=0.5)
cmds.delete(cmds.parentConstraint(self.cvEndJoint, self.endJoint, maintainOffset=False))
# create a masterModuleGrp to be checked if this rig exists:
self.toCtrlHookGrp = cmds.group(self.ctrlZeroGrp, name=side+self.userGuideName+"_Control_Grp")
self.toScalableHookGrp = cmds.group(self.skinJointList[0], name=side+self.userGuideName+"_Joint_Grp")
self.toStaticHookGrp = cmds.group(self.toCtrlHookGrp, self.toScalableHookGrp, name=side+self.userGuideName+"_Grp")
# create a locator in order to avoid delete static group
loc = cmds.spaceLocator(name=side+self.userGuideName+"_DO_NOT_DELETE_PLEASE_Loc")[0]
cmds.parent(loc, self.toStaticHookGrp, absolute=True)
cmds.setAttr(loc+".visibility", 0)
self.ctrls.setLockHide([loc], ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v'])
# add hook attributes to be read when rigging integrated modules:
utils.addHook(objName=self.toCtrlHookGrp, hookType='ctrlHook')
utils.addHook(objName=self.toScalableHookGrp, hookType='scalableHook')
utils.addHook(objName=self.toStaticHookGrp, hookType='staticHook')
cmds.addAttr(self.toStaticHookGrp, longName="dpAR_name", dataType="string")
cmds.addAttr(self.toStaticHookGrp, longName="dpAR_type", dataType="string")
cmds.setAttr(self.toStaticHookGrp+".dpAR_name", self.userGuideName, type="string")
cmds.setAttr(self.toStaticHookGrp+".dpAR_type", CLASS_NAME, type="string")
# add module type counter value
cmds.addAttr(self.toStaticHookGrp, longName='dpAR_count', attributeType='long', keyable=False)
cmds.setAttr(self.toStaticHookGrp+'.dpAR_count', dpAR_count)
if hideJoints:
cmds.setAttr(self.toScalableHookGrp+".visibility", 0)
# delete duplicated group for side (mirror):
cmds.delete(side+self.userGuideName+'_'+self.mirrorGrp)
# finalize this rig:
self.integratingInfo()
cmds.select(clear=True)
# delete UI (moduleLayout), GUIDE and moduleInstance namespace:
self.deleteModule()
def integratingInfo(self, *args):
Base.StartClass.integratingInfo(self)
``` |
[
{
"content": "Recreate the original code text:\n```python\n\"\"\"run nose tests on <package-name> with coverage\"\"\"\n\n\nfrom __future__ import print_function, division\nimport os\nimport nose\nimport importlib\n\n\ndef main():\n \"\"\"run nose tests with coverage on \"<package-name>\" where <package-name ... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n\"\"\"run nose tests on <package-name> with coverage\"\"\"\n\n\nfrom __future__ import print_function, division\nimport os\nimport nose\nimport importlib\n\n\ndef main():\n \"\"\"run nose tests with coverage on \"<package-name>\" wher... | ```python
"""run nose tests on <package-name> with coverage"""
from __future__ import print_function, division
import os
import nose
import importlib
def main():
"""run nose tests with coverage on "<package-name>" where <package-name is ".." dir"""
path = os.path.abspath(os.path.join('..'))
package_name = os.path.basename(path)
try:
my_module = importlib.import_module(package_name)
except ImportError:
raise ImportError('Cound not import {} so cannot '
'run nose tests'.format(package_name))
# need to change the working directory to the installed package
# otherwise nose will just find <package-name> based on the current
# directory
cwd = os.getcwd()
package_path = os.path.dirname(my_module.__file__)
os.chdir(package_path)
# print(os.path.join(cwd, 'cover'))
print('nose tests with coverage on "{}" package.'.format(package_name))
#'nose ignores 1st argument http://stackoverflow.com/a/7070571/2530083'
nose.main(argv=['nose_ignores_1st_arg',
'-v',
'--with-doctest',
'--doctest-options=+ELLIPSIS',
'--doctest-options=+IGNORE_EXCEPTION_DETAIL'
'--with-coverage',
'--cover-erase',
'--cover-package=geotecha',
'--cover-tests',
'--cover-html',
'--cover-html-dir={}'.format(os.path.join(cwd, 'cover'))])
os.chdir(cwd)
if __name__ == '__main__':
main()
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nimport math\nimport statistics\nfrom itertools import groupby\nfrom random import randint\nfrom typing import Dict, Tuple, Counter\n\nimport pandas as pd\n\nfrom Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegr... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nimport math\nimport statistics\nfrom itertools import groupby\nfrom random import randint\nfrom typing import Dict, Tuple, Counter\n\nimport pandas as pd\n\nfrom Src.BioAnalyzer.Analysis.GenePrioritization.... | ```python
import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
"""
"""
def __init__(self,
intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
get_global_diff_values_action,
get_mirna_gene_target_action):
super().__init__(intermediateRepresentationGenerator)
self.__get_mirna_gene_target_action = get_mirna_gene_target_action
self.__get_global_diff_values_action = get_global_diff_values_action
def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
mirna_gene_targets = {mirna.lower(): g for mirna, g in
self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
for id_entrez in id_entrez_list]))
measure_matrix = dict([(g, []) for g in id_entrez_list])
key_func = lambda gene: gene[0]
for patient_id, exp_values in mirna_samples.items():
gene_values = [(id_entrez,
exp_value) for mirna_symbol, exp_value in exp_values.items()
for id_entrez in mirna_gene_targets[mirna_symbol]]
gene_values = sorted(gene_values, key=key_func)
for id_entrez, measures in groupby(gene_values, key_func):
measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
return gene_matrix, \
self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
def __get_mirna_gene_targets(self, mirnas):
gene_targets = {}
fe_target = self.__get_mirna_gene_target_action(mirnas)
gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
if t.microrna_symbol in gene_targets
else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
return gene_targets
def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
return from_sample_matrix.to_dict(orient="index")
def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
if diff.element_id in mirna_gene_targets]
genes_status = [(g, diff.status) for diff in diff_mirna
for g in mirna_gene_targets[diff.element_id] if g in genes]
key_func = lambda gene: gene[0]
genes_status = sorted(genes_status, key=key_func)
genes_status_dict = {}
for id_entrez, status in groupby(genes_status, key_func):
status = list(status)
status_counter = Counter(status)
status = [k for k, v in status_counter.most_common()]
len_status = len(status) - 1
genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()])
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License a... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General ... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_datacenters
short_description: Module to manage data centers in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage data centers in oVirt/RHV"
options:
name:
description:
- "Name of the the data center to manage."
required: true
state:
description:
- "Should the data center be present or absent"
choices: ['present', 'absent']
default: present
description:
description:
- "Description of the data center."
comment:
description:
- "Comment of the data center."
local:
description:
- "I(True) if the data center should be local, I(False) if should be shared."
- "Default value is set by engine."
compatibility_version:
description:
- "Compatibility version of the data center."
quota_mode:
description:
- "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
choices: ['disabled', 'audit', 'enabled']
mac_pool:
description:
- "MAC pool to be used by this datacenter."
- "IMPORTANT: This option is deprecated in oVirt/RHV 4.1. You should
use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are
set per cluster since 4.1."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create datacenter
- ovirt_datacenters:
name: mydatacenter
local: True
compatibility_version: 4.0
quota_mode: enabled
# Remove datacenter
- ovirt_datacenters:
state: absent
name: mydatacenter
'''
RETURN = '''
id:
description: "ID of the managed datacenter"
returned: "On success if datacenter is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
data_center:
description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt/RHV instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/datacenter."
returned: "On success if datacenter is found."
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class DatacentersModule(BaseModule):
def __get_major(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.major
return int(full_version.split('.')[0])
def __get_minor(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.minor
return int(full_version.split('.')[1])
def _get_mac_pool(self):
mac_pool = None
if self._module.params.get('mac_pool'):
mac_pool = search_by_name(
self._connection.system_service().mac_pools_service(),
self._module.params.get('mac_pool'),
)
return mac_pool
def build_entity(self):
return otypes.DataCenter(
name=self._module.params['name'],
comment=self._module.params['comment'],
description=self._module.params['description'],
mac_pool=otypes.MacPool(
id=getattr(self._get_mac_pool(), 'id', None),
) if self._module.params.get('mac_pool') else None,
quota_mode=otypes.QuotaModeType(
self._module.params['quota_mode']
) if self._module.params['quota_mode'] else None,
local=self._module.params['local'],
version=otypes.Version(
major=self.__get_major(self._module.params['compatibility_version']),
minor=self.__get_minor(self._module.params['compatibility_version']),
) if self._module.params['compatibility_version'] else None,
)
def update_check(self, entity):
minor = self.__get_minor(self._module.params.get('compatibility_version'))
major = self.__get_major(self._module.params.get('compatibility_version'))
return (
equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
equal(self._module.params.get('local'), entity.local) and
equal(minor, self.__get_minor(entity.version)) and
equal(major, self.__get_major(entity.version))
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
description=dict(default=None),
local=dict(type='bool'),
compatibility_version=dict(default=None),
quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
comment=dict(default=None),
mac_pool=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
data_centers_service = connection.system_service().data_centers_service()
clusters_module = DatacentersModule(
connection=connection,
module=module,
service=data_centers_service,
)
state = module.params['state']
if state == 'present':
ret = clusters_module.create()
elif state == 'absent':
ret = clusters_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
``` |
[
{
"content": "Repeat the code exactly:\n```python\nimport numpy\n\n### sample standard deviation\nclass stdevs:\n def __init__(self):\n self.list = []\n self.x = 0\n def step(self, value):\n if value != None:\n self.list.append(value)\n def finalize(self):\n #prin... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\nimport numpy\n\n### sample standard deviation\nclass stdevs:\n def __init__(self):\n self.list = []\n self.x = 0\n def step(self, value):\n if value != None:\n self.list.append(value)\n def finalize(self)... | ```python
import numpy
### sample standard deviation
class stdevs:
def __init__(self):
self.list = []
self.x = 0
def step(self, value):
if value != None:
self.list.append(value)
def finalize(self):
#print(self.list)
if len(self.list) > 1:
self.x = numpy.std(self.list, ddof=1)
else:
self.x = None
return self.x
### population standard deviation
class stdevp:
def __init__(self):
self.list = []
self.x = 0
def step(self, value):
if value != None:
self.list.append(value)
def finalize(self):
#print(self.list)
if len(self.list) > 1:
self.x = numpy.std(self.list, ddof=0)
else:
self.x = None
return self.x
### weighted mean
class meanw:
def __init__(self):
self.wgtlist = []
self.list = []
self.x = 0
def step(self, value, wgt):
if wgt == None:
wgt = 1
if value != None:
self.list.append(value)
self.wgtlist.append(wgt)
def finalize(self):
#print(self.list)
if len(self.list) >= 1:
y = numpy.array(self.list)
w = numpy.array(self.wgtlist)
self.x = (numpy.sum(w*y))/(numpy.sum(w))
else:
self.x = None
return self.x
### weighted standard deviation
class stdevw:
def __init__(self):
self.wgtlist = []
self.list = []
self.x = 0
def step(self, value, wgt):
if wgt == None:
wgt = 1
if value != None:
self.list.append(value)
self.wgtlist.append(wgt)
def finalize(self):
#print(self.list)
if len(self.list) > 1:
#unbiased estimator of variance with sample weights
#https://www.gnu.org/software/gsl/manual/html_node/Weighted-Samples.html
#https://en.wikipedia.org/wiki/Weighted_arithmetic_mean ###Reliability weights
y = numpy.array(self.list)
w = numpy.array(self.wgtlist)
V1 = numpy.sum(w)
V2 = numpy.sum(w**2)
mu = (numpy.sum(w*y)/V1) #weighted mean
muArray = numpy.full(y.size, mu)
sigma2w = numpy.sum(w*((y-muArray)**2))
self.x = (sigma2w/(V1-(V2/V1)))**(0.5)
#print("mu:",mu,"V1:",V1,"V2:",V2,"sigma2w:", sigma2w,"x:", self.x)
else:
self.x = None
return self.x
``` |
[
{
"content": "Write the code verbatim:\n```python\nimport func\nfrom var import var\nimport sys\nimport pf\n\n\nclass NodeBranchPart(object):\n\n def __init__(self):\n self.rMatrixNum = -1\n self.gdasrvNum = -1\n #self.bigP = None\n\n\nclass NodeBranch(object):\n\n def __init__(self):... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nimport func\nfrom var import var\nimport sys\nimport pf\n\n\nclass NodeBranchPart(object):\n\n def __init__(self):\n self.rMatrixNum = -1\n self.gdasrvNum = -1\n #self.bigP = None\n\n\nclass NodeBranch(object):\n\n def... | ```python
import func
from var import var
import sys
import pf
class NodeBranchPart(object):
def __init__(self):
self.rMatrixNum = -1
self.gdasrvNum = -1
#self.bigP = None
class NodeBranch(object):
def __init__(self):
self.len = 0.1
# self.textDrawSymbol = '-' # See var.modelSymbols for some
# alternative symbols
self.rawSplitKey = None # Odd or even
self.splitKey = None # Only even
#self.name = None
# self.uName = None # under-name
# self.color = None # US spelling.
# self.support = None # A float, so that it can preserve all its significant
# digits, yet can be formatted flexibly for output.
# self.biRootCount = None # For cons trees, where the input trees are
# bi-Rooted, ie have bifurcating roots. This
# is the number of compatible input trees that
# were rooted on this branch.
self.parts = [] # NodeBranchPart() objects
self.lenChanged = False
class NodePart(object):
def __init__(self):
#self.pats = None
#self.nPats = 0
self.compNum = -1
#self.cl = None
#self.cl2 = None
class Node(object):
"""A Node is a vertex in a Tree. All but the root have a branch.
A Node has pointers to its parent, leftChild, and sibling, any of which may be None.
"""
# def __del__(self, freeNode=pf.p4_freeNode, dp_freeNode=pf.dp_freeNode,
# mysys=sys):
def __del__(self, freeNode=pf.p4_freeNode, mysys=sys):
# def __del__(self, freeNode=pf.p4_freeNode, dp_freeNode=pf.dp_freeNode):
# def __del__(self, freeNode=pf.p4_freeNode):
# if self.nodeNum == 0:
#mysys.stdout.write('Node.__del__() deleting node %i\n' % self.nodeNum)
# mysys.stdout.flush()
# Generally, cNodes are deleted before the cTree is freed. freeNode
# requires the cTree!
if self.cNode:
mysys.stdout.write('Node.__del__() node %i (%s) has a cNode (%s). How?!?\n' % (
self.nodeNum, self, self.cNode))
if self.doDataPart:
dp_freeNode(self.cNode)
else:
freeNode(self.cNode)
self.cNode = None
def __init__(self):
self.name = None
self.nodeNum = -1
self.parent = None
self.leftChild = None
self.sibling = None
self.isLeaf = 0
self.cNode = None # Pointer to a c-struct
# Zero-based seq numbering of course, so -1 means no sequence.
self.seqNum = -1
self.br = NodeBranch()
# self.rootCount = None # For cons trees, where the input trees do not
# have bifurcating roots. This is the number of
# compatible input trees that were rooted on this node.
self.parts = [] # NodePart objects
self.doDataPart = 0
self.flag = 0
def wipe(self):
"""Set the pointers parent, leftChild, and sibling to None"""
self.parent = None
self.leftChild = None
self.sibling = None
def rightmostChild(self):
"""Find and return the rightmostChild of self.
If self has no children, return None.
"""
n = self.leftChild
if not n:
return None
while n.sibling:
n = n.sibling
return n
def leftSibling(self):
"""Find and return the sibling on the left.
A node has a pointer to its sibling, but that is the sibling
on the right. It is a bit awkward to find the sibling on the
left, as you need to go via the parent and the leftChild of
the parent.
If there is no parent, return None. If there is no
leftSibling, return None.
"""
if not self.parent:
# print 'leftSibling(%i). No parent. returning None.' %
# self.nodeNum
return None
lsib = self.parent.leftChild
if lsib == self:
# print 'leftSibling(%i). self is self.parent.leftChild.
# returning None.' % self.nodeNum
return None
while lsib:
if lsib.sibling == self:
# print 'leftSibling(%i): returning node %i' % (self.nodeNum,
# lsib.nodeNum)
return lsib
lsib = lsib.sibling
# These next 3 were suggestions from Rick Ree. Thanks, Rick!
# Then I added a couple more. Note that all of these use
# recursion, and so could bump into the recursion limit, and might
# fail on large trees. However, I tried iterPreOrder() on a
# random tree of 10,000 taxa, and it was fine.
# You can temporarily set a different recursion limit with the sys module.
# oldlimit = sys.getrecursionlimit()
# sys.setrecursionlimit(newLimit)
# See also Tree.iterNodesNoRoot()
def iterChildren(self):
n = self.leftChild
while n:
yield n
n = n.sibling
def iterPostOrder(self):
for c in self.iterChildren():
for n in c.iterPostOrder():
yield n
yield self
def iterPreOrder(self):
yield self
for c in self.iterChildren():
for n in c.iterPreOrder():
yield n
def iterLeaves(self):
for n in self.iterPreOrder():
if n.isLeaf:
yield n
def iterInternals(self):
for n in self.iterPreOrder():
if not n.isLeaf:
yield n
def iterDown(self, showDown=False):
"""Iterates over all the nodes below self (including self)
Starts by returning self. And then iterates over all nodes below self.
It does so by a combination of Node.iterPreOrder() and
Node.iterDown() (ie recursively). Now sometimes we want to
know if the nodes that are returned come from iterDown()
(strictly) or not (ie from iterPreOrder()). If that bit of
info is needed, then you can turn on the arg ``showDown``.
(The following is probably bad Python practice!) When that is done, whenever
iterDown() is called the first node that is returned will have
the attribute ``down`` set to True. But after it is returned,
that ``down`` attribute is zapped (to try to keep the bloat
down ...). So you need to test ``if hasattr(yourNode,
'down'):`` before you actually use it.
"""
if showDown:
self.down = True
yield self
if showDown:
del(self.down)
if self.parent:
for c in self.parent.iterChildren():
if c == self:
for n in c.parent.iterDown(showDown):
yield n
else:
for n in c.iterPreOrder():
yield n
# ###############################
def getNChildren(self):
"""Returns the number of children that the node has."""
if not self.leftChild:
return 0
c = self.leftChild
counter = 0
while c:
c = c.sibling
counter += 1
return counter
def isAncestorOf(self, otherNode):
"""Asks whether self is an an ancestor of otherNode."""
n = otherNode
while 1:
n = n.parent
if not n:
return False
elif n == self:
return True
def _ladderize(self, biggerGroupsOnBottom):
"""This is only used by Tree.ladderize()."""
# print '====Node %i' % self.nodeNum
if not self.leftChild:
pass
else:
nLeaves = []
children = []
ch = self.leftChild
while ch:
nL = len([n2 for n2 in ch.iterLeaves()])
nLeaves.append(nL)
ch.nLeaves = nL
children.append(ch)
ch = ch.sibling
# print ' nLeaves = %s' % nLeaves
allOnes = True
for ch in children:
if ch.nLeaves > 1:
allOnes = False
break
if not allOnes:
children = func.sortListOfObjectsOnAttribute(
children, 'nLeaves')
if not biggerGroupsOnBottom:
children.reverse()
# print '\n Children\n ------------'
# for ch in children:
# print ' node=%i, nLeaves=%i' % (ch.nodeNum, ch.nLeaves)
self.leftChild = children[0]
theLeftChild = self.leftChild
theLeftChild.sibling = None
for ch in children[1:]:
theLeftChild.sibling = ch
theLeftChild = ch
theLeftChild.sibling = None
for ch in children:
del(ch.nLeaves)
for ch in self.iterChildren():
ch._ladderize(biggerGroupsOnBottom)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n\"\"\"\nSAX driver for the Pyexpat C module. This driver works with\npyexpat.__version__ == '2.22'.\n\"\"\"\n\nversion = \"0.20\"\n\nfrom xml.sax._exceptions import *\n\n# xml.parsers.expat does not raise ImportError in Jython\nimport sys\nif ... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n\"\"\"\nSAX driver for the Pyexpat C module. This driver works with\npyexpat.__version__ == '2.22'.\n\"\"\"\n\nversion = \"0.20\"\n\nfrom xml.sax._exceptions import *\n\n# xml.parsers.expat does not raise ImportError in Jython\... | ```python
"""
SAX driver for the Pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
import string
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"SAX driver for the Pyexpat C module."
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
self.reset()
self._cont_handler.setDocumentLocator(self)
xmlreader.IncrementalParser.parse(self, source)
def prepareParser(self, source):
if source.getSystemId() != None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandle to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == handler.feature_namespaces:
return self._namespaces
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == handler.feature_namespaces:
self._namespaces = state
else:
raise SAXNotRecognizedException("Feature '%s' not recognized" %
name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
else:
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error:
error_code = self._parser.ErrorCode
exc = SAXParseException(expat.ErrorString(error_code), None, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
# If we are completing an external entity, do nothing here
return
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
self._parser.CommentHandler = self._lex_handler_prop.comment
self._parser.StartCdataSectionHandler = self._lex_handler_prop.startCDATA
self._parser.EndCdataSectionHandler = self._lex_handler_prop.endCDATA
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(None, " ")
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate()
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = string.split(name)
if len(pair) == 1:
pair = (None, name)
else:
pair = tuple(pair)
newattrs = {}
for (aname, value) in attrs.items():
apair = string.split(aname)
if len(apair) == 1:
apair = (None, aname)
else:
apair = tuple(apair)
newattrs[apair] = value
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, {}))
def end_element_ns(self, name):
pair = string.split(name)
if len(pair) == 1:
pair = (None, name)
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
# ---
def create_parser(*args, **kwargs):
return apply(ExpatParser, args, kwargs)
# ---
if __name__ == "__main__":
import xml.sax
p = create_parser()
p.setContentHandler(xml.sax.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("../../../hamlet.xml")
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n\"\"\"Class for the quaternions with inversion.\"\"\"\n\nimport numpy as np\n\nimport utils\n\n# quaternion parameters for the group O from Table 71.1 in:\n# Simon L. Altmann, Peter Herzig, \"Point-Group Theory Tables\", \n# Second Edition (cor... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n\"\"\"Class for the quaternions with inversion.\"\"\"\n\nimport numpy as np\n\nimport utils\n\n# quaternion parameters for the group O from Table 71.1 in:\n# Simon L. Altmann, Peter Herzig, \"Point-Group Theory Tables\", \n# Sec... | ```python
"""Class for the quaternions with inversion."""
import numpy as np
import utils
# quaternion parameters for the group O from Table 71.1 in:
# Simon L. Altmann, Peter Herzig, "Point-Group Theory Tables",
# Second Edition (corrected), Wien (2011)
V12 = np.sqrt(0.5) # sqrt(1/2)
# [[ lambda, Lambda_1, Lambda_2, Lambda_3 ]]
qPar = np.asarray(
[[ 1.0, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 0.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ],
[ 0.5, 0.5, 0.5, 0.5 ],
[ 0.5,-0.5,-0.5, 0.5 ],
[ 0.5, 0.5,-0.5,-0.5 ],
[ 0.5,-0.5, 0.5,-0.5 ],
[ 0.5,-0.5,-0.5,-0.5 ],
[ 0.5, 0.5, 0.5,-0.5 ],
[ 0.5,-0.5, 0.5, 0.5 ],
[ 0.5, 0.5,-0.5, 0.5 ],
[ V12, V12, 0.0, 0.0 ],
[ V12, 0.0, V12, 0.0 ],
[ V12, 0.0, 0.0, V12 ],
[ V12,-V12, 0.0, 0.0 ],
[ V12, 0.0,-V12, 0.0 ],
[ V12, 0.0, 0.0,-V12 ],
[ 0.0, V12, V12, 0.0 ],
[ 0.0,-V12, V12, 0.0 ],
[ 0.0, V12, 0.0, V12 ],
[ 0.0, 0.0,-V12,-V12 ],
[ 0.0, V12, 0.0,-V12 ],
[ 0.0, 0.0,-V12, V12 ]])
class QNew(object):
def __init__(self):
self.q = np.zeros((4,))
self.i = int(1)
self.prec = 1e-6
@classmethod
def create_from_vector(cls, vector, inversion):
tmp = cls()
_vec = np.asarray(vector)
tmp.q = _vec.copy()
_inv = int(inversion)
tmp.i = _inv
return tmp
def __eq__(self, other):
if not isinstance(other, QNew):
return False
if np.allclose(self.q, other.q) and self.i == other.i:
return True
return False
def __ne__(self, other):
if not isinstance(other, QNew):
return True
if not np.allclose(self.q, other.q) or self.i != other.i:
return True
return False
def __abs__(self):
return np.sqrt(np.dot(self.q, self.q))
def __neg__(self):
self.q = -self.q
self.i = -self.i
return self
def __mul__(self, other):
q1 = self.q
q2 = other.q
tvec = np.zeros_like(q1)
tvec[0] = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
tvec[1] = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]
tvec[2] = q1[0]*q2[2] - q1[1]*q2[3] + q1[2]*q2[0] + q1[3]*q2[1]
tvec[3] = q1[0]*q2[3] + q1[1]*q2[2] - q1[2]*q2[1] + q1[3]*q2[0]
tinv = self.i * other.i
return QNew.create_from_vector(tvec, tinv)
def conj(self):
tvec = self.q * np.asarray([1., -1., -1., -1.])
return QNew.create_from_vector(tvec, self.i)
def norm(self):
return np.dot(self.q, self.q)
def __str__(self):
return "[%r, %r, %r, %r] with inversion %d" % (self.q[0], self.q[1], self.q[2], self.q[3], self.i)
def comp(self, vec):
return np.allclose(self.q, vec)
# code inspired by the quaternion package of moble
# https://github.com/moble/quaternion
def rotation_matrix(self, inv=False):
n = self.norm()
if np.abs(n) < self.prec:
raise ZeroDivisionError("Norm of quaternion is zero.")
_q = self.q
if np.abs(1-n) < self.prec:
res = np.array(
[[1-2*(_q[2]**2 + _q[3]**2), 2*(_q[1]*_q[2] - _q[3]*_q[0]),
2*(_q[1]*_q[3] + _q[2]*_q[0])],
[2*(_q[1]*_q[2] + _q[3]*_q[0]), 1-2*(_q[1]**2 + _q[3]**2),
2*(_q[2]*_q[3] - _q[1]*_q[0])],
[2*(_q[1]*_q[3] - _q[2]*_q[0]), 2*(_q[2]*_q[3] + _q[1]*_q[0]),
1-2*(_q[1]**2 + _q[2]**2)]])
else:
res = np.array(
[[1-2*(_q[2]**2 + _q[3]**2)/n, 2*(_q[1]*_q[2] - _q[3]*_q[0])/n,
2*(_q[1]*_q[3] + _q[2]*_q[0])/n],
[2*(_q[1]*_q[2] + _q[3]*_q[0])/n, 1-2*(_q[1]**2 + _q[3]**2)/n,
2*(_q[2]*_q[3] - _q[1]*_q[0])/n],
[2*(_q[1]*_q[3] - _q[2]*_q[0])/n, 2*(_q[2]*_q[3] + _q[1]*_q[0])/n,
1-2*(_q[1]**2 + _q[2]**2)/n]])
if inv:
res *= self.i
return res
def base(self, inv=False):
pass
def R(self, j, mp, m):
"""compute transformation matrix element
j j 0 1 2 3
R (Q) = R (Q , Q , Q , Q )
m'm m'm
-j
in __
j \ j j
[R.u] = /__ u R (Q) ,
m m'=j m' m'm
according to the formula:
__ ___________________________
j \ /(j-m')(j+m' )(j-m )(j+m) j+m-k j-m'-k m'-m+k k
R = /__ \/ ( k )(m'-m+k)(m'-m+k)( k ) (a) (a*) (b) (-b*)
m'm k
0 3 2 1
where a := Q - i.Q ; b := -Q -i.Q .
first three arguments to be provided as multiplicities:
[J] = 2j+1, [M] = 2m+1, [MP] = 2m'+1, these are always integer
[-3/2] --> -2; [-1] --> -1; [-1/2] --> 0; [0] --> 1; [1/2] --> 2, etc.
"""
a = complex( self.q[0], -self.q[3] )
ac = complex( self.q[0], self.q[3] ) # complex conjugate of a
b = complex(-self.q[2], -self.q[1] )
mbc = complex( self.q[2], -self.q[1] ) # - complex conjugate of b
res = complex( 0.0 )
j_p_mp = ( j + mp - 2 ) // 2 # j+m'
j_m_mp = ( j - mp ) // 2 # j-m'
j_p_m = ( j + m - 2 ) // 2 # j+m
j_m_m = ( j - m ) // 2 # j-m
if j_p_mp < 0 or j_m_mp < 0 or j_p_m < 0 or j_m_m < 0:
return res
# prepare constant arrays
mp_m_m = j_p_mp - j_p_m
n = np.asarray([j_m_mp, j_p_mp, j_m_m, j_p_m])
kp = np.asarray([0, mp_m_m, mp_m_m, 0])
_a = np.asarray([a, ac, b, mbc])
aexp = np.asarray([j_p_m, j_m_mp, mp_m_m, 0])
# get range for loop
k_mx = int(j_p_m if (j_p_m < j_m_mp) else j_m_mp)
k_mn = int(-j_p_mp+j_p_m if (-j_p_mp+j_p_m > 0) else 0)
for k in range(k_mn, k_mx+1):
_k = kp + k
factor = np.sqrt(np.prod(utils.binomial(n, _k))*complex(1.))
_aexp = aexp + np.asarray([-k, -k, k, k])
prod = np.prod(np.power(_a, _aexp))
res += factor * prod
return res
def R_matrix(self, j):
multi = int(2*j+1)
res = np.zeros((multi, multi), dtype=complex)
# the sorting is important, start at largest m
# and decrease
for im in range(multi):
m = j - im
for imp in range(multi):
mp = j - imp
res[im, imp] = self.R(multi, int(2*m+1), int(2*mp+1))
if j%2:
res *= self.i
return res
def omega(self):
return 2*np.arccos(self.q[0])
if __name__ == "__main__":
print("for checks execute the test script")
``` |
[
{
"content": "Here is the source code:\n```python\nimport unittest\n\nimport django\nfrom django.db import IntegrityError\nfrom django.conf import settings\n\nfrom django_migration_testcase import MigrationTest\nfrom django_migration_testcase.base import InvalidModelStateError, idempotent_transaction\n\n\nclass... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport unittest\n\nimport django\nfrom django.db import IntegrityError\nfrom django.conf import settings\n\nfrom django_migration_testcase import MigrationTest\nfrom django_migration_testcase.base import InvalidModelStateError, idempotent_transa... | ```python
import unittest
import django
from django.db import IntegrityError
from django.conf import settings
from django_migration_testcase import MigrationTest
from django_migration_testcase.base import InvalidModelStateError, idempotent_transaction
class ExampleMigrationTest(MigrationTest):
before = '0001_initial'
after = '0002_mymodel_number'
app_name = 'test_app'
def test_migration(self):
MyModel = self.get_model_before('MyModel')
for i in range(10):
mymodel = MyModel()
mymodel.name = 'example name {}'.format(i)
mymodel.save()
self.assertEqual(MyModel.objects.count(), 10)
self.run_migration()
MyModel = self.get_model_after('MyModel')
self.assertEqual(MyModel.objects.count(), 10)
def test_run_reverse_migration(self):
MyModel = self.get_model_before('MyModel')
for i in range(10):
mymodel = MyModel()
mymodel.name = 'example name {}'.format(i)
mymodel.save()
self.assertEqual(MyModel.objects.count(), 10)
self.run_migration()
MyModel = self.get_model_after('MyModel')
self.assertEqual(MyModel.objects.count(), 10)
self.run_reverse_migration()
self.assertEqual(MyModel.objects.count(), 10)
def test_invalid_field(self):
MyModel = self.get_model_before('MyModel')
mymodel = MyModel()
mymodel.number = 10
mymodel.save()
mymodel = MyModel.objects.get()
with self.assertRaises(AttributeError):
mymodel.number
self.run_migration()
MyModel = self.get_model_after('MyModel')
mymodel = MyModel.objects.get()
self.assertEqual(mymodel.number, None)
mymodel.number = 10
mymodel.save()
mymodel = MyModel.objects.get()
self.assertEqual(mymodel.number, 10)
def field_names(model_class):
try:
return model_class._meta.get_all_field_names()
except AttributeError:
return [f.name for f in model_class._meta.get_fields()]
class AddDoubleNumberTest(MigrationTest):
before = '0002_mymodel_number'
after = '0003_mymodel_double_number'
app_name = 'test_app'
def test_migration(self):
MyModel = self.get_model_before('MyModel')
self.assertNotIn('double_number', field_names(MyModel))
self.run_migration()
MyModel = self.get_model_after('MyModel')
self.assertIn('double_number', field_names(MyModel))
class MigrationsByNumberOnlyTest(MigrationTest):
before = '0002'
after = '0003'
app_name = 'test_app'
def test_migration(self):
MyModel = self.get_model_before('MyModel')
self.assertNotIn('double_number', field_names(MyModel))
self.run_migration()
MyModel = self.get_model_after('MyModel')
self.assertIn('double_number', field_names(MyModel))
class PopulateDoubleNumberTest(MigrationTest):
before = '0003_mymodel_double_number'
after = '0004_populate_mymodel_double_number'
app_name = 'test_app'
def test_migration(self):
MyModel = self.get_model_before('MyModel')
for i in range(10):
mymodel = MyModel()
mymodel.name = 'example name {}'.format(i)
mymodel.number = i
mymodel.save()
self.run_migration()
MyModel = self.get_model_after('MyModel')
for mymodel in MyModel.objects.all():
self.assertEqual(mymodel.number * 2, mymodel.double_number)
class GetModelMigrationTest(MigrationTest):
before = '0001_initial'
after = '0002_mymodel_number'
app_name = 'test_app'
def test_migration(self):
MyModel = self.get_model_before('test_app.MyModel')
self.assertEqual(MyModel.__name__, 'MyModel')
self.run_migration()
MyModel = self.get_model_after('test_app.MyModel')
self.assertEqual(MyModel.__name__, 'MyModel')
class ForeignKeyTest(MigrationTest):
before = '0004_populate_mymodel_double_number'
after = '0005_foreignmodel'
app_name = 'test_app'
def test_migration(self):
MyModel = self.get_model_before('test_app.MyModel')
self.assertEqual(MyModel.__name__, 'MyModel')
self.run_migration()
ForeignModel = self.get_model_after('test_app.ForeignModel')
self.assertEqual(ForeignModel.__name__, 'ForeignModel')
MyModel = self.get_model_after('test_app.MyModel')
self.assertEqual(MyModel.__name__, 'MyModel')
my = MyModel(name='test_my', number=1, double_number=3.14)
my.save()
ForeignModel(name='test_foreign', my=my)
def test_migration2(self):
"""Same test as test_migration, but this one passes."""
MyModel = self.get_model_before('test_app.MyModel')
self.assertEqual(MyModel.__name__, 'MyModel')
self.run_migration()
ForeignModel = self.get_model_after('test_app.ForeignModel')
self.assertEqual(ForeignModel.__name__, 'ForeignModel')
# get_model_before/get_model_after seems to not get the same model as
# this crazy thing.
if django.VERSION >= (2, 0):
MyModel = ForeignModel.my.field.related_model
else:
MyModel = ForeignModel.my.field.rel.to
self.assertEqual(MyModel.__name__, 'MyModel')
my = MyModel(name='test_my', number=1, double_number=3.14)
my.save()
ForeignModel(name='test_foreign', my=my)
def test_migration_clearly(self):
"""A clear illustration of the problem."""
self.run_migration()
ForeignModel = self.get_model_after('test_app.ForeignModel')
# get_model_before/get_model_after seems to not get the same model as
# this crazy thing.
if django.VERSION >= (2, 0):
MyModel = ForeignModel.my.field.related_model
else:
MyModel = ForeignModel.my.field.rel.to
MyModel2 = self.get_model_after('test_app.MyModel')
self.assertEqual(MyModel, MyModel2)
class UtilsMigrationTest(MigrationTest):
before = '0001_initial'
after = '0002_mymodel_number'
app_name = 'test_app'
def test_migration_not_run_exception(self):
with self.assertRaises(InvalidModelStateError):
self.get_model_after('MyModel')
with self.assertRaises(InvalidModelStateError):
self.run_reverse_migration()
def test_migration_already_run_exception(self):
self.run_migration()
with self.assertRaises(InvalidModelStateError):
self.get_model_before('MyModel')
class MigrateFromZero(MigrationTest):
before = 'zero'
after = '0001_initial'
app_name = 'test_app'
def test_model_exists(self):
with self.assertRaises(LookupError):
self.get_model_before('MyModel')
self.run_migration()
MyModel = self.get_model_after('MyModel')
self.assertEqual(MyModel.__name__, 'MyModel')
class TeardownCanFail(MigrationTest):
before = '0006'
after = '0007'
app_name = 'test_app'
def test_second_model_name_is_unique(self):
model_before = self.get_model_before('MySecondModel')
model_before.objects.create(name='foo')
model_before.objects.create(name='foo')
with self.assertRaises(IntegrityError):
self.run_migration()
def tearDown(self):
self.assertTrue(self.get_model_before('MySecondModel').objects.all().exists())
with self.assertRaises(IntegrityError):
# tearDown fails since migrations runs again with the data
super(TeardownCanFail, self).tearDown()
self.get_model_before('MySecondModel').objects.all().delete()
super(TeardownCanFail, self).tearDown()
@unittest.skipIf(django.VERSION < (1, 7), 'Not supported by older django versions')
@unittest.skipIf(django.VERSION >= (2, 0) and settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3',
'Not supported with django2 with sqlite3')
class TeardownFailCanBeAvoidedWithIdempotentTransaction(MigrationTest):
before = '0006'
after = '0007'
app_name = 'test_app'
@idempotent_transaction
def test_second_model_name_is_unique(self):
model_before = self.get_model_before('MySecondModel')
model_before.objects.create(name='foo')
model_before.objects.create(name='foo')
with self.assertRaises(IntegrityError):
self.run_migration()
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nOnionShare | https://onionshare.org/\n\nCopyright (C) 2014-2021 Micah Lee, et al. <micah@micahflee.com>\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Gene... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nOnionShare | https://onionshare.org/\n\nCopyright (C) 2014-2021 Micah Lee, et al. <micah@micahflee.com>\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms... | ```python
# -*- coding: utf-8 -*-
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014-2021 Micah Lee, et al. <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import shutil
from pkg_resources import resource_filename
from . import strings
from onionshare_cli.onion import (
Onion,
TorErrorInvalidSetting,
TorErrorAutomatic,
TorErrorSocketPort,
TorErrorSocketFile,
TorErrorMissingPassword,
TorErrorUnreadableCookieFile,
TorErrorAuthError,
TorErrorProtocolError,
BundledTorTimeout,
BundledTorBroken,
TorTooOldEphemeral,
TorTooOldStealth,
PortNotAvailable,
)
class GuiCommon:
"""
The shared code for all of the OnionShare GUI.
"""
MODE_SHARE = "share"
MODE_RECEIVE = "receive"
MODE_WEBSITE = "website"
MODE_CHAT = "chat"
def __init__(self, common, qtapp, local_only):
self.common = common
self.qtapp = qtapp
self.local_only = local_only
# Are we running in a flatpak package?
self.is_flatpak = os.path.exists("/.flatpak-info")
# Load settings
self.common.load_settings()
# Load strings
strings.load_strings(self.common, self.get_resource_path("locale"))
# Start the Onion
self.onion = Onion(common, get_tor_paths=self.get_tor_paths)
# Lock filename
self.lock_filename = os.path.join(self.common.build_data_dir(), "lock")
# Events filenames
self.events_dir = os.path.join(self.common.build_data_dir(), "events")
if not os.path.exists(self.events_dir):
os.makedirs(self.events_dir, 0o700, True)
self.events_filename = os.path.join(self.events_dir, "events")
self.css = self.get_css(qtapp.color_mode)
self.color_mode = qtapp.color_mode
def get_css(self, color_mode):
header_color = "#4E064F" # purple in light
title_color = "#333333" # dark gray color in main window
stop_button_color = "#d0011b" # red button color for stopping server
new_tab_button_background = "#ffffff"
new_tab_button_border = "#efeff0"
new_tab_button_text_color = "#4e0d4e"
if color_mode == "dark":
header_color = "#F2F2F2"
title_color = "#F2F2F2"
stop_button_color = "#C32F2F"
new_tab_button_background = "#5F5F5F"
new_tab_button_border = "#878787"
new_tab_button_text_color = "#FFFFFF"
return {
# OnionShareGui styles
"tab_widget": """
QTabBar::tab { width: 170px; height: 30px; }
""",
"tab_widget_new_tab_button": """
QPushButton {
font-weight: bold;
font-size: 20px;
}""",
"mode_new_tab_button": """
QPushButton {
font-weight: bold;
font-size: 30px;
color: #601f61;
}""",
"mode_header_label": """
QLabel {
color: """
+ header_color
+ """;
font-size: 48px;
margin-bottom: 16px;
}""",
"settings_button": """
QPushButton {
border: 0;
border-radius: 0;
}""",
"server_status_indicator_label": """
QLabel {
font-style: italic;
color: #666666;
padding: 2px;
}""",
"status_bar": """
QStatusBar {
font-style: italic;
color: #666666;
}
QStatusBar::item {
border: 0px;
}""",
# Common styles between modes and their child widgets
"mode_settings_toggle_advanced": """
QPushButton {
color: #3f7fcf;
text-align: left;
}
""",
"mode_info_label": """
QLabel {
font-size: 12px;
color: #666666;
}
""",
"server_status_url": """
QLabel {
background-color: #ffffff;
color: #000000;
padding: 10px;
border: 1px solid #666666;
font-size: 12px;
}
""",
"server_status_url_buttons": """
QPushButton {
padding: 4px 8px;
text-align: center;
}
""",
"server_status_button_stopped": """
QPushButton {
background-color: #5fa416;
color: #ffffff;
padding: 10px 30px 10px 30px;
border: 0;
border-radius: 5px;
}""",
"server_status_button_working": """
QPushButton {
background-color: #4c8211;
color: #ffffff;
padding: 10px 30px 10px 30px;
border: 0;
border-radius: 5px;
font-style: italic;
}""",
"server_status_button_started": """
QPushButton {
background-color: """
+ stop_button_color
+ """;
color: #ffffff;
padding: 10px 30px 10px 30px;
border: 0;
border-radius: 5px;
}""",
"downloads_uploads_empty": """
QWidget {
background-color: #ffffff;
border: 1px solid #999999;
}
QWidget QLabel {
background-color: none;
border: 0px;
}
""",
"downloads_uploads_empty_text": """
QLabel {
color: #999999;
}""",
"downloads_uploads_label": """
QLabel {
font-weight: bold;
font-size 14px;
text-align: center;
background-color: none;
border: none;
}""",
"downloads_uploads_clear": """
QPushButton {
color: #3f7fcf;
}
""",
"download_uploads_indicator": """
QLabel {
color: #ffffff;
background-color: #f44449;
font-weight: bold;
font-size: 10px;
padding: 2px;
border-radius: 7px;
text-align: center;
}""",
"downloads_uploads_progress_bar": """
QProgressBar {
border: 1px solid """
+ header_color
+ """;
background-color: #ffffff !important;
text-align: center;
color: #9b9b9b;
font-size: 14px;
}
QProgressBar::chunk {
background-color: """
+ header_color
+ """;
width: 10px;
}""",
"history_individual_file_timestamp_label": """
QLabel {
color: #666666;
}""",
"history_individual_file_status_code_label_2xx": """
QLabel {
color: #008800;
}""",
"history_individual_file_status_code_label_4xx": """
QLabel {
color: #cc0000;
}""",
# New tab
"new_tab_button_image": """
QLabel {
padding: 30px;
text-align: center;
}
""",
"new_tab_button_text": """
QLabel {
border: 1px solid """
+ new_tab_button_border
+ """;
border-radius: 4px;
background-color: """
+ new_tab_button_background
+ """;
text-align: center;
color: """
+ new_tab_button_text_color
+ """;
}
""",
"new_tab_title_text": """
QLabel {
text-align: center;
color: """
+ title_color
+ """;
font-size: 25px;
}
""",
# Share mode and child widget styles
"share_delete_all_files_button": """
QPushButton {
color: #3f7fcf;
}
""",
"share_zip_progess_bar": """
QProgressBar {
border: 1px solid """
+ header_color
+ """;
background-color: #ffffff !important;
text-align: center;
color: #9b9b9b;
}
QProgressBar::chunk {
border: 0px;
background-color: """
+ header_color
+ """;
width: 10px;
}""",
"share_filesize_warning": """
QLabel {
padding: 10px 0;
font-weight: bold;
color: """
+ title_color
+ """;
}
""",
"share_file_selection_drop_here_header_label": """
QLabel {
color: """
+ header_color
+ """;
font-size: 48px;
}""",
"share_file_selection_drop_here_label": """
QLabel {
color: #666666;
}""",
"share_file_selection_drop_count_label": """
QLabel {
color: #ffffff;
background-color: #f44449;
font-weight: bold;
padding: 5px 10px;
border-radius: 10px;
}""",
"share_file_list_drag_enter": """
FileList {
border: 3px solid #538ad0;
}
""",
"share_file_list_drag_leave": """
FileList {
border: none;
}
""",
"share_file_list_item_size": """
QLabel {
color: #666666;
font-size: 11px;
}""",
# Receive mode and child widget styles
"receive_file": """
QWidget {
background-color: #ffffff;
}
""",
"receive_file_size": """
QLabel {
color: #666666;
font-size: 11px;
}""",
"receive_message_button": """
QPushButton {
padding: 5px 10px;
}""",
# Settings dialog
"settings_version": """
QLabel {
color: #666666;
}""",
"settings_tor_status": """
QLabel {
background-color: #ffffff;
color: #000000;
padding: 10px;
}""",
"settings_whats_this": """
QLabel {
font-size: 12px;
}""",
"settings_connect_to_tor": """
QLabel {
font-style: italic;
}""",
}
def get_tor_paths(self):
if self.common.platform == "Linux":
tor_path = shutil.which("tor")
obfs4proxy_file_path = shutil.which("obfs4proxy")
prefix = os.path.dirname(os.path.dirname(tor_path))
tor_geo_ip_file_path = os.path.join(prefix, "share/tor/geoip")
tor_geo_ipv6_file_path = os.path.join(prefix, "share/tor/geoip6")
elif self.common.platform == "Windows":
base_path = self.get_resource_path("tor")
tor_path = os.path.join(base_path, "Tor", "tor.exe")
obfs4proxy_file_path = os.path.join(base_path, "Tor", "obfs4proxy.exe")
tor_geo_ip_file_path = os.path.join(base_path, "Data", "Tor", "geoip")
tor_geo_ipv6_file_path = os.path.join(base_path, "Data", "Tor", "geoip6")
elif self.common.platform == "Darwin":
base_path = self.get_resource_path("tor")
tor_path = os.path.join(base_path, "tor")
obfs4proxy_file_path = os.path.join(base_path, "obfs4proxy")
tor_geo_ip_file_path = os.path.join(base_path, "geoip")
tor_geo_ipv6_file_path = os.path.join(base_path, "geoip6")
elif self.common.platform == "BSD":
tor_path = "/usr/local/bin/tor"
tor_geo_ip_file_path = "/usr/local/share/tor/geoip"
tor_geo_ipv6_file_path = "/usr/local/share/tor/geoip6"
obfs4proxy_file_path = "/usr/local/bin/obfs4proxy"
return (
tor_path,
tor_geo_ip_file_path,
tor_geo_ipv6_file_path,
obfs4proxy_file_path,
)
@staticmethod
def get_resource_path(filename):
"""
Returns the absolute path of a resource
"""
return resource_filename("onionshare", os.path.join("resources", filename))
@staticmethod
def get_translated_tor_error(e):
"""
Takes an exception defined in onion.py and returns a translated error message
"""
if type(e) is TorErrorInvalidSetting:
return strings._("settings_error_unknown")
elif type(e) is TorErrorAutomatic:
return strings._("settings_error_automatic")
elif type(e) is TorErrorSocketPort:
return strings._("settings_error_socket_port").format(e.args[0], e.args[1])
elif type(e) is TorErrorSocketFile:
return strings._("settings_error_socket_file").format(e.args[0])
elif type(e) is TorErrorMissingPassword:
return strings._("settings_error_missing_password")
elif type(e) is TorErrorUnreadableCookieFile:
return strings._("settings_error_unreadable_cookie_file")
elif type(e) is TorErrorAuthError:
return strings._("settings_error_auth").format(e.args[0], e.args[1])
elif type(e) is TorErrorProtocolError:
return strings._("error_tor_protocol_error").format(e.args[0])
elif type(e) is BundledTorTimeout:
return strings._("settings_error_bundled_tor_timeout")
elif type(e) is BundledTorBroken:
return strings._("settings_error_bundled_tor_broken").format(e.args[0])
elif type(e) is TorTooOldEphemeral:
return strings._("error_ephemeral_not_supported")
elif type(e) is TorTooOldStealth:
return strings._("error_stealth_not_supported")
elif type(e) is PortNotAvailable:
return strings._("error_port_not_available")
return None
``` |
[
{
"content": "Here is the code content:\n```python\n# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom models import _User, CSVUsuario\nfrom forms import _UserChangeForm,_UserCreationForm\n\n... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom models import _User, CSVUsuario\nfrom forms import _UserChangeForm,_User... | ```python
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from models import _User, CSVUsuario
from forms import _UserChangeForm,_UserCreationForm
def deactivate(modeladmin, request, queryset):
queryset.update(is_active=False)
deactivate.short_description = "Desativar Usuarios selecionados"
def activate(modeladmin, request, queryset):
queryset.update(is_active=True)
activate.short_description = "Ativar Usuarios selecionados"
class _UserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('cpf', 'password')}),
(_(u'Informações Pessoais'), {'fields': ('nome_completo','email','numero_usp','curso','endereco',
'numero','complemento','cidade','uf',
'bairro','tel','cep',)}),
(_(u'Permissões do Sistema'), {'fields': ('is_active', 'is_staff', 'is_superuser',
# 'groups',
'user_permissions'
)}),
(_(u'Funções'), {'fields': ('docente','doutorando', 'mestrando','aluno', 'funcionario','monitor','pae','supervisor','secretario')}),
(_('Datas Importantes'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('cpf', 'password1', 'password2')}
),
)
form = _UserChangeForm
add_form = _UserCreationForm
list_display = ('nome_completo', 'email', 'cpf', 'is_staff',)
search_fields = ('nome_completo', 'email', 'cpf','numero_usp')
ordering = ('nome_completo',)
actions = [deactivate,activate]
list_filter = ['docente','doutorando','mestrando','aluno','funcionario','supervisor','monitor','pae','secretario' , 'is_staff', 'is_superuser', 'is_active']
class CsvUsuarioAdmin(admin.ModelAdmin):
# save_on_top = True
# list_display = (['criada_em'])
# list_display_links = (['criada_em'])
# search_fields = ['criada_em']
# date_hierarchy = 'criada_em'
readonly_fields=('log',)
admin.site.register(CSVUsuario,CsvUsuarioAdmin)
admin.site.register(_User, _UserAdmin)
``` |
[
{
"content": "Repeat the following code:\n```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Saas Manager\n# Copyright (C) 2014 Sistemas ADHOC\n# No email\n#\n# This program is free software: you can redistribute it and/or modify\... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Saas Manager\n# Copyright (C) 2014 Sistemas ADHOC\n# No email\n#\n# This program is free software: you can redistribute i... | ```python
# -*- coding: utf-8 -*-
##############################################################################
#
# Saas Manager
# Copyright (C) 2014 Sistemas ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class instance_additional(osv.osv):
""""""
_name = 'saas_manager.instance_additional'
_description = 'instance_additional'
_columns = {
'additional_product_id': fields.many2one('product.product', string='Sub Product', required=True),
'instance_id': fields.many2one('saas_manager.instance', string='instance_id', ondelete='cascade', required=True),
}
_defaults = {
}
_constraints = [
]
instance_additional()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
``` |
[
{
"content": "Here is the script:\n```python\n'''\n\nPython implementation of HTML wordcloud of words collected from\na website, Paragraph Input or File Upload. Flask Web App implementation\nof the same.\n\nAuthor: Shivam Bansal\nEmail: shivam5992@gmail.com\nWebsite: www.shivambansal.com \nVersion: 0.1\n\n'''\n... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n'''\n\nPython implementation of HTML wordcloud of words collected from\na website, Paragraph Input or File Upload. Flask Web App implementation\nof the same.\n\nAuthor: Shivam Bansal\nEmail: shivam5992@gmail.com\nWebsite: www.shivambansal.com \nVersi... | ```python
'''
Python implementation of HTML wordcloud of words collected from
a website, Paragraph Input or File Upload. Flask Web App implementation
of the same.
Author: Shivam Bansal
Email: shivam5992@gmail.com
Website: www.shivambansal.com
Version: 0.1
'''
from flask import Flask, render_template, request, flash, redirect, url_for
from BeautifulSoup import BeautifulSoup
import urllib, random, re, string, stopwords
app = Flask(__name__)
app.secret_key = 'You will never guess'
'''
Index router function, Receive post request and displays the html wordcloud
'''
@app.route('/', methods = ['GET','POST'])
@app.route('/index', methods = ['GET','POST'])
def index():
if request.method == 'POST':
''' Store post variables '''
url = request.form['urllink']
case = request.form['case']
show_freq = request.form['show_freq']
''' Try to connect with the URL '''
try:
if not url.startswith("http"):
url = "http://" + url
htmltext = urllib.urlopen(url).read()
except:
flash("Cannot connect to the requested url")
return redirect(url_for('startover'))
''' Get all text from the html repsonse '''
soup = BeautifulSoup(htmltext)
texts = soup.findAll(text=True)
visible_texts = filter(visible, texts)
article = ""
for text in visible_texts:
article += text.encode("utf-8")
article = str(article)
article = BeautifulSoup(article, convertEntities=BeautifulSoup.HTML_ENTITIES)
#exclude = set(string.punctuation)
#article = str(article)
#article = ''.join(ch for ch in article if ch not in exclude)
article = str(article).replace("\n"," ")
''' Get top keywords '''
freq = 50
a = getKeywords(article, case, freq)
random.shuffle(a)
b = [x[1] for x in a]
minFreq = min(b)
maxFreq = max(b)
''' Create html span tags and corresponding css '''
span = ""
css = """#box{font-family:'calibri';border:2px solid black;}
#box a{text-decoration : none}
"""
''' Colors for words in wordcloud '''
colors = ['#607ec5','#002a8b','#86a0dc','#4c6db9']
colsize = len(colors)
k = 0
for index,item in enumerate(a):
index += 1
if case == "upper":
tag = str(item[0]).upper()
else:
tag = str(item[0])
if show_freq == "yes":
span += '<a href=#><span class="word'+str(index)+'" id="tag'+str(index)+'"> ' + tag + " (" + str(item[1]) + ") " + " </span></a>\n"
else:
span += '<a href=#><span class="word'+str(index)+'" id="tag'+str(index)+'"> ' + tag + " </span></a>\n"
''' Algorithm to scale sizes'''
freqTag = int(item[1])
fontMax = 5.5
fontMin = 1.5
K = (freqTag - minFreq)/(maxFreq - minFreq)
frange = fontMax - fontMin
C = 4
K = float(freqTag - minFreq)/(maxFreq - minFreq)
size = fontMin + (C*float(K*frange/C))
css += '#tag'+str(index)+'{font-size: '+ str(size) +'em;color: '+colors[int(k%colsize)]+'}\n'
css += '#tag'+str(index)+':hover{color: red}\n'
k += 1
''' Write the HTML and CSS into seperate files '''
f = open('templates/wordcloud.html', 'w')
message = """
<style type="text/css">
""" + css +"""
</style>
<div id='box'>
""" + span + """
</div>
"""
f.write(message)
f.close
f.flush()
return render_template('index.html')
startover()
return render_template('index.html')
'''
Function to get top keywords from an article
'''
def getKeywords(articletext, case, freq):
''' Create the dictionary for output response '''
word_dict = {}
word_list = articletext.lower().split()
filtered_words = word_list
for word in filtered_words:
if word not in stopwords.stopwords and word.isalnum() and not word.isdigit() and not len(word) == 1:
if word not in word_dict:
word_dict[word] = 1
if word in word_dict:
word_dict[word] += 1
top_words = sorted(word_dict.items(),key=lambda(k,v):(v,k),reverse=True)[0:freq]
''' Return a list of dictionaies, dictionaies contains word and their frequencies '''
top = []
for w in top_words:
top.append(w)
return top
'''
Function to reset everthing and startover
'''
@app.route('/startover')
def startover():
f = open("templates/wordcloud.html",'w')
f.write("")
f.close
return redirect(url_for('index'))
def visible(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
elif re.match('<!--.*-->', str(element)):
return False
return True
'''
Run the Flask Application
'''
if __name__ == '__main__':
app.run(debug = True)
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nimport os\nimport sys\nfrom multiprocessing.connection import Client\nimport bcrypt\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom terms.server.scripts.webserver import... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nimport os\nimport sys\nfrom multiprocessing.connection import Client\nimport bcrypt\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom terms.server.scripts.... | ```python
import os
import sys
from multiprocessing.connection import Client
import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from terms.server.scripts.webserver import get_config
from terms.server.schemata import Schema
from terms.server.pluggable import load_plugins, get_plugins
from terms.server.pluggable import ImportRegistry, Base
from terms.server.app.schemata import Person
def import_ontologies(config, session):
'''
get directory
cycle over trm files
if the file name not in importregistry
break file content on dots
send pieces to server
put filename in importregistry
'''
for module in get_plugins(config):
fname = os.path.join(os.path.dirname(module.__file__), 'ontology', 'terms.trm')
totell = []
kb = Client((config('kb_host'), int(config('kb_port'))))
with open(fname, 'r') as f:
for line in f.readlines():
if line:
kb.send_bytes(line)
kb.send_bytes('FINISH-TERMS')
for fact in iter(kb.recv_bytes, 'END'):
print(fact)
kb.close()
def import_exec_globals(config, session):
'''
get exec_globals directory
cycle over its files
if the file name not in importregistry
send its contents to server
put filename in importregistry
'''
for module in get_plugins(config):
dirname = os.path.join(os.path.dirname(module.__file__),
'exec_globals')
for fname in sorted(os.listdir(dirname)):
if fname.endswith('.py'):
name = 'execs:' + fname[:-3]
try:
session.query(ImportRegistry).filter(ImportRegistry.name==name).one()
except NoResultFound:
path = os.path.join(dirname, fname)
with open(path, 'r') as f:
eg = f.read()
kb = Client((config('kb_host'), int(config('kb_port'))))
kb.send_bytes('compiler:exec_globals:' + eg)
kb.send_bytes('FINISH-TERMS')
for fact in iter(kb.recv_bytes, 'END'):
print(fact)
kb.close()
ir = ImportRegistry(name)
session.add(ir)
def init_terms():
config = get_config()
address = '%s/%s' % (config('dbms'), config('dbname'))
load_plugins(config)
engine = create_engine(address)
Schema.metadata.create_all(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
import_ontologies(config, session)
import_exec_globals(config, session)
pass1 = 'admin'
# we need defaults for automated scripts here
# pass1, pass2 = '', ' '
# while pass1 != pass2:
# pass1 = raw_input('Enter a password for the admin user: ')
# pass2 = raw_input('Repeat it: ')
password = bcrypt.hashpw(pass1, bcrypt.gensalt())
admin = Person(_id='admin', name='admin', surname='superuser', password=password)
session.add(admin)
session.commit()
session.close()
sys.exit('Created knowledge store %s' % config('dbname'))
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2008-2012 the MansOS team. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditi... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2008-2012 the MansOS team. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the f... | ```python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2012 the MansOS team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
class Settings:
configFile = "config.cfg"
@staticmethod
def __init__():
defaultSettings = {
"active_language" : "ENG",
"platform" : "telosb",
"blockly_location": "../../../seal-blockly/blockly/demos/seal/index.html",
"blockly_port" : '8090',
"blockly_host" : "localhost",
"recently_opened_count" : "10"
}
Settings.config = ConfigParser.SafeConfigParser(defaultSettings)
Settings.config.read(Settings.configFile);
if not Settings.config.has_section("CURRENT"):
Settings.config.add_section("CURRENT")
@staticmethod
def saveConfig():
with open(Settings.configFile, 'wb') as file:
Settings.config.write(file)
@staticmethod
def get(name):
try:
return Settings.config.get("CURRENT", name)
except:
print "No config entry found: " + name
return ""
@staticmethod
def set(name, value):
try:
return Settings.config.set("CURRENT", name, str(value))
except:
print "Can't add config(" + name + " : " + value + ")"
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.