| code (string, length 75–104k) | docstring (string, length 1–46.9k) |
|---|---|
def _parse_welcome(client, command, actor, args):
"""Parse a WELCOME and update user state, then dispatch a WELCOME event."""
_, _, hostmask = args.rpartition(' ')
client.user.update_from_hostmask(hostmask)
client.dispatch_event("WELCOME", hostmask)
|
Parse a WELCOME and update user state, then dispatch a WELCOME event.
|
def _set_ownership(self):
"""
set ownership of the directory: user and group
:return: None
"""
if self.owner or self.group:
args = (
self.path,
self.owner if self.owner else -1,
self.group if self.group else -1,
)
logger.debug("changing ownership bits of %s to %s", self.path, args)
os.chown(*args)
|
set ownership of the directory: user and group
:return: None
|
def abu_chart(self, cycle, mass_range=None ,ilabel=True,
              imlabel=True, imlabel_fontsize=8, imagic=False,
              boxstable=True, lbound=(-12, 0),
              plotaxis=[0, 0, 0, 0], show=True, color_map='jet',
              ifig=None,data_provided=False,thedata=None,
              savefig=False,drawfig=None,drawax=None,mov=False,
              path=None):
    '''
    Plots an abundance chart (isotope chart in the N-Z plane, coloured
    by log10 of the mass fraction).

    Parameters
    ----------
    cycle : string, integer or list
        The cycle we are looking in. If it is a list of cycles,
        this method will then do a plot for each of these cycles
        and save them all to a file.
    mass_range : list, optional
        A 1x2 array containing the lower and upper mass range. If
        this is an instance of abu_vector this will only plot
        isotopes that have an atomic mass within this range. This
        will throw an error if this range does not make sense, i.e.
        [45,2]. If None, it will plot over the entire range. The
        default is None.
    ilabel : boolean, optional
        Elemental labels off/on. The default is True.
    imlabel : boolean, optional
        Label for isotopic masses off/on. The default is True.
    imlabel_fontsize : integer, optional
        Fontsize for isotopic mass labels. The default is 8.
    imagic : boolean, optional
        Turn lines for magic numbers off/on. The default is False.
    boxstable : boolean, optional
        Plot the black boxes around the stable elements. The
        default is True.
    lbound : tuple, optional
        Boundaries for colour spectrum plotted. The default is
        (-12,0).
    plotaxis : list, optional
        Set axis limit. If [0, 0, 0, 0] the complete range in (N,Z)
        will be plotted. It equates to [xMin, xMax, Ymin, Ymax].
        The default is [0, 0, 0, 0].
    show : boolean, optional
        Boolean of if the plot should be displayed. Useful with
        saving multiple plots using abu_chartMulti. The default is
        True.
    color_map : string, optional
        Color map according to choices in matplotlib
        (e.g. www.scipy.org/Cookbook/Matplotlib/Show_colormaps).
        The default is 'jet'.
    ifig : integer, optional
        Figure number, if ifig is None it will be set to the cycle
        number. The default is None.
    data_provided : boolean, optional
        If True, abundances and masses are taken from `thedata`
        instead of being read from the se file. The default is False.
    thedata : optional
        Sequence of (abundances, masses) used when data_provided is
        True.
    savefig : boolean, optional
        Whether or not to save the figure.
        The default is False
    drawfig, drawax, mov : optional, not necessary for user to set these variables
        The figure and axes containers to be drawn on, and whether or not a movie is
        being made (only True when se.movie is called, which sets mov to True
        automatically
    path : optional
        Path where to save the figure (joined with the generated file
        name when savefig is True).
    '''
    # Default the figure number to the cycle number (movies manage their
    # own figure via drawfig).
    if ifig == None and not mov:
        ifig=cycle
    # A list of cycles is delegated to abu_chartMulti, which plots and
    # saves one chart per cycle.
    if type(cycle)==type([]):
        self.abu_chartMulti(cycle, mass_range,ilabel,imlabel,imlabel_fontsize,imagic,boxstable,\
                            lbound,plotaxis,color_map, path=path)
        return
    plotType=self._classTest()

    if mass_range!=None and mass_range[0]>mass_range[1]:
        raise IOError("Please input a proper mass range")

    if plotType=='se':
        if not data_provided:
            cycle=self.se.findCycle(cycle)
            # nin=zeros(len(self.se.A))
            # zin=zeros(len(self.se.Z))
            yin=self.get(cycle, 'iso_massf')
            isom=self.se.isomeric_states
            masses = self.se.get(cycle,'mass')
        else:
            cycle=cycle # why so serious?
            yin=thedata[0]
            isom=self.se.isomeric_states
            masses = thedata[1]

        # for i in xrange(len(nin)):
        #     zin[i]=self.se.Z[i]
        #     nin[i]=self.se.A[i]-zin[i]
        # SJONES implicit loop instead:
        zin=array([el for el in self.se.Z])
        nin=array([el for el in self.se.A])-zin

        #Test if the mass cell order is inverted
        #and hence mass[-1] the center.
        if masses[0]>masses[-1]:
            #invert
            print('Inverted order of mass cells will be taken into account.')
            yin=yin[::-1]
            masses=masses[::-1]

        if mass_range != None:
            # trim out only the zones needed:
            tmpyps=[]
            masses.sort() # SJ: not sure why this sort if necessary
            # for i in xrange(len(masses)):
            #     if (masses[i] >mass_range[0] and masses[i]<mass_range[1]) or\
            #             (masses[i]==mass_range[0] or masses[i]==mass_range[1]):
            #         tmpyps.append(yin[i])
            # yin=tmpyps
            # find lower and upper indices and slice instead:
            idxl=np.abs(masses-mass_range[0]).argmin()
            if masses[idxl] < mass_range[0]: idxl+=1
            idxu=np.abs(masses-mass_range[1]).argmin()
            if masses[idxu] > mass_range[1]: idxu-=1
            yin=yin[idxl:idxu+1]

        # Average the (zones x isotopes) abundance array over zones to a
        # single abundance vector.
        #tmp=zeros(len(yin[0]))
        #for i in xrange(len(yin)):
        #    for j in xrange(len(yin[i])):
        #        tmp[j]+=yin[i][j]
        tmp2=sum(yin,axis=0) # SJONES sum along axis instead of nested loop
        tmp=old_div(tmp2,len(yin))
        yin=tmp
    elif plotType=='PPN':
        ain=self.get('A',cycle)
        zin=self.get('Z',cycle)
        nin=ain-zin
        yin=self.get('ABUNDANCE_MF',cycle)
        isom=self.get('ISOM',cycle)

        if mass_range != None:
            # Keep only species with A inside the (inclusive) mass range.
            tmpA=[]
            tmpZ=[]
            tmpIsom=[]
            tmpyps=[]
            for i in range(len(nin)):
                if (ain[i] >mass_range[0] and ain[i]<mass_range[1])\
                        or (ain[i]==mass_range[0] or ain[i]==mass_range[1]):
                    tmpA.append(nin[i])
                    tmpZ.append(zin[i])
                    tmpIsom.append(isom[i])
                    tmpyps.append(yin[i])
            zin=tmpZ
            nin=tmpA
            yin=tmpyps
            isom=tmpIsom
    else:
        raise IOError("This method, abu_chart, is not supported by this class")

    # in case we call from ipython -pylab, turn interactive on at end again
    turnoff=False
    if not show:
        try:
            ioff()
            turnoff=True
        except NameError:
            turnoff=False

    nnmax = int(max(nin))+1
    nzmax = int(max(zin))+1
    # nzycheck[n, z, 0] flags presence of the (N, Z) species (ground
    # state only, isom == 1); nzycheck[n, z, 1] stores its abundance.
    nzycheck = zeros([nnmax,nzmax,3])
    for i in range(len(nin)):
        if isom[i]==1:
            ni = int(nin[i])
            zi = int(zin[i])
            nzycheck[ni,zi,0] = 1
            nzycheck[ni,zi,1] = yin[i]

    #######################################################################
    # elemental names: elname(i) is the name of element with Z=i
    elname=self.elements_names

    #### create plot
    ## define axis and plot style (colormap, size, fontsize etc.)
    if plotaxis==[0,0,0,0]:
        xdim=10
        ydim=6
    else:
        dx = plotaxis[1]-plotaxis[0]
        dy = plotaxis[3]-plotaxis[2]
        ydim = 6
        xdim = ydim*dx/dy

    # NOTE(review): `params` is currently unused because the rcParams
    # update below is commented out.
    params = {'axes.labelsize': 12,
              'text.fontsize': 12,
              'legend.fontsize': 12,
              'xtick.labelsize': 12,
              'ytick.labelsize': 12,
              'text.usetex': True}
    #pl.rcParams.update(params) #May cause Error, something to do with tex
    if mov:
        fig=drawfig
        fig.set_size_inches(xdim,ydim)
        artists=[]
    else:
        fig=pl.figure(ifig,figsize=(xdim,ydim),dpi=100)
    axx = 0.10
    axy = 0.10
    axw = 0.85
    axh = 0.8
    if mov:
        ax=drawax
    else:
        ax=pl.axes([axx,axy,axw,axh])
    # Tick marks
    xminorlocator = MultipleLocator(1)
    xmajorlocator = MultipleLocator(5)
    ax.xaxis.set_major_locator(xmajorlocator)
    ax.xaxis.set_minor_locator(xminorlocator)
    yminorlocator = MultipleLocator(1)
    ymajorlocator = MultipleLocator(5)
    ax.yaxis.set_major_locator(ymajorlocator)
    ax.yaxis.set_minor_locator(yminorlocator)

    # color map choice for abundances
    cmapa = cm.get_cmap(name=color_map)
    # color map choice for arrows
    cmapr = cm.autumn
    # if a value is below the lower limit its set to white
    cmapa.set_under(color='w')
    cmapr.set_under(color='w')
    # set value range for abundance colors (log10(Y))
    norma = colors.Normalize(vmin=lbound[0],vmax=lbound[1])
    # set x- and y-axis scale aspect ratio to 1
    ax.set_aspect('equal')
    #print time,temp and density on top
    temp = ' '#'%8.3e' %ff['temp']
    time = ' '#'%8.3e' %ff['time']
    dens = ' '#'%8.3e' %ff['dens']

    #May cause Error, something to do with tex
    '''
    #box1 = TextArea("t : " + time + " s~~/~~T$_{9}$ : " + temp + "~~/~~$\\rho_{b}$ : " \
    #      + dens + ' g/cm$^{3}$', textprops=dict(color="k"))
    anchored_box = AnchoredOffsetbox(loc=3,
            child=box1, pad=0.,
            frameon=False,
            bbox_to_anchor=(0., 1.02),
            bbox_transform=ax.transAxes,
            borderpad=0.,
            )
    ax.add_artist(anchored_box)
    '''

    ## Colour bar plotted
    # One unit square per present species, coloured by log10(abundance).
    patches = []
    color = []
    for i in range(nzmax):
        for j in range(nnmax):
            if nzycheck[j,i,0]==1:
                xy = j-0.5,i-0.5
                rect = Rectangle(xy,1,1,)
                # abundance
                yab = nzycheck[j,i,1]
                if yab == 0:
                    # avoid log10(0); 1e-99 maps below lbound and renders white
                    yab=1e-99
                col =log10(yab)
                patches.append(rect)
                color.append(col)

    p = PatchCollection(patches, cmap=cmapa, norm=norma)
    p.set_array(array(color))
    p.set_zorder(1)
    if mov:
        artist1=ax.add_collection(p)
        artists.append(artist1)
    else:
        ax.add_collection(p)
    if not mov:
        cb = pl.colorbar(p)
        # colorbar label
        cb.set_label('log$_{10}$(X)')

    # plot file name
    graphname = 'abundance-chart'+str(cycle)

    # Add black frames for stable isotopes
    if boxstable:
        for i in range(len(self.stable_el)):
            if i == 0:
                # first entry of stable_el is skipped (presumably a header
                # row — TODO confirm against the stable_el definition)
                continue
            tmp = self.stable_el[i]
            try:
                zz= self.elements_names.index(tmp[0]) #charge
            except:
                continue
            for j in range(len(tmp)):
                if j == 0:
                    continue
                nn = int(tmp[j]) #atomic mass
                nn=nn-zz
                xy = nn-0.5,zz-0.5
                # NOTE(review): fill='False' is a truthy string, not the
                # boolean False; fc='None' keeps the face transparent
                # anyway — confirm intended.
                rect = Rectangle(xy,1,1,ec='k',fc='None',fill='False',lw=3.)
                rect.set_zorder(2)
                ax.add_patch(rect)

    # decide which array to take for label positions
    iarr = 0

    # plot element labels
    if ilabel:
        for z in range(nzmax):
            try:
                # place the element symbol just left of the lightest isotope
                nmin = min(argwhere(nzycheck[:,z,iarr]))[0]-1
                ax.text(nmin,z,elname[z],horizontalalignment='center',verticalalignment='center',\
                        fontsize='x-small',clip_on=True)
            except ValueError:
                continue

    # plot mass numbers
    if imlabel:
        for z in range(nzmax):
            for n in range(nnmax):
                a = z+n
                if nzycheck[n,z,iarr]==1:
                    ax.text(n,z,a,horizontalalignment='center',verticalalignment='center',\
                            fontsize=imlabel_fontsize,clip_on=True)

    # plot lines at magic numbers
    if imagic:
        ixymagic=[2, 8, 20, 28, 50, 82, 126]
        nmagic = len(ixymagic)
        for magic in ixymagic:
            if magic<=nzmax:
                try:
                    xnmin = min(argwhere(nzycheck[:,magic,iarr]))[0]
                    xnmax = max(argwhere(nzycheck[:,magic,iarr]))[0]
                    line = ax.plot([xnmin,xnmax],[magic,magic],lw=3.,color='r',ls='-')
                except ValueError:
                    dummy=0
            if magic<=nnmax:
                try:
                    yzmin = min(argwhere(nzycheck[magic,:,iarr]))[0]
                    yzmax = max(argwhere(nzycheck[magic,:,iarr]))[0]
                    line = ax.plot([magic,magic],[yzmin,yzmax],lw=3.,color='r',ls='-')
                except ValueError:
                    dummy=0

    # set axis limits
    if plotaxis==[0,0,0,0]:
        xmax=max(nin)
        ymax=max(zin)
        ax.axis([-0.5,xmax+0.5,-0.5,ymax+0.5])
    else:
        ax.axis(plotaxis)

    # set x- and y-axis label
    ax.set_xlabel('neutron number (A-Z)')
    ax.set_ylabel('proton number Z')
    if not mov:
        pl.title('Isotopic Chart for cycle '+str(int(cycle)))
    if savefig:
        if path is not None:
            graphname = os.path.join(path, graphname)
        fig.savefig(graphname)
        print(graphname,'is done')
    if show:
        pl.show()
    if turnoff:
        ion()
    if mov:
        return p,artists
    else:
        return
|
Plots an abundance chart
Parameters
----------
cycle : string, integer or list
The cycle we are looking in. If it is a list of cycles,
this method will then do a plot for each of these cycles
and save them all to a file.
mass_range : list, optional
A 1x2 array containing the lower and upper mass range. If
this is an instance of abu_vector this will only plot
isotopes that have an atomic mass within this range. This
will throw an error if this range does not make sense, i.e.
[45,2]. If None, it will plot over the entire range. The
default is None.
ilabel : boolean, optional
Elemental labels off/on. The default is True.
imlabel : boolean, optional
Label for isotopic masses off/on. The default is True.
imlabel_fontsize : integer, optional
Fontsize for isotopic mass labels. The default is 8.
imagic : boolean, optional
Turn lines for magic numbers off/on. The default is False.
boxstable : boolean, optional
Plot the black boxes around the stable elements. The
defaults is True.
lbound : tuple, optional
Boundaries for colour spectrum plotted. The default is
(-12,0).
plotaxis : list, optional
Set axis limit. If [0, 0, 0, 0] the complete range in (N,Z)
will be plotted. It equates to [xMin, xMax, Ymin, Ymax].
The default is [0, 0, 0, 0].
show : boolean, optional
Boolean of if the plot should be displayed. Useful with
saving multiple plots using abu_chartMulti. The default is
True.
color_map : string, optional
Color map according to choices in matplotlib
(e.g. www.scipy.org/Cookbook/Matplotlib/Show_colormaps).
The default is 'jet'.
ifig : integer, optional
Figure number, if ifig is None it will be set to the cycle
number. The default is None.
savefig : boolean, optional
Whether or not to save the figure.
The default is False
drawfig, drawax, mov : optional, not necessary for user to set these variables
The figure and axes containers to be drawn on, and whether or not a movie is
being made (only True when se.movie is called, which sets mov to True
automatically
path: path where to save figure
|
def linkage_group_ordering(linkage_records):
    """Convert degenerate linkage records into ordered info_frags-like records
    for comparison purposes.

    Each input record has the form [linkage_group, start, end, init_contig];
    consecutive records sharing the same linkage group are grouped together.

    Returns a dict keyed by linkage group (a newly-formed 'scaffold'); each
    value is a list of records of the form
    [init_contig, frag_id, start, end, orientation].

    Since fragment ids are meaningless in non-HiC contexts, a negative
    identifier (-3) is used so it is understood the region was added from
    linkage data (-1 is reserved for data recovered after first-pass
    polishing and -2 for sequence insertions after long-read polishing).
    Orientations are always set to 1.

    >>> ordering = linkage_group_ordering([
    ...     ['linkage_group_1', 31842, 94039, 'sctg_207'],
    ...     ['linkage_group_1', 95303, 95303, 'sctg_207'],
    ...     ['linkage_group_3', 36614, 50582, 'sctg_516'],
    ... ])
    >>> ordering['linkage_group_3']
    [['sctg_516', -3, 36614, 50582, 1]]
    """
    ordered = dict()
    grouped = itertools.groupby(linkage_records, operator.itemgetter(0))
    for lg_name, records in grouped:
        # record layout: [lg_name, start, end, init_contig]
        ordered[lg_name] = [
            [rec[-1], -3, rec[1], rec[2], 1] for rec in records
        ]
    return ordered
|
Convert degenerate linkage records into ordered info_frags-like records
for comparison purposes.
Simple example:
>>> linkage_records = [
... ['linkage_group_1', 31842, 94039, 'sctg_207'],
... ['linkage_group_1', 95303, 95303, 'sctg_207'],
... ['linkage_group_2', 15892, 25865, 'sctg_308'],
... ['linkage_group_2', 41893, 41893, 'sctg_486'],
... ['linkage_group_3', 36614, 50582, 'sctg_516'],
... ]
>>> ordering = linkage_group_ordering(linkage_records)
Each key of the record is a newly-formed 'scaffold' (linkage group):
>>> sorted(ordering.keys())
['linkage_group_1', 'linkage_group_2', 'linkage_group_3']
Records are in the form [init_contig, frag_id, start, end, orientation].
Since fragment ids are meaningless in non-HiC contexts a negative
identifier is set so it is understood that region was added due to
linkage data (-1 is for recovering data after first-pass polishing and -2
is for sequence insertions after long read based polishing).
>>> ordering['linkage_group_1']
[['sctg_207', -3, 31842, 94039, 1], ['sctg_207', -3, 95303, 95303, 1]]
>>> ordering['linkage_group_2']
[['sctg_308', -3, 15892, 25865, 1], ['sctg_486', -3, 41893, 41893, 1]]
Orientations are always set to 1 by default.
>>> ordering['linkage_group_3']
[['sctg_516', -3, 36614, 50582, 1]]
|
def _language_to_voice_code(self, language):
    """
    Translate a language value to a voice code.

    A voice code set in the runtime configuration takes precedence.
    Otherwise the ``LANGUAGE_TO_VOICE_CODE`` dictionary is consulted;
    if you want to mock support for a language by reusing a similar
    language's voice, add an entry there. As a last resort the language
    itself is used as the voice code.

    :param language: the requested language
    :type  language: :class:`~aeneas.language.Language`
    :rtype: string
    """
    override = self.rconf[RuntimeConfiguration.TTS_VOICE_CODE]
    if override is not None:
        self.log(u"TTS voice override in rconf")
        voice_code = override
    else:
        try:
            voice_code = self.LANGUAGE_TO_VOICE_CODE[language]
        except KeyError as exc:
            self.log_exc(u"Language code '%s' not found in LANGUAGE_TO_VOICE_CODE" % (language), exc, False, None)
            self.log_warn(u"Using the language code as the voice code")
            voice_code = language
    self.log([u"Language to voice code: '%s' => '%s'", language, voice_code])
    return voice_code
|
Translate a language value to a voice code.
If you want to mock support for a language
by using a voice for a similar language,
please add it to the ``LANGUAGE_TO_VOICE_CODE`` dictionary.
:param language: the requested language
:type language: :class:`~aeneas.language.Language`
:rtype: string
|
def get_memory_usage(self):
    """
    Get data about the virtual memory usage of the holder.

    :returns: Memory usage data
    :rtype: dict

    The returned dict contains at least ``nb_cells_by_array`` (number of
    entities in the simulation), ``dtype`` (value dtype), plus whatever the
    underlying memory storage reports (e.g. ``nb_arrays``, ``cell_size``,
    ``total_nb_bytes``). When tracing is enabled, ``nb_requests`` and
    ``nb_requests_by_array`` are added from the tracer's usage statistics.
    """
    usage = {
        'nb_cells_by_array': self.population.count,
        'dtype': self.variable.dtype,
    }
    usage.update(self._memory_storage.get_memory_usage())

    if self.simulation.trace:
        stats = self.simulation.tracer.usage_stats[self.variable.name]
        nb_arrays = usage['nb_arrays']
        usage['nb_requests'] = stats['nb_requests']
        # average number of computations per stored array; NaN when nothing stored
        usage['nb_requests_by_array'] = (
            stats['nb_requests'] / float(nb_arrays) if nb_arrays > 0 else np.nan
        )
    return usage
|
Get data about the virtual memory usage of the holder.
:returns: Memory usage data
:rtype: dict
Example:
>>> holder.get_memory_usage()
>>> {
>>> 'nb_arrays': 12, # The holder contains the variable values for 12 different periods
>>> 'nb_cells_by_array': 100, # There are 100 entities (e.g. persons) in our simulation
>>> 'cell_size': 8, # Each value takes 8B of memory
>>> 'dtype': dtype('float64') # Each value is a float 64
>>> 'total_nb_bytes': 10400 # The holder uses 10.4kB of virtual memory
>>> 'nb_requests': 24 # The variable has been computed 24 times
>>> 'nb_requests_by_array': 2 # Each array stored has been on average requested twice
>>> }
|
def dict_value_hint(key, mapper=None):
    """Build a hinter that extracts ``key`` from a dictionary.

    The returned callable looks up ``key`` via ``dict.get`` (missing keys
    yield ``None``) and passes the result through `mapper` when one is
    given; otherwise the value is returned unchanged.

    To be used as a type hint in :class:`OneOf`.
    """
    mapper = identity if mapper is None else mapper

    def hinter(data):
        return mapper(data.get(key))

    return hinter
|
Returns a function that takes a dictionary and returns value of
particular key. The returned value can be optionally processed by `mapper`
function.
To be used as a type hint in :class:`OneOf`.
|
def check_missing_references(client):
    """Find missing references.

    Returns True when every link reference resolves; otherwise prints a
    warning listing the dangling references and returns False.
    """
    from renku.models.refs import LinkReference

    missing = []
    for ref in LinkReference.iter_items(client):
        if not ref.reference.exists():
            missing.append(ref)

    if not missing:
        return True

    listing = '\n\t '.join(
        click.style(str(ref.path), fg='yellow') + ' -> ' +
        click.style(str(ref.reference), fg='red') for ref in missing
    )
    click.secho(
        WARNING + 'There are missing references.'
        '\n (use "git rm <name>" to clean them)\n\n\t' + listing + '\n'
    )
    return False
|
Find missing references.
|
def sel_entries(self):
    """Generator which returns all SEL entries.

    Reads the System Event Log record by record, yielding one
    :class:`SelEntry` per 16-byte record. Handles BMCs that cannot
    return a whole record at once by shrinking the per-request length
    until the controller accepts it.
    """
    # 0xff asks the BMC for the entire record in one response.
    ENTIRE_RECORD = 0xff
    rsp = self.send_message_with_name('GetSelInfo')
    if rsp.entries == 0:
        # empty SEL: nothing to yield
        return
    reservation_id = self.get_sel_reservation_id()
    # record id 0 addresses the first SEL record
    next_record_id = 0
    while True:
        req = create_request_by_name('GetSelEntry')
        req.reservation_id = reservation_id
        req.record_id = next_record_id
        req.offset = 0
        self.max_req_len = ENTIRE_RECORD
        record_data = ByteBuffer()
        # Inner loop: fetch one 16-byte record, possibly in several chunks.
        while True:
            req.length = self.max_req_len
            # When reading partially, never request past the 16-byte record end.
            if (self.max_req_len != 0xff
                    and (req.offset + req.length) > 16):
                req.length = 16 - req.offset
            rsp = self.send_message(req)
            if rsp.completion_code == constants.CC_CANT_RET_NUM_REQ_BYTES:
                # BMC cannot return that many bytes: back off from
                # "entire record" to 16, then decrement until accepted.
                if self.max_req_len == 0xff:
                    self.max_req_len = 16
                else:
                    self.max_req_len -= 1
                continue
            else:
                check_completion_code(rsp.completion_code)
            record_data.extend(rsp.record_data)
            req.offset = len(record_data)
            if len(record_data) >= 16:
                # full record assembled
                break
        next_record_id = rsp.next_record_id
        yield SelEntry(record_data)
        # 0xffff is the IPMI sentinel for "last record".
        if next_record_id == 0xffff:
            break
|
Generator which returns all SEL entries.
|
def options(self, request, *args, **kwargs):
    """
    Handle the OPTIONS HTTP method: respond with an ``Allow`` header
    listing every HTTP method this view implements.
    """
    allowed = [
        method.upper()
        for method in self.http_method_names
        if hasattr(self, method)
    ]
    response = self.render_to_response(None)
    response['Allow'] = ','.join(allowed)
    return response
|
Implements a OPTIONS HTTP method function returning all allowed HTTP
methods.
|
def location(self):
    """
    Returns a ``string`` constant indicating whether the game was played
    at the team's home venue, the opponent's venue, or at a neutral site.
    """
    # '' -> home, 'N' -> neutral, '@' -> away; any other marker yields None.
    venue_by_marker = {'': HOME, 'N': NEUTRAL, '@': AWAY}
    return venue_by_marker.get(self._location)
|
Returns a ``string`` constant to indicate whether the game was played
at the team's home venue, the opponent's venue, or at a neutral site.
|
def save(self, path):
    """Dump the class data in the format of a .netrc file.

    :param path: destination file path; any existing file is overwritten.
    """
    # Build the whole representation first, then write it in one go with a
    # context manager so the file handle is closed even if write() raises
    # (the original left the handle open on error). Joining a list also
    # avoids the quadratic cost of repeated string concatenation.
    chunks = []
    for host in self.hosts.keys():
        attrs = self.hosts[host]
        chunks.append("machine " + host + "\n\tlogin "
                      + six.text_type(attrs[0]) + "\n")
        if attrs[1]:
            chunks.append("account " + six.text_type(attrs[1]))
        chunks.append("\tpassword " + six.text_type(attrs[2]) + "\n")
    for macro in self.macros.keys():
        chunks.append("macdef " + macro + "\n")
        for line in self.macros[macro]:
            chunks.append(line)
        chunks.append("\n")
    with open(path, 'w') as f:
        f.write("".join(chunks))
|
Dump the class data in the format of a .netrc file.
|
def write_taxon_info(taxon, include_anc, output):
    """Write a human-readable description of `taxon` to the `output` stream,
    demonstrating the attributes of a taxon object.

    (currently some lines are commented out until the web-services call
    returns more info. See:
    https://github.com/OpenTreeOfLife/taxomachine/issues/85 ).

    If `include_anc` is True, ancestor information was requested, so the
    parent chain is written recursively (a None parent is then only expected
    at the root of the tree).
    """
    emit = output.write
    emit('Taxon info for OTT ID (ot:ottId) = {}\n'.format(taxon.ott_id))
    emit(' name (ot:ottTaxonName) = "{}"\n'.format(taxon.name))
    if taxon.synonyms:
        emit(' known synonyms: "{}"\n'.format('", "'.join(taxon.synonyms)))
    else:
        emit(' known synonyms: \n')
    emit(' OTT flags for this taxon: {}\n'.format(taxon.flags))
    emit(' The taxonomic rank associated with this name is: {}\n'.format(taxon.rank))
    emit(' The (unstable) node ID in the current taxomachine instance is: {}\n'.format(taxon.taxomachine_node_id))
    if not include_anc:
        return
    parent = taxon.parent
    if parent is None:
        emit('Taxon {c} is the root of the taxonomy.'.format(c=taxon.ott_id))
    else:
        emit('Taxon {c} is a child of {p}.\n'.format(c=taxon.ott_id, p=parent.ott_id))
        write_taxon_info(parent, True, output)
|
Writes out data from `taxon` to the `output` stream to demonstrate
the attributes of a taxon object.
(currently some lines are commented out until the web-services call returns more info. See:
https://github.com/OpenTreeOfLife/taxomachine/issues/85
).
If `include_anc` is True, then ancestor information was requested (so a None parent is only
expected at the root of the tree)
|
def cached(fun):
    """
    Memoizing decorator for linkage functions.

    Parameters are hardcoded (no ``*args``/``**kwargs`` magic) on purpose:
    the cache key is built from frozensets of the two cluster arguments
    (the ``distance_function`` is assumed constant), which is only valid
    for this specific use of interchangeable sets/frozensets — it is not a
    general-purpose memoizer.
    """
    memo = {}

    @wraps(fun)
    def newfun(a, b, distance_function):
        key = (frozenset(a), frozenset(b))
        if key not in memo:
            memo[key] = fun(a, b, distance_function)
        return memo[key]

    return newfun
|
memoizing decorator for linkage functions.
Parameters have been hardcoded (no ``*args``, ``**kwargs`` magic), because,
the way this is coded (interchangingly using sets and frozensets) is true
for this specific case. For other cases that is not necessarily guaranteed.
|
def from_requirement(cls, req, changes=None):
    """Create an instance from a :class:`pkg_resources.Requirement` instance.

    Uses the requirement's project name and its first specifier
    (e.g. ``'>=1.2'``), or an empty string when no specifier is present.
    """
    spec = ''.join(req.specs[0]) if req.specs else ''
    return cls(req.project_name, spec, changes=changes)
|
Create an instance from :class:`pkg_resources.Requirement` instance
|
def toggle_show_cd_only(self, checked):
    """Toggle the 'show current directory only' mode.

    Emits the ``show_cd_only`` option change, records the new state, and —
    when enabling — restores either the last visited folder or the original
    root index.
    """
    self.parent_widget.sig_option_changed.emit('show_cd_only', checked)
    self.show_cd_only = checked
    if not checked:
        return
    if self.__last_folder is not None:
        self.set_current_folder(self.__last_folder)
    elif self.__original_root_index is not None:
        self.setRootIndex(self.__original_root_index)
|
Toggle show current directory only mode
|
def __add_bank(self, account_id, **kwargs):
    """Call documentation: `/account/add_bank
    <https://www.wepay.com/developer/reference/account-2011-01-15#add_bank>`_,
    plus extra keyword parameters:

    :keyword str access_token: used instead of the instance's
        ``access_token``; with ``batch_mode=True`` it will be set as the
        `authorization` param.
    :keyword bool batch_mode: turn batch mode on/off, see
        :class:`wepay.api.WePay`
    :keyword str batch_reference_id: `reference_id` param for a batch call,
        see :class:`wepay.api.WePay`
    :keyword str api_version: WePay API version, see
        :class:`wepay.api.WePay`

    .. warning ::

        This call is deprecated as of API version '2014-01-08'.
    """
    return self.make_call(self.__add_bank, {'account_id': account_id}, kwargs)
|
Call documentation: `/account/add_bank
<https://www.wepay.com/developer/reference/account-2011-01-15#add_bank>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. warning ::
This call is deprecated as of API version '2014-01-08'.
|
def any_ends_with(self, string_list, pattern):
    """Return True iff ``pattern`` ends with at least one of the strings
    in ``string_list``.

    Non-string patterns (and an empty ``string_list``) yield False.
    """
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = str  # Python 3
    if not isinstance(pattern, string_types):
        return False
    return any(pattern.endswith(suffix) for suffix in string_list)
|
Returns true iff one of the strings in string_list ends in
pattern.
|
def incr_obj(obj, **attrs):
    """Increment attributes on *obj*.

    Each keyword argument names an attribute and the amount to add to it.
    Attributes that are missing or ``None`` are treated as 0 before the
    increment is applied.
    """
    # ``dict.items()`` works on Python 2 and 3; the original used
    # ``iteritems()``, which raises AttributeError on Python 3.
    for name, value in attrs.items():
        current = getattr(obj, name, None)
        if current is None:
            current = 0
        setattr(obj, name, current + value)
|
Increments context variables
|
def _merge_wf_inputs(new, out, wf_outputs, to_ignore, parallel, nested_inputs):
    """Merge inputs for a sub-workflow, adding any not present inputs in out.

    Skips inputs that are internally generated or generated and ignored, keeping
    only as inputs those that we do not generate internally.

    :param new: candidate input variable records (dicts with at least "id");
        presumably CWL-style variable descriptions — TODO confirm upstream.
    :param out: accumulated workflow input records; mutated in place and
        also returned.
    :param wf_outputs: records produced by the enclosing workflow; their ids
        are always excluded from the new inputs.
    :param to_ignore: variables to skip unless they come from the external
        process (i.e. appear in wf_outputs).
    :param parallel: NOTE(review): accepted but unused in this function —
        confirm whether callers rely on the signature only.
    :param nested_inputs: ids whose records must be flattened via
        _flatten_nested_input before being added.
    :returns: tuple of (out, remapped_new) where remapped_new carries a
        "source" pointing at the base id when that base id is produced by
        an entry in out.
    """
    internal_generated_ids = []
    for vignore in to_ignore:
        vignore_id = _get_string_vid(vignore)
        # ignore anything we generate internally, but not those we need to pull in
        # from the external process
        if vignore_id not in [v["id"] for v in wf_outputs]:
            internal_generated_ids.append(vignore_id)
    ignore_ids = set(internal_generated_ids + [v["id"] for v in wf_outputs])
    cur_ids = set([v["id"] for v in out])
    remapped_new = []
    for v in new:
        remapped_v = copy.deepcopy(v)
        outv = copy.deepcopy(v)
        # expose the variable under its base id, sourced from the full id
        outv["id"] = get_base_id(v["id"])
        outv["source"] = v["id"]
        if outv["id"] not in cur_ids and outv["id"] not in ignore_ids:
            if nested_inputs and v["id"] in nested_inputs:
                outv = _flatten_nested_input(outv)
            out.append(outv)
        # note: reads back entries just appended to `out` above, so order matters;
        # assumes every record in `out` carries a "source" key — TODO confirm
        if remapped_v["id"] in set([v["source"] for v in out]):
            remapped_v["source"] = get_base_id(remapped_v["id"])
        remapped_new.append(remapped_v)
    return out, remapped_new
|
Merge inputs for a sub-workflow, adding any not present inputs in out.
Skips inputs that are internally generated or generated and ignored, keeping
only as inputs those that we do not generate internally.
|
def update(self, device_json=None, info_json=None, settings_json=None,
           avatar_json=None):
    """Update the internal device json data.

    Each provided payload is merged into the corresponding cached json
    structure; omitted (or falsy) payloads are skipped.
    """
    merges = (
        (device_json, self._device_json),
        (avatar_json, self._avatar_json),
        (info_json, self._info_json),
        (settings_json, self._settings_json),
    )
    for payload, target in merges:
        if payload:
            UTILS.update(target, payload)
|
Update the internal device json data.
|
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None):
    """
    Create an RDD of Vectors whose entries are i.i.d. samples drawn from
    the Gamma distribution.

    :param sc: SparkContext used to create the RDD.
    :param shape: Shape (> 0) of the Gamma distribution
    :param scale: Scale (> 0) of the Gamma distribution
    :param numRows: Number of Vectors in the RDD.
    :param numCols: Number of elements in each Vector.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).

    >>> import numpy as np
    >>> from math import sqrt
    >>> shape = 1.0
    >>> scale = 2.0
    >>> expMean = shape * scale
    >>> expStd = sqrt(shape * scale * scale)
    >>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
    >>> mat.shape
    (100, 100)
    >>> abs(mat.mean() - expMean) < 0.1
    True
    >>> abs(mat.std() - expStd) < 0.1
    True
    """
    # The JVM-side helper expects double-precision parameters.
    gamma_shape = float(shape)
    gamma_scale = float(scale)
    return callMLlibFunc("gammaVectorRDD", sc._jsc, gamma_shape, gamma_scale,
                         numRows, numCols, numPartitions, seed)
|
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Gamma distribution.
:param sc: SparkContext used to create the RDD.
:param shape: Shape (> 0) of the Gamma distribution
:param scale: Scale (> 0) of the Gamma distribution
:param numRows: Number of Vectors in the RDD.
:param numCols: Number of elements in each Vector.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
>>> import numpy as np
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
|
def validate_realms(self, client_key, token, request, uri=None,
                    realms=None):
    """Check if the token has permission on those realms.

    Uses the access token already attached to the request when present,
    otherwise fetches it via the token getter and caches it on the request.
    Returns True when the token's realms cover every requested realm.
    """
    log.debug('Validate realms %r for %r', realms, client_key)
    tok = request.access_token
    if not tok:
        tok = self._tokengetter(client_key=client_key, token=token)
        request.access_token = tok
    if not tok:
        return False
    return set(tok.realms).issuperset(set(realms))
|
Check if the token has permission on those realms.
|
def build(self, builder):
    """
    Build this element: emit a tag named after the concrete class whose
    text content is the country code.

    :param builder: tree builder with ``start``/``data``/``end`` methods
    :return:
    """
    tag = self.__class__.__name__
    builder.start(tag)
    builder.data(self.country_code)
    builder.end(tag)
|
Build this element
:param builder:
:return:
|
def reboot(name, call=None):
    '''
    Reboot a vagrant minion.

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    # Resolve the host machine that runs this VM through its profile.
    profile_name = _get_my_info(name)[name]['profile']
    host = __opts__['profiles'][profile_name]['host']
    return salt.client.LocalClient().cmd(host, 'vagrant.reboot', [name])
|
Reboot a vagrant minion.
name
The name of the VM to reboot.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
|
def _system_path(self, subdir, basename=''):
'''
Gets the full path to the 'subdir/basename' file in the system binwalk directory.
@subdir - Subdirectory inside the system binwalk directory.
@basename - File name inside the subdirectory.
Returns the full path to the 'subdir/basename' file.
'''
try:
return self._file_path(os.path.join(self.system_dir, subdir), basename)
except KeyboardInterrupt as e:
raise e
except Exception:
return None
|
Gets the full path to the 'subdir/basename' file in the system binwalk directory.
@subdir - Subdirectory inside the system binwalk directory.
@basename - File name inside the subdirectory.
Returns the full path to the 'subdir/basename' file.
|
def _is_cp_helper(self, choi, atol, rtol):
    """Return True if the Choi matrix is positive semidefinite, i.e. the
    channel is completely-positive (CP)."""
    # Fall back to instance-level tolerances when none are supplied.
    tol_abs = self._atol if atol is None else atol
    tol_rel = self._rtol if rtol is None else rtol
    return is_positive_semidefinite_matrix(choi, rtol=tol_rel, atol=tol_abs)
|
Test if a channel is completely-positive (CP)
|
def stream_list(self, id, listener, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
    """
    Stream events for the current user, restricted to accounts on the given
    list.

    :param id: List to stream; a bare ID or an entity dict (normalized via
        ``__unpack_id``).
    :param listener: Stream listener object that receives the events.
    :param run_async: Run the stream in the background.
    :param timeout: Connection timeout for the stream.
    :param reconnect_async: Automatically reconnect after connection loss.
    :param reconnect_async_wait_sec: Seconds to wait between reconnect attempts.
    :return: Whatever ``__stream`` returns — presumably a stream handle when
        run_async is set; confirm against ``__stream``.
    """
    # Accept either a bare ID or a full list entity.
    id = self.__unpack_id(id)
    return self.__stream("/api/v1/streaming/list?list={}".format(id), listener, run_async=run_async, timeout=timeout, reconnect_async=reconnect_async, reconnect_async_wait_sec=reconnect_async_wait_sec)
|
Stream events for the current user, restricted to accounts on the given
list.
|
def dimensioned_streams(dmap):
    """
    Given a DynamicMap return all streams that have any dimensioned
    parameters, i.e. parameters also listed in the key dimensions.
    """
    kdim_names = {str(kd) for kd in dmap.kdims}
    # Keep a stream when its parameter names intersect the key dimensions.
    return [stream for stream in dmap.streams
            if kdim_names & set(stream_parameters([stream]))]
|
Given a DynamicMap return all streams that have any dimensioned
parameters i.e parameters also listed in the key dimensions.
|
def List(self, listName, exclude_hidden_fields=False):
    """Sharepoint Lists Web Service

    Microsoft Developer Network:
    The Lists Web service provides methods for working
    with SharePoint lists, content types, list items, and files.
    """
    # Hand every connection-level setting down to the list wrapper.
    return _List(self._session,
                 listName,
                 self._url,
                 self._verify_ssl,
                 self.users,
                 self.huge_tree,
                 self.timeout,
                 exclude_hidden_fields=exclude_hidden_fields)
|
Sharepoint Lists Web Service
Microsoft Developer Network:
The Lists Web service provides methods for working
with SharePoint lists, content types, list items, and files.
|
def plot_chmap(cube, kidid, ax=None, **kwargs):
    """Plot an intensity map for a single KID channel.

    Args:
        cube (xarray.DataArray): Cube which the spectrum information is included.
        kidid (int): Kidid.
        ax (matplotlib.axes): Axis the figure is plotted on.
        kwargs (optional): Plot options passed to ax.pcolormesh().

    Returns:
        The QuadMesh created by ax.pcolormesh().

    Raises:
        KeyError: If `kidid` is not present in the cube.
    """
    if ax is None:
        ax = plt.gca()

    index = np.where(cube.kidid == kidid)[0]
    if len(index) == 0:
        raise KeyError('Such a kidid does not exist.')
    # Take the first match explicitly: int() on a multi-element array
    # raises, and int() on any ndarray is deprecated in recent numpy.
    index = int(index[0])

    im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('intensity map ch #{}'.format(kidid))
    return im
|
Plot an intensity map.
Args:
cube (xarray.DataArray): Cube which the spectrum information is included.
kidid (int): Kidid.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.imshow().
|
def curtailment(network, carrier='solar', filename=None):
    """
    Plot curtailment of selected carrier.

    Parameters
    ----------
    network : PyPSA network container
        Holds topology of grid including results from powerflow analysis
    carrier : str
        Plot curtailment of this carrier
    filename : str or None
        Save figure to this path; if None, show it interactively

    Returns
    -------
    Plot
    """
    # Dispatched power aggregated per carrier.
    p_by_carrier = network.generators_t.p.groupby(
        network.generators.carrier, axis=1).sum()
    capacity = network.generators.groupby("carrier").sum().at[carrier, "p_nom"]
    # Maximum available power per generator: per-unit availability * p_nom.
    p_available = network.generators_t.p_max_pu.multiply(
        network.generators["p_nom"])
    p_available_by_carrier = p_available.groupby(
        network.generators.carrier, axis=1).sum()
    p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier
    print(p_curtailed_by_carrier.sum())
    p_df = pd.DataFrame({carrier +
                         " available": p_available_by_carrier[carrier],
                         carrier +
                         " dispatched": p_by_carrier[carrier], carrier +
                         " curtailed": p_curtailed_by_carrier[carrier]})

    p_df[carrier + " capacity"] = capacity
    # Clamp numerical noise: negative "curtailed" values become zero.
    # Direct column assignment via clip() avoids pandas' chained-assignment
    # pitfall (p_df[col][mask] = 0. may write to a temporary copy).
    p_df[carrier + " curtailed"] = p_df[carrier + " curtailed"].clip(lower=0.)

    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(12, 6)
    p_df[[carrier + " dispatched", carrier + " curtailed"]
         ].plot(kind="area", ax=ax, linewidth=3)
    p_df[[carrier + " available", carrier + " capacity"]
         ].plot(ax=ax, linewidth=3)

    ax.set_xlabel("")
    ax.set_ylabel("Power [MW]")
    ax.set_ylim([0, capacity * 1.1])
    ax.legend()
    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        plt.close()
|
Plot curtailment of selected carrier
Parameters
----------
network : PyPSA network container
Holds topology of grid including results from powerflow analysis
carrier: str
Plot curtailemt of this carrier
filename: str or None
Save figure in this direction
Returns
-------
Plot
|
def on_click(self, button, **kwargs):
    """
    Maps a click event with its associated callback.

    Currently implemented events are:

    ============ ================ =========
    Event        Callback setting Button ID
    ============ ================ =========
    Left click   on_leftclick     1
    Middle click on_middleclick   2
    Right click  on_rightclick    3
    Scroll up    on_upscroll      4
    Scroll down  on_downscroll    5
    Others       on_otherclick    > 5
    ============ ================ =========

    The action is determined by the nature (type and value) of the callback
    setting in the following order:

    1. If null callback (``None``), no action is taken.
    2. If it's a `python function`, call it and pass any additional
       arguments.
    3. If it's name of a `member method` of current module (string), call
       it and pass any additional arguments.
    4. If the name does not match with `member method` name execute program
       with such name.

    .. seealso:: :ref:`callbacks` for more information about
       callback settings and examples.

    :param button: The ID of button event received from i3bar.
    :param kwargs: Further information received from i3bar like the
        positions of the mouse where the click occured.
    :return: Returns ``True`` if a valid callback action was executed.
        ``False`` otherwise.
    """
    # NOTE(review): despite the ":return:" claim above, there is no return
    # statement below — this method currently always returns None.
    actions = ['leftclick', 'middleclick', 'rightclick',
               'upscroll', 'downscroll']
    try:
        # Button IDs are 1-based; map 1..5 onto the named actions.
        action = actions[button - 1]
    except (TypeError, IndexError):
        self.__log_button_event(button, None, None, "Other button")
        action = "otherclick"

    m_click = self.__multi_click

    with m_click.lock:
        double = m_click.check_double(button)
        double_action = 'double%s' % action

        if double:
            action = double_action

        # Get callback function
        cb = getattr(self, 'on_%s' % action, None)

        double_handler = getattr(self, 'on_%s' % double_action, None)
        # A single click with a registered double-click handler is delayed,
        # so a second click within the window can upgrade it to a double.
        delay_execution = (not double and double_handler)

        if delay_execution:
            m_click.set_timer(button, cb, **kwargs)
        else:
            self.__button_callback_handler(button, cb, **kwargs)
|
Maps a click event with its associated callback.
Currently implemented events are:
============ ================ =========
Event Callback setting Button ID
============ ================ =========
Left click on_leftclick 1
Middle click on_middleclick 2
Right click on_rightclick 3
Scroll up on_upscroll 4
Scroll down on_downscroll 5
Others on_otherclick > 5
============ ================ =========
The action is determined by the nature (type and value) of the callback
setting in the following order:
1. If null callback (``None``), no action is taken.
2. If it's a `python function`, call it and pass any additional
arguments.
3. If it's name of a `member method` of current module (string), call
it and pass any additional arguments.
4. If the name does not match with `member method` name execute program
with such name.
.. seealso:: :ref:`callbacks` for more information about
callback settings and examples.
:param button: The ID of button event received from i3bar.
:param kwargs: Further information received from i3bar like the
positions of the mouse where the click occured.
:return: Returns ``True`` if a valid callback action was executed.
``False`` otherwise.
|
def get_template_id(self, template_id_short):
    """
    Obtain a template ID from the WeChat template library.

    Details: http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html

    :param template_id_short: Template number in the template library,
        e.g. of the form "TM**" or "OPENTMTM**".
    :return: The parsed JSON response data.
    """
    api_url = 'https://api.weixin.qq.com/cgi-bin/template/api_add_template'
    payload = {'template_id_short': str(template_id_short)}
    return self.request.post(url=api_url, data=payload)
|
获得模板ID
详情请参考 http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html
:param template_id_short: 模板库中模板的编号,有“TM**”和“OPENTMTM**”等形式
:return: 返回的 JSON 数据包
|
def p_objectitem_0(self, p):
    '''
    objectitem : objectkey EQUAL number
               | objectkey EQUAL BOOL
               | objectkey EQUAL STRING
               | objectkey EQUAL object
               | objectkey EQUAL list
    '''
    # NOTE: the docstring above IS the PLY grammar production for this
    # rule — the parser is generated from it, so its text must not change.
    if DEBUG:
        self.print_p(p)
    # Reduce to a (key, value) tuple; p[2] is the EQUAL token and is dropped.
    p[0] = (p[1], p[3])
|
objectitem : objectkey EQUAL number
| objectkey EQUAL BOOL
| objectkey EQUAL STRING
| objectkey EQUAL object
| objectkey EQUAL list
|
def apply_boundary_conditions_to_cm(external_indices, cm):
    """Return a copy of the connectivity matrix with every connection to
    or from the external nodes removed."""
    severed = cm.copy()
    severed[external_indices, :] = 0  # outgoing connections (rows)
    severed[:, external_indices] = 0  # incoming connections (columns)
    return severed
|
Remove connections to or from external nodes.
|
def act(self, action):
    """
    Take one action for one step.

    :param action: Index into the player's control configuration
        (must be < self.actions_num).
    :return: Tuple ``(observation, reward, terminal, info)`` in the usual
        gym-style layout; ``info`` is always an empty dict here.
    """
    # FIXME: Hack to change in return type
    action = int(action)
    assert isinstance(action, int)
    assert action < self.actions_num, "%r (%s) invalid"%(action, type(action))

    # Reset buttons
    for k in self.world_layer.buttons:
        self.world_layer.buttons[k] = 0
    # Apply each button defined in action config
    for key in self.world_layer.player.controls[action]:
        if key in self.world_layer.buttons:
            self.world_layer.buttons[key] = 1

    # Act in the environment
    self.step()

    # Gather the post-step results from the world layer.
    observation = self.world_layer.get_state()
    reward = self.world_layer.player.get_reward()
    terminal = self.world_layer.player.game_over
    info = {}
    return observation, reward, terminal, info
|
Take one action for one step
|
def translate_output_properties(res: 'Resource', output: Any) -> Any:
    """
    Recursively rewrite keys of objects returned by the engine to conform
    with the naming convention defined by the resource's
    `translate_output_property` implementation.

    Dict keys are translated (values recursed into); list elements are
    recursed into; any other value is returned unchanged.
    """
    if isinstance(output, list):
        return [translate_output_properties(res, item) for item in output]
    if isinstance(output, dict):
        translated = {}
        for key, value in output.items():
            translated[res.translate_output_property(key)] = \
                translate_output_properties(res, value)
        return translated
    # Primitive leaf: nothing to rewrite.
    return output
|
Recursively rewrite keys of objects returned by the engine to conform with a naming
convention specified by the resource's implementation of `translate_output_property`.
If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed
by recursing.
If output is a `list`, every value is recursively transformed.
If output is a primitive (i.e. not a dict or list), the value is returned without modification.
|
def create_single_dialog_train_example(context_dialog_path, candidate_dialog_paths, rng, positive_probability,
                                       minimum_context_length=2, max_context_length=20):
    """
    Create a single (context, response, label) example for the training set.

    :param context_dialog_path: dialog file providing the context
    :param candidate_dialog_paths: pool of dialogs for negative sampling
    :param rng: random number generator
    :param positive_probability: chance of emitting a positive example
    :return: tuple of (context string, response string, label 1.0/0.0)
    """
    dialog = translate_dialog_to_lists(context_dialog_path)

    context_str, next_utterance_ix = create_random_context(
        dialog, rng,
        minimum_context_length=minimum_context_length,
        max_context_length=max_context_length)

    if positive_probability > rng.random():
        # Positive example: the true next utterance of this dialog.
        response = singe_user_utterances_to_string(dialog[next_utterance_ix])
        return context_str, response, 1.0

    # Negative example: a random utterance drawn from the candidate corpus.
    distractor = get_random_utterances_from_corpus(
        candidate_dialog_paths, rng, 1,
        min_turn=minimum_context_length + 1,
        max_turn=max_context_length)[0]
    return context_str, distractor, 0.0
|
Creates a single example for training set.
:param context_dialog_path:
:param candidate_dialog_paths:
:param rng:
:param positive_probability:
:return:
|
def linkify_s_by_sd(self, services):
    """Add dependency in service objects

    For each servicedep item, register both a notification ("act") and a
    check ("chk") dependency between the dependent service and its parent
    in the given services collection.

    :param services: services item collection to update
    :return: None
    """
    for servicedep in self:
        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string", "undefined")
        setattr(servicedep, "dependent_service_description_string", "undefined")

        # Skip incomplete definitions: both ends of the dependency required.
        if getattr(servicedep, 'service_description', None) is None or\
                getattr(servicedep, 'dependent_service_description', None) is None:
            continue
        services.add_act_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.notification_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)

        services.add_chk_dependency(servicedep.dependent_service_description,
                                    servicedep.service_description,
                                    servicedep.execution_failure_criteria,
                                    getattr(servicedep, 'dependency_period', ''),
                                    servicedep.inherits_parent)

        # Only used for debugging purpose when loops are detected
        setattr(servicedep, "service_description_string",
                services[servicedep.service_description].get_name())
        setattr(servicedep, "dependent_service_description_string",
                services[servicedep.dependent_service_description].get_name())
|
Add dependency in service objects
:return: None
|
def _discover_refs(self, remote=False):
    """Get the current list of local or remote refs."""
    if remote:
        cmd = ['git', 'ls-remote', '-h', '-t', '--exit-code', 'origin']
        separator = '\t'
        # ls-remote exits 2 when no matching refs exist; treat as empty.
        skip_codes = [2]
    else:
        # Check first whether the local repo is empty;
        # running 'show-ref' in empty repos gives an error.
        if self.is_empty():
            raise EmptyRepositoryError(repository=self.uri)

        cmd = ['git', 'show-ref', '--heads', '--tags']
        separator = ' '
        # show-ref exits 1 when no matching refs exist; treat as empty.
        skip_codes = [1]

    # Ignoring the "no matching refs" exit codes keeps the whole process
    # from failing on repositories without heads or tags.
    raw = self._exec(cmd, cwd=self.dirpath,
                     env=self.gitenv,
                     ignored_error_codes=skip_codes)
    raw = raw.decode('utf-8', errors='surrogateescape').rstrip()

    refs = []
    for line in (raw.split('\n') if raw else []):
        fields = line.split(separator)
        refs.append(GitRef(fields[0], fields[1]))
    return refs
|
Get the current list of local or remote refs.
|
def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
    """
    A Callable is a representation of a function in the binary that can be interacted with like a native python
    function.

    :param addr:            The address of the function to use
    :param concrete_only:   Throw an exception if the execution splits into multiple states
    :param perform_merge:   Merge all result states into one at the end (only relevant if concrete_only=False)
    :param base_state:      The state from which to do these runs
    :param toc:             The address of the table of contents for ppc64
    :param cc:              The SimCC to use for a calling convention
    :returns:               A Callable object that can be used as an interface for executing guest code like a
                            python function.
    :rtype:                 angr.callable.Callable
    """
    # Forward every option unchanged to the Callable wrapper.
    options = dict(addr=addr,
                   concrete_only=concrete_only,
                   perform_merge=perform_merge,
                   base_state=base_state,
                   toc=toc,
                   cc=cc)
    return Callable(self.project, **options)
|
A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as a interface for executing guest code like a
python function.
:rtype: angr.callable.Callable
|
def _parse_raw_data(self):
    """
    Parses the incoming data and determines if it is valid. Valid
    data gets placed into self._messages

    :return: None
    """
    if self._START_OF_FRAME in self._raw and self._END_OF_FRAME in self._raw:
        # Drop garbage bytes that precede the start-of-frame marker.
        while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
            self._raw.pop(0)

        if self._raw[0] == self._START_OF_FRAME:
            self._raw.pop(0)

            # Slice out everything up to the end-of-frame marker.
            eof_index = self._raw.index(self._END_OF_FRAME)
            raw_message = self._raw[:eof_index]
            self._raw = self._raw[eof_index:]
            logger.debug('raw message: {}'.format(raw_message))
            message = self._remove_esc_chars(raw_message)
            logger.debug('message with checksum: {}'.format(message))

            # Last two bytes carry the Fletcher-16 checksum, low byte first.
            expected_checksum = (message[-1] << 8) | message[-2]
            logger.debug('checksum: {}'.format(expected_checksum))

            message = message[:-2]  # checksum bytes
            logger.debug('message: {}'.format(message))

            sum1, sum2 = self._fletcher16_checksum(message)
            calculated_checksum = (sum2 << 8) | sum1

            if expected_checksum == calculated_checksum:
                message = message[2:]  # remove length
                logger.debug('valid message received: {}'.format(message))
                self._messages.append(message)
            else:
                # Checksum mismatch: drop the frame, keep parsing the rest.
                logger.warning('invalid message received: {}, discarding'.format(message))
                logger.debug('expected checksum: {}, calculated checksum: {}'.format(expected_checksum, calculated_checksum))

    # remove any extra bytes at the beginning
    try:
        while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
            self._raw.pop(0)
    except IndexError:
        pass
|
Parses the incoming data and determines if it is valid. Valid
data gets placed into self._messages
:return: None
|
def update(name,
           password=None,
           fullname=None,
           description=None,
           home=None,
           homedrive=None,
           logonscript=None,
           profile=None,
           expiration_date=None,
           expired=None,
           account_disabled=None,
           unlock_account=None,
           password_never_expires=None,
           disallow_change_password=None):
    # pylint: disable=anomalous-backslash-in-string
    '''
    Updates settings for the windows user. Name is the only required parameter.
    Settings will only be changed if the parameter is passed a value.

    .. versionadded:: 2015.8.0

    Args:

        name (str): The user name to update.

        password (str, optional): New user password in plain text.

        fullname (str, optional): The user's full name.

        description (str, optional): A brief description of the user account.

        home (str, optional): The path to the user's home directory.

        homedrive (str, optional): The drive letter to assign to the home
            directory. Must be the Drive Letter followed by a colon. ie: U:

        logonscript (str, optional): The path to the logon script.

        profile (str, optional): The path to the user's profile directory.

        expiration_date (date, optional): The date and time when the account
            expires. Can be a valid date/time string. To set to never expire
            pass the string 'Never'.

        expired (bool, optional): Pass `True` to expire the account. The user
            will be prompted to change their password at the next logon. Pass
            `False` to mark the account as 'not expired'. You can't use this to
            negate the expiration if the expiration was caused by the account
            expiring. You'll have to change the `expiration_date` as well.

        account_disabled (bool, optional): True disables the account. False
            enables the account.

        unlock_account (bool, optional): True unlocks a locked user account.
            False is ignored.

        password_never_expires (bool, optional): True sets the password to never
            expire. False allows the password to expire.

        disallow_change_password (bool, optional): True blocks the user from
            changing the password. False allows the user to change the password.

    Returns:
        bool: True if successful. False is unsuccessful.

    CLI Example:

    .. code-block:: bash

        salt '*' user.update bob password=secret profile=C:\\Users\\Bob
                home=\\server\homeshare\bob homedrive=U:
    '''
    # pylint: enable=anomalous-backslash-in-string
    # On Python 2, normalize every string argument to unicode first.
    if six.PY2:
        name = _to_unicode(name)
        password = _to_unicode(password)
        fullname = _to_unicode(fullname)
        description = _to_unicode(description)
        home = _to_unicode(home)
        homedrive = _to_unicode(homedrive)
        logonscript = _to_unicode(logonscript)
        profile = _to_unicode(profile)

    # Make sure the user exists
    # Return an object containing current settings for the user
    try:
        user_info = win32net.NetUserGetInfo(None, name, 4)
    except win32net.error as exc:
        log.error('Failed to update user %s', name)
        log.error('nbr: %s', exc.winerror)
        log.error('ctx: %s', exc.funcname)
        log.error('msg: %s', exc.strerror)
        return False

    # Check parameters to update
    # Update the user object with new settings
    if password:
        user_info['password'] = password
    if home:
        user_info['home_dir'] = home
    if homedrive:
        user_info['home_dir_drive'] = homedrive
    if description:
        user_info['comment'] = description
    if logonscript:
        user_info['script_path'] = logonscript
    if fullname:
        user_info['full_name'] = fullname
    if profile:
        user_info['profile'] = profile
    if expiration_date:
        if expiration_date == 'Never':
            user_info['acct_expires'] = win32netcon.TIMEQ_FOREVER
        else:
            try:
                dt_obj = salt.utils.dateutils.date_cast(expiration_date)
            except (ValueError, RuntimeError):
                # NOTE(review): returns an error *string* here, not False as
                # the docstring's bool contract documents — confirm callers.
                return 'Invalid Date/Time Format: {0}'.format(expiration_date)
            user_info['acct_expires'] = time.mktime(dt_obj.timetuple())
    if expired is not None:
        if expired:
            user_info['password_expired'] = 1
        else:
            user_info['password_expired'] = 0
    # The remaining booleans are bitflags on user_info['flags']:
    # |= sets the flag, &= ~ clears it.
    if account_disabled is not None:
        if account_disabled:
            user_info['flags'] |= win32netcon.UF_ACCOUNTDISABLE
        else:
            user_info['flags'] &= ~win32netcon.UF_ACCOUNTDISABLE
    if unlock_account is not None:
        # False is deliberately ignored: locking an account this way is
        # not supported, only unlocking.
        if unlock_account:
            user_info['flags'] &= ~win32netcon.UF_LOCKOUT
    if password_never_expires is not None:
        if password_never_expires:
            user_info['flags'] |= win32netcon.UF_DONT_EXPIRE_PASSWD
        else:
            user_info['flags'] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
    if disallow_change_password is not None:
        if disallow_change_password:
            user_info['flags'] |= win32netcon.UF_PASSWD_CANT_CHANGE
        else:
            user_info['flags'] &= ~win32netcon.UF_PASSWD_CANT_CHANGE

    # Apply new settings
    try:
        win32net.NetUserSetInfo(None, name, 4, user_info)
    except win32net.error as exc:
        log.error('Failed to update user %s', name)
        log.error('nbr: %s', exc.winerror)
        log.error('ctx: %s', exc.funcname)
        log.error('msg: %s', exc.strerror)
        return False

    return True
|
Updates settings for the windows user. Name is the only required parameter.
Settings will only be changed if the parameter is passed a value.
.. versionadded:: 2015.8.0
Args:
name (str): The user name to update.
password (str, optional): New user password in plain text.
fullname (str, optional): The user's full name.
description (str, optional): A brief description of the user account.
home (str, optional): The path to the user's home directory.
homedrive (str, optional): The drive letter to assign to the home
directory. Must be the Drive Letter followed by a colon. ie: U:
logonscript (str, optional): The path to the logon script.
profile (str, optional): The path to the user's profile directory.
expiration_date (date, optional): The date and time when the account
expires. Can be a valid date/time string. To set to never expire
pass the string 'Never'.
expired (bool, optional): Pass `True` to expire the account. The user
will be prompted to change their password at the next logon. Pass
`False` to mark the account as 'not expired'. You can't use this to
negate the expiration if the expiration was caused by the account
expiring. You'll have to change the `expiration_date` as well.
account_disabled (bool, optional): True disables the account. False
enables the account.
unlock_account (bool, optional): True unlocks a locked user account.
False is ignored.
password_never_expires (bool, optional): True sets the password to never
expire. False allows the password to expire.
disallow_change_password (bool, optional): True blocks the user from
changing the password. False allows the user to change the password.
Returns:
bool: True if successful. False is unsuccessful.
CLI Example:
.. code-block:: bash
salt '*' user.update bob password=secret profile=C:\\Users\\Bob
home=\\server\homeshare\bob homedrive=U:
|
def new_section(self, name, params=None):
    """Create, register and return a new section."""
    key = name.lower()
    section = SectionTerm(None, name, term_args=params, doc=self)
    self.sections[key] = section
    # Apply declared default arguments, when this section was pre-declared.
    if key in self.decl_sections:
        section.args = self.decl_sections[key]['args']
    return section
|
Return a new section
|
def get_request_payment(self, bucket):
    """
    Get the request payment configuration on a bucket.

    @param bucket: The name of the bucket.
    @return: A C{Deferred} that will fire with the name of the payer.
    """
    details = self._details(
        method=b"GET",
        url_context=self._url_context(bucket=bucket, object_name="?requestPayment"),
    )
    deferred = self._submit(self._query_factory(details))
    # Parse the XML payer response before handing the result to the caller.
    deferred.addCallback(self._parse_get_request_payment)
    return deferred
|
Get the request payment configuration on a bucket.
@param bucket: The name of the bucket.
@return: A C{Deferred} that will fire with the name of the payer.
|
def code(self):
    """
    Return the accumulated messages as a newline-joined string.

    Error messages take precedence: if any were recorded, only the
    errors are returned; otherwise the automatic messages are used.
    None entries and duplicates are dropped, preserving first-seen order.
    """
    def _dedupe(items):
        """Order-preserving removal of None entries and duplicates."""
        seen = set()
        out = []
        for item in items:
            if item is not None and item not in seen:
                seen.add(item)
                out.append(item)
        return out

    errors = _dedupe(self.errors)
    if errors:
        return '\n'.join(errors)
    return '\n'.join(_dedupe(self.autos))
|
code
|
def turbulent_Sieder_Tate(Re, Pr, mu=None, mu_w=None):
    r'''Calculates internal convection Nusselt number for turbulent flows
    in pipe according to [1]_ and supposedly [2]_.

    .. math::
        Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    Pr : float
        Prandtl number, [-]
    mu : float
        Viscosity of fluid, [Pa*s]
    mu_w : float
        Viscosity of fluid at wall temperature, [Pa*s]

    Returns
    -------
    Nu : float
        Nusselt number, [-]

    Notes
    -----
    A linear coefficient of 0.023 is often listed with this equation. The
    source of the discrepancy is not known. The equation is not present in the
    original paper, but is nevertheless the source usually cited for it.

    Examples
    --------
    >>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
    286.9178136793052
    >>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
    219.84016455766044

    References
    ----------
    .. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
       Transfer, 3E. New York: McGraw-Hill, 1998.
    .. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
       Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
       (December 1, 1936): 1429-35. doi:10.1021/ie50324a027.
    '''
    # Viscosity correction term is applied only when both viscosities
    # are given; otherwise it is unity.
    correction = (mu/mu_w)**0.14 if (mu_w and mu) else 1.0
    return 0.027*Re**0.8*Pr**(1/3.)*correction
|
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [1]_ and supposedly [2]_.
.. math::
Nu = 0.027Re^{4/5}Pr^{1/3}\left(\frac{\mu}{\mu_s}\right)^{0.14}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
mu : float
Viscosity of fluid, [Pa*s]
mu_w : float
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
A linear coefficient of 0.023 is often listed with this equation. The
source of the discrepancy is not known. The equation is not present in the
original paper, but is nevertheless the source usually cited for it.
Examples
--------
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2)
286.9178136793052
>>> turbulent_Sieder_Tate(Re=1E5, Pr=1.2, mu=0.01, mu_w=0.067)
219.84016455766044
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Sieder, E. N., and G. E. Tate. "Heat Transfer and Pressure Drop of
Liquids in Tubes." Industrial & Engineering Chemistry 28, no. 12
(December 1, 1936): 1429-35. doi:10.1021/ie50324a027.
|
def __delete(self, subscription_plan_id, **kwargs):
    """Call documentation: `/subscription_plan/delete
    <https://www.wepay.com/developer/reference/subscription_plan#delete>`_,
    plus extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
       ``access_token``, with ``batch_mode=True`` will set `authorization`
       param to it's value.

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`

    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`

    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`
    """
    params = {
        'subscription_plan_id': subscription_plan_id
    }
    # The method object itself (name-mangled self.__delete) is passed to
    # make_call — presumably so it can resolve the endpoint; confirm there.
    return self.make_call(self.__delete, params, kwargs)
|
Call documentation: `/subscription_plan/delete
<https://www.wepay.com/developer/reference/subscription_plan#delete>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
|
def build_vcf_deletion(x, genome_2bit):
    """Provide representation of deletion from BedPE breakpoints.
    """
    # Reference base at the first breakpoint, upper-cased from the 2bit genome.
    ref_base = genome_2bit[x.chrom1].get(x.start1, x.start1 + 1).upper()
    variant_id = "hydra{0}".format(x.name)
    info = _vcf_single_end_info(x, "DEL", True)
    return VcfLine(x.chrom1, x.start1, variant_id, ref_base, "<DEL>", info)
|
Provide representation of deletion from BedPE breakpoints.
|
def hoverMoveEvent(self, event):
    """
    Prompts the tool tip for this node based on the inputed event.

    :param      event | <QHoverEvent>
    """
    # process the parent event
    super(XNode, self).hoverMoveEvent(event)

    self._hovered = True

    # hover over a hotspot
    hotspot = self.hotspotAt(event.pos())
    if not hotspot:
        # Fall back to dropzones when no hotspot is under the cursor.
        hotspot = self.dropzoneAt(event.pos())

    old_spot = self._hoverSpot
    if hotspot and hotspot != old_spot:
        # update the new hotspot
        self._hoverSpot = hotspot

        # Leave the previous spot before entering the new one.
        if old_spot:
            old_spot.hoverLeaveEvent(event)

        if hotspot.hoverEnterEvent(event):
            self.update()

    elif old_spot and not hotspot:
        # Cursor moved off the last spot entirely.
        self._hoverSpot = None

        if old_spot.hoverLeaveEvent(event):
            self.update()

    # Show the spot-specific tooltip, or restore the node's default one.
    if hotspot and hotspot.toolTip():
        super(XNode, self).setToolTip(hotspot.toolTip())
    else:
        super(XNode, self).setToolTip(self._toolTip)
|
Prompts the tool tip for this node based on the inputed event.
:param event | <QHoverEvent>
|
def _remove_by_pk(self, key, flush=True):
"""Retrieve value from store.
:param key: Key
"""
try:
del self.store[key]
except Exception as error:
pass
if flush:
self.flush()
|
Retrieve value from store.
:param key: Key
|
def get_serializer_context(self):
    """Adds ``election_date`` (the ``date`` URL kwarg) to serializer context."""
    context = super(SpecialMixin, self).get_serializer_context()
    context['election_date'] = self.kwargs['date']
    return context
|
Adds ``election_day`` to serializer context.
|
def _init_map(self):
    """Have to call these all separately because they are "end" classes,
    with no super() in them. Non-cooperative."""
    # Each record class initializes its own portion of the map; since none
    # of them chain to super(), every one must be invoked explicitly.
    ItemTextsFormRecord._init_map(self)
    ItemFilesFormRecord._init_map(self)
    edXBaseFormRecord._init_map(self)
    IRTItemFormRecord._init_map(self)
    TimeValueFormRecord._init_map(self)
    ProvenanceFormRecord._init_map(self)
    super(edXItemFormRecord, self)._init_map()
|
Have to call these all separately because they are "end" classes,
with no super() in them. Non-cooperative.
|
def partial_match(self, path, filter_path):
    '''Partially match a path and a filter_path with wildcards.

    Returns True when the leading components of `path` match the
    corresponding components of `filter_path`; used for walking through
    directories with multiple-level wildcards.
    '''
    # An empty path or filter matches everything.
    if not path or not filter_path:
        return True

    # trailing slash normalization
    if path[-1] == PATH_SEP:
        path = path[:-1]
    if filter_path[-1] == PATH_SEP:
        filter_path += '*'

    path_parts = path.split(PATH_SEP)
    filter_parts = filter_path.split(PATH_SEP)

    # In recursive mode the path may be deeper than the filter; otherwise
    # it must not have more components than the filter does.
    depth = min(len(path_parts), len(filter_parts))
    head_matches = fnmatch.fnmatch(PATH_SEP.join(path_parts[:depth]),
                                   PATH_SEP.join(filter_parts[:depth]))
    return head_matches and (self.opt.recursive or len(path_parts) <= len(filter_parts))
|
Partially match a path and a filter_path with wildcards.
This function will return True if this path partially match a filter path.
This is used for walking through directories with multiple level wildcard.
|
def xpath(self, expression):
    """Evaluate an XPath *expression* against ``self.tree`` using the
    module-level ``namespaces`` mapping."""
    # Reading a module-level name needs no 'global' declaration, so the
    # original's 'global namespaces' statement is omitted.
    return self.tree.xpath(expression, namespaces=namespaces)
|
Executes an xpath expression using the correct namespaces
|
def is_executable(exe_name):
    """Check that *exe_name* refers to an executable file.

    The name may be a path, or a bare command name, in which case every
    directory on ``$PATH`` is searched.

    Parameters
    ----------
    exe_name : str
        Executable name or path.

    Returns
    -------
    bool
        ``True`` if the executable was found.

    Raises
    ------
    TypeError
        If ``exe_name`` is not a string.
    IOError
        If no matching executable exists on this system.
    """
    if not isinstance(exe_name, str):
        raise TypeError('Executable name must be a string.')

    def is_exe(fpath):
        # A valid executable is a regular file with the execute bit set.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(exe_name)
    if not fpath:
        # Bare command name: search every directory on $PATH.
        res = any(is_exe(os.path.join(path, exe_name))
                  for path in os.environ["PATH"].split(os.pathsep))
    else:
        res = is_exe(exe_name)
    if not res:
        raise IOError('{} does not appear to be a valid executable on this '
                      'system.'.format(exe_name))
    # The original fell off the end and returned None despite documenting a
    # bool result; return the documented value.
    return res
|
Check if Input is Executable
This method checks if the input executable exists.
Parameters
----------
exe_name : str
Executable name
Returns
-------
Bool result of test
Raises
------
TypeError
For invalid input type
|
def _set_data(self):
    """
    This method will be called to set Series data.

    If ``self.data`` is present and no explicit X/Y variables are bound
    yet, default :class:`XVariable`/:class:`YVariable` instances are
    created and contributed to this series; otherwise both ``_x`` and
    ``_y`` must already exist and carry points.

    :raises exception.MissingAxisException: if ``_x`` or ``_y`` is missing.
    :raises exception.MissingDataException: if an axis has no points.
    """
    if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
        _x = XVariable()
        _y = YVariable()
        # contribute_to_class presumably binds the variable onto this
        # series as self._x / self._y -- TODO confirm against XVariable.
        _x.contribute_to_class(self, 'X', self.data)
        _y.contribute_to_class(self, 'Y', self.data)
        # NOTE(review): on Python 3, zip() is a lazy iterator; confirm
        # consumers of self['data'] materialize or iterate it only once.
        self['data'] = zip(self._x.points, self._y.points)
    else:
        for axis in ('_x', '_y'):
            axis_obj = getattr(self, axis, False)
            if not axis_obj:
                raise exception.MissingAxisException("%s missing" % axis)
            if not getattr(axis_obj, 'points', False):
                raise exception.MissingDataException()
        self['data'] = zip(self._x.points, self._y.points)
|
This method will be called to set Series data
|
def save_to_file(self, filename: str) -> ConfigFile:
    """
    Build a plain :class:`ConfigFile` backed by *filename*.

    The normal (non-networked) load/dump hooks stored on this instance are
    passed through to the new object, along with the safe-load flag.
    """
    hooks = self.normal_class_hook
    return ConfigFile(fd=filename,
                      load_hook=hooks[0],
                      dump_hook=hooks[1],
                      safe_load=self.safe_load)
|
This converts the NetworkedConfigFile into a normal ConfigFile object.
This requires the normal class hooks to be provided.
|
def get_view(self, table):
    """Return the SQL query behind *table* if it is a view.

    :param table: The table containing the view.
    :type table: BQTable
    :returns: The view's SQL string, or ``None`` when the table does not
        exist (HTTP 404) or is not a view.
    """
    request = self.client.tables().get(projectId=table.project_id,
                                       datasetId=table.dataset_id,
                                       tableId=table.table_id)
    try:
        response = request.execute()
    except http.HttpError as ex:
        # A missing table is an expected condition, not an error.
        if ex.resp.status == 404:
            return None
        raise
    if 'view' not in response:
        return None
    return response['view']['query']
|
Returns the SQL query for a view, or None if it doesn't exist or is not a view.
:param table: The table containing the view.
:type table: BQTable
|
def get_http_info(self, request):
    """
    Pick the payload retriever based on ``request.mimetype`` and delegate
    to the retriever-aware implementation.
    """
    retriever = (self.get_json_data
                 if self.is_json_type(request.mimetype)
                 else self.get_form_data)
    return self.get_http_info_with_retriever(request, retriever)
|
Determine how to retrieve actual data by using request.mimetype.
|
def tplds(self):
    """
    :return: dictionary {id: object} of all current tplds.
    :rtype: dict of (int, xenamanager.xena_port.XenaTpld)
    """
    # TPLDs are dynamic, so drop any cached objects and rebuild the list
    # from the port's current 'pr_tplds' attribute.
    self.parent.del_objects_by_type('tpld')
    for tpld_index in self.get_attribute('pr_tplds').split():
        XenaTpld(parent=self, index='{}/{}'.format(self.index, tpld_index))
    return dict((t.id, t) for t in self.get_objects_by_type('tpld'))
|
:return: dictionary {id: object} of all current tplds.
:rtype: dict of (int, xenamanager.xena_port.XenaTpld)
|
def error_wrapper(error, errorClass):
    """
    We want to see all error messages from cloud services. Amazon's EC2 says
    that their errors are accompanied either by a 400-series or 500-series HTTP
    response code. As such, the first thing we want to do is check to see if
    the error is in that range. If it is, we then need to see if the error
    message is an EC2 one.

    In the event that an error is not a Twisted web error nor an EC2 one, the
    original exception is raised.

    :param error: A Twisted Failure wrapping the original exception.
    :param errorClass: Class used to re-wrap the response payload when it
        parses as a service (EC2-style) error.
    """
    http_status = 0
    if error.check(TwistedWebError):
        xml_payload = error.value.response
        if error.value.status:
            http_status = int(error.value.status)
    else:
        # Not a web error at all: re-raise the original exception.
        error.raiseException()
    if http_status >= 400:
        if not xml_payload:
            # 4xx/5xx with an empty body: nothing to parse, re-raise.
            error.raiseException()
        try:
            # Try to interpret the payload as a service-specific error.
            fallback_error = errorClass(
                xml_payload, error.value.status, str(error.value),
                error.value.response)
        except (ParseError, AWSResponseParseError):
            # Payload was not parseable as a service error; fall back to a
            # plain web error carrying the standard HTTP reason phrase.
            error_message = http.RESPONSES.get(http_status)
            fallback_error = TwistedWebError(
                http_status, error_message, error.value.response)
        raise fallback_error
    elif 200 <= http_status < 300:
        # Success status delivered through the error path: surface the body.
        return str(error.value)
    else:
        error.raiseException()
|
We want to see all error messages from cloud services. Amazon's EC2 says
that their errors are accompanied either by a 400-series or 500-series HTTP
response code. As such, the first thing we want to do is check to see if
the error is in that range. If it is, we then need to see if the error
message is an EC2 one.
In the event that an error is not a Twisted web error nor an EC2 one, the
original exception is raised.
|
def _addr_in_exec_memory_regions(self, addr):
"""
Test if the address belongs to an executable memory region.
:param int addr: The address to test
:return: True if the address belongs to an exectubale memory region, False otherwise
:rtype: bool
"""
for start, end in self._exec_mem_regions:
if start <= addr < end:
return True
return False
|
Test if the address belongs to an executable memory region.
:param int addr: The address to test
:return: True if the address belongs to an exectubale memory region, False otherwise
:rtype: bool
|
def send(self, command, _id=None, result=None, frames=None, threads=None,
         error_messages=None, warning_messages=None, info_messages=None,
         exception=None):
    """ Build a message from parameters and send it to debugger.

    :param command: The command sent to the debugger client.
    :type command: str

    :param _id: Unique id of the sent message. Right now, it's always `None`
        for messages by debugger to client.
    :type _id: int

    :param result: Used to send `exit_code` and updated `executionStatus`
        to debugger client.
    :type result: dict

    :param frames: contains the complete stack frames when debugger sends
        the `programBreak` message.
    :type frames: list

    :param threads: optional thread information added to the payload only
        when provided.
    :type threads: list or None

    :param error_messages: A list of error messages the debugger client must
        display to the user.
    :type error_messages: list of str

    :param warning_messages: A list of warning messages the debugger client
        must display to the user.
    :type warning_messages: list of str

    :param info_messages: A list of info messages the debugger client must
        display to the user.
    :type info_messages: list of str

    :param exception: If debugger encounter an exception, this dict contains
        2 keys: `type` and `info` (the later is the message).
    :type exception: dict

    :raises IKPdbConnectionError: when no connection is available.
    """
    # Normalize here instead of using mutable default arguments ({} / []),
    # which are shared across calls and a classic Python pitfall.
    result = {} if result is None else result
    frames = [] if frames is None else frames
    error_messages = [] if error_messages is None else error_messages
    warning_messages = [] if warning_messages is None else warning_messages
    info_messages = [] if info_messages is None else info_messages
    with self._connection_lock:
        payload = {
            '_id': _id,
            'command': command,
            'result': result,
            'commandExecStatus': 'ok',
            'frames': frames,
            'info_messages': info_messages,
            'warning_messages': warning_messages,
            'error_messages': error_messages,
            'exception': exception
        }
        if threads:
            payload['threads'] = threads
        msg = self.encode(payload)
        if self._connection:
            msg_bytes = bytearray(msg, 'utf-8')
            send_bytes_count = self._connection.sendall(msg_bytes)
            self.log_sent(msg)
            return send_bytes_count
        raise IKPdbConnectionError("Connection lost!")
|
Build a message from parameters and send it to debugger.
:param command: The command sent to the debugger client.
:type command: str
:param _id: Unique id of the sent message. Right now, it's always `None`
for messages by debugger to client.
:type _id: int
:param result: Used to send `exit_code` and updated `executionStatus`
to debugger client.
:type result: dict
:param frames: contains the complete stack frames when debugger sends
the `programBreak` message.
:type frames: list
:param error_messages: A list of error messages the debugger client must
display to the user.
:type error_messages: list of str
:param warning_messages: A list of warning messages the debugger client
must display to the user.
:type warning_messages: list of str
:param info_messages: A list of info messages the debugger client must
display to the user.
:type info_messages: list of str
:param exception: If debugger encounter an exception, this dict contains
2 keys: `type` and `info` (the later is the message).
:type exception: dict
|
def crab_request(client, action, *args):
    '''
    Utility function that helps making requests to the CRAB service.

    :param client: A :class:`suds.client.Client` for the CRAB service.
    :param string action: Which method to call, eg. `ListGewesten`
    :returns: Result of the SOAP call.

    .. versionadded:: 0.3.0
    '''
    log.debug('Calling %s on CRAB service.', action)
    service_method = getattr(client.service, action)
    return service_method(*args)
|
Utility function that helps making requests to the CRAB service.
:param client: A :class:`suds.client.Client` for the CRAB service.
:param string action: Which method to call, eg. `ListGewesten`
:returns: Result of the SOAP call.
.. versionadded:: 0.3.0
|
def hms_string(secs):
    """Format *secs* as a zero-padded H:M:S string, e.g. ``02:00:45``."""
    def two_digits(n):
        # Pad single-digit components with a leading zero.
        return ('0' + str(n)) if n < 10 else str(n)
    hours, minutes, seconds = hms(secs)[:3]
    return ':'.join([two_digits(hours), two_digits(minutes),
                     two_digits(seconds)])
|
return hours,minutes and seconds string, e.g. 02:00:45
|
def display(url):
    """Fetch a FITS file from *url* with curl and display it in ds9.

    The URL is shell-quoted before being interpolated into the command to
    prevent shell injection through a crafted URL.
    """
    import os
    import shlex
    # NOTE(review): the curl user 'jkavelaars' is hard-coded; consider
    # moving credentials into configuration.
    # shlex.quote() replaces the original bare '%s' single-quoting, which
    # allowed command injection via quotes embedded in the URL.
    oscmd = ("curl --silent -g --fail --max-time 1800 "
             "--user jkavelaars %s" % shlex.quote(url))
    logger.debug(oscmd)
    os.system(oscmd + ' | xpaset ds9 fits')
    return
|
Display a file in ds9
|
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.

    Parameters
    ----------
    dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.
    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.
    feature : string optional
        Name of the column containing the input drawings. 'None' (the default)
        indicates the column in `dataset` named "drawing" should be used as the
        feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.
    validation_set : SFrame optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.
    warm_start : string optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset. To disable warm start, pass in None to this
        argument. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in the
        Quick, Draw! dataset.
        None: No Warm Start
    batch_size: int optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.
    max_iterations : int optional
        The maximum number of allowed passes through the data. More passes over
        the data can result in a more accurately trained model.
    verbose : bool optional
        If True, print progress updates and model details.

    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.

    See Also
    --------
    DrawingClassifier

    Examples
    --------
    .. sourcecode:: python

        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)

        # Make predictions on the training set and as column to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    from .._mxnet import _mxnet_utils

    start_time = _time.time()
    accepted_values_for_warm_start = ["auto", "quickdraw_245_v0", None]

    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576

    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)

    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)

    # Validate hyperparameters before any expensive data preparation.
    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    # Stroke-based drawings must be rasterized into bitmaps before training.
    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0

    # Map class labels to contiguous integer indices for training.
    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}

    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")

    # Resolve 'validation_set' into a (possibly empty) validation SFrame.
    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print ( "PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                            " You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'."
            + validation_set_corrective_string)

    # One multi-pass iterator for training, plus single-pass loaders used
    # only to compute accuracies after each iteration.
    train_loader = _SFrameClassifierIter(dataset, batch_size,
                 feature_column=feature,
                 target_column=target,
                 class_to_index=class_to_index,
                 load_labels=True,
                 shuffle=True,
                 iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                 feature_column=feature,
                 target_column=target,
                 class_to_index=class_to_index,
                 load_labels=True,
                 shuffle=True,
                 iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                 feature_column=feature,
                 target_column=target,
                 class_to_index=class_to_index,
                 load_labels=True,
                 shuffle=True,
                 iterations=1)
    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    # Build the model and initialize parameters on the available device(s).
    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes = len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        if type(warm_start) is not str:
            raise TypeError("'warm_start' must be a string or None. "
                + "'warm_start' can take in the following values: "
                + str(accepted_values_for_warm_start))
        if warm_start not in accepted_values_for_warm_start:
            raise _ToolkitError("Unrecognized value for 'warm_start': "
                + warm_start + ". 'warm_start' can take in the following "
                + "values: " + str(accepted_values_for_warm_start))
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        # allow_missing=True: the final classification layer may differ
        # from the pretrained checkpoint.
        model.load_params(pretrained_model_params_path,
            ctx=ctx,
            allow_missing=True)
    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        # Drop iterator padding (the last, partially-filled batch) and
        # shard the data/labels across the available contexts.
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data  = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        # Run one full pass over the loader and accumulate into the metric.
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None: continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    # Main training loop: one step per batch until the multi-pass loader
    # (max_iterations passes) is exhausted.
    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()

        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])
        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        if train_batch.iteration > iteration:
            # A full pass over the data just completed: report metrics.

            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = {  "iteration": iteration,
                            "train_loss": float(train_loss),
                            "train_accuracy": train_accuracy.get()[1],
                            "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
|
Create a :class:`DrawingClassifier` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``target``
parameters will be extracted for training the drawing classifier.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type.
feature : string optional
Name of the column containing the input drawings. 'None' (the default)
indicates the column in `dataset` named "drawing" should be used as the
feature.
The feature column can contain both bitmap-based drawings as well as
stroke-based drawings. Bitmap-based drawing input can be a grayscale
tc.Image of any size.
Stroke-based drawing input must be in the following format:
Every drawing must be represented by a list of strokes, where each
stroke must be a list of points in the order in which they were drawn
on the canvas.
Each point must be a dictionary with two keys, "x" and "y", and their
respective values must be numerical, i.e. either integer or float.
validation_set : SFrame optional
A dataset for monitoring the model's generalization performance.
The format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
warm_start : string optional
A string to denote which pretrained model to use. Set to "auto"
by default which uses a model trained on 245 of the 345 classes in the
Quick, Draw! dataset. To disable warm start, pass in None to this
argument. Here is a list of all the pretrained models that
can be passed in as this argument:
"auto": Uses quickdraw_245_v0
"quickdraw_245_v0": Uses a model trained on 245 of the 345 classes in the
Quick, Draw! dataset.
None: No Warm Start
batch_size: int optional
The number of drawings per training step. If not set, a default
value of 256 will be used. If you are getting memory errors,
try decreasing this value. If you have a powerful computer, increasing
this value may improve performance.
max_iterations : int optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model.
verbose : bool optional
If True, print progress updates and model details.
Returns
-------
out : DrawingClassifier
A trained :class:`DrawingClassifier` model.
See Also
--------
DrawingClassifier
Examples
--------
.. sourcecode:: python
# Train a drawing classifier model
>>> model = turicreate.drawing_classifier.create(data)
# Make predictions on the training set and as column to the SFrame
>>> data['predictions'] = model.predict(data)
|
def sort(self):
    """
    Return an iterable of nodes, topologically sorted to correctly import
    dependencies before leaf nodes.

    :raises CyclicGraphError: if a full pass removes no leaf node, which
        means the remaining graph contains a cycle.
    """
    while self.nodes:
        iterated = False
        for node in self.leaf_nodes():
            iterated = True
            self.prune_node(node)
            yield node
        if not iterated:
            raise CyclicGraphError("Sorting has found a cyclic graph.")
|
Return an iterable of nodes, topologically sorted to correctly import
dependencies before leaf nodes.
|
def describe_arguments(func):
    """
    Analyze a function's signature and yield ``(args, kwargs)`` tuples
    suitable for passing to an argparse parser's add_argument() method.

    Parameters with defaults become ``--name`` options carrying their
    default; plain positional parameters become positional arguments;
    ``*args`` becomes a ``nargs='*'`` argument.
    """
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is
    # the drop-in replacement for args/defaults/varargs introspection.
    argspec = inspect.getfullargspec(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    if argspec.defaults:
        positional_args = argspec.args[:-len(argspec.defaults)]
        keyword_names = argspec.args[-len(argspec.defaults):]
        for arg, default in zip(keyword_names, argspec.defaults):
            yield ('--{}'.format(arg),), {'default': default}
    else:
        positional_args = argspec.args
    for arg in positional_args:
        yield (arg,), {}
    if argspec.varargs:
        yield (argspec.varargs,), {'nargs': '*'}
|
Analyze a function's signature and return a data structure suitable for
passing in as arguments to an argparse parser's add_argument() method.
|
def approve(self, creator):
    """
    Mark this granular permission request as approved.

    Sets ``approved`` to True for the requested action on the object
    instance, records *creator* as the approver, and persists the change.
    """
    self.creator = creator
    self.approved = True
    self.save()
|
Approve granular permission request setting a Permission entry as
approved=True for a specific action from an user on an object instance.
|
def identical_blocks(self):
    """
    :returns: A list of block matches which appear to be identical
    """
    return [(left, right)
            for left, right in self._block_matches
            if self.blocks_probably_identical(left, right)]
|
:returns: A list of block matches which appear to be identical
|
def call_decorator(cls, func):
    """Class function that MUST be specified as decorator
    to the `__call__` method overridden by sub-classes.

    Logs any exception raised by the wrapped call, then re-raises it unless
    the instance opts in to swallowing child exceptions.
    """
    @wraps(func)
    def _wrapped(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception:
            self.logger.exception('While executing benchmark')
            if not (self.catch_child_exception or False):
                raise
    return _wrapped
|
class function that MUST be specified as decorator
to the `__call__` method overriden by sub-classes.
|
def hierarchical_key(self, s):
    """
    Parse text as some kind of hierarchical key.

    Return a subclass of :class:`Key <pycoin.key.Key>`, or None when no
    parser recognizes the text.
    """
    text = parseable_str(s)
    parsers = (self.bip32_seed, self.bip32_prv, self.bip32_pub,
               self.electrum_seed, self.electrum_prv, self.electrum_pub)
    for parse in parsers:
        key = parse(text)
        if key:
            return key
    return None
|
Parse text as some kind of hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
|
def print_dataset_summary(self):
    """
    Prints information about the BIDS data and the files currently selected.
    """
    print('--- DATASET INFORMATION ---')
    print('--- Subjects ---')
    if self.raw_data_exists:
        if self.BIDS.get_subjects():
            print('Number of subjects (in dataset): ' +
                  str(len(self.BIDS.get_subjects())))
            print('Subjects (in dataset): ' +
                  ', '.join(self.BIDS.get_subjects()))
        else:
            print('NO SUBJECTS FOUND (is the BIDS directory specified correctly?)')
    print('Number of subjects (selected): ' +
          str(len(self.bids_tags['sub'])))
    print('Subjects (selected): ' + ', '.join(self.bids_tags['sub']))
    if isinstance(self.bad_subjects, list):
        print('Bad subjects: ' + ', '.join(self.bad_subjects))
    else:
        print('Bad subjects: 0')
    print('--- Tasks ---')
    if self.raw_data_exists:
        if self.BIDS.get_tasks():
            print('Number of tasks (in dataset): ' +
                  str(len(self.BIDS.get_tasks())))
            print('Tasks (in dataset): ' + ', '.join(self.BIDS.get_tasks()))
    if 'task' in self.bids_tags:
        print('Number of tasks (selected): ' +
              str(len(self.bids_tags['task'])))
        print('Tasks (selected): ' + ', '.join(self.bids_tags['task']))
    else:
        print('No task names found')
    print('--- Runs ---')
    if self.raw_data_exists:
        if self.BIDS.get_runs():
            print('Number of runs (in dataset): ' +
                  str(len(self.BIDS.get_runs())))
            print('Runs (in dataset): ' + ', '.join(self.BIDS.get_runs()))
    if 'run' in self.bids_tags:
        print('Number of runs (selected): ' +
              str(len(self.bids_tags['run'])))
        # BUGFIX: label previously read "Rubs (selected)".
        print('Runs (selected): ' + ', '.join(self.bids_tags['run']))
    else:
        print('No run names found')
    print('--- Sessions ---')
    if self.raw_data_exists:
        if self.BIDS.get_sessions():
            # BUGFIX: label previously read "Number of runs (in dataset)".
            print('Number of sessions (in dataset): ' +
                  str(len(self.BIDS.get_sessions())))
            print('Sessions (in dataset): ' +
                  ', '.join(self.BIDS.get_sessions()))
    if 'ses' in self.bids_tags:
        print('Number of sessions (selected): ' +
              str(len(self.bids_tags['ses'])))
        print('Sessions (selected): ' + ', '.join(self.bids_tags['ses']))
    else:
        print('No session names found')
    print('--- PREPROCESSED DATA (Pipelines/Derivatives) ---')
    if not self.pipeline:
        print('Derivative pipeline not set. To set, run TN.set_pipeline()')
    else:
        print('Pipeline: ' + self.pipeline)
    if self.pipeline_subdir:
        print('Pipeline subdirectories: ' + self.pipeline_subdir)
    selected_files = self.get_selected_files(quiet=1)
    if selected_files:
        print('--- SELECTED DATA ---')
        # BUGFIX: label previously read "Numnber of selected files".
        print('Number of selected files: ' + str(len(selected_files)))
        print('\n - '.join(selected_files))
|
Prints information about the BIDS data and the files currently selected.
|
def try_lock(self, key, ttl=-1, timeout=0):
    """
    Tries to acquire the lock for the specified key. When the lock is not available,

    * If timeout is not provided, the current thread doesn't wait and returns ``false`` immediately.
    * If a timeout is provided, the current thread becomes disabled for thread scheduling purposes and lies
      dormant until one of the followings happens:

        * the lock is acquired by the current thread, or
        * the specified waiting time elapses.

    If ttl is provided, lock will be released after this time elapses.

    :param key: (object), key to lock in this map.
    :param ttl: (int), time in seconds to wait before releasing the lock (optional).
    :param timeout: (int), maximum time in seconds to wait for the lock (optional).
    :return: (bool), ``true`` if the lock was acquired and otherwise, ``false``.
    """
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    # Uses the maximum invocation timeout so the server-side lock wait
    # (governed by `timeout`) is not cut short by the client invocation.
    # ttl/timeout are converted from seconds to milliseconds on the wire.
    return self._encode_invoke_on_key(map_try_lock_codec, key_data, invocation_timeout=MAX_SIZE, key=key_data,
                                      thread_id=thread_id(), lease=to_millis(ttl), timeout=to_millis(timeout),
                                      reference_id=self.reference_id_generator.get_and_increment())
|
Tries to acquire the lock for the specified key. When the lock is not available,
* If timeout is not provided, the current thread doesn't wait and returns ``false`` immediately.
* If a timeout is provided, the current thread becomes disabled for thread scheduling purposes and lies
dormant until one of the followings happens:
* the lock is acquired by the current thread, or
* the specified waiting time elapses.
If ttl is provided, lock will be released after this time elapses.
:param key: (object), key to lock in this map.
:param ttl: (int), time in seconds to wait before releasing the lock (optional).
:param timeout: (int), maximum time in seconds to wait for the lock (optional).
:return: (bool), ``true`` if the lock was acquired and otherwise, ``false``.
|
def moma(self, wt_fluxes):
    """Minimize the redistribution of fluxes using Euclidean distance.

    Minimizing the redistribution of fluxes using a quadratic objective
    function. The distance is minimized by minimizing the sum of
    (wild type - knockout)^2.

    Args:
        wt_fluxes: Dictionary of all the wild type fluxes that will be
            used to find a close MOMA solution. Fluxes can be experimental
            or calculated using :meth: get_fba_flux(objective).
    """
    reactions = set(self._adjustment_reactions())
    v = self._v

    obj_expr = 0
    for f_reaction, f_value in iteritems(wt_fluxes):
        if f_reaction in reactions:
            # Minimize the Euclidean distance between the two vectors
            obj_expr += (f_value - v[f_reaction])**2

    self._prob.set_objective(obj_expr)
    self._solve(lp.ObjectiveSense.Minimize)
|
Minimize the redistribution of fluxes using Euclidean distance.
Minimizing the redistribution of fluxes using a quadratic objective
function. The distance is minimized by minimizing the sum of
(wild type - knockout)^2.
Args:
wt_fluxes: Dictionary of all the wild type fluxes that will be
used to find a close MOMA solution. Fluxes can be experimental
or calculated using :meth: get_fba_flux(objective).
|
def outer_id(self, value):
    """The outer_id property.

    Args:
        value (int). the property value.
    """
    # Storing a value equal to the default is redundant, so drop the entry
    # instead of keeping a duplicate of the default.
    if value != self._defaults['outerId'] or 'outerId' not in self._values:
        self._values['outerId'] = value
    else:
        del self._values['outerId']
|
The outer_id property.
Args:
value (int). the property value.
|
def _fill_scope_refs(name, scope):
"""Put referenced name in 'ref' dictionary of a scope.
Walks up the scope tree and adds the name to 'ref' of every scope
up in the tree until a scope that defines referenced name is reached.
"""
symbol = scope.resolve(name)
if symbol is None:
return
orig_scope = symbol.scope
scope.refs[name] = orig_scope
while scope is not orig_scope:
scope = scope.get_enclosing_scope()
scope.refs[name] = orig_scope
|
Put referenced name in 'ref' dictionary of a scope.
Walks up the scope tree and adds the name to 'ref' of every scope
up in the tree until a scope that defines referenced name is reached.
|
def aggregate(self, query):
    """
    Issue an aggregation query.

    ### Parameters

    **query**: This can be either an `AggregateRequest`, or a `Cursor`

    An `AggregateResult` object is returned. You can access the rows from its
    `rows` property, which will always yield the rows of the result.

    Raises:
        ValueError: If `query` is neither an `AggregateRequest` nor a
            `Cursor`.
    """
    if isinstance(query, AggregateRequest):
        has_schema = query._with_schema
        has_cursor = bool(query._cursor)
        cmd = [self.AGGREGATE_CMD, self.index_name] + query.build_args()
    elif isinstance(query, Cursor):
        has_schema = False
        has_cursor = True
        cmd = [self.CURSOR_CMD, 'READ', self.index_name] + query.build_args()
    else:
        raise ValueError('Bad query', query)
    raw = self.redis.execute_command(*cmd)
    if has_cursor:
        if isinstance(query, Cursor):
            # Continuing an existing cursor: update its id in place.
            query.cid = raw[1]
            cursor = query
        else:
            cursor = Cursor(raw[1])
        raw = raw[0]
    else:
        cursor = None
    # BUGFIX: use the precomputed `has_schema` flag here. The old code read
    # `query._with_schema`, which raised AttributeError whenever `query`
    # was a `Cursor` (Cursor objects have no `_with_schema` attribute).
    if has_schema:
        schema = raw[0]
        rows = raw[2:]
    else:
        schema = None
        rows = raw[1:]
    res = AggregateResult(rows, cursor, schema)
    return res
|
Issue an aggregation query
### Parameters
**query**: This can be either an `AggregateRequest`, or a `Cursor`
An `AggregateResult` object is returned. You can access the rows from its
`rows` property, which will always yield the rows of the result
|
def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None,
                         width=8, height=None, dpi=300):
    """
    Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
    examples. Makes it easier to create plots with different axes.

    Args:
        x (np.ndarray/list): Data for x-axis.
        y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
            be interpreted as a {label: sequence}.
        y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
            be interpreted as a {label: sequence}.
        xlabel (str): If not None, this will be the label for the x-axis.
        y1label (str): If not None, this will be the label for the y1-axis.
        y2label (str): If not None, this will be the label for the y2-axis.
        width (float): Width of plot in inches. Defaults to 8in.
        height (float): Height of plot in inches. Defaults to width * golden
            ratio.
        dpi (int): Sets dot per inch for figure. Defaults to 300.

    Returns:
        matplotlib.pyplot
    """
    import palettable.colorbrewer.diverging

    colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
    c1 = colors[0]
    c2 = colors[-1]

    golden_ratio = (math.sqrt(5) - 1) / 2
    if not height:
        height = int(width * golden_ratio)

    import matplotlib.pyplot as plt

    # BUGFIX: the original code unconditionally reset `width = 12` here,
    # silently ignoring the caller-supplied `width` and making the label and
    # tick sizes inconsistent with the height derived from `width` above.
    # The documented parameter is now honored.
    labelsize = int(width * 3)
    ticksize = int(width * 2.5)
    styles = ["-", "--", "-.", "."]

    fig, ax1 = plt.subplots()
    fig.set_size_inches((width, height))
    if dpi:
        fig.set_dpi(dpi)

    if isinstance(y1, dict):
        for i, (k, v) in enumerate(y1.items()):
            ax1.plot(x, v, c=c1, marker='s', ls=styles[i % len(styles)],
                     label=k)
        ax1.legend(fontsize=labelsize)
    else:
        ax1.plot(x, y1, c=c1, marker='s', ls='-')

    if xlabel:
        ax1.set_xlabel(xlabel, fontsize=labelsize)
    if y1label:
        # Make the y-axis label, ticks and tick labels match the line color.
        ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)

    ax1.tick_params('x', labelsize=ticksize)
    ax1.tick_params('y', colors=c1, labelsize=ticksize)

    ax2 = ax1.twinx()
    if isinstance(y2, dict):
        for i, (k, v) in enumerate(y2.items()):
            ax2.plot(x, v, c=c2, marker='o', ls=styles[i % len(styles)],
                     label=k)
        ax2.legend(fontsize=labelsize)
    else:
        ax2.plot(x, y2, c=c2, marker='o', ls='-')

    if y2label:
        # Make the y-axis label, ticks and tick labels match the line color.
        ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)

    ax2.tick_params('y', colors=c2, labelsize=ticksize)
    return plt
|
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot
|
def get_table_location(self, database_name, table_name):
    """
    Get the physical storage location of a table.

    :param database_name: Name of hive database (schema) the table belongs to
    :type database_name: str
    :param table_name: Name of hive table
    :type table_name: str
    :return: str
    """
    descriptor = self.get_table(database_name, table_name)['StorageDescriptor']
    return descriptor['Location']
|
Get the physical location of the table
:param database_name: Name of hive database (schema) @table belongs to
:type database_name: str
:param table_name: Name of hive table
:type table_name: str
:return: str
|
def serializeContainers(self):
    """Serializes the current view of open video grids (i.e. the view)

    Returns a dict with two lists of per-container serializations:
    ``container_list`` for the video-grid containers and
    ``mvision_container_list`` for the machine-vision containers. Each
    element is whatever the container's own ``serialize()`` returns
    (typically a dict with classname, kwargs, window geometry and
    streams).
    """
    serialized = []
    for container in self.containers:
        print("gui: serialize containers : container=", container)
        serialized.append(container.serialize())
    serialized_mvision = [c.serialize() for c in self.mvision_containers]
    return {"container_list": serialized,
            "mvision_container_list": serialized_mvision}
|
Serializes the current view of open video grids (i.e. the view)
|
def hashable(x):
    """
    Return a hashable version of the given object x, with lists and
    dictionaries converted to tuples. Allows mutable objects to be
    used as a lookup key in cases where the object has not actually
    been mutated. Lookup will fail (appropriately) in cases where some
    part of the object has changed. Does not (currently) recursively
    replace mutable subobjects.
    """
    # BUGFIX: the ABCs must come from collections.abc -- the aliases on the
    # top-level `collections` module were deprecated since Python 3.3 and
    # removed in Python 3.10 (AttributeError at runtime).
    # Local import keeps this fix self-contained within the function.
    from collections import abc
    if isinstance(x, abc.MutableSequence):
        return tuple(x)
    elif isinstance(x, abc.MutableMapping):
        # dict.items() already yields (key, value) pairs.
        return tuple(x.items())
    else:
        return x
|
Return a hashable version of the given object x, with lists and
dictionaries converted to tuples. Allows mutable objects to be
used as a lookup key in cases where the object has not actually
been mutated. Lookup will fail (appropriately) in cases where some
part of the object has changed. Does not (currently) recursively
replace mutable subobjects.
|
def replace(self, to_replace, value=_NoValue, subset=None):
    """Returns a new :class:`DataFrame` replacing a value with another value.

    :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
    aliases of each other.

    Values to_replace and value must have the same type and can only be numerics, booleans,
    or strings. Value can have None. When replacing, the new value will be cast
    to the type of the existing column.
    For numeric replacements all values to be replaced should have unique
    floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
    and arbitrary replacement will be used.

    :param to_replace: bool, int, long, float, string, list or dict.
        Value to be replaced.
        If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
        must be a mapping between a value and a replacement.
    :param value: bool, int, long, float, string, list or None.
        The replacement value must be a bool, int, long, float, string or None. If `value` is a
        list, `value` should be of the same length and type as `to_replace`.
        If `value` is a scalar and `to_replace` is a sequence, then `value` is
        used as a replacement for each item in `to_replace`.
    :param subset: optional list of column names to consider.
        Columns specified in subset that do not have matching data type are ignored.
        For example, if `value` is a string, and subset contains a non-string column,
        then the non-string column is simply ignored.

    >>> df4.na.replace(10, 20).show()
    +----+------+-----+
    | age|height| name|
    +----+------+-----+
    |  20|    80|Alice|
    |   5|  null|  Bob|
    |null|  null|  Tom|
    |null|  null| null|
    +----+------+-----+

    >>> df4.na.replace('Alice', None).show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|null|
    |   5|  null| Bob|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+

    >>> df4.na.replace({'Alice': None}).show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|null|
    |   5|  null| Bob|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+

    >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|   A|
    |   5|  null|   B|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+
    """
    # `value` is only optional for dict-style replacements, where the dict
    # itself carries the replacement values.
    if value is _NoValue:
        if isinstance(to_replace, dict):
            value = None
        else:
            raise TypeError("value argument is required when to_replace is not a dictionary.")

    # Helper functions
    def all_of(types):
        """Given a type or tuple of types and a sequence of xs
        check if each x is instance of type(s)

        >>> all_of(bool)([True, False])
        True
        >>> all_of(basestring)(["a", 1])
        False
        """
        def all_of_(xs):
            return all(isinstance(x, types) for x in xs)
        return all_of_

    # NOTE(review): `basestring` and `long` presumably come from pyspark's
    # Python 2/3 compatibility shims imported at module level -- confirm.
    all_of_bool = all_of(bool)
    all_of_str = all_of(basestring)
    all_of_numeric = all_of((float, int, long))

    # Validate input types
    valid_types = (bool, float, int, long, basestring, list, tuple)
    if not isinstance(to_replace, valid_types + (dict, )):
        raise ValueError(
            "to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
            "Got {0}".format(type(to_replace)))

    if not isinstance(value, valid_types) and value is not None \
            and not isinstance(to_replace, dict):
        raise ValueError("If to_replace is not a dict, value should be "
                         "a bool, float, int, long, string, list, tuple or None. "
                         "Got {0}".format(type(value)))

    # When both sides are sequences they are zipped pairwise below, so
    # their lengths must agree.
    if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
        if len(to_replace) != len(value):
            raise ValueError("to_replace and value lists should be of the same length. "
                             "Got {0} and {1}".format(len(to_replace), len(value)))

    if not (subset is None or isinstance(subset, (list, tuple, basestring))):
        raise ValueError("subset should be a list or tuple of column names, "
                         "column name or None. Got {0}".format(type(subset)))

    # Reshape input arguments if necessary
    if isinstance(to_replace, (float, int, long, basestring)):
        to_replace = [to_replace]

    if isinstance(to_replace, dict):
        rep_dict = to_replace
        if value is not None:
            warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
    else:
        # Broadcast a scalar `value` over every entry of `to_replace`.
        if isinstance(value, (float, int, long, basestring)) or value is None:
            value = [value for _ in range(len(to_replace))]
        rep_dict = dict(zip(to_replace, value))

    if isinstance(subset, basestring):
        subset = [subset]

    # Verify we were not passed in mixed type generics.
    if not any(all_of_type(rep_dict.keys())
               and all_of_type(x for x in rep_dict.values() if x is not None)
               for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
        raise ValueError("Mixed type replacements are not supported")

    # Delegate to the JVM-side replace; '*' means "all columns".
    if subset is None:
        return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
    else:
        return DataFrame(
            self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
|
Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. Value can have None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
and arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
:param value: bool, int, long, float, string, list or None.
The replacement value must be a bool, int, long, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
|
def _os_bootstrap():
    """
    Set up 'os' module replacement functions for use during import bootstrap.
    """
    # These module-level aliases are populated below; the real `os` module
    # cannot be imported yet at this stage of the bootstrap, so minimal
    # pure-Python stand-ins are built from the platform builtin module.
    global _os_stat, _os_getcwd, _os_environ, _os_listdir
    global _os_path_join, _os_path_dirname, _os_path_basename
    global _os_sep

    names = sys.builtin_module_names

    join = dirname = environ = listdir = basename = None
    mindirlen = 0
    # Only 'posix' and 'nt' os specific modules are supported.
    # 'dos', 'os2' and 'mac' (MacOS 9) are not supported.
    if 'posix' in names:
        from posix import stat, getcwd, environ, listdir
        sep = _os_sep = '/'
        mindirlen = 1
    elif 'nt' in names:
        from nt import stat, getcwd, environ, listdir
        sep = _os_sep = '\\'
        # NOTE(review): presumably 3 keeps the drive prefix (e.g. 'C:\\')
        # intact when dirname() truncates -- confirm.
        mindirlen = 3
    else:
        raise ImportError('no os specific module found')

    # join/dirname/basename are still None here; the guards leave room for
    # a platform branch above to supply its own implementation.
    if join is None:
        def join(a, b, sep=sep):
            if a == '':
                return b
            lastchar = a[-1:]
            if lastchar == '/' or lastchar == sep:
                return a + b
            return a + sep + b

    if dirname is None:
        def dirname(a, sep=sep, mindirlen=mindirlen):
            # Scan backwards for the last separator; keep the separator
            # itself only inside the minimal root prefix.
            for i in range(len(a) - 1, -1, -1):
                c = a[i]
                if c == '/' or c == sep:
                    if i < mindirlen:
                        return a[:i + 1]
                    return a[:i]
            return ''

    if basename is None:
        def basename(p):
            i = p.rfind(sep)
            if i == -1:
                return p
            else:
                return p[i + len(sep):]

    def _listdir(dir, cache={}):
        # since this function is only used by caseOk, it's fine to cache the
        # results and avoid reading the whole contents of a directory each time
        # we just want to check the case of a filename.
        if not dir in cache:
            cache[dir] = listdir(dir)
        return cache[dir]

    # Publish the bootstrap replacements at module level.
    _os_stat = stat
    _os_getcwd = getcwd
    _os_path_join = join
    _os_path_dirname = dirname
    _os_environ = environ
    _os_listdir = _listdir
    _os_path_basename = basename
|
Set up 'os' module replacement functions for use during import bootstrap.
|
def addSignal(self, s):
    """
    Adds a L{Signal} to the interface.

    If the signal's argument count is unset (-1), it is computed from the
    signal's signature. Registering a signal invalidates the cached XML
    representation of the interface.
    """
    if s.nargs == -1:
        # Derive the argument count from the marshalling signature.
        s.nargs = len(list(marshal.genCompleteTypes(s.sig)))
    self.signals[s.name] = s
    # Force the interface XML to be regenerated on next access.
    self._xml = None
|
Adds a L{Signal} to the interface
|
def _send(self, line):
    """
    Write a line of data to the server.

    The line is normalized to end with CRLF (a bare LF or a missing
    terminator is fixed up) before being written to the socket.

    Args:
        line -- A single line of data to write to the socket.
    """
    if not line.endswith('\r\n'):
        if line.endswith('\n'):
            logger.debug('Fixing bare LF before sending data to socket')
            line = line[0:-1] + '\r\n'
        else:
            logger.debug(
                'Fixing missing CRLF before sending data to socket')
            line = line + '\r\n'
    logger.debug('Client sent: ' + line.rstrip())
    # BUGFIX: socket.send() may transmit only part of the buffer and returns
    # the number of bytes actually sent; sendall() retries until every byte
    # has been written (or raises), which is what a line-oriented protocol
    # needs here.
    self._socket.sendall(line)
|
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
|
def save_weights(sess, output_path, conv_var_names=None, conv_transpose_var_names=None):
    """Save the weights of the trainable variables, each one in a different file in output_path.

    Args:
        sess: TensorFlow session used to evaluate the variables.
        output_path: Filename prefix for the per-variable output files.
        conv_var_names: Optional collection of conv variable names whose
            weights are transposed with perm [3, 0, 1, 2] before saving.
        conv_transpose_var_names: Optional collection of conv-transpose
            variable names transposed with perm [3, 1, 0, 2] before saving.
    """
    if not conv_var_names:
        conv_var_names = []
    if not conv_transpose_var_names:
        conv_transpose_var_names = []
    for var in tf.trainable_variables():
        # ':' and '/' are not filesystem-safe, so replace them in filenames.
        filename = '{}-{}'.format(output_path, var.name.replace(':', '-').replace('/', '-'))
        if var.name in conv_var_names:
            var = tf.transpose(var, perm=[3, 0, 1, 2])
        elif var.name in conv_transpose_var_names:
            var = tf.transpose(var, perm=[3, 1, 0, 2])
        value = sess.run(var)
        # BUGFIX: ndarray.tofile() writes raw binary data, so the target file
        # must be opened in binary mode ('wb'); text mode ('w') raises or
        # corrupts the output (e.g. newline translation on Windows).
        with open(filename, 'wb') as file_:
            value.tofile(file_)
|
Save the weights of the trainable variables, each one in a different file in output_path.
|
def _read_message(self):
""" Reads a single size-annotated message from the server """
size = int(self.buf.read_line().decode("utf-8"))
return self.buf.read(size).decode("utf-8")
|
Reads a single size-annotated message from the server
|
def current_user(self):
    """Returns the username of the current user.

    The server info (including the username taken from the
    'x-ausername' response header) is cached on ``self._serverInfo``;
    the server is only queried when no cached username is available.

    :rtype: str
    """
    if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
        response = self._session.get(self._get_url('serverInfo'),
                                     headers=self._options['headers'])
        info = json_loads(response)
        # JIRA reports the authenticated user in this response header;
        # absent header means no username (None).
        info['username'] = response.headers.get('x-ausername')
        # del info['self'] # this isn't really an addressable resource
        self._serverInfo = info
    return self._serverInfo['username']
|
Returns the username of the current user.
:rtype: str
|
def show_progress(self, message=None):
    """If we are in a progress scope, and no log messages have been
    shown, write out another '.'; when *message* is given, redraw the
    progress line in place, padding over any longer previous message."""
    if not self.in_progress_hanging:
        return
    if message is None:
        sys.stdout.write('.')
        sys.stdout.flush()
        return
    # Pad with spaces so a shorter message fully overwrites the old one.
    previous = self.last_message
    pad_width = max(0, len(previous) - len(message)) if previous else 0
    sys.stdout.write('\r%s%s%s%s' % (' ' * self.indent, self.in_progress,
                                     message, ' ' * pad_width))
    sys.stdout.flush()
    self.last_message = message
|
If we are in a progress scope, and no log messages have been
shown, write out another '.
|
def _get_tmaster_processes(self):
    ''' get the command to start the tmaster processes '''
    processes = {}
    # Command line for the tmaster binary itself.
    tmaster_flags = [
        self.tmaster_binary,
        '--topology_name={}'.format(self.topology_name),
        '--topology_id={}'.format(self.topology_id),
        '--zkhostportlist={}'.format(self.state_manager_connection),
        '--zkroot={}'.format(self.state_manager_root),
        '--myhost={}'.format(self.master_host),
        '--master_port={}'.format(self.master_port),
        '--controller_port={}'.format(self.tmaster_controller_port),
        '--stats_port={}'.format(self.tmaster_stats_port),
        '--config_file={}'.format(self.heron_internals_config_file),
        '--override_config_file={}'.format(self.override_config_file),
        '--metrics_sinks_yaml={}'.format(self.metrics_sinks_config_file),
        '--metricsmgr_port={}'.format(self.metrics_manager_port),
        '--ckptmgr_port={}'.format(self.checkpoint_manager_port)]
    tmaster_env = {} if self.shell_env is None else self.shell_env.copy()
    tmaster_cmd = Command(tmaster_flags, tmaster_env)

    if os.environ.get('ENABLE_HEAPCHECK') is not None:
        # Run the tmaster under tcmalloc's heap checker when requested.
        tmaster_cmd.env.update({
            'LD_PRELOAD': "/usr/lib/libtcmalloc.so",
            'HEAPCHECK': "normal"
        })
    processes["heron-tmaster"] = tmaster_cmd

    # Optional auxiliary daemons, enabled unless explicitly disabled.
    if self.metricscache_manager_mode.lower() != "disabled":
        processes["heron-metricscache"] = self._get_metrics_cache_cmd()
    if self.health_manager_mode.lower() != "disabled":
        processes["heron-healthmgr"] = self._get_healthmgr_cmd()

    processes[self.metricsmgr_ids[0]] = self._get_metricsmgr_cmd(
        self.metricsmgr_ids[0],
        self.metrics_sinks_config_file,
        self.metrics_manager_port)

    if self.is_stateful_topology:
        processes.update(self._get_ckptmgr_process())

    return processes
|
get the command to start the tmaster processes
|
def put(self, targetId):
    """
    stores a new target.
    :param targetId: the target to store.
    :return: a (body, status) pair -- 200 on success, 500 when storage
        fails, 400 when the request payload has no 'hinge' entry.
    """
    payload = request.get_json()
    if 'hinge' not in payload:
        return None, 400
    logger.info('Storing target ' + targetId)
    stored = self._targetController.storeFromHinge(targetId, payload['hinge'])
    if stored:
        logger.info('Stored target ' + targetId)
        return None, 200
    return None, 500
|
stores a new target.
:param targetId: the target to store.
:return:
|
def build_skeleton(nodes, independencies):
    """Estimates a graph skeleton (UndirectedGraph) from a set of independencies
    using (the first part of) the PC algorithm. The independencies can either be
    provided as an instance of the `Independencies`-class or by passing a
    decision function that decides any conditional independency assertion.
    Returns a tuple `(skeleton, separating_sets)`.

    If an Independencies-instance is passed, the contained IndependenceAssertions
    have to admit a faithful BN representation. This is the case if
    they are obtained as a set of d-separations of some Bayesian network or
    if the independence assertions are closed under the semi-graphoid axioms.
    Otherwise the procedure may fail to identify the correct structure.

    Parameters
    ----------
    nodes: list, array-like
        A list of node/variable names of the network skeleton.
    independencies: Independencies-instance or function.
        The source of independency information from which to build the skeleton.
        The provided Independencies should admit a faithful representation.
        Can either be provided as an Independencies()-instance or by passing a
        function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
        otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).

    Returns
    -------
    skeleton: UndirectedGraph
        An estimate for the undirected graph skeleton of the BN underlying the data.
    separating_sets: dict
        A dict containing for each pair of not directly connected nodes a
        separating set ("witnessing set") of variables that makes them
        conditionally independent. (needed for edge orientation procedures)

    Reference
    ---------
    [1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
        http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
    [2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
        Section 3.4.2.1 (page 85), Algorithm 3.3

    Examples
    --------
    >>> from pgmpy.estimators import ConstraintBasedEstimator
    >>> from pgmpy.models import DAG
    >>> from pgmpy.independencies import Independencies
    >>> # build skeleton from list of independencies:
    ... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
    >>> # we need to compute closure, otherwise this set of independencies doesn't
    ... # admit a faithful representation:
    ... ind = ind.closure()
    >>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
    >>> print(skel.edges())
    [('A', 'D'), ('B', 'D'), ('C', 'D')]
    >>> # build skeleton from d-separations of DAG:
    ... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
    >>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
    >>> print(skel.edges())
    [('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
    """
    nodes = list(nodes)

    # Normalize `independencies` into a ternary predicate is_independent(X, Y, Zs).
    if isinstance(independencies, Independencies):
        def is_independent(X, Y, Zs):
            return IndependenceAssertion(X, Y, Zs) in independencies
    elif callable(independencies):
        is_independent = independencies
    else:
        raise ValueError("'independencies' must be either Independencies-instance " +
                         "or a ternary function that decides independencies.")

    # Start from the complete graph over `nodes` and delete edges whenever
    # a separating set is found.
    graph = UndirectedGraph(combinations(nodes, 2))
    lim_neighbors = 0
    separating_sets = dict()
    # Grow the conditioning-set size until no node has `lim_neighbors` or
    # more neighbors left (PC algorithm, adjacency search phase).
    while not all([len(list(graph.neighbors(node))) < lim_neighbors for node in nodes]):
        for node in nodes:
            for neighbor in list(graph.neighbors(node)):
                # search if there is a set of neighbors (of size lim_neighbors)
                # that makes X and Y independent:
                for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]), lim_neighbors):
                    if is_independent(node, neighbor, separating_set):
                        separating_sets[frozenset((node, neighbor))] = separating_set
                        graph.remove_edge(node, neighbor)
                        break
        lim_neighbors += 1

    return graph, separating_sets
|
Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
they are obtained as a set of d-seperations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
nodes: list, array-like
A list of node/variable names of the network skeleton.
independencies: Independencies-instance or function.
The source of independency information from which to build the skeleton.
The provided Independencies should admit a faithful representation.
Can either be provided as an Independencies()-instance or by passing a
function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
Reference
---------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
>>> # build skeleton from d-seperations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
|
def resume_multiple(self, infohash_list):
    """
    Resume multiple paused torrents.

    :param infohash_list: Single or list() of infohashes.
    :return: Response of the 'command/resumeAll' POST request.
    """
    payload = self._process_infohash_list(infohash_list)
    return self._post('command/resumeAll', data=payload)
|
Resume multiple paused torrents.
:param infohash_list: Single or list() of infohashes.
|
def check_password_confirm(self, form, trigger_action_group=None):
    """Checks that the password and the confirm password match in
    the provided form. Won't do anything if any of the password fields
    are not in the form.
    """
    password_field = self.options['password_column']
    confirm_field = password_field + "_confirm"
    if password_field not in form or confirm_field not in form:
        return
    if form[confirm_field].data == form[password_field].data:
        return
    # Mismatch: optionally flash the configured message, then abort.
    failure_message = self.options["password_confirm_failed_message"]
    if failure_message:
        flash(failure_message, "error")
    current_context.exit(trigger_action_group=trigger_action_group)
|
Checks that the password and the confirm password match in
the provided form. Won't do anything if any of the password fields
are not in the form.
|
def write(self, _force=False, _exists_ok=False, **items):
    """
    Creates a db file with the core schema.

    :param _force: If `True` an existing db file will be overwritten
        (removed and recreated).
    :param _exists_ok: If `True`, an existing db file is not an error;
        the schema and rows are written into it as-is.
    :param items: Mapping of table name to iterable of row dicts to insert.
    """
    # BUGFIX: `_force` was documented but never consulted, so the
    # ValueError fired even when the caller explicitly asked to overwrite.
    # `_exists_ok` was likewise ignored.
    if self.fname and self.fname.exists():
        if _force:
            self.fname.unlink()
        elif not _exists_ok:
            raise ValueError('db file already exists, use force=True to overwrite')
    with self.connection() as db:
        for table in self.tables:
            db.execute(table.sql(translate=self.translate))

        db.execute('PRAGMA foreign_keys = ON;')
        db.commit()

        refs = defaultdict(list)  # collects rows in association tables.
        for t in self.tables:
            if t.name not in items:
                continue
            rows, keys = [], []
            cols = {c.name: c for c in t.columns}
            for i, row in enumerate(items[t.name]):
                pk = row[t.primary_key[0]] \
                    if t.primary_key and len(t.primary_key) == 1 else None
                values = []
                for k, v in row.items():
                    if k in t.many_to_many:
                        # Many-to-many values go through association tables,
                        # which require a single-column primary key.
                        assert pk
                        at = t.many_to_many[k]
                        atkey = tuple([at.name] + [c.name for c in at.columns])
                        for vv in v:
                            fkey, context = self.association_table_context(t, k, vv)
                            refs[atkey].append((pk, fkey, context))
                    else:
                        col = cols[k]
                        if isinstance(v, list):
                            # Note: This assumes list-valued columns are of datatype string!
                            v = (col.separator or ';').join(
                                col.convert(vv) for vv in v)
                        else:
                            v = col.convert(v) if v is not None else None
                        if i == 0:
                            keys.append(col.name)
                        values.append(v)
                rows.append(tuple(values))
            insert(db, self.translate, t.name, keys, *rows)

        # Flush the collected association-table rows.
        for atkey, rows in refs.items():
            insert(db, self.translate, atkey[0], atkey[1:], *rows)
        db.commit()
|
Creates a db file with the core schema.
:param force: If `True` an existing db file will be overwritten.
|
def add_argument(self, *args, **kwargs):
    """Add an argument.

    Same as ``argparse.ArgumentParser.add_argument``, except that when no
    explicit help text is supplied, one is looked up in the help messages
    collected from docstrings for this parser (matching the argument name
    with all ``-`` characters removed).

    Positional Args:
        same positional arguments as argparse.ArgumentParser.add_argument.

    Keyword Args:
        same keyword arguments as argparse.ArgumentParser.add_argument.
    """
    if _HELP not in kwargs:
        for raw_name in args:
            candidate = raw_name.replace("-", "")
            if candidate in self.__argmap:
                kwargs[_HELP] = self.__argmap[candidate]
                break
    return super(ArgumentParser, self).add_argument(*args, **kwargs)
|
Add an argument.
This method adds a new argument to the current parser. The function is
same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine help messages for the adding argument from some
docstrings.
If the new arguments belong to some sub commands, the docstring
of a function implements behavior of the sub command has ``Args:`` section,
and defines same name variable, this function sets such
definition to the help message.
Positional Args:
same positional arguments as argparse.ArgumentParser.add_argument.
Keyword Args:
same keywards arguments as argparse.ArgumentParser.add_argument.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.