code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_clean_url(self):
"""Retrieve the clean, full URL - including username/password."""
if self.needs_auth:
self.prompt_auth()
url = RepositoryURL(self.url.full_url)
url.username = self.username
url.password = self.password
return url | Retrieve the clean, full URL - including username/password. | Below is the instruction that describes the task:
### Input:
Retrieve the clean, full URL - including username/password.
### Response:
def get_clean_url(self):
    """Retrieve the clean, full URL - including username/password."""
    # Make sure credentials are populated before the URL is assembled.
    if self.needs_auth:
        self.prompt_auth()
    clean = RepositoryURL(self.url.full_url)
    clean.username, clean.password = self.username, self.password
    return clean
def update(self, pci):
"""Copy the PCI fields."""
self.pduUserData = pci.pduUserData
self.pduSource = pci.pduSource
self.pduDestination = pci.pduDestination | Copy the PCI fields. | Below is the instruction that describes the task:
### Input:
Copy the PCI fields.
### Response:
def update(self, pci):
    """Copy the PCI fields from *pci* onto this object."""
    # Mirror each protocol-control field of the source PCI, in order.
    for field in ('pduUserData', 'pduSource', 'pduDestination'):
        setattr(self, field, getattr(pci, field))
def add_term_occurrence(self, term, document):
"""
Adds an occurrence of the term in the specified document.
"""
if document not in self._documents:
self._documents[document] = 0
if term not in self._terms:
if self._freeze:
return
else:
self._terms[term] = collections.Counter()
if document not in self._terms[term]:
self._terms[term][document] = 0
self._documents[document] += 1
self._terms[term][document] += 1 | Adds an occurrence of the term in the specified document. | Below is the instruction that describes the task:
### Input:
Adds an occurrence of the term in the specified document.
### Response:
def add_term_occurrence(self, term, document):
    """
    Adds an occurrence of the term in the specified document.

    Note: the document is registered (with a zero count) even when the
    index is frozen and the unknown term itself is ignored.
    """
    self._documents.setdefault(document, 0)
    if term not in self._terms:
        if self._freeze:
            # Frozen index: unknown terms are silently dropped.
            return
        self._terms[term] = collections.Counter()
    self._terms[term].setdefault(document, 0)
    self._documents[document] += 1
    self._terms[term][document] += 1
def push(self, set_upstream: bool = True):
"""
Pushes all refs (branches and tags) to origin
"""
LOGGER.info('pushing repo to origin')
try:
self.repo.git.push()
except GitCommandError as error:
if 'has no upstream branch' in error.stderr and set_upstream:
self.repo.git.push(f'--set-upstream origin {self.get_current_branch()}')
else:
raise
self.push_tags() | Pushes all refs (branches and tags) to origin | Below is the instruction that describes the task:
### Input:
Pushes all refs (branches and tags) to origin
### Response:
def push(self, set_upstream: bool = True):
    """
    Pushes all refs (branches and tags) to origin.

    If the push fails because the current branch has no upstream and
    *set_upstream* is True, the push is retried with
    ``--set-upstream origin <current-branch>``; any other error is
    re-raised. Tags are pushed afterwards via ``push_tags``.
    """
    LOGGER.info('pushing repo to origin')
    try:
        self.repo.git.push()
    except GitCommandError as error:
        if 'has no upstream branch' in error.stderr and set_upstream:
            # Pass each token as a separate argument: GitPython forwards
            # every positional arg as one argv element, so the previous
            # single space-joined string reached git as one (invalid)
            # argument and the retry could never succeed.
            self.repo.git.push('--set-upstream', 'origin', self.get_current_branch())
        else:
            raise
    self.push_tags()
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the RevokeRequestPayload object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
"""
tstream = BytearrayStream()
# Write the contents of the request payload
if self.unique_identifier is not None:
self.unique_identifier.write(tstream, kmip_version=kmip_version)
self.revocation_reason.write(tstream, kmip_version=kmip_version)
if self.compromise_occurrence_date is not None:
self.compromise_occurrence_date.write(
tstream,
kmip_version=kmip_version
)
# Write the length and value of the request payload
self.length = tstream.length()
super(RevokeRequestPayload, self).write(
ostream,
kmip_version=kmip_version
)
ostream.write(tstream.buffer) | Write the data encoding the RevokeRequestPayload object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0. | Below is the instruction that describes the task:
### Input:
Write the data encoding the RevokeRequestPayload object to a stream.
Args:
ostream (Stream): A data stream in which to encode object data,
supporting a write method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
### Response:
def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Write the data encoding the RevokeRequestPayload object to a stream.
    Args:
        ostream (Stream): A data stream in which to encode object data,
            supporting a write method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.
    """
    # Encode the payload fields into a temporary stream first so the total
    # payload length is known before the header is written.
    tstream = BytearrayStream()
    # Write the contents of the request payload.
    if self.unique_identifier is not None:
        self.unique_identifier.write(tstream, kmip_version=kmip_version)
    # revocation_reason is written unconditionally; presumably a required
    # field of this payload -- TODO confirm against the KMIP spec.
    self.revocation_reason.write(tstream, kmip_version=kmip_version)
    if self.compromise_occurrence_date is not None:
        self.compromise_occurrence_date.write(
            tstream,
            kmip_version=kmip_version
        )
    # Write the length and value of the request payload: the superclass
    # emits the header (which uses self.length), then the buffered fields
    # follow it on the real output stream.
    self.length = tstream.length()
    super(RevokeRequestPayload, self).write(
        ostream,
        kmip_version=kmip_version
    )
    ostream.write(tstream.buffer)
def temporal_firing_rate(self,time_dimension=0,resolution=1.0,units=None,
min_t=None,max_t=None,weight_function=None,normalize_time=False,
normalize_n=False,start_units_with_0=True,cell_dimension='N'):
"""
Outputs a time histogram of spikes.
`bins`: number of bins (default is 1ms bins from 0 to t_max)
`weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
`normalize_time`
`normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
Generally does not make sense when using a weight_function other than 'count'.
`start_units_with_0`: starts indices from 0 instead of the actual index
"""
units = self._default_units(units)
if self.data_format == 'spike_times':
converted_dimension,st = self.spike_times.get_converted(0,units)
if min_t is None:
min_t = converted_dimension.min
if max_t is None:
max_t = converted_dimension.max
st = st[(st>=min_t)*(st<max_t)]
bins = converted_dimension.linspace_by_resolution(resolution,end_at_end=True,extra_bins=0)
H,edg = np.histogram(st,bins=bins)
if normalize_time:
H = H/(convert_time(resolution,from_units=units,to_units='s')) # make it Hertz
if normalize_n:
H = H/(len(np.unique(self.spike_times[cell_dimension])))
return H,edg | Outputs a time histogram of spikes.
`bins`: number of bins (default is 1ms bins from 0 to t_max)
`weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
`normalize_time`
`normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
Generally does not make sense when using a weight_function other than 'count'.
`start_units_with_0`: starts indices from 0 instead of the actual index | Below is the instruction that describes the task:
### Input:
Outputs a time histogram of spikes.
`bins`: number of bins (default is 1ms bins from 0 to t_max)
`weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
`normalize_time`
`normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
Generally does not make sense when using a weight_function other than 'count'.
`start_units_with_0`: starts indices from 0 instead of the actual index
### Response:
def temporal_firing_rate(self, time_dimension=0, resolution=1.0, units=None,
                         min_t=None, max_t=None, weight_function=None, normalize_time=False,
                         normalize_n=False, start_units_with_0=True, cell_dimension='N'):
    """
    Outputs a time histogram of spikes.
    `bins`: number of bins (default is 1ms bins from 0 to t_max)
    `weight_function`: if set, computes a weighted histogram, dependent on the (index, time) tuples of each spike
    weight_function = lambda x: weight_map.flatten()[array(x[:,0],dtype=int)]
    `normalize_time`
    `normalize_n`: normalize by the length of time (such that normal output is Hz) and/or number of units (such that output is Hz/unit, determined with unique values in cell_dimension)
    Generally does not make sense when using a weight_function other than 'count'.
    `start_units_with_0`: starts indices from 0 instead of the actual index
    """
    # NOTE(review): time_dimension, weight_function and start_units_with_0
    # are documented but never used in this body -- confirm whether the
    # features were dropped or are still pending.
    units = self._default_units(units)
    if self.data_format == 'spike_times':
        # Convert the spike times of dimension 0 into the requested units.
        converted_dimension, st = self.spike_times.get_converted(0, units)
        # Default the analysis window to the full extent of the data.
        if min_t is None:
            min_t = converted_dimension.min
        if max_t is None:
            max_t = converted_dimension.max
        # Keep only spikes inside [min_t, max_t); the multiplied boolean
        # masks act as an element-wise AND.
        st = st[(st >= min_t) * (st < max_t)]
        bins = converted_dimension.linspace_by_resolution(resolution, end_at_end=True, extra_bins=0)
        H, edg = np.histogram(st, bins=bins)
        if normalize_time:
            H = H / (convert_time(resolution, from_units=units, to_units='s'))  # make it Hertz
        if normalize_n:
            # Per-unit rate: divide by the number of distinct cells.
            H = H / (len(np.unique(self.spike_times[cell_dimension])))
        return H, edg
    # NOTE(review): implicitly returns None for any other data_format --
    # confirm that callers handle this.
def from_xml(self, doc):
"""Load this domain based on an XML document"""
import xml.sax
handler = DomainDumpParser(self)
xml.sax.parse(doc, handler)
return handler | Load this domain based on an XML document | Below is the instruction that describes the task:
### Input:
Load this domain based on an XML document
### Response:
def from_xml(self, doc):
    """Load this domain based on an XML document."""
    # Imported lazily so the SAX machinery is only loaded when needed.
    import xml.sax

    dump_parser = DomainDumpParser(self)
    xml.sax.parse(doc, dump_parser)
    return dump_parser
def ro(self):
""" Return read-only copy of this object
:return: WHTTPHeaders
"""
ro_headers = WHTTPHeaders()
names = self.headers()
for name in names:
ro_headers.add_headers(name, *self.get_headers(name))
ro_headers.__cookies = self.__set_cookies.ro()
ro_headers.__ro_flag = True
return ro_headers | Return read-only copy of this object
:return: WHTTPHeaders | Below is the instruction that describes the task:
### Input:
Return read-only copy of this object
:return: WHTTPHeaders
### Response:
def ro(self):
    """ Return read-only copy of this object
    :return: WHTTPHeaders
    """
    ro_headers = WHTTPHeaders()
    # Copy every header; a single name may map to multiple values.
    names = self.headers()
    for name in names:
        ro_headers.add_headers(name, *self.get_headers(name))
    # NOTE(review): this reads self.__set_cookies but assigns
    # ro_headers.__cookies -- name mangling makes both resolve within this
    # class, yet the differing attribute names look inconsistent; confirm
    # that __cookies is the attribute readers of the copy actually use.
    ro_headers.__cookies = self.__set_cookies.ro()
    # Flag the copy as read-only -- presumably checked by mutating
    # methods of WHTTPHeaders; confirm.
    ro_headers.__ro_flag = True
    return ro_headers
def basemz(df):
"""
The mz of the most abundant ion.
"""
# returns the
d = np.array(df.columns)[df.values.argmax(axis=1)]
return Trace(d, df.index, name='basemz') | The mz of the most abundant ion. | Below is the instruction that describes the task:
### Input:
The mz of the most abundant ion.
### Response:
def basemz(df):
    """
    The mz of the most abundant ion.
    """
    # Column label of the per-row maximum intensity value.
    peak_mz = np.array(df.columns)[df.values.argmax(axis=1)]
    return Trace(peak_mz, df.index, name='basemz')
def breeding_change(request, breeding_id):
"""This view is used to generate a form by which to change pups which belong to a particular breeding set.
This view typically is used to modify existing pups. This might include marking animals as sacrificed, entering genotype or marking information or entering movement of mice to another cage. It is used to show and modify several animals at once.
It takes a request in the form /breeding/(breeding_id)/change/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage.
This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/.
This view is restricted to those with the permission animal.change_animal.
"""
breeding = Breeding.objects.select_related().get(id=breeding_id)
strain = breeding.Strain
PupsFormSet = inlineformset_factory(Breeding, Animal, extra=0, exclude=('Alive','Father', 'Mother', 'Breeding', 'Notes'))
if request.method =="POST":
formset = PupsFormSet(request.POST, instance=breeding)
if formset.is_valid():
formset.save()
return HttpResponseRedirect( breeding.get_absolute_url() )
else:
formset = PupsFormSet(instance=breeding,)
return render(request, "breeding_change.html", {"formset":formset, 'breeding':breeding}) | This view is used to generate a form by which to change pups which belong to a particular breeding set.
This view typically is used to modify existing pups. This might include marking animals as sacrificed, entering genotype or marking information or entering movement of mice to another cage. It is used to show and modify several animals at once.
It takes a request in the form /breeding/(breeding_id)/change/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage.
This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/.
This view is restricted to those with the permission animal.change_animal. | Below is the instruction that describes the task:
### Input:
This view is used to generate a form by which to change pups which belong to a particular breeding set.
This view typically is used to modify existing pups. This might include marking animals as sacrificed, entering genotype or marking information or entering movement of mice to another cage. It is used to show and modify several animals at once.
It takes a request in the form /breeding/(breeding_id)/change/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage.
This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/.
This view is restricted to those with the permission animal.change_animal.
### Response:
def breeding_change(request, breeding_id):
    """Render and process a formset for editing the pups of one breeding set.

    Used to modify existing pups -- marking animals as sacrificed, entering
    genotype or marking information, or recording cage moves -- for several
    animals at once.  Answers /breeding/(breeding_id)/change/; breeding_id is
    the internal id of the breeding set, not the barcode of a breeding cage.
    One formset row represents one animal; to add extra animals to a breeding
    set use /breeding/(breeding_id)/pups/.  Restricted to users with the
    animal.change_animal permission.
    """
    breeding = Breeding.objects.select_related().get(id=breeding_id)
    strain = breeding.Strain
    PupsFormSet = inlineformset_factory(
        Breeding,
        Animal,
        extra=0,
        exclude=('Alive', 'Father', 'Mother', 'Breeding', 'Notes'),
    )
    if request.method != "POST":
        # Initial GET: show an unbound formset for the current pups.
        formset = PupsFormSet(instance=breeding)
        return render(request, "breeding_change.html",
                      {"formset": formset, 'breeding': breeding})
    formset = PupsFormSet(request.POST, instance=breeding)
    if formset.is_valid():
        formset.save()
        return HttpResponseRedirect(breeding.get_absolute_url())
    # Invalid submission: re-render with the bound formset and its errors.
    return render(request, "breeding_change.html",
                  {"formset": formset, 'breeding': breeding})
def save_figure_as(self):
"""
This function programs the button to save the figure displayed
and save it in a png file where you want / with the name you want thanks to a file dialog.
"""
self.file_name = QtGui.QFileDialog.getSaveFileName()
self.file_name = self.file_name + ".png"
self.ui.graphic_widget.canvas.print_figure(str(self.file_name)) | This function programs the button to save the figure displayed
and save it in a png file where you want / with the name you want thanks to a file dialog. | Below is the instruction that describes the task:
### Input:
This function programs the button to save the figure displayed
and save it in a png file where you want / with the name you want thanks to a file dialog.
### Response:
def save_figure_as(self):
    """
    Prompt the user for a destination path via a file dialog and save the
    currently displayed figure there as a PNG file.

    Does nothing if the user cancels the dialog (previously a file
    literally named ".png" would have been written in that case).
    """
    file_name = str(QtGui.QFileDialog.getSaveFileName())
    if not file_name:
        # Dialog cancelled -- no path chosen, nothing to save.
        return
    # Append the extension only when missing, avoiding "name.png.png".
    if not file_name.lower().endswith(".png"):
        file_name += ".png"
    self.file_name = file_name
    self.ui.graphic_widget.canvas.print_figure(self.file_name)
def insert_symbol_pushpop(self):
"""
For each stack symbol t E G, we look for a pair of states, qi and qj,
such that the PDA in state qi can read some input a E S and push t
on the stack and in state state qj can read some input b E S and pop t
off the stack. In that case, we add the rule Aik -> a Alj b
where (ql,t) E d(qi,a,e) and (qk,e) E d(qj,b,t).
"""
for state_a in self.statediag:
if state_a.type == 1:
found = 0
for state_b in self.statediag:
if state_b.type == 2 and state_b.sym == state_a.sym:
found = 1
for j in state_a.trans:
if state_a.trans[j] == [0]:
read_a = ''
else:
new = []
for selected_transition in state_a.trans[j]:
if selected_transition == ' ':
new.append('&')
else:
new.append(selected_transition)
read_a = " | ".join(new)
for i in state_b.trans:
if state_b.trans[i] == [0]:
read_b = ''
else:
new = []
for selected_transition in state_b.trans[i]:
if selected_transition == ' ':
new.append('&')
else:
new.append(selected_transition)
read_b = " | ".join(new)
self.rules.append(
'A' + repr(state_a.id)
+ ',' + repr(i)
+ ':' + read_a
+ ' A' + repr(j)
+ ',' + repr(state_b.id)
+ ' ' + read_b)
if found == 0:
# A special case is required for State 2, where the POPed symbols
# are part of the transitions array and not defined for "sym" variable.
for state_b in self.statediag:
if state_b.type == 2 and state_b.sym == 0:
for i in state_b.trans:
if state_a.sym in state_b.trans[i]:
for j in state_a.trans:
if state_a.trans[j] == [0]:
read_a = ''
else:
read_a = " | ".join(
state_a.trans[j])
self.rules.append(
'A' + repr(state_a.id)
+ ',' + repr(i)
+ ':' + read_a
+ ' A' + repr(j)
+ ',' + repr(state_b.id))
# print
# 'A'+`state_a.id`+','+`i`+':'+read_a+'
# A'+`j`+','+`state_b.id`
found = 1
if found == 0:
print "ERROR: symbol " + repr(state_a.sym) \
+ ". It was not found anywhere in the graph." | For each stack symbol t E G, we look for a pair of states, qi and qj,
such that the PDA in state qi can read some input a E S and push t
on the stack and in state state qj can read some input b E S and pop t
off the stack. In that case, we add the rule Aik -> a Alj b
where (ql,t) E d(qi,a,e) and (qk,e) E d(qj,b,t). | Below is the instruction that describes the task:
### Input:
For each stack symbol t E G, we look for a pair of states, qi and qj,
such that the PDA in state qi can read some input a E S and push t
on the stack and in state state qj can read some input b E S and pop t
off the stack. In that case, we add the rule Aik -> a Alj b
where (ql,t) E d(qi,a,e) and (qk,e) E d(qj,b,t).
### Response:
def insert_symbol_pushpop(self):
    """
    For each stack symbol t E G, we look for a pair of states, qi and qj,
    such that the PDA in state qi can read some input a E S and push t
    on the stack and in state qj can read some input b E S and pop t
    off the stack. In that case, we add the rule Aik -> a Alj b
    where (ql,t) E d(qi,a,e) and (qk,e) E d(qj,b,t).
    """
    for state_a in self.statediag:
        if state_a.type == 1:
            found = 0
            for state_b in self.statediag:
                if state_b.type == 2 and state_b.sym == state_a.sym:
                    found = 1
                    for j in state_a.trans:
                        if state_a.trans[j] == [0]:
                            read_a = ''
                        else:
                            # Replace literal spaces with '&' (epsilon marker).
                            new = []
                            for selected_transition in state_a.trans[j]:
                                if selected_transition == ' ':
                                    new.append('&')
                                else:
                                    new.append(selected_transition)
                            read_a = " | ".join(new)
                        for i in state_b.trans:
                            if state_b.trans[i] == [0]:
                                read_b = ''
                            else:
                                new = []
                                for selected_transition in state_b.trans[i]:
                                    if selected_transition == ' ':
                                        new.append('&')
                                    else:
                                        new.append(selected_transition)
                                read_b = " | ".join(new)
                            self.rules.append(
                                'A' + repr(state_a.id)
                                + ',' + repr(i)
                                + ':' + read_a
                                + ' A' + repr(j)
                                + ',' + repr(state_b.id)
                                + ' ' + read_b)
            if found == 0:
                # A special case is required for State 2, where the POPed symbols
                # are part of the transitions array and not defined for "sym" variable.
                for state_b in self.statediag:
                    if state_b.type == 2 and state_b.sym == 0:
                        for i in state_b.trans:
                            if state_a.sym in state_b.trans[i]:
                                for j in state_a.trans:
                                    if state_a.trans[j] == [0]:
                                        read_a = ''
                                    else:
                                        read_a = " | ".join(
                                            state_a.trans[j])
                                    self.rules.append(
                                        'A' + repr(state_a.id)
                                        + ',' + repr(i)
                                        + ':' + read_a
                                        + ' A' + repr(j)
                                        + ',' + repr(state_b.id))
                                found = 1
            if found == 0:
                # Was a Python 2 "print" statement; use the function form so
                # the file's Python 3 features (f-strings elsewhere) can run.
                print("ERROR: symbol " + repr(state_a.sym)
                      + ". It was not found anywhere in the graph.")
def convexHull(self,
geometries,
sr=None):
"""
The convexHull operation is performed on a geometry service resource.
It returns the convex hull of the input geometry. The input geometry can
be a point, multipoint, polyline, or polygon. The convex hull is typically
a polygon but can also be a polyline or point in degenerate cases.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
sr - spatial reference of the input geometries WKID.
"""
url = self._url + "/convexHull"
params = {
"f" : "json"
}
if isinstance(geometries, list) and len(geometries) > 0:
g = geometries[0]
if sr is not None:
params['sr'] = sr
else:
params['sr'] = g._wkid
if isinstance(g, Polygon):
params['geometries'] = {"geometryType": "esriGeometryPolygon",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Point):
params['geometries'] = {"geometryType": "esriGeometryPoint",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Polyline):
params['geometries'] = {"geometryType": "esriGeometryPolyline",
"geometries" : self.__geomToStringArray(geometries, "list")}
else:
return None
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) | The convexHull operation is performed on a geometry service resource.
It returns the convex hull of the input geometry. The input geometry can
be a point, multipoint, polyline, or polygon. The convex hull is typically
a polygon but can also be a polyline or point in degenerate cases.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
sr - spatial reference of the input geometries WKID. | Below is the instruction that describes the task:
### Input:
The convexHull operation is performed on a geometry service resource.
It returns the convex hull of the input geometry. The input geometry can
be a point, multipoint, polyline, or polygon. The convex hull is typically
a polygon but can also be a polyline or point in degenerate cases.
Inputs:
geometries - array of geometries (structured as JSON geometry
objects returned by the ArcGIS REST API).
sr - spatial reference of the input geometries WKID.
### Response:
def convexHull(self,
               geometries,
               sr=None):
    """
    The convexHull operation is performed on a geometry service resource.
    It returns the convex hull of the input geometry. The input geometry can
    be a point, multipoint, polyline, or polygon. The convex hull is typically
    a polygon but can also be a polyline or point in degenerate cases.
    Inputs:
        geometries - array of geometries (structured as JSON geometry
                     objects returned by the ArcGIS REST API).
        sr - spatial reference of the input geometries WKID.
    """
    url = self._url + "/convexHull"
    params = {
        "f" : "json"
    }
    # The geometry type is inferred from the first element and assumed
    # homogeneous across the list -- TODO confirm mixed-type input is
    # not expected by callers.
    if isinstance(geometries, list) and len(geometries) > 0:
        g = geometries[0]
        if sr is not None:
            params['sr'] = sr
        else:
            # Fall back to the first geometry's spatial reference WKID.
            params['sr'] = g._wkid
        if isinstance(g, Polygon):
            params['geometries'] = {"geometryType": "esriGeometryPolygon",
                                    "geometries" : self.__geomToStringArray(geometries, "list")}
        elif isinstance(g, Point):
            params['geometries'] = {"geometryType": "esriGeometryPoint",
                                    "geometries" : self.__geomToStringArray(geometries, "list")}
        elif isinstance(g, Polyline):
            params['geometries'] = {"geometryType": "esriGeometryPolyline",
                                    "geometries" : self.__geomToStringArray(geometries, "list")}
        else:
            # Unsupported geometry type: bail out without issuing a request.
            return None
    # NOTE(review): when geometries is empty or not a list, the request is
    # still sent without 'geometries'/'sr' params -- confirm intended.
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
def ac_factory(path=""):
"""Attribute Converter factory
:param path: The path to a directory where the attribute maps are expected
to reside.
:return: A AttributeConverter instance
"""
acs = []
if path:
if path not in sys.path:
sys.path.insert(0, path)
for fil in os.listdir(path):
if fil.endswith(".py"):
mod = import_module(fil[:-3])
for key, item in mod.__dict__.items():
if key.startswith("__"):
continue
if isinstance(item,
dict) and "to" in item and "fro" in item:
atco = AttributeConverter(item["identifier"])
atco.from_dict(item)
acs.append(atco)
else:
from saml2 import attributemaps
for typ in attributemaps.__all__:
mod = import_module(".%s" % typ, "saml2.attributemaps")
for key, item in mod.__dict__.items():
if key.startswith("__"):
continue
if isinstance(item, dict) and "to" in item and "fro" in item:
atco = AttributeConverter(item["identifier"])
atco.from_dict(item)
acs.append(atco)
return acs | Attribute Converter factory
:param path: The path to a directory where the attribute maps are expected
to reside.
:return: An AttributeConverter instance | Below is the instruction that describes the task:
### Input:
Attribute Converter factory
:param path: The path to a directory where the attribute maps are expected
to reside.
:return: A AttributeConverter instance
### Response:
def _converters_from_module(mod):
    """Build AttributeConverter instances from every map dict in *mod*.

    A "map dict" is any non-dunder module attribute that is a dict with
    both "to" and "fro" keys; its "identifier" entry names the converter.
    """
    converters = []
    for key, item in mod.__dict__.items():
        if key.startswith("__"):
            continue
        if isinstance(item, dict) and "to" in item and "fro" in item:
            atco = AttributeConverter(item["identifier"])
            atco.from_dict(item)
            converters.append(atco)
    return converters


def ac_factory(path=""):
    """Attribute Converter factory

    :param path: The path to a directory where the attribute maps are expected
        to reside.
    :return: A list of AttributeConverter instances
    """
    acs = []
    if path:
        if path not in sys.path:
            sys.path.insert(0, path)
        # Load every top-level .py file in the directory as a map module.
        for fil in os.listdir(path):
            if fil.endswith(".py"):
                mod = import_module(fil[:-3])
                acs.extend(_converters_from_module(mod))
    else:
        # No path given: fall back to the maps bundled with saml2.
        from saml2 import attributemaps
        for typ in attributemaps.__all__:
            mod = import_module(".%s" % typ, "saml2.attributemaps")
            acs.extend(_converters_from_module(mod))
    return acs
def to_frame(self, data, state):
"""
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
"""
# First, determine the length we're looking for
length = state.length
if length is None:
# Try decoding a length from the data buffer
length = self.decode_length(data, state)
# Now, is there enough data?
if len(data) < length:
state.length = length
raise exc.NoFrames()
# Extract the frame
frame = six.binary_type(data[:length])
del data[:length]
# Update the state
state.length = None
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | Below is the instruction that describes the task:
### Input:
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
### Response:
def to_frame(self, data, state):
    """
    Extract a single frame from the data buffer. The consumed
    data should be removed from the buffer. If no complete frame
    can be read, must raise a ``NoFrames`` exception.
    :param data: A ``bytearray`` instance containing the data so
                 far read.
    :param state: An instance of ``FramerState``. If the buffer
                  contains a partial frame, this object can be
                  used to store state information to allow the
                  remainder of the frame to be read.
    :returns: A frame. The frame may be any object. The stock
              framers always return bytes.
    """
    # Determine how many bytes the next frame needs; a cached value on
    # the state means a previous call already decoded the length.
    needed = state.length
    if needed is None:
        needed = self.decode_length(data, state)
    if len(data) < needed:
        # Not enough buffered yet; remember the length for the next call.
        state.length = needed
        raise exc.NoFrames()
    # Consume the frame bytes from the front of the buffer.
    frame = six.binary_type(data[:needed])
    del data[:needed]
    state.length = None
    return frame
def first_true(iterable, func):
# type: (Iterable[T], Callable) -> T
"""" Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
"""
try:
return next((x for x in iterable if func(x)))
except StopIteration as e:
# TODO: Find a better error message
raise_from(IndexError('No match for %s' % func), e) | Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators. | Below is the instruction that describes the task:
### Input:
Return the first item of the iterable for which func(item) == True.
Or raises IndexError.
WARNING: this will consume generators.
### Response:
def first_true(iterable, func):
    # type: (Iterable[T], Callable) -> T
    """Return the first item of the iterable for which func(item) == True.
    Or raises IndexError.
    WARNING: this will consume generators.
    """
    matches = (candidate for candidate in iterable if func(candidate))
    try:
        return next(matches)
    except StopIteration as stop:
        # TODO: Find a better error message
        raise_from(IndexError('No match for %s' % func), stop)
def _remove_bcbiovm_path():
"""Avoid referencing minimal bcbio_nextgen in bcbio_vm installation.
"""
cur_path = os.path.dirname(os.path.realpath(sys.executable))
paths = os.environ["PATH"].split(":")
if cur_path in paths:
paths.remove(cur_path)
os.environ["PATH"] = ":".join(paths) | Avoid referencing minimal bcbio_nextgen in bcbio_vm installation. | Below is the instruction that describes the task:
### Input:
Avoid referencing minimal bcbio_nextgen in bcbio_vm installation.
### Response:
def _remove_bcbiovm_path():
"""Avoid referencing minimal bcbio_nextgen in bcbio_vm installation.
"""
cur_path = os.path.dirname(os.path.realpath(sys.executable))
paths = os.environ["PATH"].split(":")
if cur_path in paths:
paths.remove(cur_path)
os.environ["PATH"] = ":".join(paths) |
def restore_publish(self, config, components, recreate=False):
"""
Restore publish from config file
"""
if "all" in components:
components = []
try:
self.load()
publish = True
except NoSuchPublish:
publish = False
new_publish_snapshots = []
to_publish = []
created_snapshots = []
for saved_component in config.get('components', []):
component_name = saved_component.get('component')
if not component_name:
raise Exception("Corrupted file")
if components and component_name not in components:
continue
saved_packages = []
if not saved_component.get('packages'):
raise Exception("Component %s is empty" % component_name)
for package in saved_component.get('packages'):
package_ref = '{} {} {} {}'.format(package.get('arch'), package.get('package'), package.get('version'), package.get('ref'))
saved_packages.append(package_ref)
to_publish.append(component_name)
timestamp = time.strftime("%Y%m%d%H%M%S")
snapshot_name = '{}-{}-{}'.format("restored", timestamp, saved_component.get('snapshot'))
lg.debug("Creating snapshot %s for component %s of packages: %s"
% (snapshot_name, component_name, saved_packages))
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': [],
'Description': saved_component.get('description'),
'PackageRefs': saved_packages,
}
)
created_snapshots.append(snapshot_name)
except AptlyException as e:
if e.res.status_code == 404:
# delete all the previously created
# snapshots because the file is corrupted
self._remove_snapshots(created_snapshots)
raise Exception("Source snapshot or packages don't exist")
else:
raise
new_publish_snapshots.append({
'Component': component_name,
'Name': snapshot_name
})
if components:
self.publish_snapshots = [x for x in self.publish_snapshots if x['Component'] not in components and x['Component'] not in to_publish]
check_components = [x for x in new_publish_snapshots if x['Component'] in components]
if len(check_components) != len(components):
self._remove_snapshots(created_snapshots)
raise Exception("Not possible to find all the components required in the backup file")
self.publish_snapshots += new_publish_snapshots
self.do_publish(recreate=recreate, merge_snapshots=False) | Restore publish from config file | Below is the the instruction that describes the task:
### Input:
Restore publish from config file
### Response:
def restore_publish(self, config, components, recreate=False):
"""
Restore publish from config file
"""
if "all" in components:
components = []
try:
self.load()
publish = True
except NoSuchPublish:
publish = False
new_publish_snapshots = []
to_publish = []
created_snapshots = []
for saved_component in config.get('components', []):
component_name = saved_component.get('component')
if not component_name:
raise Exception("Corrupted file")
if components and component_name not in components:
continue
saved_packages = []
if not saved_component.get('packages'):
raise Exception("Component %s is empty" % component_name)
for package in saved_component.get('packages'):
package_ref = '{} {} {} {}'.format(package.get('arch'), package.get('package'), package.get('version'), package.get('ref'))
saved_packages.append(package_ref)
to_publish.append(component_name)
timestamp = time.strftime("%Y%m%d%H%M%S")
snapshot_name = '{}-{}-{}'.format("restored", timestamp, saved_component.get('snapshot'))
lg.debug("Creating snapshot %s for component %s of packages: %s"
% (snapshot_name, component_name, saved_packages))
try:
self.client.do_post(
'/snapshots',
data={
'Name': snapshot_name,
'SourceSnapshots': [],
'Description': saved_component.get('description'),
'PackageRefs': saved_packages,
}
)
created_snapshots.append(snapshot_name)
except AptlyException as e:
if e.res.status_code == 404:
# delete all the previously created
# snapshots because the file is corrupted
self._remove_snapshots(created_snapshots)
raise Exception("Source snapshot or packages don't exist")
else:
raise
new_publish_snapshots.append({
'Component': component_name,
'Name': snapshot_name
})
if components:
self.publish_snapshots = [x for x in self.publish_snapshots if x['Component'] not in components and x['Component'] not in to_publish]
check_components = [x for x in new_publish_snapshots if x['Component'] in components]
if len(check_components) != len(components):
self._remove_snapshots(created_snapshots)
raise Exception("Not possible to find all the components required in the backup file")
self.publish_snapshots += new_publish_snapshots
self.do_publish(recreate=recreate, merge_snapshots=False) |
def arbiter(**params):
'''Obtain the ``arbiter``.
It returns the arbiter instance only if we are on the arbiter
context domain, otherwise it returns nothing.
'''
arbiter = get_actor()
if arbiter is None:
# Create the arbiter
return set_actor(_spawn_actor('arbiter', None, **params))
elif arbiter.is_arbiter():
return arbiter | Obtain the ``arbiter``.
It returns the arbiter instance only if we are on the arbiter
context domain, otherwise it returns nothing. | Below is the the instruction that describes the task:
### Input:
Obtain the ``arbiter``.
It returns the arbiter instance only if we are on the arbiter
context domain, otherwise it returns nothing.
### Response:
def arbiter(**params):
'''Obtain the ``arbiter``.
It returns the arbiter instance only if we are on the arbiter
context domain, otherwise it returns nothing.
'''
arbiter = get_actor()
if arbiter is None:
# Create the arbiter
return set_actor(_spawn_actor('arbiter', None, **params))
elif arbiter.is_arbiter():
return arbiter |
def play(self, board):
""" uct tree search """
color = board.color
node = self
path = [node]
while True:
pos = node.select(board)
if pos == PASS:
break
board.move(pos)
child = node.pos_child[pos]
if not child:
child = node.pos_child[pos] = UCTNode()
child.unexplored = board.useful_moves()
child.pos = pos
child.parent = node
path.append(child)
break
path.append(child)
node = child
self.random_playout(board)
self.update_path(board, color, path) | uct tree search | Below is the the instruction that describes the task:
### Input:
uct tree search
### Response:
def play(self, board):
""" uct tree search """
color = board.color
node = self
path = [node]
while True:
pos = node.select(board)
if pos == PASS:
break
board.move(pos)
child = node.pos_child[pos]
if not child:
child = node.pos_child[pos] = UCTNode()
child.unexplored = board.useful_moves()
child.pos = pos
child.parent = node
path.append(child)
break
path.append(child)
node = child
self.random_playout(board)
self.update_path(board, color, path) |
def oedit(self, key):
"""Edit item"""
data = self.model.get_data()
from spyder.plugins.variableexplorer.widgets.objecteditor import (
oedit)
oedit(data[key]) | Edit item | Below is the the instruction that describes the task:
### Input:
Edit item
### Response:
def oedit(self, key):
"""Edit item"""
data = self.model.get_data()
from spyder.plugins.variableexplorer.widgets.objecteditor import (
oedit)
oedit(data[key]) |
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False,
size_for_gist=MAX_URL_LEN):
"""
Returns the URL to open given the domain and contents.
If the file contents are large, an anonymous gist will be created.
Parameters
----------
contents
* string - assumed to be GeoJSON
* an object that implements __geo_interface__
A FeatureCollection will be constructed with one feature,
the object.
* a sequence of objects that each implement __geo_interface__
A FeatureCollection will be constructed with the objects
as the features
domain - string, default http://geojson.io
force_gist - force gist creation regardless of file size.
For more information about __geo_interface__ see:
https://gist.github.com/sgillies/2217756
If the contents are large, then a gist will be created.
"""
contents = make_geojson(contents)
if len(contents) <= size_for_gist and not force_gist:
url = data_url(contents, domain)
else:
gist = _make_gist(contents)
url = gist_url(gist.id, domain)
return url | Returns the URL to open given the domain and contents.
If the file contents are large, an anonymous gist will be created.
Parameters
----------
contents
* string - assumed to be GeoJSON
* an object that implements __geo_interface__
A FeatureCollection will be constructed with one feature,
the object.
* a sequence of objects that each implement __geo_interface__
A FeatureCollection will be constructed with the objects
as the features
domain - string, default http://geojson.io
force_gist - force gist creation regardless of file size.
For more information about __geo_interface__ see:
https://gist.github.com/sgillies/2217756
If the contents are large, then a gist will be created. | Below is the the instruction that describes the task:
### Input:
Returns the URL to open given the domain and contents.
If the file contents are large, an anonymous gist will be created.
Parameters
----------
contents
* string - assumed to be GeoJSON
* an object that implements __geo_interface__
A FeatureCollection will be constructed with one feature,
the object.
* a sequence of objects that each implement __geo_interface__
A FeatureCollection will be constructed with the objects
as the features
domain - string, default http://geojson.io
force_gist - force gist creation regardless of file size.
For more information about __geo_interface__ see:
https://gist.github.com/sgillies/2217756
If the contents are large, then a gist will be created.
### Response:
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False,
size_for_gist=MAX_URL_LEN):
"""
Returns the URL to open given the domain and contents.
If the file contents are large, an anonymous gist will be created.
Parameters
----------
contents
* string - assumed to be GeoJSON
* an object that implements __geo_interface__
A FeatureCollection will be constructed with one feature,
the object.
* a sequence of objects that each implement __geo_interface__
A FeatureCollection will be constructed with the objects
as the features
domain - string, default http://geojson.io
force_gist - force gist creation regardless of file size.
For more information about __geo_interface__ see:
https://gist.github.com/sgillies/2217756
If the contents are large, then a gist will be created.
"""
contents = make_geojson(contents)
if len(contents) <= size_for_gist and not force_gist:
url = data_url(contents, domain)
else:
gist = _make_gist(contents)
url = gist_url(gist.id, domain)
return url |
def DRAGONS(flat=False, extras=True):
"""DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from
Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.2292
omega_b_0 = 0.0458
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.725,
'h': 0.702,
'n': 0.963,
'sigma_8': 0.816,
'tau': 0.088,
'z_reion': 10.6,
't_0': 13.76,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo | DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from
Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24. | Below is the the instruction that describes the task:
### Input:
DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from
Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
### Response:
def DRAGONS(flat=False, extras=True):
"""DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from
Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)
Parameters
----------
flat: boolean
If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0
= 0 exactly. Also sets omega_k_0 = 0 explicitly.
extras: boolean
If True, sets neutrino number N_nu = 0, neutrino density
omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.
"""
omega_c_0 = 0.2292
omega_b_0 = 0.0458
cosmo = {'omega_b_0': omega_b_0,
'omega_M_0': omega_b_0 + omega_c_0,
'omega_lambda_0': 0.725,
'h': 0.702,
'n': 0.963,
'sigma_8': 0.816,
'tau': 0.088,
'z_reion': 10.6,
't_0': 13.76,
}
if flat:
cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']
cosmo['omega_k_0'] = 0.0
if extras:
add_extras(cosmo)
return cosmo |
def as_dict(self):
"""
A JSON serializable dict representation of self.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"operation": self.operation, "title": self.title,
"xc": self.xc.as_dict(), "basis_set": self.basis_set.as_dict(),
"units": self.units.as_dict(), "scf": self.scf.as_dict(),
"geo": self.geo.as_dict(),
"others": [k.as_dict() for k in self.other_directives]} | A JSON serializable dict representation of self. | Below is the the instruction that describes the task:
### Input:
A JSON serializable dict representation of self.
### Response:
def as_dict(self):
"""
A JSON serializable dict representation of self.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"operation": self.operation, "title": self.title,
"xc": self.xc.as_dict(), "basis_set": self.basis_set.as_dict(),
"units": self.units.as_dict(), "scf": self.scf.as_dict(),
"geo": self.geo.as_dict(),
"others": [k.as_dict() for k in self.other_directives]} |
def run(self, target, payload, instance=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
"""
response = requests.post(
url=target,
data=json.dumps(payload, cls=DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
if response.status_code == 410 and hook_id:
HookModel = get_hook_model()
hook = HookModel.object.get(id=hook_id)
hook.delete() | target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing) | Below is the the instruction that describes the task:
### Input:
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
### Response:
def run(self, target, payload, instance=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
"""
response = requests.post(
url=target,
data=json.dumps(payload, cls=DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
if response.status_code == 410 and hook_id:
HookModel = get_hook_model()
hook = HookModel.object.get(id=hook_id)
hook.delete() |
def delete_subscription(self, subscription_id):
"""DeleteSubscription.
Delete a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
self._send(http_method='DELETE',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='5.0',
route_values=route_values) | DeleteSubscription.
Delete a specific service hooks subscription.
:param str subscription_id: ID for a subscription. | Below is the the instruction that describes the task:
### Input:
DeleteSubscription.
Delete a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
### Response:
def delete_subscription(self, subscription_id):
"""DeleteSubscription.
Delete a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
self._send(http_method='DELETE',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='5.0',
route_values=route_values) |
def rate(base, target, error_log=None):
"""Get current exchange rate.
:param base: A base currency
:param target: Convert to the target currency
:param error_log: A callable function to track the exception
It parses current exchange rate from these services:
1) Yahoo finance
2) fixer.io
3) European Central Bank
It will fallback to the next service when previous not available.
The exchane rate is a decimal number. If `None` is returned, it means
the parsing goes wrong::
>>> import exchange
>>> exchange.rate('USD', 'CNY')
Decimal('6.2045')
"""
if base == target:
return decimal.Decimal(1.00)
services = [yahoo, fixer, ecb]
if error_log is None:
error_log = _error_log
for fn in services:
try:
return fn(base, target)
except Exception as e:
error_log(e)
return None | Get current exchange rate.
:param base: A base currency
:param target: Convert to the target currency
:param error_log: A callable function to track the exception
It parses current exchange rate from these services:
1) Yahoo finance
2) fixer.io
3) European Central Bank
It will fallback to the next service when previous not available.
The exchane rate is a decimal number. If `None` is returned, it means
the parsing goes wrong::
>>> import exchange
>>> exchange.rate('USD', 'CNY')
Decimal('6.2045') | Below is the the instruction that describes the task:
### Input:
Get current exchange rate.
:param base: A base currency
:param target: Convert to the target currency
:param error_log: A callable function to track the exception
It parses current exchange rate from these services:
1) Yahoo finance
2) fixer.io
3) European Central Bank
It will fallback to the next service when previous not available.
The exchane rate is a decimal number. If `None` is returned, it means
the parsing goes wrong::
>>> import exchange
>>> exchange.rate('USD', 'CNY')
Decimal('6.2045')
### Response:
def rate(base, target, error_log=None):
"""Get current exchange rate.
:param base: A base currency
:param target: Convert to the target currency
:param error_log: A callable function to track the exception
It parses current exchange rate from these services:
1) Yahoo finance
2) fixer.io
3) European Central Bank
It will fallback to the next service when previous not available.
The exchane rate is a decimal number. If `None` is returned, it means
the parsing goes wrong::
>>> import exchange
>>> exchange.rate('USD', 'CNY')
Decimal('6.2045')
"""
if base == target:
return decimal.Decimal(1.00)
services = [yahoo, fixer, ecb]
if error_log is None:
error_log = _error_log
for fn in services:
try:
return fn(base, target)
except Exception as e:
error_log(e)
return None |
def write(self, b):
"""Write the given bytes (binary string) to the S3 file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away."""
if not isinstance(b, _BINARY_TYPES):
raise TypeError(
"input must be one of %r, got: %r" % (_BINARY_TYPES, type(b)))
self._buf.write(b)
self._total_bytes += len(b)
if self._buf.tell() >= self._min_part_size:
self._upload_next_part()
return len(b) | Write the given bytes (binary string) to the S3 file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away. | Below is the the instruction that describes the task:
### Input:
Write the given bytes (binary string) to the S3 file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away.
### Response:
def write(self, b):
"""Write the given bytes (binary string) to the S3 file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away."""
if not isinstance(b, _BINARY_TYPES):
raise TypeError(
"input must be one of %r, got: %r" % (_BINARY_TYPES, type(b)))
self._buf.write(b)
self._total_bytes += len(b)
if self._buf.tell() >= self._min_part_size:
self._upload_next_part()
return len(b) |
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response | Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error. | Below is the the instruction that describes the task:
### Input:
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
### Response:
def search(self, query, **kwargs):
"""
Cruddy provides a limited but useful interface to search GSI indexes in
DynamoDB with the following limitations (hopefully some of these will
be expanded or eliminated in the future.
* The GSI must be configured with a only HASH and not a RANGE.
* The only operation supported in the query is equality
To use the ``search`` operation you must pass in a query string of this
form:
<attribute_name>=<value>
As stated above, the only operation currently supported is equality (=)
but other operations will be added over time. Also, the
``attribute_name`` must be an attribute which is configured as the
``HASH`` of a GSI in the DynamoDB table. If all of the above
conditions are met, the ``query`` operation will return a list
(possibly empty) of all items matching the query and the ``status`` of
the response will be ``success``. Otherwise, the ``status`` will be
``error`` and the ``error_type`` and ``error_message`` will provide
further information about the error.
"""
response = self._new_response()
if self._check_supported_op('search', response):
if '=' not in query:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Only the = operation is supported'
response.error_message = msg
else:
key, value = query.split('=')
if key not in self._indexes:
response.status = 'error'
response.error_type = 'InvalidQuery'
msg = 'Attribute {} is not indexed'.format(key)
response.error_message = msg
else:
params = {'KeyConditionExpression': Key(key).eq(value)}
index_name = self._indexes[key]
if index_name:
params['IndexName'] = index_name
pe = kwargs.get('projection_expression')
if pe:
params['ProjectionExpression'] = pe
self._call_ddb_method(self.table.query,
params, response)
if response.status == 'success':
response.data = self._replace_decimals(
response.raw_response['Items'])
response.prepare()
return response |
def activate(self, asset):
'''Request activation of the specified asset representation.
Asset representations are obtained from :py:meth:`get_assets`.
:param request dict: An asset representation from the API.
:returns: :py:class:`planet.api.models.Body` with no response content
:raises planet.api.exceptions.APIException: On API error.
'''
activate_url = asset['_links']['activate']
return self._get(activate_url, body_type=models.Body).get_body() | Request activation of the specified asset representation.
Asset representations are obtained from :py:meth:`get_assets`.
:param request dict: An asset representation from the API.
:returns: :py:class:`planet.api.models.Body` with no response content
:raises planet.api.exceptions.APIException: On API error. | Below is the the instruction that describes the task:
### Input:
Request activation of the specified asset representation.
Asset representations are obtained from :py:meth:`get_assets`.
:param request dict: An asset representation from the API.
:returns: :py:class:`planet.api.models.Body` with no response content
:raises planet.api.exceptions.APIException: On API error.
### Response:
def activate(self, asset):
'''Request activation of the specified asset representation.
Asset representations are obtained from :py:meth:`get_assets`.
:param request dict: An asset representation from the API.
:returns: :py:class:`planet.api.models.Body` with no response content
:raises planet.api.exceptions.APIException: On API error.
'''
activate_url = asset['_links']['activate']
return self._get(activate_url, body_type=models.Body).get_body() |
def universal_transformer_depthwise_attention(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
"""universal_transformer with depth-wise attention.
It uses an attention mechanism-flipped vertically-
over all the states from previous steps to generate the new_state.
Args:
layer_inputs:
- state: state
- memory: contains states from all the previous steps.
step: indicating number of steps take so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
memory: contains states from all the previous steps.
"""
_, inputs, memory = layer_inputs
all_states = memory
# add depth signal
if hparams.depth_embedding:
all_states = add_depth_embedding(all_states)
# get the states up to the current step (non-zero part of the memory)
states_so_far = all_states[:step, :, :, :]
states_so_far_weights = tf.nn.softmax(
common_layers.dense(
states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1),
activation=None,
use_bias=True),
axis=-1)
# prepare the state tensor that will be transformed
state_to_be_transformed = tf.reduce_sum(
(states_so_far * states_so_far_weights), axis=0)
new_state = step_preprocess(state_to_be_transformed, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
# add the new state to the memory
memory = fill_memory_slot(memory, new_state, step + 1)
return new_state, inputs, memory | universal_transformer with depth-wise attention.
It uses an attention mechanism-flipped vertically-
over all the states from previous steps to generate the new_state.
Args:
layer_inputs:
- state: state
- memory: contains states from all the previous steps.
step: indicating number of steps take so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
memory: contains states from all the previous steps. | Below is the the instruction that describes the task:
### Input:
universal_transformer with depth-wise attention.
It uses an attention mechanism-flipped vertically-
over all the states from previous steps to generate the new_state.
Args:
layer_inputs:
- state: state
- memory: contains states from all the previous steps.
step: indicating number of steps take so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
memory: contains states from all the previous steps.
### Response:
def universal_transformer_depthwise_attention(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
"""universal_transformer with depth-wise attention.
It uses an attention mechanism-flipped vertically-
over all the states from previous steps to generate the new_state.
Args:
layer_inputs:
- state: state
- memory: contains states from all the previous steps.
step: indicating number of steps take so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
memory: contains states from all the previous steps.
"""
_, inputs, memory = layer_inputs
all_states = memory
# add depth signal
if hparams.depth_embedding:
all_states = add_depth_embedding(all_states)
# get the states up to the current step (non-zero part of the memory)
states_so_far = all_states[:step, :, :, :]
states_so_far_weights = tf.nn.softmax(
common_layers.dense(
states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1),
activation=None,
use_bias=True),
axis=-1)
# prepare the state tensor that will be transformed
state_to_be_transformed = tf.reduce_sum(
(states_so_far * states_so_far_weights), axis=0)
new_state = step_preprocess(state_to_be_transformed, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
# add the new state to the memory
memory = fill_memory_slot(memory, new_state, step + 1)
return new_state, inputs, memory |
def main():
"""
Run ftfy as a command-line utility.
"""
import argparse
parser = argparse.ArgumentParser(
description="ftfy (fixes text for you), version %s" % __version__
)
parser.add_argument('filename', default='-', nargs='?',
help='The file whose Unicode is to be fixed. Defaults '
'to -, meaning standard input.')
parser.add_argument('-o', '--output', type=str, default='-',
help='The file to output to. Defaults to -, meaning '
'standard output.')
parser.add_argument('-g', '--guess', action='store_true',
help="Ask ftfy to guess the encoding of your input. "
"This is risky. Overrides -e.")
parser.add_argument('-e', '--encoding', type=str, default='utf-8',
help='The encoding of the input. Defaults to UTF-8.')
parser.add_argument('-n', '--normalization', type=str, default='NFC',
help='The normalization of Unicode to apply. '
'Defaults to NFC. Can be "none".')
parser.add_argument('--preserve-entities', action='store_true',
help="Leave HTML entities as they are. The default "
"is to decode them, as long as no HTML tags "
"have appeared in the file.")
args = parser.parse_args()
encoding = args.encoding
if args.guess:
encoding = None
if args.filename == '-':
# Get a standard input stream made of bytes, so we can decode it as
# whatever encoding is necessary.
file = sys.stdin.buffer
else:
file = open(args.filename, 'rb')
if args.output == '-':
outfile = sys.stdout
else:
if os.path.realpath(args.output) == os.path.realpath(args.filename):
sys.stderr.write(SAME_FILE_ERROR_TEXT)
sys.exit(1)
outfile = open(args.output, 'w', encoding='utf-8')
normalization = args.normalization
if normalization.lower() == 'none':
normalization = None
if args.preserve_entities:
fix_entities = False
else:
fix_entities = 'auto'
try:
for line in fix_file(file, encoding=encoding,
fix_entities=fix_entities,
normalization=normalization):
try:
outfile.write(line)
except UnicodeEncodeError:
if sys.platform == 'win32':
sys.stderr.write(ENCODE_ERROR_TEXT_WINDOWS)
else:
sys.stderr.write(ENCODE_ERROR_TEXT_UNIX)
sys.exit(1)
except UnicodeDecodeError as err:
sys.stderr.write(DECODE_ERROR_TEXT % (encoding, err))
sys.exit(1) | Run ftfy as a command-line utility. | Below is the the instruction that describes the task:
### Input:
Run ftfy as a command-line utility.
### Response:
def main():
"""
Run ftfy as a command-line utility.
"""
import argparse
parser = argparse.ArgumentParser(
description="ftfy (fixes text for you), version %s" % __version__
)
parser.add_argument('filename', default='-', nargs='?',
help='The file whose Unicode is to be fixed. Defaults '
'to -, meaning standard input.')
parser.add_argument('-o', '--output', type=str, default='-',
help='The file to output to. Defaults to -, meaning '
'standard output.')
parser.add_argument('-g', '--guess', action='store_true',
help="Ask ftfy to guess the encoding of your input. "
"This is risky. Overrides -e.")
parser.add_argument('-e', '--encoding', type=str, default='utf-8',
help='The encoding of the input. Defaults to UTF-8.')
parser.add_argument('-n', '--normalization', type=str, default='NFC',
help='The normalization of Unicode to apply. '
'Defaults to NFC. Can be "none".')
parser.add_argument('--preserve-entities', action='store_true',
help="Leave HTML entities as they are. The default "
"is to decode them, as long as no HTML tags "
"have appeared in the file.")
args = parser.parse_args()
encoding = args.encoding
if args.guess:
encoding = None
if args.filename == '-':
# Get a standard input stream made of bytes, so we can decode it as
# whatever encoding is necessary.
file = sys.stdin.buffer
else:
file = open(args.filename, 'rb')
if args.output == '-':
outfile = sys.stdout
else:
if os.path.realpath(args.output) == os.path.realpath(args.filename):
sys.stderr.write(SAME_FILE_ERROR_TEXT)
sys.exit(1)
outfile = open(args.output, 'w', encoding='utf-8')
normalization = args.normalization
if normalization.lower() == 'none':
normalization = None
if args.preserve_entities:
fix_entities = False
else:
fix_entities = 'auto'
try:
for line in fix_file(file, encoding=encoding,
fix_entities=fix_entities,
normalization=normalization):
try:
outfile.write(line)
except UnicodeEncodeError:
if sys.platform == 'win32':
sys.stderr.write(ENCODE_ERROR_TEXT_WINDOWS)
else:
sys.stderr.write(ENCODE_ERROR_TEXT_UNIX)
sys.exit(1)
except UnicodeDecodeError as err:
sys.stderr.write(DECODE_ERROR_TEXT % (encoding, err))
sys.exit(1) |
def lastDF(symbols=None, token='', version=''):
'''Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(last(symbols, token, version))
_toDatetime(df)
_reindex(df, 'symbol')
return df | Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result | Below is the the instruction that describes the task:
### Input:
Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
### Response:
def lastDF(symbols=None, token='', version=''):
'''Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.io.json.json_normalize(last(symbols, token, version))
_toDatetime(df)
_reindex(df, 'symbol')
return df |
def char_diff(self, old, new):
"""
Return color-coded character-based diff between `old` and `new`.
"""
def color_transition(old_type, new_type):
new_color = termcolor.colored("", None, "on_red" if new_type ==
"-" else "on_green" if new_type == "+" else None)
return "{}{}".format(termcolor.RESET, new_color[:-len(termcolor.RESET)])
return self._char_diff(old, new, color_transition) | Return color-coded character-based diff between `old` and `new`. | Below is the the instruction that describes the task:
### Input:
Return color-coded character-based diff between `old` and `new`.
### Response:
def char_diff(self, old, new):
"""
Return color-coded character-based diff between `old` and `new`.
"""
def color_transition(old_type, new_type):
new_color = termcolor.colored("", None, "on_red" if new_type ==
"-" else "on_green" if new_type == "+" else None)
return "{}{}".format(termcolor.RESET, new_color[:-len(termcolor.RESET)])
return self._char_diff(old, new, color_transition) |
def hard_reset(self):
"""Ignore roll over data and set to start."""
if self.shuffle:
self._shuffle_data()
self.cursor = -self.batch_size
self._cache_data = None
self._cache_label = None | Ignore roll over data and set to start. | Below is the the instruction that describes the task:
### Input:
Ignore roll over data and set to start.
### Response:
def hard_reset(self):
"""Ignore roll over data and set to start."""
if self.shuffle:
self._shuffle_data()
self.cursor = -self.batch_size
self._cache_data = None
self._cache_label = None |
def valid(self):
"""Return valid certificates."""
now = timezone.now()
return self.filter(revoked=False, expires__gt=now, valid_from__lt=now) | Return valid certificates. | Below is the the instruction that describes the task:
### Input:
Return valid certificates.
### Response:
def valid(self):
"""Return valid certificates."""
now = timezone.now()
return self.filter(revoked=False, expires__gt=now, valid_from__lt=now) |
def _from_json_object_hook(obj):
"""Converts a json string, where datetime and UUID objects were converted
into strings using the '_to_json_default', into a python object.
Usage:
simplejson.loads(data, object_hook=_from_json_object_hook)
"""
for key, value in obj.items():
# Check for datetime objects
if isinstance(value, str):
dt_result = datetime_regex.match(value)
if dt_result:
year, month, day, hour, minute, second = map(
lambda x: int(x), dt_result.groups())
obj[key] = datetime.datetime(
year, month, day, hour, minute, second)
else:
dt_result = uuid_regex.match(value)
if dt_result:
obj[key] = uuid.UUID(value)
return obj | Converts a json string, where datetime and UUID objects were converted
into strings using the '_to_json_default', into a python object.
Usage:
simplejson.loads(data, object_hook=_from_json_object_hook) | Below is the the instruction that describes the task:
### Input:
Converts a json string, where datetime and UUID objects were converted
into strings using the '_to_json_default', into a python object.
Usage:
simplejson.loads(data, object_hook=_from_json_object_hook)
### Response:
def _from_json_object_hook(obj):
"""Converts a json string, where datetime and UUID objects were converted
into strings using the '_to_json_default', into a python object.
Usage:
simplejson.loads(data, object_hook=_from_json_object_hook)
"""
for key, value in obj.items():
# Check for datetime objects
if isinstance(value, str):
dt_result = datetime_regex.match(value)
if dt_result:
year, month, day, hour, minute, second = map(
lambda x: int(x), dt_result.groups())
obj[key] = datetime.datetime(
year, month, day, hour, minute, second)
else:
dt_result = uuid_regex.match(value)
if dt_result:
obj[key] = uuid.UUID(value)
return obj |
def clear(self):
"""Clear the displayed image."""
self._imgobj = None
try:
# See if there is an image on the canvas
self.canvas.delete_object_by_tag(self._canvas_img_tag)
self.redraw()
except KeyError:
pass | Clear the displayed image. | Below is the the instruction that describes the task:
### Input:
Clear the displayed image.
### Response:
def clear(self):
"""Clear the displayed image."""
self._imgobj = None
try:
# See if there is an image on the canvas
self.canvas.delete_object_by_tag(self._canvas_img_tag)
self.redraw()
except KeyError:
pass |
def iter_gists(self, username=None, number=-1, etag=None):
"""If no username is specified, GET /gists, otherwise GET
/users/:username/gists
:param str login: (optional), login of the user to check
:param int number: (optional), number of gists to return. Default: -1
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s
"""
if username:
url = self._build_url('users', username, 'gists')
else:
url = self._build_url('gists')
return self._iter(int(number), url, Gist, etag=etag) | If no username is specified, GET /gists, otherwise GET
/users/:username/gists
:param str login: (optional), login of the user to check
:param int number: (optional), number of gists to return. Default: -1
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s | Below is the the instruction that describes the task:
### Input:
If no username is specified, GET /gists, otherwise GET
/users/:username/gists
:param str login: (optional), login of the user to check
:param int number: (optional), number of gists to return. Default: -1
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s
### Response:
def iter_gists(self, username=None, number=-1, etag=None):
"""If no username is specified, GET /gists, otherwise GET
/users/:username/gists
:param str login: (optional), login of the user to check
:param int number: (optional), number of gists to return. Default: -1
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s
"""
if username:
url = self._build_url('users', username, 'gists')
else:
url = self._build_url('gists')
return self._iter(int(number), url, Gist, etag=etag) |
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json) | View that returns the contents of version.json or a 404. | Below is the the instruction that describes the task:
### Input:
View that returns the contents of version.json or a 404.
### Response:
def _version_view(self):
"""
View that returns the contents of version.json or a 404.
"""
version_json = self._version_callback(self.version_path)
if version_json is None:
return 'version.json not found', 404
else:
return jsonify(version_json) |
def get_label(self, prop, value):
"""
Format label
If value is missing, label will be colored red
"""
if value is None:
return '{}: <FONT color="red">{}</FONT>'.format(prop, "not set")
else:
return "{}:{}".format(prop, value) | Format label
If value is missing, label will be colored red | Below is the the instruction that describes the task:
### Input:
Format label
If value is missing, label will be colored red
### Response:
def get_label(self, prop, value):
"""
Format label
If value is missing, label will be colored red
"""
if value is None:
return '{}: <FONT color="red">{}</FONT>'.format(prop, "not set")
else:
return "{}:{}".format(prop, value) |
def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum) | Returns name of temp dir for given url. | Below is the the instruction that describes the task:
### Input:
Returns name of temp dir for given url.
### Response:
def get_dl_dirname(url):
"""Returns name of temp dir for given url."""
checksum = hashlib.sha256(tf.compat.as_bytes(url)).hexdigest()
return get_dl_fname(url, checksum) |
def edit(self, name, config, events=github.GithubObject.NotSet, add_events=github.GithubObject.NotSet, remove_events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
assert add_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_events), add_events
assert remove_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_events), remove_events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if add_events is not github.GithubObject.NotSet:
post_parameters["add_events"] = add_events
if remove_events is not github.GithubObject.NotSet:
post_parameters["remove_events"] = remove_events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data) | :calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None | Below is the the instruction that describes the task:
### Input:
:calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None
### Response:
def edit(self, name, config, events=github.GithubObject.NotSet, add_events=github.GithubObject.NotSet, remove_events=github.GithubObject.NotSet, active=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/hooks/:id <http://developer.github.com/v3/repos/hooks>`_
:param name: string
:param config: dict
:param events: list of string
:param add_events: list of string
:param remove_events: list of string
:param active: bool
:rtype: None
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(config, dict), config
assert events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in events), events
assert add_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_events), add_events
assert remove_events is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_events), remove_events
assert active is github.GithubObject.NotSet or isinstance(active, bool), active
post_parameters = {
"name": name,
"config": config,
}
if events is not github.GithubObject.NotSet:
post_parameters["events"] = events
if add_events is not github.GithubObject.NotSet:
post_parameters["add_events"] = add_events
if remove_events is not github.GithubObject.NotSet:
post_parameters["remove_events"] = remove_events
if active is not github.GithubObject.NotSet:
post_parameters["active"] = active
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data) |
def import_file(name, path):
"""
Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path
"""
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod | Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path | Below is the the instruction that describes the task:
### Input:
Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path
### Response:
def import_file(name, path):
"""
Import a file given a raw file path.
:param name: Name of module to be imported
:type name: str
:param path: Path to Python file
:type path: str / Path
"""
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod |
def glymur_config():
"""
Try to ascertain locations of openjp2, openjpeg libraries.
Returns
-------
tuple
tuple of library handles
"""
handles = (load_openjpeg_library(x) for x in ['openjp2', 'openjpeg'])
handles = tuple(handles)
if all(handle is None for handle in handles):
msg = "Neither the openjp2 nor the openjpeg library could be loaded. "
warnings.warn(msg)
return handles | Try to ascertain locations of openjp2, openjpeg libraries.
Returns
-------
tuple
tuple of library handles | Below is the the instruction that describes the task:
### Input:
Try to ascertain locations of openjp2, openjpeg libraries.
Returns
-------
tuple
tuple of library handles
### Response:
def glymur_config():
"""
Try to ascertain locations of openjp2, openjpeg libraries.
Returns
-------
tuple
tuple of library handles
"""
handles = (load_openjpeg_library(x) for x in ['openjp2', 'openjpeg'])
handles = tuple(handles)
if all(handle is None for handle in handles):
msg = "Neither the openjp2 nor the openjpeg library could be loaded. "
warnings.warn(msg)
return handles |
def register(self, name, namespace):
"""Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered.
"""
if name in self._NAMESPACES:
raise ValueError("Namespace {0} already exists.".format(name))
if not isinstance(namespace, ns.Namespace):
raise TypeError("Namespaces must be of type Namespace.")
self._NAMESPACES[name] = namespace | Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered. | Below is the the instruction that describes the task:
### Input:
Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered.
### Response:
def register(self, name, namespace):
"""Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered.
"""
if name in self._NAMESPACES:
raise ValueError("Namespace {0} already exists.".format(name))
if not isinstance(namespace, ns.Namespace):
raise TypeError("Namespaces must be of type Namespace.")
self._NAMESPACES[name] = namespace |
def add_compute(self, kind=compute.phoebe, **kwargs):
"""
Add a set of computeoptions for a given backend to the bundle.
The label ('compute') can then be sent to :meth:`run_compute`.
If not provided, 'compute' will be created for you and can be
accessed by the 'compute' attribute of the returned
ParameterSet.
Available kinds include:
* :func:`phoebe.parameters.compute.phoebe`
* :func:`phoebe.parameters.compute.legacy`
* :func:`phoebe.parameters.compute.photodynam`
* :func:`phoebe.parameters.compute.jktebop`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.compute` module
:type kind: str or callable
:parameter str compute: (optional) name of the newly-created
compute optins
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
"""
func = _get_add_func(_compute, kind)
kwargs.setdefault('compute',
self._default_label(func.func_name,
**{'context': 'compute',
'kind': func.func_name}))
self._check_label(kwargs['compute'])
params = func(**kwargs)
# TODO: similar kwargs logic as in add_dataset (option to pass dict to
# apply to different components this would be more complicated here if
# allowing to also pass to different datasets
metawargs = {'context': 'compute',
'kind': func.func_name,
'compute': kwargs['compute']}
logger.info("adding {} '{}' compute to bundle".format(metawargs['kind'], metawargs['compute']))
self._attach_params(params, **metawargs)
redo_kwargs = deepcopy(kwargs)
redo_kwargs['func'] = func.func_name
self._add_history(redo_func='add_compute',
redo_kwargs=redo_kwargs,
undo_func='remove_compute',
undo_kwargs={'compute': kwargs['compute']})
# since we've already processed (so that we can get the new qualifiers),
# we'll only raise a warning
self._kwargs_checks(kwargs, warning_only=True)
return self.get_compute(**metawargs) | Add a set of computeoptions for a given backend to the bundle.
The label ('compute') can then be sent to :meth:`run_compute`.
If not provided, 'compute' will be created for you and can be
accessed by the 'compute' attribute of the returned
ParameterSet.
Available kinds include:
* :func:`phoebe.parameters.compute.phoebe`
* :func:`phoebe.parameters.compute.legacy`
* :func:`phoebe.parameters.compute.photodynam`
* :func:`phoebe.parameters.compute.jktebop`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.compute` module
:type kind: str or callable
:parameter str compute: (optional) name of the newly-created
compute optins
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented | Below is the the instruction that describes the task:
### Input:
Add a set of computeoptions for a given backend to the bundle.
The label ('compute') can then be sent to :meth:`run_compute`.
If not provided, 'compute' will be created for you and can be
accessed by the 'compute' attribute of the returned
ParameterSet.
Available kinds include:
* :func:`phoebe.parameters.compute.phoebe`
* :func:`phoebe.parameters.compute.legacy`
* :func:`phoebe.parameters.compute.photodynam`
* :func:`phoebe.parameters.compute.jktebop`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.compute` module
:type kind: str or callable
:parameter str compute: (optional) name of the newly-created
compute optins
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
### Response:
def add_compute(self, kind=compute.phoebe, **kwargs):
"""
Add a set of computeoptions for a given backend to the bundle.
The label ('compute') can then be sent to :meth:`run_compute`.
If not provided, 'compute' will be created for you and can be
accessed by the 'compute' attribute of the returned
ParameterSet.
Available kinds include:
* :func:`phoebe.parameters.compute.phoebe`
* :func:`phoebe.parameters.compute.legacy`
* :func:`phoebe.parameters.compute.photodynam`
* :func:`phoebe.parameters.compute.jktebop`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.compute` module
:type kind: str or callable
:parameter str compute: (optional) name of the newly-created
compute optins
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
"""
func = _get_add_func(_compute, kind)
kwargs.setdefault('compute',
self._default_label(func.func_name,
**{'context': 'compute',
'kind': func.func_name}))
self._check_label(kwargs['compute'])
params = func(**kwargs)
# TODO: similar kwargs logic as in add_dataset (option to pass dict to
# apply to different components this would be more complicated here if
# allowing to also pass to different datasets
metawargs = {'context': 'compute',
'kind': func.func_name,
'compute': kwargs['compute']}
logger.info("adding {} '{}' compute to bundle".format(metawargs['kind'], metawargs['compute']))
self._attach_params(params, **metawargs)
redo_kwargs = deepcopy(kwargs)
redo_kwargs['func'] = func.func_name
self._add_history(redo_func='add_compute',
redo_kwargs=redo_kwargs,
undo_func='remove_compute',
undo_kwargs={'compute': kwargs['compute']})
# since we've already processed (so that we can get the new qualifiers),
# we'll only raise a warning
self._kwargs_checks(kwargs, warning_only=True)
return self.get_compute(**metawargs) |
def text_to_qcolor(text):
"""
Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated
"""
color = QColor()
if not is_string(text): # testing for QString (PyQt API#1)
text = str(text)
if not is_text_string(text):
return color
if text.startswith('#') and len(text)==7:
correct = '#0123456789abcdef'
for char in text:
if char.lower() not in correct:
return color
elif text not in list(QColor.colorNames()):
return color
color.setNamedColor(text)
return color | Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated | Below is the the instruction that describes the task:
### Input:
Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated
### Response:
def text_to_qcolor(text):
"""
Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated
"""
color = QColor()
if not is_string(text): # testing for QString (PyQt API#1)
text = str(text)
if not is_text_string(text):
return color
if text.startswith('#') and len(text)==7:
correct = '#0123456789abcdef'
for char in text:
if char.lower() not in correct:
return color
elif text not in list(QColor.colorNames()):
return color
color.setNamedColor(text)
return color |
def generate_elements_clasification(bpmn_graph):
"""
:param bpmn_graph:
:return:
"""
nodes_classification = []
node_param_name = "node"
flow_param_name = "flow"
classification_param_name = "classification"
classification_element = "Element"
classification_join = "Join"
classification_split = "Split"
classification_start_event = "Start Event"
classification_end_event = "End Event"
task_list = bpmn_graph.get_nodes(consts.Consts.task)
for element in task_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
subprocess_list = bpmn_graph.get_nodes(consts.Consts.subprocess)
for element in subprocess_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
complex_gateway_list = bpmn_graph.get_nodes(consts.Consts.complex_gateway)
for element in complex_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
event_based_gateway_list = bpmn_graph.get_nodes(consts.Consts.event_based_gateway)
for element in event_based_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
inclusive_gateway_list = bpmn_graph.get_nodes(consts.Consts.inclusive_gateway)
for element in inclusive_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
exclusive_gateway_list = bpmn_graph.get_nodes(consts.Consts.exclusive_gateway)
for element in exclusive_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
parallel_gateway_list = bpmn_graph.get_nodes(consts.Consts.parallel_gateway)
for element in parallel_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
start_event_list = bpmn_graph.get_nodes(consts.Consts.start_event)
for element in start_event_list:
tmp = [classification_element, classification_start_event]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
intermediate_catch_event_list = bpmn_graph.get_nodes(consts.Consts.intermediate_catch_event)
for element in intermediate_catch_event_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
end_event_list = bpmn_graph.get_nodes(consts.Consts.end_event)
for element in end_event_list:
tmp = [classification_element, classification_end_event]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
intermediate_throw_event_list = bpmn_graph.get_nodes(consts.Consts.intermediate_throw_event)
for element in intermediate_throw_event_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
flows_classification = []
flows_list = bpmn_graph.get_flows()
for flow in flows_list:
flows_classification += [{flow_param_name: flow, classification_param_name: ["Flow"]}]
return nodes_classification, flows_classification | :param bpmn_graph:
:return: | Below is the the instruction that describes the task:
### Input:
:param bpmn_graph:
:return:
### Response:
def generate_elements_clasification(bpmn_graph):
"""
:param bpmn_graph:
:return:
"""
nodes_classification = []
node_param_name = "node"
flow_param_name = "flow"
classification_param_name = "classification"
classification_element = "Element"
classification_join = "Join"
classification_split = "Split"
classification_start_event = "Start Event"
classification_end_event = "End Event"
task_list = bpmn_graph.get_nodes(consts.Consts.task)
for element in task_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
subprocess_list = bpmn_graph.get_nodes(consts.Consts.subprocess)
for element in subprocess_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
complex_gateway_list = bpmn_graph.get_nodes(consts.Consts.complex_gateway)
for element in complex_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
event_based_gateway_list = bpmn_graph.get_nodes(consts.Consts.event_based_gateway)
for element in event_based_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
inclusive_gateway_list = bpmn_graph.get_nodes(consts.Consts.inclusive_gateway)
for element in inclusive_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
exclusive_gateway_list = bpmn_graph.get_nodes(consts.Consts.exclusive_gateway)
for element in exclusive_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
parallel_gateway_list = bpmn_graph.get_nodes(consts.Consts.parallel_gateway)
for element in parallel_gateway_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
start_event_list = bpmn_graph.get_nodes(consts.Consts.start_event)
for element in start_event_list:
tmp = [classification_element, classification_start_event]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
intermediate_catch_event_list = bpmn_graph.get_nodes(consts.Consts.intermediate_catch_event)
for element in intermediate_catch_event_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
end_event_list = bpmn_graph.get_nodes(consts.Consts.end_event)
for element in end_event_list:
tmp = [classification_element, classification_end_event]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
intermediate_throw_event_list = bpmn_graph.get_nodes(consts.Consts.intermediate_throw_event)
for element in intermediate_throw_event_list:
tmp = [classification_element]
if len(element[1][consts.Consts.incoming_flow]) >= 2:
tmp.append(classification_join)
if len(element[1][consts.Consts.outgoing_flow]) >= 2:
tmp.append(classification_split)
nodes_classification += [{node_param_name: element, classification_param_name: tmp}]
flows_classification = []
flows_list = bpmn_graph.get_flows()
for flow in flows_list:
flows_classification += [{flow_param_name: flow, classification_param_name: ["Flow"]}]
return nodes_classification, flows_classification |
def update_entitlement(owner, repo, identifier, name, token, show_tokens):
"""Update an entitlement in a repository."""
client = get_entitlements_api()
data = {}
if name is not None:
data["name"] = name
if token is not None:
data["token"] = token
with catch_raise_api_exception():
data, _, headers = client.entitlements_partial_update_with_http_info(
owner=owner,
repo=repo,
identifier=identifier,
data=data,
show_tokens=show_tokens,
)
ratelimits.maybe_rate_limit(client, headers)
return data.to_dict() | Update an entitlement in a repository. | Below is the the instruction that describes the task:
### Input:
Update an entitlement in a repository.
### Response:
def update_entitlement(owner, repo, identifier, name, token, show_tokens):
"""Update an entitlement in a repository."""
client = get_entitlements_api()
data = {}
if name is not None:
data["name"] = name
if token is not None:
data["token"] = token
with catch_raise_api_exception():
data, _, headers = client.entitlements_partial_update_with_http_info(
owner=owner,
repo=repo,
identifier=identifier,
data=data,
show_tokens=show_tokens,
)
ratelimits.maybe_rate_limit(client, headers)
return data.to_dict() |
def on_do_accept(self, comment):
"""
WARNING WARNING: THIS IS ACUTALLY on_do_accept BUT HACKED.
TODO: Make it so that we have a short 'vetting' accept dialogue. Current accept dialogue too heavy for
this part of process, thus the hack.
Process the rejection of a vetting candidate, includes writing a comment to file.
@param comment:
@return:
"""
self.view.close_vetting_accept_source_dialog()
# Set to None if blank
if len(comment.strip()) == 0:
comment = None
writer = self.model.get_writer()
writer.write_source(self.model.get_current_source(), comment=comment, reject=False)
self.model.accept_current_item()
self.view.clear()
self.model.next_item() | WARNING WARNING: THIS IS ACUTALLY on_do_accept BUT HACKED.
TODO: Make it so that we have a short 'vetting' accept dialogue. Current accept dialogue too heavy for
this part of process, thus the hack.
Process the rejection of a vetting candidate, includes writing a comment to file.
@param comment:
@return: | Below is the the instruction that describes the task:
### Input:
WARNING WARNING: THIS IS ACUTALLY on_do_accept BUT HACKED.
TODO: Make it so that we have a short 'vetting' accept dialogue. Current accept dialogue too heavy for
this part of process, thus the hack.
Process the rejection of a vetting candidate, includes writing a comment to file.
@param comment:
@return:
### Response:
def on_do_accept(self, comment):
"""
WARNING WARNING: THIS IS ACUTALLY on_do_accept BUT HACKED.
TODO: Make it so that we have a short 'vetting' accept dialogue. Current accept dialogue too heavy for
this part of process, thus the hack.
Process the rejection of a vetting candidate, includes writing a comment to file.
@param comment:
@return:
"""
self.view.close_vetting_accept_source_dialog()
# Set to None if blank
if len(comment.strip()) == 0:
comment = None
writer = self.model.get_writer()
writer.write_source(self.model.get_current_source(), comment=comment, reject=False)
self.model.accept_current_item()
self.view.clear()
self.model.next_item() |
def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs) | This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm. | Below is the the instruction that describes the task:
### Input:
This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
### Response:
def cnn_to_mlp(convs, hiddens, dueling=False, layer_norm=False):
"""This model takes as input an observation and returns values of all actions.
Parameters
----------
convs: [(int, int, int)]
list of convolutional layers in form of
(num_outputs, kernel_size, stride)
hiddens: [int]
list of sizes of hidden layers
dueling: bool
if true double the output MLP to compute a baseline
for action scores
layer_norm: bool
if true applies layer normalization for every layer
as described in https://arxiv.org/abs/1607.06450
Returns
-------
q_func: function
q_function for DQN algorithm.
"""
return lambda *args, **kwargs: _cnn_to_mlp(convs, hiddens, dueling, layer_norm=layer_norm, *args, **kwargs) |
def append(self, node, dirty=True):
"""Add a new child node.
Args:
node (gkeepapi.Node): Node to add.
dirty (bool): Whether this node should be marked dirty.
"""
self._children[node.id] = node
node.parent = self
if dirty:
self.touch()
return node | Add a new child node.
Args:
node (gkeepapi.Node): Node to add.
dirty (bool): Whether this node should be marked dirty. | Below is the the instruction that describes the task:
### Input:
Add a new child node.
Args:
node (gkeepapi.Node): Node to add.
dirty (bool): Whether this node should be marked dirty.
### Response:
def append(self, node, dirty=True):
"""Add a new child node.
Args:
node (gkeepapi.Node): Node to add.
dirty (bool): Whether this node should be marked dirty.
"""
self._children[node.id] = node
node.parent = self
if dirty:
self.touch()
return node |
def srcmdl_xml(self, **kwargs):
""" return the file name for source model xml files
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = NameFactory.srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the file name for source model xml files | Below is the the instruction that describes the task:
### Input:
return the file name for source model xml files
### Response:
def srcmdl_xml(self, **kwargs):
""" return the file name for source model xml files
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = NameFactory.srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath |
def to_bytes_safe(text, encoding="utf-8"):
"""Convert the input value into bytes type.
If the input value is string type and could be encode as UTF-8 bytes, the
encoded value will be returned. Otherwise, the encoding has failed, the
origin value will be returned as well.
:param text: the input value which could be string or bytes.
:param encoding: the expected encoding be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes`
"""
if not isinstance(text, (bytes, text_type)):
raise TypeError("must be string type")
if isinstance(text, text_type):
return text.encode(encoding)
return text | Convert the input value into bytes type.
If the input value is string type and could be encode as UTF-8 bytes, the
encoded value will be returned. Otherwise, the encoding has failed, the
origin value will be returned as well.
:param text: the input value which could be string or bytes.
:param encoding: the expected encoding be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes` | Below is the the instruction that describes the task:
### Input:
Convert the input value into bytes type.
If the input value is string type and could be encode as UTF-8 bytes, the
encoded value will be returned. Otherwise, the encoding has failed, the
origin value will be returned as well.
:param text: the input value which could be string or bytes.
:param encoding: the expected encoding be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes`
### Response:
def to_bytes_safe(text, encoding="utf-8"):
"""Convert the input value into bytes type.
If the input value is string type and could be encode as UTF-8 bytes, the
encoded value will be returned. Otherwise, the encoding has failed, the
origin value will be returned as well.
:param text: the input value which could be string or bytes.
:param encoding: the expected encoding be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes`
"""
if not isinstance(text, (bytes, text_type)):
raise TypeError("must be string type")
if isinstance(text, text_type):
return text.encode(encoding)
return text |
def match(self, passageId):
""" Given a passageId matches a citation level
:param passageId: A passage to match
:return:
"""
if not isinstance(passageId, CtsReference):
passageId = CtsReference(passageId)
if self.is_root():
return self[passageId.depth-1]
return self.root.match(passageId) | Given a passageId matches a citation level
:param passageId: A passage to match
:return: | Below is the the instruction that describes the task:
### Input:
Given a passageId matches a citation level
:param passageId: A passage to match
:return:
### Response:
def match(self, passageId):
""" Given a passageId matches a citation level
:param passageId: A passage to match
:return:
"""
if not isinstance(passageId, CtsReference):
passageId = CtsReference(passageId)
if self.is_root():
return self[passageId.depth-1]
return self.root.match(passageId) |
def choice_doinst(self):
"""View doinst.sh file
"""
if "doinst.sh" in self.sbo_files.split():
doinst_sh = ReadSBo(self.sbo_url).doinst("doinst.sh")
fill = self.fill_pager(doinst_sh)
self.pager(doinst_sh + fill) | View doinst.sh file | Below is the the instruction that describes the task:
### Input:
View doinst.sh file
### Response:
def choice_doinst(self):
"""View doinst.sh file
"""
if "doinst.sh" in self.sbo_files.split():
doinst_sh = ReadSBo(self.sbo_url).doinst("doinst.sh")
fill = self.fill_pager(doinst_sh)
self.pager(doinst_sh + fill) |
def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data | replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data |
def execute(self, sensor_graph, scope_stack):
"""Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with either the
copy_latest_a, copy_all_a or average_a function as is processing function.
If there is an explicit stream passed, that is used as input a with the
current scope's trigger as input b, otherwise the current scope's trigger
is used as input a.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
"""
parent = scope_stack[-1]
alloc = parent.allocator
trigger_stream, trigger_cond = parent.trigger_chain()
op = 'copy_latest_a'
if self.all:
op = 'copy_all_a'
elif self.average:
op = 'average_a'
elif self.count:
op = 'copy_count_a'
if self.explicit_input:
# If root node is an input, create an intermediate node with an unbuffered node
if self.explicit_input.input:
unbuffered_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
sensor_graph.add_node(u"({} always) => {} using {}".format(self.explicit_input, unbuffered_stream, 'copy_latest_a'))
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(unbuffered_stream, trigger_stream, trigger_cond, self.output, op))
else:
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(self.explicit_input, trigger_stream, trigger_cond, self.output, op))
elif self.constant_input is not None:
const_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(const_stream, trigger_stream, trigger_cond, self.output, op))
sensor_graph.add_constant(const_stream, self.constant_input)
else:
sensor_graph.add_node(u"({} {}) => {} using {}".format(trigger_stream, trigger_cond, self.output, op)) | Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with either the
copy_latest_a, copy_all_a or average_a function as is processing function.
If there is an explicit stream passed, that is used as input a with the
current scope's trigger as input b, otherwise the current scope's trigger
is used as input a.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources. | Below is the the instruction that describes the task:
### Input:
Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with either the
copy_latest_a, copy_all_a or average_a function as is processing function.
If there is an explicit stream passed, that is used as input a with the
current scope's trigger as input b, otherwise the current scope's trigger
is used as input a.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
### Response:
def execute(self, sensor_graph, scope_stack):
"""Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with either the
copy_latest_a, copy_all_a or average_a function as is processing function.
If there is an explicit stream passed, that is used as input a with the
current scope's trigger as input b, otherwise the current scope's trigger
is used as input a.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
"""
parent = scope_stack[-1]
alloc = parent.allocator
trigger_stream, trigger_cond = parent.trigger_chain()
op = 'copy_latest_a'
if self.all:
op = 'copy_all_a'
elif self.average:
op = 'average_a'
elif self.count:
op = 'copy_count_a'
if self.explicit_input:
# If root node is an input, create an intermediate node with an unbuffered node
if self.explicit_input.input:
unbuffered_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
sensor_graph.add_node(u"({} always) => {} using {}".format(self.explicit_input, unbuffered_stream, 'copy_latest_a'))
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(unbuffered_stream, trigger_stream, trigger_cond, self.output, op))
else:
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(self.explicit_input, trigger_stream, trigger_cond, self.output, op))
elif self.constant_input is not None:
const_stream = alloc.allocate_stream(DataStream.ConstantType, attach=True)
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(const_stream, trigger_stream, trigger_cond, self.output, op))
sensor_graph.add_constant(const_stream, self.constant_input)
else:
sensor_graph.add_node(u"({} {}) => {} using {}".format(trigger_stream, trigger_cond, self.output, op)) |
def login(self, request):
"""
Verify credentials
"""
try:
user = authenticate(request)
if not user:
raise AuthenticationFailed("User not authenticated.")
if not user.is_active:
raise AuthenticationFailed("This user has been disabled.")
login(request, user)
return Response(UserSerializer(user).data)
except AuthError as ex:
# This indicates an error that may require attention by the
# Treeherder or Taskcluster teams. Logging this to New Relic to
# increase visibility.
newrelic.agent.record_exception()
logger.exception("Error", exc_info=ex)
raise AuthenticationFailed(str(ex)) | Verify credentials | Below is the the instruction that describes the task:
### Input:
Verify credentials
### Response:
def login(self, request):
"""
Verify credentials
"""
try:
user = authenticate(request)
if not user:
raise AuthenticationFailed("User not authenticated.")
if not user.is_active:
raise AuthenticationFailed("This user has been disabled.")
login(request, user)
return Response(UserSerializer(user).data)
except AuthError as ex:
# This indicates an error that may require attention by the
# Treeherder or Taskcluster teams. Logging this to New Relic to
# increase visibility.
newrelic.agent.record_exception()
logger.exception("Error", exc_info=ex)
raise AuthenticationFailed(str(ex)) |
def print_cyjs_graph(self):
"""Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network.
"""
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
cyjs_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
return cyjs_str | Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network. | Below is the the instruction that describes the task:
### Input:
Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network.
### Response:
def print_cyjs_graph(self):
"""Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network.
"""
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
cyjs_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
return cyjs_str |
def log(self, level, message):
'''Write a log message via the child process.
The child process must already exist; call :meth:`live_log_child`
to make sure. If it has died in a way we don't expect then
this will raise :const:`signal.SIGPIPE`.
'''
if self.log_fd is not None:
prefix = struct.pack('ii', level, len(message))
os.write(self.log_fd, prefix)
os.write(self.log_fd, message) | Write a log message via the child process.
The child process must already exist; call :meth:`live_log_child`
to make sure. If it has died in a way we don't expect then
this will raise :const:`signal.SIGPIPE`. | Below is the the instruction that describes the task:
### Input:
Write a log message via the child process.
The child process must already exist; call :meth:`live_log_child`
to make sure. If it has died in a way we don't expect then
this will raise :const:`signal.SIGPIPE`.
### Response:
def log(self, level, message):
'''Write a log message via the child process.
The child process must already exist; call :meth:`live_log_child`
to make sure. If it has died in a way we don't expect then
this will raise :const:`signal.SIGPIPE`.
'''
if self.log_fd is not None:
prefix = struct.pack('ii', level, len(message))
os.write(self.log_fd, prefix)
os.write(self.log_fd, message) |
def _read_name_file(self, filename):
"""Read a name file from the data directory
:param filename: Name of the file to read.
:return: A list of name entries.
"""
file_path = os.path.join(self._DATA_DIR, filename)
with open(file_path) as f:
names = json.load(f)
return names | Read a name file from the data directory
:param filename: Name of the file to read.
:return: A list of name entries. | Below is the the instruction that describes the task:
### Input:
Read a name file from the data directory
:param filename: Name of the file to read.
:return: A list of name entries.
### Response:
def _read_name_file(self, filename):
"""Read a name file from the data directory
:param filename: Name of the file to read.
:return: A list of name entries.
"""
file_path = os.path.join(self._DATA_DIR, filename)
with open(file_path) as f:
names = json.load(f)
return names |
def inner(X, Y, ip_B=None):
'''Euclidean and non-Euclidean inner product.
numpy.vdot only works for vectors and numpy.dot does not use the conjugate
transpose.
:param X: numpy array with ``shape==(N,m)``
:param Y: numpy array with ``shape==(N,n)``
:param ip_B: (optional) May be one of the following
* ``None``: Euclidean inner product.
* a self-adjoint and positive definite operator :math:`B` (as
``numpy.array`` or ``LinearOperator``). Then :math:`X^*B Y` is
returned.
* a callable which takes 2 arguments X and Y and returns
:math:`\\langle X,Y\\rangle`.
**Caution:** a callable should only be used if necessary. The choice
potentially has an impact on the round-off behavior, e.g. of projections.
:return: numpy array :math:`\\langle X,Y\\rangle` with ``shape==(m,n)``.
'''
if ip_B is None or isinstance(ip_B, IdentityLinearOperator):
return numpy.dot(X.T.conj(), Y)
(N, m) = X.shape
(_, n) = Y.shape
try:
B = get_linearoperator((N, N), ip_B)
except TypeError:
return ip_B(X, Y)
if m > n:
return numpy.dot((B*X).T.conj(), Y)
else:
return numpy.dot(X.T.conj(), B*Y) | Euclidean and non-Euclidean inner product.
numpy.vdot only works for vectors and numpy.dot does not use the conjugate
transpose.
:param X: numpy array with ``shape==(N,m)``
:param Y: numpy array with ``shape==(N,n)``
:param ip_B: (optional) May be one of the following
* ``None``: Euclidean inner product.
* a self-adjoint and positive definite operator :math:`B` (as
``numpy.array`` or ``LinearOperator``). Then :math:`X^*B Y` is
returned.
* a callable which takes 2 arguments X and Y and returns
:math:`\\langle X,Y\\rangle`.
**Caution:** a callable should only be used if necessary. The choice
potentially has an impact on the round-off behavior, e.g. of projections.
:return: numpy array :math:`\\langle X,Y\\rangle` with ``shape==(m,n)``. | Below is the the instruction that describes the task:
### Input:
Euclidean and non-Euclidean inner product.
numpy.vdot only works for vectors and numpy.dot does not use the conjugate
transpose.
:param X: numpy array with ``shape==(N,m)``
:param Y: numpy array with ``shape==(N,n)``
:param ip_B: (optional) May be one of the following
* ``None``: Euclidean inner product.
* a self-adjoint and positive definite operator :math:`B` (as
``numpy.array`` or ``LinearOperator``). Then :math:`X^*B Y` is
returned.
* a callable which takes 2 arguments X and Y and returns
:math:`\\langle X,Y\\rangle`.
**Caution:** a callable should only be used if necessary. The choice
potentially has an impact on the round-off behavior, e.g. of projections.
:return: numpy array :math:`\\langle X,Y\\rangle` with ``shape==(m,n)``.
### Response:
def inner(X, Y, ip_B=None):
'''Euclidean and non-Euclidean inner product.
numpy.vdot only works for vectors and numpy.dot does not use the conjugate
transpose.
:param X: numpy array with ``shape==(N,m)``
:param Y: numpy array with ``shape==(N,n)``
:param ip_B: (optional) May be one of the following
* ``None``: Euclidean inner product.
* a self-adjoint and positive definite operator :math:`B` (as
``numpy.array`` or ``LinearOperator``). Then :math:`X^*B Y` is
returned.
* a callable which takes 2 arguments X and Y and returns
:math:`\\langle X,Y\\rangle`.
**Caution:** a callable should only be used if necessary. The choice
potentially has an impact on the round-off behavior, e.g. of projections.
:return: numpy array :math:`\\langle X,Y\\rangle` with ``shape==(m,n)``.
'''
if ip_B is None or isinstance(ip_B, IdentityLinearOperator):
return numpy.dot(X.T.conj(), Y)
(N, m) = X.shape
(_, n) = Y.shape
try:
B = get_linearoperator((N, N), ip_B)
except TypeError:
return ip_B(X, Y)
if m > n:
return numpy.dot((B*X).T.conj(), Y)
else:
return numpy.dot(X.T.conj(), B*Y) |
def add_cmd_handler(self, handler_obj):
"""Registers a new command handler object.
All methods on `handler_obj` whose name starts with "cmd_" are
registered as a GTP command. For example, the method cmd_genmove will
be invoked when the engine receives a genmove command.
Args:
handler_obj: the handler object to register.
"""
for field in dir(handler_obj):
if field.startswith("cmd_"):
cmd = field[4:]
fn = getattr(handler_obj, field)
if cmd in self.cmds:
print('Replacing {} with {}'.format(
_handler_name(self.cmds[cmd]), _handler_name(fn)),
file=sys.stderr)
self.cmds[cmd] = fn | Registers a new command handler object.
All methods on `handler_obj` whose name starts with "cmd_" are
registered as a GTP command. For example, the method cmd_genmove will
be invoked when the engine receives a genmove command.
Args:
handler_obj: the handler object to register. | Below is the the instruction that describes the task:
### Input:
Registers a new command handler object.
All methods on `handler_obj` whose name starts with "cmd_" are
registered as a GTP command. For example, the method cmd_genmove will
be invoked when the engine receives a genmove command.
Args:
handler_obj: the handler object to register.
### Response:
def add_cmd_handler(self, handler_obj):
"""Registers a new command handler object.
All methods on `handler_obj` whose name starts with "cmd_" are
registered as a GTP command. For example, the method cmd_genmove will
be invoked when the engine receives a genmove command.
Args:
handler_obj: the handler object to register.
"""
for field in dir(handler_obj):
if field.startswith("cmd_"):
cmd = field[4:]
fn = getattr(handler_obj, field)
if cmd in self.cmds:
print('Replacing {} with {}'.format(
_handler_name(self.cmds[cmd]), _handler_name(fn)),
file=sys.stderr)
self.cmds[cmd] = fn |
def Bahadori_liquid(T, M):
r'''Estimates the thermal conductivity of parafin liquid hydrocarbons.
Fits their data well, and is useful as only MW is required.
X is the Molecular weight, and Y the temperature.
.. math::
K = a + bY + CY^2 + dY^3
a = A_1 + B_1 X + C_1 X^2 + D_1 X^3
b = A_2 + B_2 X + C_2 X^2 + D_2 X^3
c = A_3 + B_3 X + C_3 X^2 + D_3 X^3
d = A_4 + B_4 X + C_4 X^2 + D_4 X^3
Parameters
----------
T : float
Temperature of the fluid [K]
M : float
Molecular weight of the fluid [g/mol]
Returns
-------
kl : float
Estimated liquid thermal conductivity [W/m/k]
Notes
-----
The accuracy of this equation has not been reviewed.
Examples
--------
Data point from [1]_.
>>> Bahadori_liquid(273.15, 170)
0.14274278108272603
References
----------
.. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal
Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13
(December 2008): 52-54
'''
A = [-6.48326E-2, 2.715015E-3, -1.08580E-5, 9.853917E-9]
B = [1.565612E-2, -1.55833E-4, 5.051114E-7, -4.68030E-10]
C = [-1.80304E-4, 1.758693E-6, -5.55224E-9, 5.201365E-12]
D = [5.880443E-7, -5.65898E-9, 1.764384E-11, -1.65944E-14]
X, Y = M, T
a = A[0] + B[0]*X + C[0]*X**2 + D[0]*X**3
b = A[1] + B[1]*X + C[1]*X**2 + D[1]*X**3
c = A[2] + B[2]*X + C[2]*X**2 + D[2]*X**3
d = A[3] + B[3]*X + C[3]*X**2 + D[3]*X**3
return a + b*Y + c*Y**2 + d*Y**3 | r'''Estimates the thermal conductivity of parafin liquid hydrocarbons.
Fits their data well, and is useful as only MW is required.
X is the Molecular weight, and Y the temperature.
.. math::
K = a + bY + CY^2 + dY^3
a = A_1 + B_1 X + C_1 X^2 + D_1 X^3
b = A_2 + B_2 X + C_2 X^2 + D_2 X^3
c = A_3 + B_3 X + C_3 X^2 + D_3 X^3
d = A_4 + B_4 X + C_4 X^2 + D_4 X^3
Parameters
----------
T : float
Temperature of the fluid [K]
M : float
Molecular weight of the fluid [g/mol]
Returns
-------
kl : float
Estimated liquid thermal conductivity [W/m/k]
Notes
-----
The accuracy of this equation has not been reviewed.
Examples
--------
Data point from [1]_.
>>> Bahadori_liquid(273.15, 170)
0.14274278108272603
References
----------
.. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal
Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13
(December 2008): 52-54 | Below is the the instruction that describes the task:
### Input:
r'''Estimates the thermal conductivity of parafin liquid hydrocarbons.
Fits their data well, and is useful as only MW is required.
X is the Molecular weight, and Y the temperature.
.. math::
K = a + bY + CY^2 + dY^3
a = A_1 + B_1 X + C_1 X^2 + D_1 X^3
b = A_2 + B_2 X + C_2 X^2 + D_2 X^3
c = A_3 + B_3 X + C_3 X^2 + D_3 X^3
d = A_4 + B_4 X + C_4 X^2 + D_4 X^3
Parameters
----------
T : float
Temperature of the fluid [K]
M : float
Molecular weight of the fluid [g/mol]
Returns
-------
kl : float
Estimated liquid thermal conductivity [W/m/k]
Notes
-----
The accuracy of this equation has not been reviewed.
Examples
--------
Data point from [1]_.
>>> Bahadori_liquid(273.15, 170)
0.14274278108272603
References
----------
.. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal
Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13
(December 2008): 52-54
### Response:
def Bahadori_liquid(T, M):
r'''Estimates the thermal conductivity of parafin liquid hydrocarbons.
Fits their data well, and is useful as only MW is required.
X is the Molecular weight, and Y the temperature.
.. math::
K = a + bY + CY^2 + dY^3
a = A_1 + B_1 X + C_1 X^2 + D_1 X^3
b = A_2 + B_2 X + C_2 X^2 + D_2 X^3
c = A_3 + B_3 X + C_3 X^2 + D_3 X^3
d = A_4 + B_4 X + C_4 X^2 + D_4 X^3
Parameters
----------
T : float
Temperature of the fluid [K]
M : float
Molecular weight of the fluid [g/mol]
Returns
-------
kl : float
Estimated liquid thermal conductivity [W/m/k]
Notes
-----
The accuracy of this equation has not been reviewed.
Examples
--------
Data point from [1]_.
>>> Bahadori_liquid(273.15, 170)
0.14274278108272603
References
----------
.. [1] Bahadori, Alireza, and Saeid Mokhatab. "Estimating Thermal
Conductivity of Hydrocarbons." Chemical Engineering 115, no. 13
(December 2008): 52-54
'''
A = [-6.48326E-2, 2.715015E-3, -1.08580E-5, 9.853917E-9]
B = [1.565612E-2, -1.55833E-4, 5.051114E-7, -4.68030E-10]
C = [-1.80304E-4, 1.758693E-6, -5.55224E-9, 5.201365E-12]
D = [5.880443E-7, -5.65898E-9, 1.764384E-11, -1.65944E-14]
X, Y = M, T
a = A[0] + B[0]*X + C[0]*X**2 + D[0]*X**3
b = A[1] + B[1]*X + C[1]*X**2 + D[1]*X**3
c = A[2] + B[2]*X + C[2]*X**2 + D[2]*X**3
d = A[3] + B[3]*X + C[3]*X**2 + D[3]*X**3
return a + b*Y + c*Y**2 + d*Y**3 |
def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = '\n'.join([' [{}] {}{}'
.format(i + 1,
x['name'] if isinstance(x, dict) and 'name' in x else x,
' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
for i, x in enumerate(a_list)])
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
if val == '?' and help_string is not None:
print(help_string)
continue
if not val:
val = '{}'.format(default)
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
logger.warning('Valid values are %s', allowed_vals) | Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen. | Below is the the instruction that describes the task:
### Input:
Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
### Response:
def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
:param msg:A message displayed to the user before the choice list
:type msg: str
:param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc')
"type a_list: list
:param default:The default option that should be chosen if user doesn't enter a choice
:type default: int
:returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = '\n'.join([' [{}] {}{}'
.format(i + 1,
x['name'] if isinstance(x, dict) and 'name' in x else x,
' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
for i, x in enumerate(a_list)])
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
if val == '?' and help_string is not None:
print(help_string)
continue
if not val:
val = '{}'.format(default)
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
logger.warning('Valid values are %s', allowed_vals) |
def get_fd_from_freqtau(template=None, **kwargs):
"""Return frequency domain ringdown with all the modes specified.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
lmns : list
Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
The n specifies the number of overtones desired for the corresponding
lm pair (maximum n=8).
Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
f_lmn: float
Central frequency of the lmn overtone, as many as number of modes.
tau_lmn: float
Damping time of the lmn overtone, as many as number of modes.
amp220 : float
Amplitude of the fundamental 220 mode.
amplmn : float
Fraction of the amplitude of the lmn overtone relative to the
fundamental mode, as many as the number of subdominant modes.
philmn : float
Phase of the lmn overtone, as many as the number of modes. Should also
include the information from the azimuthal angle (phi + m*Phi).
inclination : {None, float}, optional
Inclination of the system in radians. If None, the spherical harmonics
will be set to 1.
delta_f : {None, float}, optional
The frequency step used to generate the ringdown.
If None, it will be set to the inverse of the time at which the
amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
f_lower: {None, float}, optional
The starting frequency of the output frequency series.
If None, it will be set to delta_f.
f_final : {None, float}, optional
The ending frequency of the output frequency series.
If None, it will be set to the frequency at which the amplitude
is 1/1000 of the peak amplitude (the maximum of all modes).
Returns
-------
hplustilde: FrequencySeries
The plus phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
"""
input_params = props(template, freqtau_required_args, **kwargs)
# Get required args
f_0, tau = lm_freqs_taus(**input_params)
lmns = input_params['lmns']
for lmn in lmns:
if int(lmn[2]) == 0:
raise ValueError('Number of overtones (nmodes) must be greater '
'than zero.')
# The following may not be in input_params
inc = input_params.pop('inclination', None)
delta_f = input_params.pop('delta_f', None)
f_lower = input_params.pop('f_lower', None)
f_final = input_params.pop('f_final', None)
if not delta_f:
delta_f = lm_deltaf(tau, lmns)
if not f_final:
f_final = lm_ffinal(f_0, tau, lmns)
if not f_lower:
f_lower = delta_f
kmax = int(f_final / delta_f) + 1
outplustilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
outcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
for lmn in lmns:
l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
hplustilde, hcrosstilde = get_fd_lm(freqs=f_0, taus=tau,
l=l, m=m, nmodes=nmodes,
inclination=inc,
delta_f=delta_f, f_lower=f_lower,
f_final=f_final, **input_params)
outplustilde.data += hplustilde.data
outcrosstilde.data += hcrosstilde.data
return outplustilde, outcrosstilde | Return frequency domain ringdown with all the modes specified.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
lmns : list
Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
The n specifies the number of overtones desired for the corresponding
lm pair (maximum n=8).
Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
f_lmn: float
Central frequency of the lmn overtone, as many as number of modes.
tau_lmn: float
Damping time of the lmn overtone, as many as number of modes.
amp220 : float
Amplitude of the fundamental 220 mode.
amplmn : float
Fraction of the amplitude of the lmn overtone relative to the
fundamental mode, as many as the number of subdominant modes.
philmn : float
Phase of the lmn overtone, as many as the number of modes. Should also
include the information from the azimuthal angle (phi + m*Phi).
inclination : {None, float}, optional
Inclination of the system in radians. If None, the spherical harmonics
will be set to 1.
delta_f : {None, float}, optional
The frequency step used to generate the ringdown.
If None, it will be set to the inverse of the time at which the
amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
f_lower: {None, float}, optional
The starting frequency of the output frequency series.
If None, it will be set to delta_f.
f_final : {None, float}, optional
The ending frequency of the output frequency series.
If None, it will be set to the frequency at which the amplitude
is 1/1000 of the peak amplitude (the maximum of all modes).
Returns
-------
hplustilde: FrequencySeries
The plus phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of a ringdown with the lm modes specified and
n overtones in frequency domain. | Below is the the instruction that describes the task:
### Input:
Return frequency domain ringdown with all the modes specified.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
lmns : list
Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
The n specifies the number of overtones desired for the corresponding
lm pair (maximum n=8).
Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
f_lmn: float
Central frequency of the lmn overtone, as many as number of modes.
tau_lmn: float
Damping time of the lmn overtone, as many as number of modes.
amp220 : float
Amplitude of the fundamental 220 mode.
amplmn : float
Fraction of the amplitude of the lmn overtone relative to the
fundamental mode, as many as the number of subdominant modes.
philmn : float
Phase of the lmn overtone, as many as the number of modes. Should also
include the information from the azimuthal angle (phi + m*Phi).
inclination : {None, float}, optional
Inclination of the system in radians. If None, the spherical harmonics
will be set to 1.
delta_f : {None, float}, optional
The frequency step used to generate the ringdown.
If None, it will be set to the inverse of the time at which the
amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
f_lower: {None, float}, optional
The starting frequency of the output frequency series.
If None, it will be set to delta_f.
f_final : {None, float}, optional
The ending frequency of the output frequency series.
If None, it will be set to the frequency at which the amplitude
is 1/1000 of the peak amplitude (the maximum of all modes).
Returns
-------
hplustilde: FrequencySeries
The plus phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
### Response:
def get_fd_from_freqtau(template=None, **kwargs):
"""Return frequency domain ringdown with all the modes specified.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
lmns : list
Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
The n specifies the number of overtones desired for the corresponding
lm pair (maximum n=8).
Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
f_lmn: float
Central frequency of the lmn overtone, as many as number of modes.
tau_lmn: float
Damping time of the lmn overtone, as many as number of modes.
amp220 : float
Amplitude of the fundamental 220 mode.
amplmn : float
Fraction of the amplitude of the lmn overtone relative to the
fundamental mode, as many as the number of subdominant modes.
philmn : float
Phase of the lmn overtone, as many as the number of modes. Should also
include the information from the azimuthal angle (phi + m*Phi).
inclination : {None, float}, optional
Inclination of the system in radians. If None, the spherical harmonics
will be set to 1.
delta_f : {None, float}, optional
The frequency step used to generate the ringdown.
If None, it will be set to the inverse of the time at which the
amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
f_lower: {None, float}, optional
The starting frequency of the output frequency series.
If None, it will be set to delta_f.
f_final : {None, float}, optional
The ending frequency of the output frequency series.
If None, it will be set to the frequency at which the amplitude
is 1/1000 of the peak amplitude (the maximum of all modes).
Returns
-------
hplustilde: FrequencySeries
The plus phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
"""
input_params = props(template, freqtau_required_args, **kwargs)
# Get required args
f_0, tau = lm_freqs_taus(**input_params)
lmns = input_params['lmns']
for lmn in lmns:
if int(lmn[2]) == 0:
raise ValueError('Number of overtones (nmodes) must be greater '
'than zero.')
# The following may not be in input_params
inc = input_params.pop('inclination', None)
delta_f = input_params.pop('delta_f', None)
f_lower = input_params.pop('f_lower', None)
f_final = input_params.pop('f_final', None)
if not delta_f:
delta_f = lm_deltaf(tau, lmns)
if not f_final:
f_final = lm_ffinal(f_0, tau, lmns)
if not f_lower:
f_lower = delta_f
kmax = int(f_final / delta_f) + 1
outplustilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
outcrosstilde = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f)
for lmn in lmns:
l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
hplustilde, hcrosstilde = get_fd_lm(freqs=f_0, taus=tau,
l=l, m=m, nmodes=nmodes,
inclination=inc,
delta_f=delta_f, f_lower=f_lower,
f_final=f_final, **input_params)
outplustilde.data += hplustilde.data
outcrosstilde.data += hcrosstilde.data
return outplustilde, outcrosstilde |
def as_parameter(nullable=True, strict=True):
"""
Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3)
"""
def decorator(cls):
base_class = Parameter if nullable else NonNullParameter
return type(cls.__name__, (base_class,), {
# Preserve text for documentation
'__name__': cls.__name__,
'__doc__': cls.__doc__,
'__module__': cls.__module__,
# Sphinx doc type string
'_doc_type': ":class:`{class_name} <{module}.{class_name}>`".format(
class_name=cls.__name__, module=__name__
),
#
'type': lambda self, value: cls(**value)
})
return decorator | Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3) | Below is the the instruction that describes the task:
### Input:
Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3)
### Response:
def as_parameter(nullable=True, strict=True):
"""
Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3)
"""
def decorator(cls):
base_class = Parameter if nullable else NonNullParameter
return type(cls.__name__, (base_class,), {
# Preserve text for documentation
'__name__': cls.__name__,
'__doc__': cls.__doc__,
'__module__': cls.__module__,
# Sphinx doc type string
'_doc_type': ":class:`{class_name} <{module}.{class_name}>`".format(
class_name=cls.__name__, module=__name__
),
#
'type': lambda self, value: cls(**value)
})
return decorator |
def add_schemas(path, ext="json"):
"""Add schemas from files in 'path'.
:param path: Path with schema files. Schemas are named by their file,
with the extension stripped. e.g., if path is "/tmp/foo",
then the schema in "/tmp/foo/bar.json" will be named "bar".
:type path: str
:param ext: File extension that identifies schema files
:type ext: str
:return: None
:raise: SchemaPathError, if no such path. SchemaParseError, if a schema
is not valid JSON.
"""
if not os.path.exists(path):
raise SchemaPathError()
filepat = "*." + ext if ext else "*"
for f in glob.glob(os.path.join(path, filepat)):
with open(f, 'r') as fp:
try:
schema = json.load(fp)
except ValueError:
raise SchemaParseError("error parsing '{}'".format(f))
name = os.path.splitext(os.path.basename(f))[0]
schemata[name] = Schema(schema) | Add schemas from files in 'path'.
:param path: Path with schema files. Schemas are named by their file,
with the extension stripped. e.g., if path is "/tmp/foo",
then the schema in "/tmp/foo/bar.json" will be named "bar".
:type path: str
:param ext: File extension that identifies schema files
:type ext: str
:return: None
:raise: SchemaPathError, if no such path. SchemaParseError, if a schema
is not valid JSON. | Below is the the instruction that describes the task:
### Input:
Add schemas from files in 'path'.
:param path: Path with schema files. Schemas are named by their file,
with the extension stripped. e.g., if path is "/tmp/foo",
then the schema in "/tmp/foo/bar.json" will be named "bar".
:type path: str
:param ext: File extension that identifies schema files
:type ext: str
:return: None
:raise: SchemaPathError, if no such path. SchemaParseError, if a schema
is not valid JSON.
### Response:
def add_schemas(path, ext="json"):
"""Add schemas from files in 'path'.
:param path: Path with schema files. Schemas are named by their file,
with the extension stripped. e.g., if path is "/tmp/foo",
then the schema in "/tmp/foo/bar.json" will be named "bar".
:type path: str
:param ext: File extension that identifies schema files
:type ext: str
:return: None
:raise: SchemaPathError, if no such path. SchemaParseError, if a schema
is not valid JSON.
"""
if not os.path.exists(path):
raise SchemaPathError()
filepat = "*." + ext if ext else "*"
for f in glob.glob(os.path.join(path, filepat)):
with open(f, 'r') as fp:
try:
schema = json.load(fp)
except ValueError:
raise SchemaParseError("error parsing '{}'".format(f))
name = os.path.splitext(os.path.basename(f))[0]
schemata[name] = Schema(schema) |
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix) | Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.) | Below is the the instruction that describes the task:
### Input:
Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
### Response:
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix) |
def add_suffix(in_files, suffix):
"""
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
"""
import os.path as op
from nipype.utils.filemanip import fname_presuffix, filename_to_list
return op.basename(fname_presuffix(filename_to_list(in_files)[0],
suffix=suffix)) | Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz' | Below is the the instruction that describes the task:
### Input:
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
### Response:
def add_suffix(in_files, suffix):
"""
Wrap nipype's fname_presuffix to conveniently just add a prefix
>>> add_suffix([
... '/path/to/sub-045_ses-test_T1w.nii.gz',
... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
'sub-045_ses-test_T1w_test.nii.gz'
"""
import os.path as op
from nipype.utils.filemanip import fname_presuffix, filename_to_list
return op.basename(fname_presuffix(filename_to_list(in_files)[0],
suffix=suffix)) |
def readTFAM(fileName):
"""Reads the TFAM file.
:param fileName: the name of the ``tfam`` file.
:type fileName: str
:returns: a representation the ``tfam`` file (:py:class:`numpy.array`).
"""
# Saving the TFAM file
tfam = None
with open(fileName, 'r') as inputFile:
tfam = [
tuple(i.rstrip("\r\n").split("\t")) for i in inputFile.readlines()
]
tfam = np.array(tfam)
return tfam | Reads the TFAM file.
:param fileName: the name of the ``tfam`` file.
:type fileName: str
:returns: a representation the ``tfam`` file (:py:class:`numpy.array`). | Below is the the instruction that describes the task:
### Input:
Reads the TFAM file.
:param fileName: the name of the ``tfam`` file.
:type fileName: str
:returns: a representation the ``tfam`` file (:py:class:`numpy.array`).
### Response:
def readTFAM(fileName):
"""Reads the TFAM file.
:param fileName: the name of the ``tfam`` file.
:type fileName: str
:returns: a representation the ``tfam`` file (:py:class:`numpy.array`).
"""
# Saving the TFAM file
tfam = None
with open(fileName, 'r') as inputFile:
tfam = [
tuple(i.rstrip("\r\n").split("\t")) for i in inputFile.readlines()
]
tfam = np.array(tfam)
return tfam |
def block_lengths(self):
"""Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed.
"""
if self._lengths_cache is None:
# The first column will have the correct lengths. We have an
# invariant that requires that all blocks be the same length in a
# row of blocks.
self._lengths_cache = (
[obj.length() for obj in self._partitions_cache.T[0]]
if len(self._partitions_cache.T) > 0
else []
)
return self._lengths_cache | Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed. | Below is the the instruction that describes the task:
### Input:
Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed.
### Response:
def block_lengths(self):
"""Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed.
"""
if self._lengths_cache is None:
# The first column will have the correct lengths. We have an
# invariant that requires that all blocks be the same length in a
# row of blocks.
self._lengths_cache = (
[obj.length() for obj in self._partitions_cache.T[0]]
if len(self._partitions_cache.T) > 0
else []
)
return self._lengths_cache |
def score_leaves(self) -> Set[BaseEntity]:
"""Calculate the score for all leaves.
:return: The set of leaf nodes that were scored
"""
leaves = set(self.iter_leaves())
if not leaves:
log.warning('no leaves.')
return set()
for leaf in leaves:
self.graph.nodes[leaf][self.tag] = self.calculate_score(leaf)
log.log(5, 'chomping %s', leaf)
return leaves | Calculate the score for all leaves.
:return: The set of leaf nodes that were scored | Below is the the instruction that describes the task:
### Input:
Calculate the score for all leaves.
:return: The set of leaf nodes that were scored
### Response:
def score_leaves(self) -> Set[BaseEntity]:
"""Calculate the score for all leaves.
:return: The set of leaf nodes that were scored
"""
leaves = set(self.iter_leaves())
if not leaves:
log.warning('no leaves.')
return set()
for leaf in leaves:
self.graph.nodes[leaf][self.tag] = self.calculate_score(leaf)
log.log(5, 'chomping %s', leaf)
return leaves |
def enable(self, interface_id, dns_relay_profile=None):
"""
Enable the DNS Relay service on this engine.
:param int interface_id: interface id to enable relay
:param str,DNSRelayProfile dns_relay_profile: DNSRelayProfile element
or str href
:raises EngineCommandFailed: interface not found
:raises ElementNotFound: profile not found
:return: None
"""
if not dns_relay_profile: # Use default
href = DNSRelayProfile('Cache Only').href
else:
href = element_resolver(dns_relay_profile)
intf = self.engine.interface.get(interface_id)
self.engine.data.update(dns_relay_profile_ref=href)
self.engine.data.update(dns_relay_interface=intf.ndi_interfaces) | Enable the DNS Relay service on this engine.
:param int interface_id: interface id to enable relay
:param str,DNSRelayProfile dns_relay_profile: DNSRelayProfile element
or str href
:raises EngineCommandFailed: interface not found
:raises ElementNotFound: profile not found
:return: None | Below is the the instruction that describes the task:
### Input:
Enable the DNS Relay service on this engine.
:param int interface_id: interface id to enable relay
:param str,DNSRelayProfile dns_relay_profile: DNSRelayProfile element
or str href
:raises EngineCommandFailed: interface not found
:raises ElementNotFound: profile not found
:return: None
### Response:
def enable(self, interface_id, dns_relay_profile=None):
"""
Enable the DNS Relay service on this engine.
:param int interface_id: interface id to enable relay
:param str,DNSRelayProfile dns_relay_profile: DNSRelayProfile element
or str href
:raises EngineCommandFailed: interface not found
:raises ElementNotFound: profile not found
:return: None
"""
if not dns_relay_profile: # Use default
href = DNSRelayProfile('Cache Only').href
else:
href = element_resolver(dns_relay_profile)
intf = self.engine.interface.get(interface_id)
self.engine.data.update(dns_relay_profile_ref=href)
self.engine.data.update(dns_relay_interface=intf.ndi_interfaces) |
def bit_string_index(s):
"""Return the index of a string of 0s and 1s."""
n = len(s)
k = s.count("1")
if s.count("0") != n - k:
raise VisualizationError("s must be a string of 0 and 1")
ones = [pos for pos, char in enumerate(s) if char == "1"]
return lex_index(n, k, ones) | Return the index of a string of 0s and 1s. | Below is the the instruction that describes the task:
### Input:
Return the index of a string of 0s and 1s.
### Response:
def bit_string_index(s):
"""Return the index of a string of 0s and 1s."""
n = len(s)
k = s.count("1")
if s.count("0") != n - k:
raise VisualizationError("s must be a string of 0 and 1")
ones = [pos for pos, char in enumerate(s) if char == "1"]
return lex_index(n, k, ones) |
def stop(self):
"""Halts the acquisition, this must be called before resetting acquisition"""
try:
self.aitask.stop()
self.aotask.stop()
pass
except:
print u"No task running"
self.aitask = None
self.aotask = None | Halts the acquisition, this must be called before resetting acquisition | Below is the the instruction that describes the task:
### Input:
Halts the acquisition, this must be called before resetting acquisition
### Response:
def stop(self):
"""Halts the acquisition, this must be called before resetting acquisition"""
try:
self.aitask.stop()
self.aotask.stop()
pass
except:
print u"No task running"
self.aitask = None
self.aotask = None |
def normalize_body(self, b):
"""return the body as a string, formatted to the appropriate content type
:param b: mixed, the current raw body
:returns: unicode string
"""
if b is None: return ''
if self.is_json():
# TODO ???
# I don't like this, if we have a content type but it isn't one
# of the supported ones we were returning the exception, which threw
# Jarid off, but now it just returns a string, which is not best either
# my thought is we could have a body_type_subtype method that would
# make it possible to easily handle custom types
# eg, "application/json" would become: self.body_application_json(b, is_error)
b = json.dumps(b, cls=ResponseBody)
else:
# just return a string representation of body if no content type
b = String(b, self.encoding)
return b | return the body as a string, formatted to the appropriate content type
:param b: mixed, the current raw body
:returns: unicode string | Below is the the instruction that describes the task:
### Input:
return the body as a string, formatted to the appropriate content type
:param b: mixed, the current raw body
:returns: unicode string
### Response:
def normalize_body(self, b):
"""return the body as a string, formatted to the appropriate content type
:param b: mixed, the current raw body
:returns: unicode string
"""
if b is None: return ''
if self.is_json():
# TODO ???
# I don't like this, if we have a content type but it isn't one
# of the supported ones we were returning the exception, which threw
# Jarid off, but now it just returns a string, which is not best either
# my thought is we could have a body_type_subtype method that would
# make it possible to easily handle custom types
# eg, "application/json" would become: self.body_application_json(b, is_error)
b = json.dumps(b, cls=ResponseBody)
else:
# just return a string representation of body if no content type
b = String(b, self.encoding)
return b |
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
"""
final_scr = self.merge_scrfiles()
return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr) | This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`. | Below is the the instruction that describes the task:
### Input:
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
### Response:
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
"""
final_scr = self.merge_scrfiles()
return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr) |
def _equivalent(self, other):
"""Compare two entities of the same class, excluding keys."""
if other.__class__ is not self.__class__: # TODO: What about subclasses?
raise NotImplementedError('Cannot compare different model classes. '
'%s is not %s' % (self.__class__.__name__,
other.__class_.__name__))
if set(self._projection) != set(other._projection):
return False
# It's all about determining inequality early.
if len(self._properties) != len(other._properties):
return False # Can only happen for Expandos.
my_prop_names = set(self._properties.iterkeys())
their_prop_names = set(other._properties.iterkeys())
if my_prop_names != their_prop_names:
return False # Again, only possible for Expandos.
if self._projection:
my_prop_names = set(self._projection)
for name in my_prop_names:
if '.' in name:
name, _ = name.split('.', 1)
my_value = self._properties[name]._get_value(self)
their_value = other._properties[name]._get_value(other)
if my_value != their_value:
return False
return True | Compare two entities of the same class, excluding keys. | Below is the the instruction that describes the task:
### Input:
Compare two entities of the same class, excluding keys.
### Response:
def _equivalent(self, other):
"""Compare two entities of the same class, excluding keys."""
if other.__class__ is not self.__class__: # TODO: What about subclasses?
raise NotImplementedError('Cannot compare different model classes. '
'%s is not %s' % (self.__class__.__name__,
other.__class_.__name__))
if set(self._projection) != set(other._projection):
return False
# It's all about determining inequality early.
if len(self._properties) != len(other._properties):
return False # Can only happen for Expandos.
my_prop_names = set(self._properties.iterkeys())
their_prop_names = set(other._properties.iterkeys())
if my_prop_names != their_prop_names:
return False # Again, only possible for Expandos.
if self._projection:
my_prop_names = set(self._projection)
for name in my_prop_names:
if '.' in name:
name, _ = name.split('.', 1)
my_value = self._properties[name]._get_value(self)
their_value = other._properties[name]._get_value(other)
if my_value != their_value:
return False
return True |
def vintage_dates(self,series_id=None,response_type=None,params=None):
"""
Function to request the dates in history when a series' data values were
revised or new data values were released. Vintage dates are the release dates
for a series excluding release dates when the data for the series did not change.
`<https://research.stlouisfed.org/docs/api/fred/series_vintagedates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab,'pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results by vintage_date. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/vintagedates?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response | Function to request the dates in history when a series' data values were
revised or new data values were released. Vintage dates are the release dates
for a series excluding release dates when the data for the series did not change.
`<https://research.stlouisfed.org/docs/api/fred/series_vintagedates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab,'pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results by vintage_date. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs. | Below is the the instruction that describes the task:
### Input:
Function to request the dates in history when a series' data values were
revised or new data values were released. Vintage dates are the release dates
for a series excluding release dates when the data for the series did not change.
`<https://research.stlouisfed.org/docs/api/fred/series_vintagedates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab,'pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results by vintage_date. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
### Response:
def vintage_dates(self,series_id=None,response_type=None,params=None):
"""
Function to request the dates in history when a series' data values were
revised or new data values were released. Vintage dates are the release dates
for a series excluding release dates when the data for the series did not change.
`<https://research.stlouisfed.org/docs/api/fred/series_vintagedates.html>`_
:arg int series_id: The id for a series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab,'pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str sort_order: Sort results by vintage_date. Options are 'asc','desc'
:arg bool ssl_verify: To verify HTTPs.
"""
path = '/series/vintagedates?'
params['series_id'] = series_id
response_type = response_type if response_type else self.response_type
if response_type != 'xml': params['file_type'] = 'json'
response = _get_request(self.url_root,self.api_key,path,response_type,params,self.ssl_verify)
return response |
def _make_write_func(file_obj):
"""Return a CFFI callback that writes to a file-like object."""
if file_obj is None:
return ffi.NULL
@ffi.callback("cairo_write_func_t", error=constants.STATUS_WRITE_ERROR)
def write_func(_closure, data, length):
file_obj.write(ffi.buffer(data, length))
return constants.STATUS_SUCCESS
return write_func | Return a CFFI callback that writes to a file-like object. | Below is the the instruction that describes the task:
### Input:
Return a CFFI callback that writes to a file-like object.
### Response:
def _make_write_func(file_obj):
"""Return a CFFI callback that writes to a file-like object."""
if file_obj is None:
return ffi.NULL
@ffi.callback("cairo_write_func_t", error=constants.STATUS_WRITE_ERROR)
def write_func(_closure, data, length):
file_obj.write(ffi.buffer(data, length))
return constants.STATUS_SUCCESS
return write_func |
def validate_signature(request, secret_key):
"""
Validates the signature associated with the given request.
"""
# Extract the request parameters according to the HTTP method
data = request.GET.copy()
if request.method != 'GET':
message_body = getattr(request, request.method, {})
data.update(message_body)
# Make sure the request contains a signature
if data.get('sig', False):
sig = data['sig']
del data['sig']
else:
return False
# Make sure the request contains a timestamp
if data.get('t', False):
timestamp = int(data.get('t', False))
del data['t']
else:
return False
# Make sure the signature has not expired
local_time = datetime.utcnow()
remote_time = datetime.utcfromtimestamp(timestamp)
# this stops a bug if the client clock is ever a little ahead of
# the server clock. Makes the window of acceptable time current +/- 5 mins
if local_time > remote_time:
delta = local_time - remote_time
else:
delta = remote_time - local_time
if delta.seconds > 5 * 60: # If the signature is older than 5 minutes, it's invalid
return False
# Make sure the signature is valid
return sig == calculate_signature(secret_key, data, timestamp) | Validates the signature associated with the given request. | Below is the the instruction that describes the task:
### Input:
Validates the signature associated with the given request.
### Response:
def validate_signature(request, secret_key):
"""
Validates the signature associated with the given request.
"""
# Extract the request parameters according to the HTTP method
data = request.GET.copy()
if request.method != 'GET':
message_body = getattr(request, request.method, {})
data.update(message_body)
# Make sure the request contains a signature
if data.get('sig', False):
sig = data['sig']
del data['sig']
else:
return False
# Make sure the request contains a timestamp
if data.get('t', False):
timestamp = int(data.get('t', False))
del data['t']
else:
return False
# Make sure the signature has not expired
local_time = datetime.utcnow()
remote_time = datetime.utcfromtimestamp(timestamp)
# this stops a bug if the client clock is ever a little ahead of
# the server clock. Makes the window of acceptable time current +/- 5 mins
if local_time > remote_time:
delta = local_time - remote_time
else:
delta = remote_time - local_time
if delta.seconds > 5 * 60: # If the signature is older than 5 minutes, it's invalid
return False
# Make sure the signature is valid
return sig == calculate_signature(secret_key, data, timestamp) |
def unique (inlist):
"""
Returns all unique items in the passed list. If the a list-of-lists
is passed, unique LISTS are found (i.e., items in the first dimension are
compared).
Usage: unique (inlist)
Returns: the unique elements (or rows) in inlist
"""
uniques = []
for item in inlist:
if item not in uniques:
uniques.append(item)
return uniques | Returns all unique items in the passed list. If the a list-of-lists
is passed, unique LISTS are found (i.e., items in the first dimension are
compared).
Usage: unique (inlist)
Returns: the unique elements (or rows) in inlist | Below is the the instruction that describes the task:
### Input:
Returns all unique items in the passed list. If the a list-of-lists
is passed, unique LISTS are found (i.e., items in the first dimension are
compared).
Usage: unique (inlist)
Returns: the unique elements (or rows) in inlist
### Response:
def unique (inlist):
"""
Returns all unique items in the passed list. If the a list-of-lists
is passed, unique LISTS are found (i.e., items in the first dimension are
compared).
Usage: unique (inlist)
Returns: the unique elements (or rows) in inlist
"""
uniques = []
for item in inlist:
if item not in uniques:
uniques.append(item)
return uniques |
def harden_attention_weights(weights, hard_attention_k):
"""Make attention weights non-0 only on the top-hard_attention_k ones."""
# Subtract the top-kth weight and zero-out all lower ones.
# Note that currently in case of numerical ties it will retain more
# than k elements. In the future, we may want to avoid this.
weights -= common_layers.top_kth_iterative(weights, hard_attention_k)
weights = tf.nn.relu(weights)
# Re-normalize the weights.
weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True)
weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0.
weights /= weights_sum
return weights | Make attention weights non-0 only on the top-hard_attention_k ones. | Below is the the instruction that describes the task:
### Input:
Make attention weights non-0 only on the top-hard_attention_k ones.
### Response:
def harden_attention_weights(weights, hard_attention_k):
"""Make attention weights non-0 only on the top-hard_attention_k ones."""
# Subtract the top-kth weight and zero-out all lower ones.
# Note that currently in case of numerical ties it will retain more
# than k elements. In the future, we may want to avoid this.
weights -= common_layers.top_kth_iterative(weights, hard_attention_k)
weights = tf.nn.relu(weights)
# Re-normalize the weights.
weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True)
weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0.
weights /= weights_sum
return weights |
def _kwarg(self, kwargs, kwname, default=None):
"""
Resolves keyword arguments from constructor or :doc:`config`.
.. note::
The keyword arguments take this order of precedence:
1. Arguments passed to constructor through the
:func:`authomatic.login`.
2. Provider specific arguments from :doc:`config`.
3. Arguments from :doc:`config` set in the ``__defaults__`` key.
2. The value from :data:`default` argument.
:param dict kwargs:
Keyword arguments dictionary.
:param str kwname:
Name of the desired keyword argument.
"""
return kwargs.get(kwname) or \
self.settings.config.get(self.name, {}).get(kwname) or \
self.settings.config.get('__defaults__', {}).get(kwname) or \
default | Resolves keyword arguments from constructor or :doc:`config`.
.. note::
The keyword arguments take this order of precedence:
1. Arguments passed to constructor through the
:func:`authomatic.login`.
2. Provider specific arguments from :doc:`config`.
3. Arguments from :doc:`config` set in the ``__defaults__`` key.
2. The value from :data:`default` argument.
:param dict kwargs:
Keyword arguments dictionary.
:param str kwname:
Name of the desired keyword argument. | Below is the the instruction that describes the task:
### Input:
Resolves keyword arguments from constructor or :doc:`config`.
.. note::
The keyword arguments take this order of precedence:
1. Arguments passed to constructor through the
:func:`authomatic.login`.
2. Provider specific arguments from :doc:`config`.
3. Arguments from :doc:`config` set in the ``__defaults__`` key.
2. The value from :data:`default` argument.
:param dict kwargs:
Keyword arguments dictionary.
:param str kwname:
Name of the desired keyword argument.
### Response:
def _kwarg(self, kwargs, kwname, default=None):
"""
Resolves keyword arguments from constructor or :doc:`config`.
.. note::
The keyword arguments take this order of precedence:
1. Arguments passed to constructor through the
:func:`authomatic.login`.
2. Provider specific arguments from :doc:`config`.
3. Arguments from :doc:`config` set in the ``__defaults__`` key.
2. The value from :data:`default` argument.
:param dict kwargs:
Keyword arguments dictionary.
:param str kwname:
Name of the desired keyword argument.
"""
return kwargs.get(kwname) or \
self.settings.config.get(self.name, {}).get(kwname) or \
self.settings.config.get('__defaults__', {}).get(kwname) or \
default |
def validate_split_runs_file(split_runs_file):
"""Check if structure of file is as expected and return dictionary linking names to run_IDs."""
try:
content = [l.strip() for l in split_runs_file.readlines()]
if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c}
else:
sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
except IndexError:
sys.exit("ERROR: Format of --split_runs tab separated file not as expected")
logging.error("ERROR: Format of --split_runs tab separated file not as expected") | Check if structure of file is as expected and return dictionary linking names to run_IDs. | Below is the the instruction that describes the task:
### Input:
Check if structure of file is as expected and return dictionary linking names to run_IDs.
### Response:
def validate_split_runs_file(split_runs_file):
"""Check if structure of file is as expected and return dictionary linking names to run_IDs."""
try:
content = [l.strip() for l in split_runs_file.readlines()]
if content[0].upper().split('\t') == ['NAME', 'RUN_ID']:
return {c.split('\t')[1]: c.split('\t')[0] for c in content[1:] if c}
else:
sys.exit("ERROR: Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
logging.error("Mandatory header of --split_runs tsv file not found: 'NAME', 'RUN_ID'")
except IndexError:
sys.exit("ERROR: Format of --split_runs tab separated file not as expected")
logging.error("ERROR: Format of --split_runs tab separated file not as expected") |
def _handle_raw_packet(self, raw_packet):
"""Parse incoming packet."""
if raw_packet[1:2] == b'\x1f':
self._reset_timeout()
year = raw_packet[2]
month = raw_packet[3]
day = raw_packet[4]
hour = raw_packet[5]
minute = raw_packet[6]
sec = raw_packet[7]
week = raw_packet[8]
self.logger.debug(
'received date: Year: %s, Month: %s, Day: %s, Hour: %s, '
'Minute: %s, Sec: %s, Week %s',
year, month, day, hour, minute, sec, week)
elif raw_packet[1:2] == b'\x0c':
states = {}
changes = []
for switch in range(0, 16):
if raw_packet[2+switch:3+switch] == b'\x01':
states[format(switch, 'x')] = True
if (self.client.states.get(format(switch, 'x'), None)
is not True):
changes.append(format(switch, 'x'))
self.client.states[format(switch, 'x')] = True
elif raw_packet[2+switch:3+switch] == b'\x02':
states[format(switch, 'x')] = False
if (self.client.states.get(format(switch, 'x'), None)
is not False):
changes.append(format(switch, 'x'))
self.client.states[format(switch, 'x')] = False
for switch in changes:
for status_cb in self.client.status_callbacks.get(switch, []):
status_cb(states[switch])
self.logger.debug(states)
if self.client.in_transaction:
self.client.in_transaction = False
self.client.active_packet = False
self.client.active_transaction.set_result(states)
while self.client.status_waiters:
waiter = self.client.status_waiters.popleft()
waiter.set_result(states)
if self.client.waiters:
self.send_packet()
else:
self._cmd_timeout.cancel()
elif self._cmd_timeout:
self._cmd_timeout.cancel()
else:
self.logger.warning('received unknown packet: %s',
binascii.hexlify(raw_packet)) | Parse incoming packet. | Below is the the instruction that describes the task:
### Input:
Parse incoming packet.
### Response:
def _handle_raw_packet(self, raw_packet):
"""Parse incoming packet."""
if raw_packet[1:2] == b'\x1f':
self._reset_timeout()
year = raw_packet[2]
month = raw_packet[3]
day = raw_packet[4]
hour = raw_packet[5]
minute = raw_packet[6]
sec = raw_packet[7]
week = raw_packet[8]
self.logger.debug(
'received date: Year: %s, Month: %s, Day: %s, Hour: %s, '
'Minute: %s, Sec: %s, Week %s',
year, month, day, hour, minute, sec, week)
elif raw_packet[1:2] == b'\x0c':
states = {}
changes = []
for switch in range(0, 16):
if raw_packet[2+switch:3+switch] == b'\x01':
states[format(switch, 'x')] = True
if (self.client.states.get(format(switch, 'x'), None)
is not True):
changes.append(format(switch, 'x'))
self.client.states[format(switch, 'x')] = True
elif raw_packet[2+switch:3+switch] == b'\x02':
states[format(switch, 'x')] = False
if (self.client.states.get(format(switch, 'x'), None)
is not False):
changes.append(format(switch, 'x'))
self.client.states[format(switch, 'x')] = False
for switch in changes:
for status_cb in self.client.status_callbacks.get(switch, []):
status_cb(states[switch])
self.logger.debug(states)
if self.client.in_transaction:
self.client.in_transaction = False
self.client.active_packet = False
self.client.active_transaction.set_result(states)
while self.client.status_waiters:
waiter = self.client.status_waiters.popleft()
waiter.set_result(states)
if self.client.waiters:
self.send_packet()
else:
self._cmd_timeout.cancel()
elif self._cmd_timeout:
self._cmd_timeout.cancel()
else:
self.logger.warning('received unknown packet: %s',
binascii.hexlify(raw_packet)) |
def path_param(name, ns):
    """
    Build a path parameter definition.
    """
    # Path identifiers are always serialized as strings; UUID-typed
    # identifiers additionally carry the "uuid" format hint.
    kwargs = {
        "name": name,
        "in": "path",
        "required": True,
        "type": "string",
    }
    if ns.identifier_type == "uuid":
        kwargs["format"] = "uuid"
    return swagger.PathParameterSubSchema(**kwargs)
# | Build a path parameter definition. | Below is the the instruction that describes the task:
### Input:
Build a path parameter definition.
### Response:
def path_param(name, ns):
"""
Build a path parameter definition.
"""
if ns.identifier_type == "uuid":
param_type = "string"
param_format = "uuid"
else:
param_type = "string"
param_format = None
kwargs = {
"name": name,
"in": "path",
"required": True,
"type": param_type,
}
if param_format:
kwargs["format"] = param_format
return swagger.PathParameterSubSchema(**kwargs) |
def nacm_rule_list_rule_context(self, **kwargs):
    """Auto Generated Code

    Build a NETCONF <config> tree that sets the tail-f ACM <context>
    leaf of a rule inside a rule-list, then hand it to the callback.

    Keyword args:
        name: name used for both the rule-list entry and the rule entry.
        context: value for the <context> leaf.
        callback: optional override for ``self._callback``.
    """
    config = ET.Element("config")
    nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
    rule_list = ET.SubElement(nacm, "rule-list")
    # Bug fix: the generated original called kwargs.pop('name') twice,
    # which always raised KeyError on the second pop because the key was
    # already consumed.  Pop once and reuse the value for both keys.
    name = kwargs.pop('name')
    name_key = ET.SubElement(rule_list, "name")
    name_key.text = name
    rule = ET.SubElement(rule_list, "rule")
    name_key = ET.SubElement(rule, "name")
    name_key.text = name
    context = ET.SubElement(rule, "context", xmlns="http://tail-f.com/yang/acm")
    context.text = kwargs.pop('context')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
# | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def nacm_rule_list_rule_context(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm")
rule_list = ET.SubElement(nacm, "rule-list")
name_key = ET.SubElement(rule_list, "name")
name_key.text = kwargs.pop('name')
rule = ET.SubElement(rule_list, "rule")
name_key = ET.SubElement(rule, "name")
name_key.text = kwargs.pop('name')
context = ET.SubElement(rule, "context", xmlns="http://tail-f.com/yang/acm")
context.text = kwargs.pop('context')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_vnport_ka(self, **kwargs):
    """Auto Generated Code

    Build the <fcoe_get_interface> RPC reply element carrying the
    received VN-port keep-alive counter for one FCoE interface, then
    hand it to the callback.

    Keyword args:
        fcoe_intf_fcoe_port_id: key identifying the FCoE interface.
        fcoe_intf_rx_vnport_ka: received VN-port keep-alive count.
        callback: optional override for ``self._callback``.
    """
    # The generated original first created an unused ET.Element("config")
    # and immediately rebound the name; that dead store is dropped here.
    config = ET.Element("fcoe_get_interface")
    output = ET.SubElement(config, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    fcoe_intf_rx_vnport_ka = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-vnport-ka")
    fcoe_intf_rx_vnport_ka.text = kwargs.pop('fcoe_intf_rx_vnport_ka')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
# | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_vnport_ka(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_rx_vnport_ka = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-vnport-ka")
fcoe_intf_rx_vnport_ka.text = kwargs.pop('fcoe_intf_rx_vnport_ka')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def rpcexec(self, payload):
    """ Execute a call by sending the payload
    :param json payload: Payload data
    :raises ValueError: if the server does not respond in proper JSON
    format
    """
    # (Re)establish the websocket connection on demand.
    if not self.ws:  # pragma: no cover
        self.connect()
    log.debug(json.dumps(payload))
    # Hold the lock across the send/recv pair: the websocket reply must
    # be consumed by the same caller that issued the request, so other
    # threads may not interleave their own requests in between.
    self.__lock.acquire()
    try:
        message = json.dumps(payload, ensure_ascii=False).encode("utf8")
        self.ws.send(message)
        reply = self.ws.recv()
    finally:
        self.__lock.release()
    return reply
# | Execute a call by sending the payload
:param json payload: Payload data
:raises ValueError: if the server does not respond in proper JSON
format | Below is the instruction that describes the task:
### Input:
Execute a call by sending the payload
:param json payload: Payload data
:raises ValueError: if the server does not respond in proper JSON
format
### Response:
def rpcexec(self, payload):
""" Execute a call by sending the payload
:param json payload: Payload data
:raises ValueError: if the server does not respond in proper JSON
format
"""
if not self.ws: # pragma: no cover
self.connect()
log.debug(json.dumps(payload))
# Mutex/Lock
# We need to lock because we need to wait for websocket
# response but don't want to allow other threads to send
# requests (that might take less time) to disturb
self.__lock.acquire()
# Send over websocket
try:
self.ws.send(json.dumps(payload, ensure_ascii=False).encode("utf8"))
# Receive from websocket
ret = self.ws.recv()
finally:
# Release lock
self.__lock.release()
return ret |
def shells_difference(s1, s2):
    """
    Computes and prints the differences between two lists of shells
    If the shells contain a different number of primitives,
    or the lists are of different length, inf is returned.
    Otherwise, the maximum relative difference is returned.
    """
    # Running maximum of all relative differences seen so far.
    max_rdiff = 0.0
    nsh = len(s1)
    if len(s2) != nsh:
        print("Different number of shells: {} vs {}".format(len(s1), len(s2)))
        return float('inf')
    # NOTE(review): sort_shells presumably puts both lists into a
    # canonical order so positional comparison is meaningful — confirm.
    shells1 = sort_shells(s1)
    shells2 = sort_shells(s2)
    for n in range(nsh):
        sh1 = shells1[n]
        sh2 = shells2[n]
        # Structural mismatches short-circuit with inf.
        if sh1['angular_momentum'] != sh2['angular_momentum']:
            print("Different angular momentum for shell {}".format(n))
            return float('inf')
        nprim = len(sh1['exponents'])
        if len(sh2['exponents']) != nprim:
            print("Different number of primitives for shell {}".format(n))
            return float('inf')
        ngen = len(sh1['coefficients'])
        if len(sh2['coefficients']) != ngen:
            print("Different number of general contractions for shell {}".format(n))
            return float('inf')
        # Element-wise comparison of exponents and, per primitive, of the
        # coefficients in every general contraction.
        for p in range(nprim):
            e1 = sh1['exponents'][p]
            e2 = sh2['exponents'][p]
            r = _reldiff(e1, e2)
            if r > 0.0:
                print(" Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
            max_rdiff = max(max_rdiff, r)
            for g in range(ngen):
                c1 = sh1['coefficients'][g][p]
                c2 = sh2['coefficients'][g][p]
                r = _reldiff(c1, c2)
                if r > 0.0:
                    print("Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, r))
                max_rdiff = max(max_rdiff, r)
    print()
    print("Max relative difference for these shells: {}".format(max_rdiff))
    return max_rdiff | Computes and prints the differences between two lists of shells
If the shells contain a different number of primitives,
or the lists are of different length, inf is returned.
Otherwise, the maximum relative difference is returned. | Below is the instruction that describes the task:
### Input:
Computes and prints the differences between two lists of shells
If the shells contain a different number of primitives,
or the lists are of different length, inf is returned.
Otherwise, the maximum relative difference is returned.
### Response:
def shells_difference(s1, s2):
"""
Computes and prints the differences between two lists of shells
If the shells contain a different number primitives,
or the lists are of different length, inf is returned.
Otherwise, the maximum relative difference is returned.
"""
max_rdiff = 0.0
nsh = len(s1)
if len(s2) != nsh:
print("Different number of shells: {} vs {}".format(len(s1), len(s2)))
return float('inf')
shells1 = sort_shells(s1)
shells2 = sort_shells(s2)
for n in range(nsh):
sh1 = shells1[n]
sh2 = shells2[n]
if sh1['angular_momentum'] != sh2['angular_momentum']:
print("Different angular momentum for shell {}".format(n))
return float('inf')
nprim = len(sh1['exponents'])
if len(sh2['exponents']) != nprim:
print("Different number of primitives for shell {}".format(n))
return float('inf')
ngen = len(sh1['coefficients'])
if len(sh2['coefficients']) != ngen:
print("Different number of general contractions for shell {}".format(n))
return float('inf')
for p in range(nprim):
e1 = sh1['exponents'][p]
e2 = sh2['exponents'][p]
r = _reldiff(e1, e2)
if r > 0.0:
print(" Exponent {:3}: {:20} {:20} -> {:16.8e}".format(p, e1, e2, r))
max_rdiff = max(max_rdiff, r)
for g in range(ngen):
c1 = sh1['coefficients'][g][p]
c2 = sh2['coefficients'][g][p]
r = _reldiff(c1, c2)
if r > 0.0:
print("Coefficient {:3}: {:20} {:20} -> {:16.8e}".format(p, c1, c2, r))
max_rdiff = max(max_rdiff, r)
print()
print("Max relative difference for these shells: {}".format(max_rdiff))
return max_rdiff |
def checkPrediction2(self, patternNZs, output=None, confidence=None,
                     details=False):
    """
    This function will replace checkPrediction.
    This function produces goodness-of-match scores for a set of input patterns, by
    checking for their presense in the current and predicted output of the TP.
    Returns a global count of the number of extra and missing bits, the
    confidence scores for each input pattern, and (if requested) the
    bits in each input pattern that were not present in the TP's prediction.
    todo: Add option to check predictedState only.
    Parameters:
    ==========
    patternNZs: a list of input patterns that we want to check for. Each element
                is a list of the non-zeros in that pattern.
    output:     The output of the TP. If not specified, then use the
                TP's current output. This can be specified if you are
                trying to check the prediction metric for an output from
                the past.
    confidence: The cell confidences. If not specified, then use the
                TP's current self.confidence. This can be specified if you are
                trying to check the prediction metrics for an output
                from the past.
    details:    if True, also include details of missing bits per pattern.
    Return value:
    ============
    The following list is returned:
    [
      totalExtras,
      totalMissing,
      [conf_1, conf_2, ...],
      [missing1, missing2, ...]
    ]
    totalExtras: a global count of the number of 'extras', i.e. bits that
                 are on in the current output but not in the or of all the
                 passed in patterns
    totalMissing: a global count of all the missing bits, i.e. the bits that
                  are on in the or of the patterns, but not in the current
                  output
    conf_i      the confidence score for the i'th pattern in patternsToCheck
    missing_i   the bits in the i'th pattern that were missing
                in the output. This list is only returned if details is
                True.
    """
    # Get the non-zeros in each pattern
    numPatterns = len(patternNZs)
    # Compute the union of all the expected patterns
    orAll = set()
    orAll = orAll.union(*patternNZs)
    # Get the list of active columns in the output
    if output is None:
        assert self.currentOutput is not None
        output = self.currentOutput
    # Collapse per-cell activity to the set of active column indices.
    # NOTE(review): assumes `output` is a 2-D (columns x cells) numpy
    # array — confirm against callers.
    output = set(output.sum(axis=1).nonzero()[0])
    # Compute the total extra and missing in the output
    totalExtras = len(output.difference(orAll))
    totalMissing = len(orAll.difference(output))
    # Get the percent confidence level per column by summing the confidence levels
    # of the cells in the column. During training, each segment's confidence
    # number is computed as a running average of how often it correctly
    # predicted bottom-up activity on that column. A cell's confidence number
    # is taken from the first active segment found in the cell. Note that
    # confidence will only be non-zero for predicted columns.
    if confidence is None:
        confidence = self.confidence['t']
    # Set the column confidence to be the max of the cell confidences in that
    # column.
    colConfidence = self.columnConfidences(confidence)
    # Assign confidences to each pattern.  Each entry is a 3-tuple:
    # (overall score, positive score, negative score), see below.
    confidences = []
    # NOTE(review): xrange — this module targets Python 2.
    for i in xrange(numPatterns):
        # Sum of the column confidences for this pattern
        positivePredictionSum = colConfidence[patternNZs[i]].sum()
        # How many columns in this pattern
        positiveColumnCount = len(patternNZs[i])
        # Sum of all the column confidences
        totalPredictionSum = colConfidence.sum()
        # Total number of columns
        totalColumnCount = len(colConfidence)
        negativePredictionSum = totalPredictionSum - positivePredictionSum
        negativeColumnCount = totalColumnCount - positiveColumnCount
        # Compute the average confidence score per column for this pattern
        # (guard against empty patterns to avoid division by zero).
        if positiveColumnCount != 0:
            positivePredictionScore = positivePredictionSum/positiveColumnCount
        else:
            positivePredictionScore = 0.0
        # Compute the average confidence score per column for the other patterns
        if negativeColumnCount != 0:
            negativePredictionScore = negativePredictionSum/negativeColumnCount
        else:
            negativePredictionScore = 0.0
        # Overall score rewards confidence inside the pattern and
        # penalizes confidence outside it.
        predictionScore = positivePredictionScore - negativePredictionScore
        confidences.append((predictionScore,
                            positivePredictionScore,
                            negativePredictionScore))
    # Include detail? (bits in each pattern that were missing from the output)
    if details:
        missingPatternBits = [set(pattern).difference(output) \
                              for pattern in patternNZs]
        return (totalExtras, totalMissing, confidences, missingPatternBits)
    else:
        return (totalExtras, totalMissing, confidences) | This function will replace checkPrediction.
This function produces goodness-of-match scores for a set of input patterns, by
checking for their presense in the current and predicted output of the TP.
Returns a global count of the number of extra and missing bits, the
confidence scores for each input pattern, and (if requested) the
bits in each input pattern that were not present in the TP's prediction.
todo: Add option to check predictedState only.
Parameters:
==========
patternNZs: a list of input patterns that we want to check for. Each element
is a list of the non-zeros in that pattern.
output: The output of the TP. If not specified, then use the
TP's current output. This can be specified if you are
trying to check the prediction metric for an output from
the past.
confidence: The cell confidences. If not specified, then use the
TP's current self.confidence. This can be specified if you are
trying to check the prediction metrics for an output
from the past.
details: if True, also include details of missing bits per pattern.
Return value:
============
The following list is returned:
[
totalExtras,
totalMissing,
[conf_1, conf_2, ...],
[missing1, missing2, ...]
]
totalExtras: a global count of the number of 'extras', i.e. bits that
are on in the current output but not in the or of all the
passed in patterns
totalMissing: a global count of all the missing bits, i.e. the bits that
are on in the or of the patterns, but not in the current
output
conf_i the confidence score for the i'th pattern in patternsToCheck
missing_i the bits in the i'th pattern that were missing
in the output. This list is only returned if details is
True. | Below is the instruction that describes the task:
### Input:
This function will replace checkPrediction.
This function produces goodness-of-match scores for a set of input patterns, by
checking for their presense in the current and predicted output of the TP.
Returns a global count of the number of extra and missing bits, the
confidence scores for each input pattern, and (if requested) the
bits in each input pattern that were not present in the TP's prediction.
todo: Add option to check predictedState only.
Parameters:
==========
patternNZs: a list of input patterns that we want to check for. Each element
is a list of the non-zeros in that pattern.
output: The output of the TP. If not specified, then use the
TP's current output. This can be specified if you are
trying to check the prediction metric for an output from
the past.
confidence: The cell confidences. If not specified, then use the
TP's current self.confidence. This can be specified if you are
trying to check the prediction metrics for an output
from the past.
details: if True, also include details of missing bits per pattern.
Return value:
============
The following list is returned:
[
totalExtras,
totalMissing,
[conf_1, conf_2, ...],
[missing1, missing2, ...]
]
totalExtras: a global count of the number of 'extras', i.e. bits that
are on in the current output but not in the or of all the
passed in patterns
totalMissing: a global count of all the missing bits, i.e. the bits that
are on in the or of the patterns, but not in the current
output
conf_i the confidence score for the i'th pattern in patternsToCheck
missing_i the bits in the i'th pattern that were missing
in the output. This list is only returned if details is
True.
### Response:
def checkPrediction2(self, patternNZs, output=None, confidence=None,
details=False):
"""
This function will replace checkPrediction.
This function produces goodness-of-match scores for a set of input patterns, by
checking for their presense in the current and predicted output of the TP.
Returns a global count of the number of extra and missing bits, the
confidence scores for each input pattern, and (if requested) the
bits in each input pattern that were not present in the TP's prediction.
todo: Add option to check predictedState only.
Parameters:
==========
patternNZs: a list of input patterns that we want to check for. Each element
is a list of the non-zeros in that pattern.
output: The output of the TP. If not specified, then use the
TP's current output. This can be specified if you are
trying to check the prediction metric for an output from
the past.
confidence: The cell confidences. If not specified, then use the
TP's current self.confidence. This can be specified if you are
trying to check the prediction metrics for an output
from the past.
details: if True, also include details of missing bits per pattern.
Return value:
============
The following list is returned:
[
totalExtras,
totalMissing,
[conf_1, conf_2, ...],
[missing1, missing2, ...]
]
totalExtras: a global count of the number of 'extras', i.e. bits that
are on in the current output but not in the or of all the
passed in patterns
totalMissing: a global count of all the missing bits, i.e. the bits that
are on in the or of the patterns, but not in the current
output
conf_i the confidence score for the i'th pattern in patternsToCheck
missing_i the bits in the i'th pattern that were missing
in the output. This list is only returned if details is
True.
"""
# Get the non-zeros in each pattern
numPatterns = len(patternNZs)
# Compute the union of all the expected patterns
orAll = set()
orAll = orAll.union(*patternNZs)
# Get the list of active columns in the output
if output is None:
assert self.currentOutput is not None
output = self.currentOutput
output = set(output.sum(axis=1).nonzero()[0])
# Compute the total extra and missing in the output
totalExtras = len(output.difference(orAll))
totalMissing = len(orAll.difference(output))
# Get the percent confidence level per column by summing the confidence levels
# of the cells in the column. During training, each segment's confidence
# number is computed as a running average of how often it correctly
# predicted bottom-up activity on that column. A cell's confidence number
# is taken from the first active segment found in the cell. Note that
# confidence will only be non-zero for predicted columns.
if confidence is None:
confidence = self.confidence['t']
# Set the column confidence to be the max of the cell confidences in that
# column.
colConfidence = self.columnConfidences(confidence)
# Assign confidences to each pattern
confidences = []
for i in xrange(numPatterns):
# Sum of the column confidences for this pattern
positivePredictionSum = colConfidence[patternNZs[i]].sum()
# How many columns in this pattern
positiveColumnCount = len(patternNZs[i])
# Sum of all the column confidences
totalPredictionSum = colConfidence.sum()
# Total number of columns
totalColumnCount = len(colConfidence)
negativePredictionSum = totalPredictionSum - positivePredictionSum
negativeColumnCount = totalColumnCount - positiveColumnCount
# Compute the average confidence score per column for this pattern
if positiveColumnCount != 0:
positivePredictionScore = positivePredictionSum/positiveColumnCount
else:
positivePredictionScore = 0.0
# Compute the average confidence score per column for the other patterns
if negativeColumnCount != 0:
negativePredictionScore = negativePredictionSum/negativeColumnCount
else:
negativePredictionScore = 0.0
predictionScore = positivePredictionScore - negativePredictionScore
confidences.append((predictionScore,
positivePredictionScore,
negativePredictionScore))
# Include detail? (bits in each pattern that were missing from the output)
if details:
missingPatternBits = [set(pattern).difference(output) \
for pattern in patternNZs]
return (totalExtras, totalMissing, confidences, missingPatternBits)
else:
return (totalExtras, totalMissing, confidences) |
def modifications(self) -> List[Modification]:
    """
    Return a list of modified files.
    The result is computed once and memoized on the instance.
    :return: List[Modification] modifications
    """
    cached = self._modifications
    if cached is None:
        # First access: compute and remember the list.
        cached = self._get_modifications()
        self._modifications = cached
    return cached
# | Return a list of modified files.
:return: List[Modification] modifications | Below is the instruction that describes the task:
### Input:
Return a list of modified files.
:return: List[Modification] modifications
### Response:
def modifications(self) -> List[Modification]:
"""
Return a list of modified files.
:return: List[Modification] modifications
"""
if self._modifications is None:
self._modifications = self._get_modifications()
return self._modifications |
def _onShortcutHome(self, select):
    """Home pressed. Run a state machine:
    1. Not at the line beginning. Move to the beginning of the line or
       the beginning of the indent, whichever is closest to the current
       cursor position.
    2. At the line beginning. Move to the beginning of the indent.
    3. At the beginning of the indent. Go to the beginning of the block.
    4. At the beginning of the block. Go to the beginning of the indent.

    :param select: if True, move with KeepAnchor so the traversed text
        becomes selected; otherwise just relocate the cursor.
    """
    # Gather info for cursor state and movement.
    cursor = self.textCursor()
    text = cursor.block().text()
    # Width of the leading whitespace (the "indent") of this block.
    indent = len(text) - len(text.lstrip())
    anchor = QTextCursor.KeepAnchor if select else QTextCursor.MoveAnchor
    # Determine current state and move based on that.
    if cursor.positionInBlock() == indent:
        # We're at the beginning of the indent. Go to the beginning of the
        # block.
        cursor.movePosition(QTextCursor.StartOfBlock, anchor)
    elif cursor.atBlockStart():
        # We're at the beginning of the block. Go to the beginning of the
        # indent.
        setPositionInBlock(cursor, indent, anchor)
    else:
        # Neither of the above. There's no way I can find to directly
        # determine if we're at the beginning of a line. So, try moving and
        # see if the cursor location changes.
        pos = cursor.positionInBlock()
        cursor.movePosition(QTextCursor.StartOfLine, anchor)
        # If we didn't move, we were already at the beginning of the line.
        # So, move to the indent.
        if pos == cursor.positionInBlock():
            setPositionInBlock(cursor, indent, anchor)
        # If we did move, check to see if the indent was closer to the
        # cursor than the beginning of the indent. If so, move to the
        # indent.
        elif cursor.positionInBlock() < indent:
            setPositionInBlock(cursor, indent, anchor)
    self.setTextCursor(cursor) | Home pressed. Run a state machine:
1. Not at the line beginning. Move to the beginning of the line or
the beginning of the indent, whichever is closest to the current
cursor position.
2. At the line beginning. Move to the beginning of the indent.
3. At the beginning of the indent. Go to the beginning of the block.
4. At the beginning of the block. Go to the beginning of the indent. | Below is the the instruction that describes the task:
### Input:
Home pressed. Run a state machine:
1. Not at the line beginning. Move to the beginning of the line or
the beginning of the indent, whichever is closest to the current
cursor position.
2. At the line beginning. Move to the beginning of the indent.
3. At the beginning of the indent. Go to the beginning of the block.
4. At the beginning of the block. Go to the beginning of the indent.
### Response:
def _onShortcutHome(self, select):
"""Home pressed. Run a state machine:
1. Not at the line beginning. Move to the beginning of the line or
the beginning of the indent, whichever is closest to the current
cursor position.
2. At the line beginning. Move to the beginning of the indent.
3. At the beginning of the indent. Go to the beginning of the block.
4. At the beginning of the block. Go to the beginning of the indent.
"""
# Gather info for cursor state and movement.
cursor = self.textCursor()
text = cursor.block().text()
indent = len(text) - len(text.lstrip())
anchor = QTextCursor.KeepAnchor if select else QTextCursor.MoveAnchor
# Determine current state and move based on that.
if cursor.positionInBlock() == indent:
# We're at the beginning of the indent. Go to the beginning of the
# block.
cursor.movePosition(QTextCursor.StartOfBlock, anchor)
elif cursor.atBlockStart():
# We're at the beginning of the block. Go to the beginning of the
# indent.
setPositionInBlock(cursor, indent, anchor)
else:
# Neither of the above. There's no way I can find to directly
# determine if we're at the beginning of a line. So, try moving and
# see if the cursor location changes.
pos = cursor.positionInBlock()
cursor.movePosition(QTextCursor.StartOfLine, anchor)
# If we didn't move, we were already at the beginning of the line.
# So, move to the indent.
if pos == cursor.positionInBlock():
setPositionInBlock(cursor, indent, anchor)
# If we did move, check to see if the indent was closer to the
# cursor than the beginning of the indent. If so, move to the
# indent.
elif cursor.positionInBlock() < indent:
setPositionInBlock(cursor, indent, anchor)
self.setTextCursor(cursor) |
def com_google_fonts_check_fvar_name_entries(ttFont):
    """All name entries referenced by fvar instances exist on the name table?"""
    missing_any = False
    for instance in ttFont["fvar"].instances:
        # Look for at least one name record carrying this instance's
        # subfamily name ID.
        if not any(rec.nameID == instance.subfamilyNameID
                   for rec in ttFont["name"].names):
            missing_any = True
            yield FAIL, (f"Named instance with coordinates {instance.coordinates}"
                         f" lacks an entry on the name table (nameID={instance.subfamilyNameID}).")
    if not missing_any:
        yield PASS, "OK"
# | All name entries referenced by fvar instances exist on the name table?
### Input:
All name entries referenced by fvar instances exist on the name table?
### Response:
def com_google_fonts_check_fvar_name_entries(ttFont):
"""All name entries referenced by fvar instances exist on the name table?"""
failed = False
for instance in ttFont["fvar"].instances:
entries = [entry for entry in ttFont["name"].names if entry.nameID == instance.subfamilyNameID]
if len(entries) == 0:
failed = True
yield FAIL, (f"Named instance with coordinates {instance.coordinates}"
f" lacks an entry on the name table (nameID={instance.subfamilyNameID}).")
if not failed:
yield PASS, "OK" |
def HasStorage(self):
    """
    Flag indicating if storage is available.
    Returns:
        bool: True if available. False otherwise.
    """
    # NOTE(review): local import presumably avoids a circular dependency
    # between the State modules — confirm.
    from neo.Core.State.ContractState import ContractPropertyState
    # Bitwise & binds tighter than > in Python, so this is the same test
    # as the original; parentheses via a named mask for readability.
    mask = self.ContractProperties & ContractPropertyState.HasStorage
    return mask > 0
# | Flag indicating if storage is available.
Returns:
bool: True if available. False otherwise. | Below is the the instruction that describes the task:
### Input:
Flag indicating if storage is available.
Returns:
bool: True if available. False otherwise.
### Response:
def HasStorage(self):
"""
Flag indicating if storage is available.
Returns:
bool: True if available. False otherwise.
"""
from neo.Core.State.ContractState import ContractPropertyState
return self.ContractProperties & ContractPropertyState.HasStorage > 0 |
def add(self, extension):
    """
    Add an extension to the editor.
    :param extension: The extension instance to add.
    :returns: The installed extension instance.
    """
    ext_name = extension.name
    logger.debug('adding extension {}'.format(ext_name))
    # Register the extension by name, then let it hook itself onto the
    # editor widget.
    self._extensions[ext_name] = extension
    extension.on_install(self.editor)
    return extension
# | Add a extension to the editor.
:param extension: The extension instance to add. | Below is the instruction that describes the task:
### Input:
Add a extension to the editor.
:param extension: The extension instance to add.
### Response:
def add(self, extension):
"""
Add a extension to the editor.
:param extension: The extension instance to add.
"""
logger.debug('adding extension {}'.format(extension.name))
self._extensions[extension.name] = extension
extension.on_install(self.editor)
return extension |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.