code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _create_values_table(self):
"""Create table lexem_type->{identificator->vocabulary},
and return it with sizes of an identificator as lexem_type->identificator_size"""
# number of existing character, and returned dicts
len_alph = len(self.alphabet)
identificators_table = {k:{} for k in self.voc_values.keys()}
identificators_sizes = {k:-1 for k in self.voc_values.keys()}
for lexem_type, vocabulary in self.voc_values.items():
# find number of different values that can be found,
# and size of an identificator.
len_vocb = len(vocabulary)
identificators_sizes[lexem_type] = ceil(log(len_vocb, len_alph))
# create list of possible identificators
num2alph = lambda x, n: self.alphabet[(x // len_alph**n) % len_alph]
identificators = [[str(num2alph(x, n))
for n in range(identificators_sizes[lexem_type])
] # this list is an identificator
for x in range(len_alph**identificators_sizes[lexem_type])
] # this one is a list of identificator
# initialize iterable
zip_id_voc = zip_longest(
identificators, vocabulary,
fillvalue=None
)
# create dict {identificator:word}
for idt, voc in zip_id_voc:
identificators_table[lexem_type][''.join(idt)] = voc
# return all
return identificators_table, identificators_sizes | Create table lexem_type->{identificator->vocabulary},
    and return it with sizes of an identificator as lexem_type->identificator_size | Below is the instruction that describes the task:
### Input:
Create table lexem_type->{identificator->vocabulary},
and return it with sizes of an identificator as lexem_type->identificator_size
### Response:
def _create_values_table(self):
"""Create table lexem_type->{identificator->vocabulary},
and return it with sizes of an identificator as lexem_type->identificator_size"""
# number of existing character, and returned dicts
len_alph = len(self.alphabet)
identificators_table = {k:{} for k in self.voc_values.keys()}
identificators_sizes = {k:-1 for k in self.voc_values.keys()}
for lexem_type, vocabulary in self.voc_values.items():
# find number of different values that can be found,
# and size of an identificator.
len_vocb = len(vocabulary)
identificators_sizes[lexem_type] = ceil(log(len_vocb, len_alph))
# create list of possible identificators
num2alph = lambda x, n: self.alphabet[(x // len_alph**n) % len_alph]
identificators = [[str(num2alph(x, n))
for n in range(identificators_sizes[lexem_type])
] # this list is an identificator
for x in range(len_alph**identificators_sizes[lexem_type])
] # this one is a list of identificator
# initialize iterable
zip_id_voc = zip_longest(
identificators, vocabulary,
fillvalue=None
)
# create dict {identificator:word}
for idt, voc in zip_id_voc:
identificators_table[lexem_type][''.join(idt)] = voc
# return all
return identificators_table, identificators_sizes |
def _parse_summary_frames(self, file_obj):
"""Iterate through the byte data and fill the summary_frames"""
for _ in range(self.n_summary_frames):
dom_id = unpack('<i', file_obj.read(4))[0]
dq_status = file_obj.read(4) # probably dom status? # noqa
dom_status = unpack('<iiii', file_obj.read(16))
raw_rates = unpack('b' * 31, file_obj.read(31))
pmt_rates = [self._get_rate(value) for value in raw_rates]
self.summary_frames[dom_id] = pmt_rates
self.dq_status[dom_id] = dq_status
self.dom_status[dom_id] = dom_status
        self.dom_rates[dom_id] = np.sum(pmt_rates) | Iterate through the byte data and fill the summary_frames | Below is the instruction that describes the task:
### Input:
Iterate through the byte data and fill the summary_frames
### Response:
def _parse_summary_frames(self, file_obj):
"""Iterate through the byte data and fill the summary_frames"""
for _ in range(self.n_summary_frames):
dom_id = unpack('<i', file_obj.read(4))[0]
dq_status = file_obj.read(4) # probably dom status? # noqa
dom_status = unpack('<iiii', file_obj.read(16))
raw_rates = unpack('b' * 31, file_obj.read(31))
pmt_rates = [self._get_rate(value) for value in raw_rates]
self.summary_frames[dom_id] = pmt_rates
self.dq_status[dom_id] = dq_status
self.dom_status[dom_id] = dom_status
self.dom_rates[dom_id] = np.sum(pmt_rates) |
def val_xml(self):
"""
Return the unicode XML snippet for the ``<c:val>`` element describing
this series, containing the series values and their spreadsheet range
reference.
"""
return self._val_tmpl.format(**{
'nsdecls': '',
'values_ref': self._series.values_ref,
'number_format': self._series.number_format,
'val_count': len(self._series),
'val_pt_xml': self._val_pt_xml,
}) | Return the unicode XML snippet for the ``<c:val>`` element describing
this series, containing the series values and their spreadsheet range
    reference. | Below is the instruction that describes the task:
### Input:
Return the unicode XML snippet for the ``<c:val>`` element describing
this series, containing the series values and their spreadsheet range
reference.
### Response:
def val_xml(self):
"""
Return the unicode XML snippet for the ``<c:val>`` element describing
this series, containing the series values and their spreadsheet range
reference.
"""
return self._val_tmpl.format(**{
'nsdecls': '',
'values_ref': self._series.values_ref,
'number_format': self._series.number_format,
'val_count': len(self._series),
'val_pt_xml': self._val_pt_xml,
}) |
def detect_cid_in_current_path(i):
"""
Input: {
(path) - path, otherwise current directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
(module_uoa) - module UOA
(module_uid) - module UID
(module_alias) - module alias
(data_uoa) - data UOA
(data_uid) - data UID
(data_alias) - data alias
}
"""
p=i.get('path','')
if p=='': p=os.getcwd()
p=os.path.normpath(p)
dirs=[]
p1=''
pr='*'
found=False
while pr!='':
p1=os.path.join(p, cfg['repo_file'])
if os.path.isfile(p1):
found=True
break
p2=os.path.split(p)
p=p2[0]
pr=p2[1]
dirs.append(pr)
if not found:
return {'return':16, 'error':'repository is not detected in the current path'}
# Find info about repo (prepared as return dict)
r=find_repo_by_path({'path':p})
if r['return']>0: return r
# Check info about module
ld=len(dirs)
if ld>0:
m=dirs[ld-1]
rx=find_path_to_entry({'path':p, 'data_uoa':m})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['module_uoa']=rx['data_uoa']
r['module_uid']=rx['data_uid']
r['module_alias']=rx['data_alias']
# Check info about data
if ld>1:
d=dirs[ld-2]
rx=find_path_to_entry({'path':os.path.join(p,m), 'data_uoa':d})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['data_uoa']=rx['data_uoa']
r['data_uid']=rx['data_uid']
r['data_alias']=rx['data_alias']
return r | Input: {
(path) - path, otherwise current directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
(module_uoa) - module UOA
(module_uid) - module UID
(module_alias) - module alias
(data_uoa) - data UOA
(data_uid) - data UID
(data_alias) - data alias
            } | Below is the instruction that describes the task:
### Input:
Input: {
(path) - path, otherwise current directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
(module_uoa) - module UOA
(module_uid) - module UID
(module_alias) - module alias
(data_uoa) - data UOA
(data_uid) - data UID
(data_alias) - data alias
}
### Response:
def detect_cid_in_current_path(i):
"""
Input: {
(path) - path, otherwise current directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
(module_uoa) - module UOA
(module_uid) - module UID
(module_alias) - module alias
(data_uoa) - data UOA
(data_uid) - data UID
(data_alias) - data alias
}
"""
p=i.get('path','')
if p=='': p=os.getcwd()
p=os.path.normpath(p)
dirs=[]
p1=''
pr='*'
found=False
while pr!='':
p1=os.path.join(p, cfg['repo_file'])
if os.path.isfile(p1):
found=True
break
p2=os.path.split(p)
p=p2[0]
pr=p2[1]
dirs.append(pr)
if not found:
return {'return':16, 'error':'repository is not detected in the current path'}
# Find info about repo (prepared as return dict)
r=find_repo_by_path({'path':p})
if r['return']>0: return r
# Check info about module
ld=len(dirs)
if ld>0:
m=dirs[ld-1]
rx=find_path_to_entry({'path':p, 'data_uoa':m})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['module_uoa']=rx['data_uoa']
r['module_uid']=rx['data_uid']
r['module_alias']=rx['data_alias']
# Check info about data
if ld>1:
d=dirs[ld-2]
rx=find_path_to_entry({'path':os.path.join(p,m), 'data_uoa':d})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['data_uoa']=rx['data_uoa']
r['data_uid']=rx['data_uid']
r['data_alias']=rx['data_alias']
return r |
def add_from_child(self, resource, **kwargs):
""" Add a resource with its all children resources to the current
resource.
"""
new_resource = self.add(
resource.member_name, resource.collection_name, **kwargs)
for child in resource.children:
new_resource.add_from_child(child, **kwargs) | Add a resource with its all children resources to the current
        resource. | Below is the instruction that describes the task:
### Input:
Add a resource with its all children resources to the current
resource.
### Response:
def add_from_child(self, resource, **kwargs):
""" Add a resource with its all children resources to the current
resource.
"""
new_resource = self.add(
resource.member_name, resource.collection_name, **kwargs)
for child in resource.children:
new_resource.add_from_child(child, **kwargs) |
def do_query(self, args):
"""Query resource in use: query *IDN? """
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
print('Response: {}'.format(self.current.query(args)))
except Exception as e:
        print(e) | Query resource in use: query *IDN? | Below is the instruction that describes the task:
### Input:
Query resource in use: query *IDN?
### Response:
def do_query(self, args):
"""Query resource in use: query *IDN? """
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
print('Response: {}'.format(self.current.query(args)))
except Exception as e:
print(e) |
def _create_producer(self):
"""Tries to establish a Kafka consumer connection"""
if not self.closed:
try:
self.logger.debug("Creating new kafka producer using brokers: " +
str(self.settings['KAFKA_HOSTS']))
return KafkaProducer(bootstrap_servers=self.settings['KAFKA_HOSTS'],
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
retries=3,
linger_ms=self.settings['KAFKA_PRODUCER_BATCH_LINGER_MS'],
buffer_memory=self.settings['KAFKA_PRODUCER_BUFFER_BYTES'])
except KeyError as e:
self.logger.error('Missing setting named ' + str(e),
{'ex': traceback.format_exc()})
except:
self.logger.error("Couldn't initialize kafka producer.",
{'ex': traceback.format_exc()})
        raise | Tries to establish a Kafka consumer connection | Below is the instruction that describes the task:
### Input:
Tries to establish a Kafka consumer connection
### Response:
def _create_producer(self):
"""Tries to establish a Kafka consumer connection"""
if not self.closed:
try:
self.logger.debug("Creating new kafka producer using brokers: " +
str(self.settings['KAFKA_HOSTS']))
return KafkaProducer(bootstrap_servers=self.settings['KAFKA_HOSTS'],
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
retries=3,
linger_ms=self.settings['KAFKA_PRODUCER_BATCH_LINGER_MS'],
buffer_memory=self.settings['KAFKA_PRODUCER_BUFFER_BYTES'])
except KeyError as e:
self.logger.error('Missing setting named ' + str(e),
{'ex': traceback.format_exc()})
except:
self.logger.error("Couldn't initialize kafka producer.",
{'ex': traceback.format_exc()})
raise |
def allowance(self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded"""
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True | Preconditions:
- our agent applies to this entry
        - filename is URL decoded | Below is the instruction that describes the task:
### Input:
Preconditions:
- our agent applies to this entry
- filename is URL decoded
### Response:
def allowance(self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded"""
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True |
def find_company(cmp_id=None, cmp_name=None):
"""
find the company according company id (prioritary) or company name
:param cmp_id: the company id
:param cmp_name: the company name
:return: found company or None if not found
"""
LOGGER.debug("CompanyService.find_company")
if (cmp_id is None or not cmp_id) and (cmp_name is None or not cmp_name):
raise exceptions.ArianeCallParametersError('id and name')
if (cmp_id is not None and cmp_id) and (cmp_name is not None and cmp_name):
LOGGER.warn('CompanyService.find_company - Both id and name are defined. Will give you search on id.')
cmp_name = None
params = None
if cmp_id is not None and cmp_id:
params = {'id': cmp_id}
elif cmp_name is not None and cmp_name:
params = {'name': cmp_name}
ret = None
if params is not None:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
response = CompanyService.requester.call(args)
if response.rc == 0:
ret = Company.json_2_company(response.response_content)
elif response.rc != 404:
err_msg = 'CompanyService.find_company - Problem while finding company (id:' + str(cmp_id) + \
', name:' + str(cmp_name) + '). ' + \
'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(
err_msg
)
return ret | find the company according company id (prioritary) or company name
:param cmp_id: the company id
:param cmp_name: the company name
    :return: found company or None if not found | Below is the instruction that describes the task:
### Input:
find the company according company id (prioritary) or company name
:param cmp_id: the company id
:param cmp_name: the company name
:return: found company or None if not found
### Response:
def find_company(cmp_id=None, cmp_name=None):
"""
find the company according company id (prioritary) or company name
:param cmp_id: the company id
:param cmp_name: the company name
:return: found company or None if not found
"""
LOGGER.debug("CompanyService.find_company")
if (cmp_id is None or not cmp_id) and (cmp_name is None or not cmp_name):
raise exceptions.ArianeCallParametersError('id and name')
if (cmp_id is not None and cmp_id) and (cmp_name is not None and cmp_name):
LOGGER.warn('CompanyService.find_company - Both id and name are defined. Will give you search on id.')
cmp_name = None
params = None
if cmp_id is not None and cmp_id:
params = {'id': cmp_id}
elif cmp_name is not None and cmp_name:
params = {'name': cmp_name}
ret = None
if params is not None:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
response = CompanyService.requester.call(args)
if response.rc == 0:
ret = Company.json_2_company(response.response_content)
elif response.rc != 404:
err_msg = 'CompanyService.find_company - Problem while finding company (id:' + str(cmp_id) + \
', name:' + str(cmp_name) + '). ' + \
'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(
err_msg
)
return ret |
def options_handler(self, sock, cmd, opt):
"Negotiate options"
if cmd == NOP:
self.sendcommand(NOP)
elif cmd == WILL or cmd == WONT:
if self.WILLACK.has_key(opt):
self.sendcommand(self.WILLACK[opt], opt)
else:
self.sendcommand(DONT, opt)
if cmd == WILL and opt == TTYPE:
self.writecooked(IAC + SB + TTYPE + SEND + IAC + SE)
elif cmd == DO or cmd == DONT:
if self.DOACK.has_key(opt):
self.sendcommand(self.DOACK[opt], opt)
else:
self.sendcommand(WONT, opt)
if opt == ECHO:
self.DOECHO = (cmd == DO)
elif cmd == SE:
subreq = self.read_sb_data()
if subreq[0] == TTYPE and subreq[1] == IS:
try:
self.setterm(subreq[2:])
except:
log.debug("Terminal type not known")
elif subreq[0] == NAWS:
self.setnaws(subreq[1:])
elif cmd == SB:
pass
else:
        log.debug("Unhandled option: %s %s" % (cmdtxt, opttxt, )) | Negotiate options | Below is the instruction that describes the task:
### Input:
Negotiate options
### Response:
def options_handler(self, sock, cmd, opt):
"Negotiate options"
if cmd == NOP:
self.sendcommand(NOP)
elif cmd == WILL or cmd == WONT:
if self.WILLACK.has_key(opt):
self.sendcommand(self.WILLACK[opt], opt)
else:
self.sendcommand(DONT, opt)
if cmd == WILL and opt == TTYPE:
self.writecooked(IAC + SB + TTYPE + SEND + IAC + SE)
elif cmd == DO or cmd == DONT:
if self.DOACK.has_key(opt):
self.sendcommand(self.DOACK[opt], opt)
else:
self.sendcommand(WONT, opt)
if opt == ECHO:
self.DOECHO = (cmd == DO)
elif cmd == SE:
subreq = self.read_sb_data()
if subreq[0] == TTYPE and subreq[1] == IS:
try:
self.setterm(subreq[2:])
except:
log.debug("Terminal type not known")
elif subreq[0] == NAWS:
self.setnaws(subreq[1:])
elif cmd == SB:
pass
else:
log.debug("Unhandled option: %s %s" % (cmdtxt, opttxt, )) |
def create_attribute_query(self, destination, name_id=None,
attribute=None, message_id=0, consent=None,
extensions=None, sign=False, sign_prepare=False, sign_alg=None,
digest_alg=None,
**kwargs):
""" Constructs an AttributeQuery
:param destination: To whom the query should be sent
:param name_id: The identifier of the subject
:param attribute: A dictionary of attributes and values that is
asked for. The key are one of 4 variants:
3-tuple of name_format,name and friendly_name,
2-tuple of name_format and name,
1-tuple with name or
just the name as a string.
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param format: The format of the name ID
:param message_id: The identifier of the session
:param consent: Whether the principal have given her consent
:param extensions: Possible extensions
:param sign: Whether the query should be signed or not.
:param sign_prepare: Whether the Signature element should be added.
:return: Tuple of request ID and an AttributeQuery instance
"""
if name_id is None:
if "subject_id" in kwargs:
name_id = saml.NameID(text=kwargs["subject_id"])
for key in ["sp_name_qualifier", "name_qualifier",
"format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
else:
raise AttributeError("Missing required parameter")
elif isinstance(name_id, six.string_types):
name_id = saml.NameID(text=name_id)
for key in ["sp_name_qualifier", "name_qualifier", "format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
subject = saml.Subject(name_id=name_id)
if attribute:
attribute = do_attributes(attribute)
try:
nsprefix = kwargs["nsprefix"]
except KeyError:
nsprefix = None
return self._message(AttributeQuery, destination, message_id, consent,
extensions, sign, sign_prepare, subject=subject,
attribute=attribute, nsprefix=nsprefix,
sign_alg=sign_alg, digest_alg=digest_alg) | Constructs an AttributeQuery
:param destination: To whom the query should be sent
:param name_id: The identifier of the subject
:param attribute: A dictionary of attributes and values that is
asked for. The key are one of 4 variants:
3-tuple of name_format,name and friendly_name,
2-tuple of name_format and name,
1-tuple with name or
just the name as a string.
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param format: The format of the name ID
:param message_id: The identifier of the session
:param consent: Whether the principal have given her consent
:param extensions: Possible extensions
:param sign: Whether the query should be signed or not.
:param sign_prepare: Whether the Signature element should be added.
        :return: Tuple of request ID and an AttributeQuery instance | Below is the instruction that describes the task:
### Input:
Constructs an AttributeQuery
:param destination: To whom the query should be sent
:param name_id: The identifier of the subject
:param attribute: A dictionary of attributes and values that is
asked for. The key are one of 4 variants:
3-tuple of name_format,name and friendly_name,
2-tuple of name_format and name,
1-tuple with name or
just the name as a string.
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param format: The format of the name ID
:param message_id: The identifier of the session
:param consent: Whether the principal have given her consent
:param extensions: Possible extensions
:param sign: Whether the query should be signed or not.
:param sign_prepare: Whether the Signature element should be added.
:return: Tuple of request ID and an AttributeQuery instance
### Response:
def create_attribute_query(self, destination, name_id=None,
attribute=None, message_id=0, consent=None,
extensions=None, sign=False, sign_prepare=False, sign_alg=None,
digest_alg=None,
**kwargs):
""" Constructs an AttributeQuery
:param destination: To whom the query should be sent
:param name_id: The identifier of the subject
:param attribute: A dictionary of attributes and values that is
asked for. The key are one of 4 variants:
3-tuple of name_format,name and friendly_name,
2-tuple of name_format and name,
1-tuple with name or
just the name as a string.
:param sp_name_qualifier: The unique identifier of the
service provider or affiliation of providers for whom the
identifier was generated.
:param name_qualifier: The unique identifier of the identity
provider that generated the identifier.
:param format: The format of the name ID
:param message_id: The identifier of the session
:param consent: Whether the principal have given her consent
:param extensions: Possible extensions
:param sign: Whether the query should be signed or not.
:param sign_prepare: Whether the Signature element should be added.
:return: Tuple of request ID and an AttributeQuery instance
"""
if name_id is None:
if "subject_id" in kwargs:
name_id = saml.NameID(text=kwargs["subject_id"])
for key in ["sp_name_qualifier", "name_qualifier",
"format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
else:
raise AttributeError("Missing required parameter")
elif isinstance(name_id, six.string_types):
name_id = saml.NameID(text=name_id)
for key in ["sp_name_qualifier", "name_qualifier", "format"]:
try:
setattr(name_id, key, kwargs[key])
except KeyError:
pass
subject = saml.Subject(name_id=name_id)
if attribute:
attribute = do_attributes(attribute)
try:
nsprefix = kwargs["nsprefix"]
except KeyError:
nsprefix = None
return self._message(AttributeQuery, destination, message_id, consent,
extensions, sign, sign_prepare, subject=subject,
attribute=attribute, nsprefix=nsprefix,
sign_alg=sign_alg, digest_alg=digest_alg) |
def granular_markings_circular_refs(instance):
"""Ensure that marking definitions do not contain circular references (ie.
they do not reference themselves in the `granular_markings` property).
"""
if instance['type'] != 'marking-definition':
return
if 'granular_markings' in instance:
for marking in instance['granular_markings']:
if 'marking_ref' in marking and marking['marking_ref'] == instance['id']:
yield JSONError("`granular_markings` cannot contain any "
"references to this marking definition object"
" (no circular references).", instance['id']) | Ensure that marking definitions do not contain circular references (ie.
    they do not reference themselves in the `granular_markings` property). | Below is the instruction that describes the task:
### Input:
Ensure that marking definitions do not contain circular references (ie.
they do not reference themselves in the `granular_markings` property).
### Response:
def granular_markings_circular_refs(instance):
"""Ensure that marking definitions do not contain circular references (ie.
they do not reference themselves in the `granular_markings` property).
"""
if instance['type'] != 'marking-definition':
return
if 'granular_markings' in instance:
for marking in instance['granular_markings']:
if 'marking_ref' in marking and marking['marking_ref'] == instance['id']:
yield JSONError("`granular_markings` cannot contain any "
"references to this marking definition object"
" (no circular references).", instance['id']) |
def fetch_captcha_store(self, name, value, attrs=None, generator=None):
"""
Fetches a new CaptchaStore
This has to be called inside render
"""
try:
reverse('captcha-image', args=('dummy',))
except NoReverseMatch:
raise ImproperlyConfigured('Make sure you\'ve included captcha.urls as explained in the INSTALLATION section on http://readthedocs.org/docs/django-simple-captcha/en/latest/usage.html#installation')
if settings.CAPTCHA_GET_FROM_POOL:
key = CaptchaStore.pick()
else:
key = CaptchaStore.generate_key(generator)
# these can be used by format_output and render
self._value = [key, u('')]
self._key = key
self.id_ = self.build_attrs(attrs).get('id', None) | Fetches a new CaptchaStore
        This has to be called inside render | Below is the instruction that describes the task:
### Input:
Fetches a new CaptchaStore
This has to be called inside render
### Response:
def fetch_captcha_store(self, name, value, attrs=None, generator=None):
"""
Fetches a new CaptchaStore
This has to be called inside render
"""
try:
reverse('captcha-image', args=('dummy',))
except NoReverseMatch:
raise ImproperlyConfigured('Make sure you\'ve included captcha.urls as explained in the INSTALLATION section on http://readthedocs.org/docs/django-simple-captcha/en/latest/usage.html#installation')
if settings.CAPTCHA_GET_FROM_POOL:
key = CaptchaStore.pick()
else:
key = CaptchaStore.generate_key(generator)
# these can be used by format_output and render
self._value = [key, u('')]
self._key = key
self.id_ = self.build_attrs(attrs).get('id', None) |
def convert_table(shell_output, delimiter='\t|\s{2,}', output='dict'):
'''
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
'''
# retrieve header columns
import re
gap_pattern = re.compile(delimiter)
output_lines = shell_output.splitlines()
column_headers = gap_pattern.split(output_lines[0])
blank_index = column_headers.index('')
if blank_index > -1:
column_headers.pop(blank_index)
# generate indices tuples
indices = []
for i in range(len(column_headers)):
if i + 1 < len(column_headers):
indices.append((
output_lines[0].find(column_headers[i]),
output_lines[0].find(column_headers[i + 1])
))
else:
indices.append((
output_lines[0].find(column_headers[i]),
-1
))
# add headers to output
python_list = []
csv_string = ''
if output == 'dict':
pass
elif output == 'list':
python_list.append(column_headers)
elif output == 'csv':
for i in range(len(column_headers)):
if i:
csv_string += ','
csv_string += column_headers[i]
else:
raise ValueError('output argument must be one of dict, list or csv values.')
# add rows to output
for i in range(1, len(output_lines)):
if output == 'dict':
row_details = {}
for j in range(len(column_headers)):
row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip()
python_list.append(row_details)
elif output == 'list':
row_list = []
for j in range(len(column_headers)):
row_list.append(output_lines[i][indices[j][0]:indices[j][1]]).rstrip()
python_list.append(row_list)
elif output == 'csv':
csv_string += '\n'
for j in range(len(column_headers)):
if j:
csv_string += ','
csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip()
# return output
if csv_string:
return csv_string
return python_list | a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
    :return: list of dictionaries or list of lists or string with csv format | Below is the instruction that describes the task:
### Input:
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
### Response:
def convert_table(shell_output, delimiter='\t|\s{2,}', output='dict'):
'''
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
'''
# retrieve header columns
import re
gap_pattern = re.compile(delimiter)
output_lines = shell_output.splitlines()
column_headers = gap_pattern.split(output_lines[0])
blank_index = column_headers.index('')
if blank_index > -1:
column_headers.pop(blank_index)
# generate indices tuples
indices = []
for i in range(len(column_headers)):
if i + 1 < len(column_headers):
indices.append((
output_lines[0].find(column_headers[i]),
output_lines[0].find(column_headers[i + 1])
))
else:
indices.append((
output_lines[0].find(column_headers[i]),
-1
))
# add headers to output
python_list = []
csv_string = ''
if output == 'dict':
pass
elif output == 'list':
python_list.append(column_headers)
elif output == 'csv':
for i in range(len(column_headers)):
if i:
csv_string += ','
csv_string += column_headers[i]
else:
raise ValueError('output argument must be one of dict, list or csv values.')
# add rows to output
for i in range(1, len(output_lines)):
if output == 'dict':
row_details = {}
for j in range(len(column_headers)):
row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip()
python_list.append(row_details)
elif output == 'list':
row_list = []
for j in range(len(column_headers)):
row_list.append(output_lines[i][indices[j][0]:indices[j][1]]).rstrip()
python_list.append(row_list)
elif output == 'csv':
csv_string += '\n'
for j in range(len(column_headers)):
if j:
csv_string += ','
csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip()
# return output
if csv_string:
return csv_string
return python_list |
def plural_adj(self, text, count=None):
"""
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_adjective(word, count) or word)
return "{}{}{}".format(pre, plural, post) | Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
        Whitespace at the start and end is preserved. | Below is the instruction that describes the task:
### Input:
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
### Response:
def plural_adj(self, text, count=None):
"""
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
"""
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_adjective(word, count) or word)
return "{}{}{}".format(pre, plural, post) |
def randdate(self, start=date(1970, 1, 1), end=date.today()):
"""Generate a random date between ``start`` to ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**中文文档**
随机生成一个位于 ``start`` 和 ``end`` 之间的日期。
"""
if isinstance(start, str):
start = self.str2date(start)
if isinstance(end, str):
end = self.str2date(end)
if start > end:
raise Exception("start must be smaller than end! "
"your start=%s, end=%s" % (start, end))
return date.fromordinal(random.randint(start.toordinal(), end.toordinal())) | Generate a random date between ``start`` to ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**中文文档**
随机生成一个位于 ``start`` 和 ``end`` 之间的日期。 | Below is the instruction that describes the task:
### Input:
Generate a random date between ``start`` to ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**中文文档**
随机生成一个位于 ``start`` 和 ``end`` 之间的日期。
### Response:
def randdate(self, start=date(1970, 1, 1), end=date.today()):
"""Generate a random date between ``start`` to ``end``.
:param start: Left bound
:type start: string or datetime.date, (default date(1970, 1, 1))
:param end: Right bound
:type end: string or datetime.date, (default date.today())
:return: a datetime.date object
**中文文档**
随机生成一个位于 ``start`` 和 ``end`` 之间的日期。
"""
if isinstance(start, str):
start = self.str2date(start)
if isinstance(end, str):
end = self.str2date(end)
if start > end:
raise Exception("start must be smaller than end! "
"your start=%s, end=%s" % (start, end))
return date.fromordinal(random.randint(start.toordinal(), end.toordinal())) |
def require(self, lock, guard_func, *guard_args, **guard_kw):
"""Decorate a function to be run only when a lock is acquired.
The lock is requested if the guard function returns True.
The decorated function is called if the lock has been granted.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if self.granted(lock):
self.msg('Granted {}'.format(lock))
return f(*args, **kw)
if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
return f(*args, **kw)
return None
return wrapper
return decorator | Decorate a function to be run only when a lock is acquired.
The lock is requested if the guard function returns True.
The decorated function is called if the lock has been granted. | Below is the instruction that describes the task:
### Input:
Decorate a function to be run only when a lock is acquired.
The lock is requested if the guard function returns True.
The decorated function is called if the lock has been granted.
### Response:
def require(self, lock, guard_func, *guard_args, **guard_kw):
"""Decorate a function to be run only when a lock is acquired.
The lock is requested if the guard function returns True.
The decorated function is called if the lock has been granted.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if self.granted(lock):
self.msg('Granted {}'.format(lock))
return f(*args, **kw)
if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
return f(*args, **kw)
return None
return wrapper
return decorator |
def image_task(self):
"""
Returns a json-schema document that represents a task entity.
"""
uri = "/%s/task" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body | Returns a json-schema document that represents a task entity. | Below is the instruction that describes the task:
### Input:
Returns a json-schema document that represents a task entity.
### Response:
def image_task(self):
"""
Returns a json-schema document that represents a task entity.
"""
uri = "/%s/task" % self.uri_base
resp, resp_body = self.api.method_get(uri)
return resp_body |
def state_category(value):
"""Parse categories."""
if value == re.sre_parse.CATEGORY_DIGIT:
return (yield '0')
if value == re.sre_parse.CATEGORY_WORD:
return (yield 'x') | Parse categories. | Below is the instruction that describes the task:
### Input:
Parse categories.
### Response:
def state_category(value):
"""Parse categories."""
if value == re.sre_parse.CATEGORY_DIGIT:
return (yield '0')
if value == re.sre_parse.CATEGORY_WORD:
return (yield 'x') |
def run(self, config, workflow_id, signal, *, data=None):
""" Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): The unique ID of the workflow that runs this dag.
signal (DagSignal): The signal object for dags. It wraps the construction
and sending of signals into easy to use methods.
data (MultiTaskData): The initial data that is passed on to the start tasks.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops).
ConfigNotDefinedError: If the configuration for the dag is empty.
"""
graph = self.make_graph(self._schema)
# pre-checks
self.validate(graph)
if config is None:
raise ConfigNotDefinedError()
# create the celery app for submitting tasks
celery_app = create_app(config)
# the task queue for managing the current state of the tasks
tasks = []
stopped = False
# add all tasks without predecessors to the task list
for task in nx.topological_sort(graph):
task.workflow_name = self.workflow_name
task.dag_name = self.name
if len(list(graph.predecessors(task))) == 0:
task.state = TaskState.Waiting
tasks.append(task)
def set_task_completed(completed_task):
""" For each completed task, add all successor tasks to the task list.
If they are not in the task list yet, flag them as 'waiting'.
"""
completed_task.state = TaskState.Completed
for successor in graph.successors(completed_task):
if successor not in tasks:
successor.state = TaskState.Waiting
tasks.append(successor)
# process the task queue as long as there are tasks in it
while tasks:
if not stopped:
stopped = signal.is_stopped
# delay the execution by the polling time
if config.dag_polling_time > 0.0:
sleep(config.dag_polling_time)
for i in range(len(tasks) - 1, -1, -1):
task = tasks[i]
# for each waiting task, wait for all predecessor tasks to be
# completed. Then check whether the task should be skipped by
# interrogating the predecessor tasks.
if task.is_waiting:
if stopped:
task.state = TaskState.Stopped
else:
pre_tasks = list(graph.predecessors(task))
if all([p.is_completed for p in pre_tasks]):
# check whether the task should be skipped
run_task = task.has_to_run or len(pre_tasks) == 0
for pre in pre_tasks:
if run_task:
break
# predecessor task is skipped and flag should
# not be propagated
if pre.is_skipped and not pre.propagate_skip:
run_task = True
# limits of a non-skipped predecessor task
if not pre.is_skipped:
if pre.celery_result.result.limit is not None:
if task.name in [
n.name if isinstance(n, BaseTask) else n
for n in pre.celery_result.result.limit]:
run_task = True
else:
run_task = True
task.is_skipped = not run_task
# send the task to celery or, if skipped, mark it as completed
if task.is_skipped:
set_task_completed(task)
else:
# compose the input data from the predecessor tasks
# output. Data from skipped predecessor tasks do not
# contribute to the input data
if len(pre_tasks) == 0:
input_data = data
else:
input_data = MultiTaskData()
for pt in [p for p in pre_tasks if not p.is_skipped]:
slot = graph[pt][task]['slot']
input_data.add_dataset(
pt.name,
pt.celery_result.result.data.default_dataset,
aliases=[slot] if slot is not None else None)
task.state = TaskState.Running
task.celery_result = celery_app.send_task(
JobExecPath.Task,
args=(task, workflow_id, input_data),
queue=task.queue,
routing_key=task.queue
)
# flag task as completed
elif task.is_running:
if task.celery_completed:
set_task_completed(task)
elif task.celery_failed:
task.state = TaskState.Aborted
signal.stop_workflow()
# cleanup task results that are not required anymore
elif task.is_completed:
if all([s.is_completed or s.is_stopped or s.is_aborted
for s in graph.successors(task)]):
if celery_app.conf.result_expires == 0:
task.clear_celery_result()
tasks.remove(task)
# cleanup and remove stopped and aborted tasks
elif task.is_stopped or task.is_aborted:
if celery_app.conf.result_expires == 0:
task.clear_celery_result()
tasks.remove(task) | Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): The unique ID of the workflow that runs this dag.
signal (DagSignal): The signal object for dags. It wraps the construction
and sending of signals into easy to use methods.
data (MultiTaskData): The initial data that is passed on to the start tasks.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops).
ConfigNotDefinedError: If the configuration for the dag is empty. | Below is the instruction that describes the task:
### Input:
Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): The unique ID of the workflow that runs this dag.
signal (DagSignal): The signal object for dags. It wraps the construction
and sending of signals into easy to use methods.
data (MultiTaskData): The initial data that is passed on to the start tasks.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops).
ConfigNotDefinedError: If the configuration for the dag is empty.
### Response:
def run(self, config, workflow_id, signal, *, data=None):
""" Run the dag by calling the tasks in the correct order.
Args:
config (Config): Reference to the configuration object from which the
settings for the dag are retrieved.
workflow_id (str): The unique ID of the workflow that runs this dag.
signal (DagSignal): The signal object for dags. It wraps the construction
and sending of signals into easy to use methods.
data (MultiTaskData): The initial data that is passed on to the start tasks.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a dag (e.g. contains loops).
ConfigNotDefinedError: If the configuration for the dag is empty.
"""
graph = self.make_graph(self._schema)
# pre-checks
self.validate(graph)
if config is None:
raise ConfigNotDefinedError()
# create the celery app for submitting tasks
celery_app = create_app(config)
# the task queue for managing the current state of the tasks
tasks = []
stopped = False
# add all tasks without predecessors to the task list
for task in nx.topological_sort(graph):
task.workflow_name = self.workflow_name
task.dag_name = self.name
if len(list(graph.predecessors(task))) == 0:
task.state = TaskState.Waiting
tasks.append(task)
def set_task_completed(completed_task):
""" For each completed task, add all successor tasks to the task list.
If they are not in the task list yet, flag them as 'waiting'.
"""
completed_task.state = TaskState.Completed
for successor in graph.successors(completed_task):
if successor not in tasks:
successor.state = TaskState.Waiting
tasks.append(successor)
# process the task queue as long as there are tasks in it
while tasks:
if not stopped:
stopped = signal.is_stopped
# delay the execution by the polling time
if config.dag_polling_time > 0.0:
sleep(config.dag_polling_time)
for i in range(len(tasks) - 1, -1, -1):
task = tasks[i]
# for each waiting task, wait for all predecessor tasks to be
# completed. Then check whether the task should be skipped by
# interrogating the predecessor tasks.
if task.is_waiting:
if stopped:
task.state = TaskState.Stopped
else:
pre_tasks = list(graph.predecessors(task))
if all([p.is_completed for p in pre_tasks]):
# check whether the task should be skipped
run_task = task.has_to_run or len(pre_tasks) == 0
for pre in pre_tasks:
if run_task:
break
# predecessor task is skipped and flag should
# not be propagated
if pre.is_skipped and not pre.propagate_skip:
run_task = True
# limits of a non-skipped predecessor task
if not pre.is_skipped:
if pre.celery_result.result.limit is not None:
if task.name in [
n.name if isinstance(n, BaseTask) else n
for n in pre.celery_result.result.limit]:
run_task = True
else:
run_task = True
task.is_skipped = not run_task
# send the task to celery or, if skipped, mark it as completed
if task.is_skipped:
set_task_completed(task)
else:
# compose the input data from the predecessor tasks
# output. Data from skipped predecessor tasks do not
# contribute to the input data
if len(pre_tasks) == 0:
input_data = data
else:
input_data = MultiTaskData()
for pt in [p for p in pre_tasks if not p.is_skipped]:
slot = graph[pt][task]['slot']
input_data.add_dataset(
pt.name,
pt.celery_result.result.data.default_dataset,
aliases=[slot] if slot is not None else None)
task.state = TaskState.Running
task.celery_result = celery_app.send_task(
JobExecPath.Task,
args=(task, workflow_id, input_data),
queue=task.queue,
routing_key=task.queue
)
# flag task as completed
elif task.is_running:
if task.celery_completed:
set_task_completed(task)
elif task.celery_failed:
task.state = TaskState.Aborted
signal.stop_workflow()
# cleanup task results that are not required anymore
elif task.is_completed:
if all([s.is_completed or s.is_stopped or s.is_aborted
for s in graph.successors(task)]):
if celery_app.conf.result_expires == 0:
task.clear_celery_result()
tasks.remove(task)
# cleanup and remove stopped and aborted tasks
elif task.is_stopped or task.is_aborted:
if celery_app.conf.result_expires == 0:
task.clear_celery_result()
tasks.remove(task) |
def create_can_publish_and_can_republish_permissions(sender, **kwargs):
"""
Add `can_publish` and `can_republish` permissions for each publishable
model in the system.
"""
for model in sender.get_models():
if not issubclass(model, PublishingModel):
continue
content_type = ContentType.objects.get_for_model(model)
permission, created = Permission.objects.get_or_create(
content_type=content_type, codename='can_publish',
defaults=dict(name='Can Publish %s' % model.__name__))
permission, created = Permission.objects.get_or_create(
content_type=content_type, codename='can_republish',
defaults=dict(name='Can Republish %s' % model.__name__)) | Add `can_publish` and `can_republish` permissions for each publishable
model in the system. | Below is the instruction that describes the task:
### Input:
Add `can_publish` and `can_republish` permissions for each publishable
model in the system.
### Response:
def create_can_publish_and_can_republish_permissions(sender, **kwargs):
"""
Add `can_publish` and `can_republish` permissions for each publishable
model in the system.
"""
for model in sender.get_models():
if not issubclass(model, PublishingModel):
continue
content_type = ContentType.objects.get_for_model(model)
permission, created = Permission.objects.get_or_create(
content_type=content_type, codename='can_publish',
defaults=dict(name='Can Publish %s' % model.__name__))
permission, created = Permission.objects.get_or_create(
content_type=content_type, codename='can_republish',
defaults=dict(name='Can Republish %s' % model.__name__)) |
def get_datacenter(self, datacenter_id, depth=1):
"""
Retrieves a data center by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s?depth=%s' % (datacenter_id, str(depth)))
return response | Retrieves a data center by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int`` | Below is the instruction that describes the task:
### Input:
Retrieves a data center by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
### Response:
def get_datacenter(self, datacenter_id, depth=1):
"""
Retrieves a data center by its ID.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s?depth=%s' % (datacenter_id, str(depth)))
return response |
def get_course_details(self, course_id):
"""
Query the Enrollment API for the course details of the given course_id.
Args:
course_id (str): The string value of the course's unique identifier
Returns:
dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)
"""
try:
return self.client.course(course_id).get()
except (SlumberBaseException, ConnectionError, Timeout) as exc:
LOGGER.exception(
'Failed to retrieve course enrollment details for course [%s] due to: [%s]',
course_id, str(exc)
)
return {} | Query the Enrollment API for the course details of the given course_id.
Args:
course_id (str): The string value of the course's unique identifier
Returns:
dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.) | Below is the instruction that describes the task:
### Input:
Query the Enrollment API for the course details of the given course_id.
Args:
course_id (str): The string value of the course's unique identifier
Returns:
dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)
### Response:
def get_course_details(self, course_id):
"""
Query the Enrollment API for the course details of the given course_id.
Args:
course_id (str): The string value of the course's unique identifier
Returns:
dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)
"""
try:
return self.client.course(course_id).get()
except (SlumberBaseException, ConnectionError, Timeout) as exc:
LOGGER.exception(
'Failed to retrieve course enrollment details for course [%s] due to: [%s]',
course_id, str(exc)
)
return {} |
def forum_post_list(self, creator_id=None, creator_name=None,
topic_id=None, topic_title_matches=None,
topic_category_id=None, body_matches=None):
"""Return a list of forum posts.
Parameters:
creator_id (int):
creator_name (str):
topic_id (int):
topic_title_matches (str):
topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs &
Features respectively).
body_matches (str): Can be part of the post content.
"""
params = {
'search[creator_id]': creator_id,
'search[creator_name]': creator_name,
'search[topic_id]': topic_id,
'search[topic_title_matches]': topic_title_matches,
'search[topic_category_id]': topic_category_id,
'search[body_matches]': body_matches
}
return self._get('forum_posts.json', params) | Return a list of forum posts.
Parameters:
creator_id (int):
creator_name (str):
topic_id (int):
topic_title_matches (str):
topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs &
Features respectively).
body_matches (str): Can be part of the post content. | Below is the instruction that describes the task:
### Input:
Return a list of forum posts.
Parameters:
creator_id (int):
creator_name (str):
topic_id (int):
topic_title_matches (str):
topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs &
Features respectively).
body_matches (str): Can be part of the post content.
### Response:
def forum_post_list(self, creator_id=None, creator_name=None,
topic_id=None, topic_title_matches=None,
topic_category_id=None, body_matches=None):
"""Return a list of forum posts.
Parameters:
creator_id (int):
creator_name (str):
topic_id (int):
topic_title_matches (str):
topic_category_id (int): Can be: 0, 1, 2 (General, Tags, Bugs &
Features respectively).
body_matches (str): Can be part of the post content.
"""
params = {
'search[creator_id]': creator_id,
'search[creator_name]': creator_name,
'search[topic_id]': topic_id,
'search[topic_title_matches]': topic_title_matches,
'search[topic_category_id]': topic_category_id,
'search[body_matches]': body_matches
}
return self._get('forum_posts.json', params) |
def __remove_args_first_item(self):
"""
# Todo: finding a better solution
This is a dirty solution
Because the first argument of inspectors' args will be itself
For current implementation, it should be ignore
"""
if len(self.args) > 0:
new_args_list = []
for item in self.args:
if len(item) > 0 and self.obj == item[0].__class__:
new_args_list.append(item[1:])
else:
new_args_list.append(item[:])
self.__set_args_list(new_args_list) | # Todo: finding a better solution
This is a dirty solution
Because the first argument of inspectors' args will be itself
For current implementation, it should be ignore | Below is the instruction that describes the task:
### Input:
# Todo: finding a better solution
This is a dirty solution
Because the first argument of inspectors' args will be itself
For current implementation, it should be ignore
### Response:
def __remove_args_first_item(self):
"""
# Todo: finding a better solution
This is a dirty solution
Because the first argument of inspectors' args will be itself
For current implementation, it should be ignore
"""
if len(self.args) > 0:
new_args_list = []
for item in self.args:
if len(item) > 0 and self.obj == item[0].__class__:
new_args_list.append(item[1:])
else:
new_args_list.append(item[:])
self.__set_args_list(new_args_list) |
def create(self, environment, target_name):
"""
Sends "create project" command to the remote server
"""
remote_server_command(
["ssh", environment.deploy_target, "create", target_name],
environment, self,
clean_up=True,
) | Sends "create project" command to the remote server | Below is the instruction that describes the task:
### Input:
Sends "create project" command to the remote server
### Response:
def create(self, environment, target_name):
"""
Sends "create project" command to the remote server
"""
remote_server_command(
["ssh", environment.deploy_target, "create", target_name],
environment, self,
clean_up=True,
) |
def _has_not_qual(ntd):
"""Return True if the qualifiers contain a 'NOT'"""
for qual in ntd.Qualifier:
if 'not' in qual:
return True
if 'NOT' in qual:
return True
return False | Return True if the qualifiers contain a 'NOT' | Below is the instruction that describes the task:
### Input:
Return True if the qualifiers contain a 'NOT'
### Response:
def _has_not_qual(ntd):
"""Return True if the qualifiers contain a 'NOT'"""
for qual in ntd.Qualifier:
if 'not' in qual:
return True
if 'NOT' in qual:
return True
return False |
def _get_device_group(self, device):
'''Get the device group through a device.
:param device: bigip object -- device
:returns: tm.cm.device_groups.device_group object
'''
return device.tm.cm.device_groups.device_group.load(
name=self.name, partition=self.partition
) | Get the device group through a device.
:param device: bigip object -- device
:returns: tm.cm.device_groups.device_group object | Below is the instruction that describes the task:
### Input:
Get the device group through a device.
:param device: bigip object -- device
:returns: tm.cm.device_groups.device_group object
### Response:
def _get_device_group(self, device):
'''Get the device group through a device.
:param device: bigip object -- device
:returns: tm.cm.device_groups.device_group object
'''
return device.tm.cm.device_groups.device_group.load(
name=self.name, partition=self.partition
) |
def to_er7(self, encoding_chars=None, trailing_children=False):
"""
Returns the HL7 representation of the :class:`Element <hl7apy.core.Element>`. It adds the appropriate
separator at the end if needed
:type encoding_chars: ``dict``
:param encoding_chars: The encoding chars to use.
If it is ``None`` it uses :attr:`self.encoding_chars`,
which by default is the ones return by
:func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>` values
:rtype: ``str``
:return: the HL7 representation of the :class:`Element <hl7apy.core.Element>`
"""
if encoding_chars is None:
encoding_chars = self.encoding_chars
child_class = list(self.child_classes.values())[0]
separator = encoding_chars.get(child_class.__name__.upper(), '')
s = []
for child in self._get_children(trailing_children):
if child:
s.extend(repetition.to_er7(encoding_chars, trailing_children) for repetition in child)
else:
try:
s.append(self._handle_empty_children(encoding_chars))
except NotImplementedError:
pass
return separator.join(s) | Returns the HL7 representation of the :class:`Element <hl7apy.core.Element>`. It adds the appropriate
separator at the end if needed
:type encoding_chars: ``dict``
:param encoding_chars: The encoding chars to use.
If it is ``None`` it uses :attr:`self.encoding_chars`,
which by default is the ones return by
:func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>` values
:rtype: ``str``
:return: the HL7 representation of the :class:`Element <hl7apy.core.Element>` | Below is the instruction that describes the task:
### Input:
Returns the HL7 representation of the :class:`Element <hl7apy.core.Element>`. It adds the appropriate
separator at the end if needed
:type encoding_chars: ``dict``
:param encoding_chars: The encoding chars to use.
If it is ``None`` it uses :attr:`self.encoding_chars`,
which by default is the ones return by
:func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>` values
:rtype: ``str``
:return: the HL7 representation of the :class:`Element <hl7apy.core.Element>`
### Response:
def to_er7(self, encoding_chars=None, trailing_children=False):
"""
Returns the HL7 representation of the :class:`Element <hl7apy.core.Element>`. It adds the appropriate
separator at the end if needed
:type encoding_chars: ``dict``
:param encoding_chars: The encoding chars to use.
If it is ``None`` it uses :attr:`self.encoding_chars`,
which by default is the ones return by
:func:`get_default_encoding_chars <hl7apy.get_default_encoding_chars>` values
:rtype: ``str``
:return: the HL7 representation of the :class:`Element <hl7apy.core.Element>`
"""
if encoding_chars is None:
encoding_chars = self.encoding_chars
child_class = list(self.child_classes.values())[0]
separator = encoding_chars.get(child_class.__name__.upper(), '')
s = []
for child in self._get_children(trailing_children):
if child:
s.extend(repetition.to_er7(encoding_chars, trailing_children) for repetition in child)
else:
try:
s.append(self._handle_empty_children(encoding_chars))
except NotImplementedError:
pass
return separator.join(s) |
def eval_string_type(self, text, is_string=False):
"""Evaluate string type."""
stype = set()
wstype = set()
for m in RE_ITER_STRING_TYPES.finditer(text):
value = m.group(0)
if value == '*':
wstype.add('u')
wstype.add('f')
wstype.add('r')
wstype.add('b')
elif value.endswith('*'):
wstype.add(value[0].lower())
else:
stype.add(value.lower())
if is_string and 'b' not in stype and 'f' not in stype:
stype.add('u')
return stype, wstype | Evaluate string type. | Below is the instruction that describes the task:
### Input:
Evaluate string type.
### Response:
def eval_string_type(self, text, is_string=False):
"""Evaluate string type."""
stype = set()
wstype = set()
for m in RE_ITER_STRING_TYPES.finditer(text):
value = m.group(0)
if value == '*':
wstype.add('u')
wstype.add('f')
wstype.add('r')
wstype.add('b')
elif value.endswith('*'):
wstype.add(value[0].lower())
else:
stype.add(value.lower())
if is_string and 'b' not in stype and 'f' not in stype:
stype.add('u')
return stype, wstype |
def _notify_remove(self, slice_):
"""Notify about a RemoveChange."""
change = RemoveChange(self, slice_)
self.notify_observers(change) | Notify about a RemoveChange. | Below is the instruction that describes the task:
### Input:
Notify about a RemoveChange.
### Response:
def _notify_remove(self, slice_):
"""Notify about a RemoveChange."""
change = RemoveChange(self, slice_)
self.notify_observers(change) |
def deprecated(fun_name=None, msg=""):
'''Issue a deprecation warning for a function'''
def _deprecated(fun):
'''Issue a deprecation warning for a function'''
@wraps(fun)
def _wrapper(*args, **kwargs):
'''Issue deprecation warning and forward arguments to fun'''
name = fun_name if fun_name is not None else fun.__name__
_warn_deprecated('Call to deprecated function %s. %s' % (name, msg))
return fun(*args, **kwargs)
return _wrapper
return _deprecated | Issue a deprecation warning for a function | Below is the instruction that describes the task:
### Input:
Issue a deprecation warning for a function
### Response:
def deprecated(fun_name=None, msg=""):
'''Issue a deprecation warning for a function'''
def _deprecated(fun):
'''Issue a deprecation warning for a function'''
@wraps(fun)
def _wrapper(*args, **kwargs):
'''Issue deprecation warning and forward arguments to fun'''
name = fun_name if fun_name is not None else fun.__name__
_warn_deprecated('Call to deprecated function %s. %s' % (name, msg))
return fun(*args, **kwargs)
return _wrapper
return _deprecated |
def get_all(self, paths: Union[str, Sequence[str]]) -> Union[IrodsMetadata, List[IrodsMetadata]]:
"""
Gets all of the metadata for the iRODS entities at the given path or paths.
If multiple paths are given, the metadata collection at index `i` on the output corresponds to the path at index
`i` on the input. i.e.
```
output = mapper.get_all(["path_1", "path_2"])
metadata_for_path_1 = output[0]
metadata_for_path_2 = output[1]
```
A `ValueError` will be raised if the path does not correspond to a valid entity.
:param path: the path of the entity or entities to get the metadata for
:return: metadata for the given entity or entities
""" | Gets all of the metadata for the iRODS entities at the given path or paths.
If multiple paths are given, the metadata collection at index `i` on the output corresponds to the path at index
`i` on the input. i.e.
```
output = mapper.get_all(["path_1", "path_2"])
metadata_for_path_1 = output[0]
metadata_for_path_2 = output[1]
```
A `ValueError` will be raised if the path does not correspond to a valid entity.
:param path: the path of the entity or entities to get the metadata for
:return: metadata for the given entity or entities | Below is the instruction that describes the task:
### Input:
Gets all of the metadata for the iRODS entities at the given path or paths.
If multiple paths are given, the metadata collection at index `i` on the output corresponds to the path at index
`i` on the input. i.e.
```
output = mapper.get_all(["path_1", "path_2"])
metadata_for_path_1 = output[0]
metadata_for_path_2 = output[1]
```
A `ValueError` will be raised if the path does not correspond to a valid entity.
:param path: the path of the entity or entities to get the metadata for
:return: metadata for the given entity or entities
### Response:
def get_all(self, paths: Union[str, Sequence[str]]) -> Union[IrodsMetadata, List[IrodsMetadata]]:
"""
Gets all of the metadata for the iRODS entities at the given path or paths.
If multiple paths are given, the metadata collection at index `i` on the output corresponds to the path at index
`i` on the input. i.e.
```
output = mapper.get_all(["path_1", "path_2"])
metadata_for_path_1 = output[0]
metadata_for_path_2 = output[1]
```
A `ValueError` will be raised if the path does not correspond to a valid entity.
:param path: the path of the entity or entities to get the metadata for
:return: metadata for the given entity or entities
""" |
def _pfp__set_packer(self, pack_type, packer=None, pack=None, unpack=None, func_call_info=None):
"""Set the packer/pack/unpack functions for this field, as
well as the pack type.
:pack_type: The data type of the packed data
:packer: A function that can handle packing and unpacking. First
arg is true/false (to pack or unpack). Second arg is the stream.
Must return an array of chars.
:pack: A function that packs data. It must accept an array of chars and return an
array of chars that is a packed form of the input.
:unpack: A function that unpacks data. It must accept an array of chars and
return an array of chars
"""
self._pfp__pack_type = pack_type
self._pfp__unpack = unpack
self._pfp__pack = pack
self._pfp__packer = packer
self._pfp__pack_func_call_info = func_call_info | Set the packer/pack/unpack functions for this field, as
well as the pack type.
:pack_type: The data type of the packed data
:packer: A function that can handle packing and unpacking. First
arg is true/false (to pack or unpack). Second arg is the stream.
Must return an array of chars.
:pack: A function that packs data. It must accept an array of chars and return an
array of chars that is a packed form of the input.
:unpack: A function that unpacks data. It must accept an array of chars and
return an array of chars | Below is the the instruction that describes the task:
### Input:
Set the packer/pack/unpack functions for this field, as
well as the pack type.
:pack_type: The data type of the packed data
:packer: A function that can handle packing and unpacking. First
arg is true/false (to pack or unpack). Second arg is the stream.
Must return an array of chars.
:pack: A function that packs data. It must accept an array of chars and return an
array of chars that is a packed form of the input.
:unpack: A function that unpacks data. It must accept an array of chars and
return an array of chars
### Response:
def _pfp__set_packer(self, pack_type, packer=None, pack=None, unpack=None, func_call_info=None):
"""Set the packer/pack/unpack functions for this field, as
well as the pack type.
:pack_type: The data type of the packed data
:packer: A function that can handle packing and unpacking. First
arg is true/false (to pack or unpack). Second arg is the stream.
Must return an array of chars.
:pack: A function that packs data. It must accept an array of chars and return an
array of chars that is a packed form of the input.
:unpack: A function that unpacks data. It must accept an array of chars and
return an array of chars
"""
self._pfp__pack_type = pack_type
self._pfp__unpack = unpack
self._pfp__pack = pack
self._pfp__packer = packer
self._pfp__pack_func_call_info = func_call_info |
def _parse_resource(resource):
""" Parses and completes resource information """
resource = resource.strip() if resource else resource
if resource in {ME_RESOURCE, USERS_RESOURCE}:
return resource
elif '@' in resource and not resource.startswith(USERS_RESOURCE):
# when for example accessing a shared mailbox the
# resource is set to the email address. we have to prefix
# the email with the resource 'users/' so --> 'users/email_address'
return '{}/{}'.format(USERS_RESOURCE, resource)
else:
return resource | Parses and completes resource information | Below is the the instruction that describes the task:
### Input:
Parses and completes resource information
### Response:
def _parse_resource(resource):
""" Parses and completes resource information """
resource = resource.strip() if resource else resource
if resource in {ME_RESOURCE, USERS_RESOURCE}:
return resource
elif '@' in resource and not resource.startswith(USERS_RESOURCE):
# when for example accessing a shared mailbox the
# resource is set to the email address. we have to prefix
# the email with the resource 'users/' so --> 'users/email_address'
return '{}/{}'.format(USERS_RESOURCE, resource)
else:
return resource |
def project_update_sponsorship(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /project-xxxx/updateSponsorship API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FupdateSponsorship
"""
return DXHTTPRequest('/%s/updateSponsorship' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /project-xxxx/updateSponsorship API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FupdateSponsorship | Below is the the instruction that describes the task:
### Input:
Invokes the /project-xxxx/updateSponsorship API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FupdateSponsorship
### Response:
def project_update_sponsorship(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /project-xxxx/updateSponsorship API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FupdateSponsorship
"""
return DXHTTPRequest('/%s/updateSponsorship' % object_id, input_params, always_retry=always_retry, **kwargs) |
def compare(orderby_item1, orderby_item2):
"""compares the two orderby item pairs.
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int
"""
type1_ord = _OrderByHelper.getTypeOrd(orderby_item1)
type2_ord = _OrderByHelper.getTypeOrd(orderby_item2)
type_ord_diff = type1_ord - type2_ord
if type_ord_diff:
return type_ord_diff
# the same type,
if type1_ord == 0:
return 0
return _compare_helper(orderby_item1['item'], orderby_item2['item']) | compares the two orderby item pairs.
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int | Below is the the instruction that describes the task:
### Input:
compares the two orderby item pairs.
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int
### Response:
def compare(orderby_item1, orderby_item2):
"""compares the two orderby item pairs.
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int
"""
type1_ord = _OrderByHelper.getTypeOrd(orderby_item1)
type2_ord = _OrderByHelper.getTypeOrd(orderby_item2)
type_ord_diff = type1_ord - type2_ord
if type_ord_diff:
return type_ord_diff
# the same type,
if type1_ord == 0:
return 0
return _compare_helper(orderby_item1['item'], orderby_item2['item']) |
def date_to_long_form_string(dt, locale_ = 'en_US.utf8'):
'''dt should be a datetime.date object.'''
if locale_:
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, locale_)
v = dt.strftime("%A %B %d %Y")
if locale_:
locale.setlocale(locale.LC_ALL, old_locale)
return v | dt should be a datetime.date object. | Below is the the instruction that describes the task:
### Input:
dt should be a datetime.date object.
### Response:
def date_to_long_form_string(dt, locale_ = 'en_US.utf8'):
'''dt should be a datetime.date object.'''
if locale_:
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, locale_)
v = dt.strftime("%A %B %d %Y")
if locale_:
locale.setlocale(locale.LC_ALL, old_locale)
return v |
def fetch_internal(item, request):
"""Fetches the given request by using the local Flask context."""
# Break client dependence on Flask if internal fetches aren't being used.
from flask import make_response
from werkzeug.test import EnvironBuilder
# Break circular dependencies.
from dpxdt.server import app
# Attempt to create a Flask environment from a urllib2.Request object.
environ_base = {
'REMOTE_ADDR': '127.0.0.1',
}
# The data object may be a generator from poster.multipart_encode, so we
# need to convert that to raw bytes here. Unfortunately EnvironBuilder
# only works with the whole request buffered in memory.
data = request.get_data()
if data and not isinstance(data, str):
data = ''.join(list(data))
builder = EnvironBuilder(
path=request.get_selector(),
base_url='%s://%s' % (request.get_type(), request.get_host()),
method=request.get_method(),
data=data,
headers=request.header_items(),
environ_base=environ_base)
with app.request_context(builder.get_environ()):
response = make_response(app.dispatch_request())
LOGGER.info('"%s" %s via internal routing',
request.get_selector(), response.status_code)
item.status_code = response.status_code
item.content_type = response.mimetype
if item.result_path:
# TODO: Is there a better way to access the response stream?
with open(item.result_path, 'wb') as result_file:
for piece in response.iter_encoded():
result_file.write(piece)
else:
item.data = response.get_data()
return item | Fetches the given request by using the local Flask context. | Below is the the instruction that describes the task:
### Input:
Fetches the given request by using the local Flask context.
### Response:
def fetch_internal(item, request):
"""Fetches the given request by using the local Flask context."""
# Break client dependence on Flask if internal fetches aren't being used.
from flask import make_response
from werkzeug.test import EnvironBuilder
# Break circular dependencies.
from dpxdt.server import app
# Attempt to create a Flask environment from a urllib2.Request object.
environ_base = {
'REMOTE_ADDR': '127.0.0.1',
}
# The data object may be a generator from poster.multipart_encode, so we
# need to convert that to raw bytes here. Unfortunately EnvironBuilder
# only works with the whole request buffered in memory.
data = request.get_data()
if data and not isinstance(data, str):
data = ''.join(list(data))
builder = EnvironBuilder(
path=request.get_selector(),
base_url='%s://%s' % (request.get_type(), request.get_host()),
method=request.get_method(),
data=data,
headers=request.header_items(),
environ_base=environ_base)
with app.request_context(builder.get_environ()):
response = make_response(app.dispatch_request())
LOGGER.info('"%s" %s via internal routing',
request.get_selector(), response.status_code)
item.status_code = response.status_code
item.content_type = response.mimetype
if item.result_path:
# TODO: Is there a better way to access the response stream?
with open(item.result_path, 'wb') as result_file:
for piece in response.iter_encoded():
result_file.write(piece)
else:
item.data = response.get_data()
return item |
def get_run_events(cls, crawler, run_id, start, end, level=None):
"""Events from a particular run"""
key = make_key(crawler, "events", run_id, level)
return cls.event_list(key, start, end) | Events from a particular run | Below is the the instruction that describes the task:
### Input:
Events from a particular run
### Response:
def get_run_events(cls, crawler, run_id, start, end, level=None):
"""Events from a particular run"""
key = make_key(crawler, "events", run_id, level)
return cls.event_list(key, start, end) |
def resources(ctx, gpu):
"""Get build job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build -b 2 resources
```
For GPU resources
\b
```bash
$ polyaxon build -b 2 resources --gpu
```
"""
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().build_job.resources(user,
project_name,
_build,
message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) | Get build job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build -b 2 resources
```
For GPU resources
\b
```bash
$ polyaxon build -b 2 resources --gpu
``` | Below is the the instruction that describes the task:
### Input:
Get build job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build -b 2 resources
```
For GPU resources
\b
```bash
$ polyaxon build -b 2 resources --gpu
```
### Response:
def resources(ctx, gpu):
"""Get build job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon build -b 2 resources
```
For GPU resources
\b
```bash
$ polyaxon build -b 2 resources --gpu
```
"""
user, project_name, _build = get_build_or_local(ctx.obj.get('project'), ctx.obj.get('build'))
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().build_job.resources(user,
project_name,
_build,
message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for build job `{}`.'.format(_build))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) |
def get_py_impl():
"""Return what kind of Python this is"""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'PyPy'
elif sys.platform.startswith('java'):
pyimpl = 'Jython'
elif sys.platform == 'cli':
pyimpl = 'IronPython'
else:
pyimpl = 'CPython'
return pyimpl | Return what kind of Python this is | Below is the the instruction that describes the task:
### Input:
Return what kind of Python this is
### Response:
def get_py_impl():
"""Return what kind of Python this is"""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'PyPy'
elif sys.platform.startswith('java'):
pyimpl = 'Jython'
elif sys.platform == 'cli':
pyimpl = 'IronPython'
else:
pyimpl = 'CPython'
return pyimpl |
def tex_emitter_core(target, source, env, graphics_extensions):
"""An emitter for TeX and LaTeX sources.
For LaTeX sources we try and find the common created files that
are needed on subsequent runs of latex to finish tables of contents,
bibliographies, indices, lists of figures, and hyperlink references.
"""
basename = SCons.Util.splitext(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
targetdir = os.path.split(str(target[0]))[0]
targetbase = os.path.join(targetdir, basefile)
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
target[0].attributes.path = abspath
#
# file names we will make use of in searching the sources and log file
#
emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes
auxfilename = targetbase + '.aux'
logfilename = targetbase + '.log'
flsfilename = targetbase + '.fls'
syncfilename = targetbase + '.synctex.gz'
env.SideEffect(auxfilename,target[0])
env.SideEffect(logfilename,target[0])
env.SideEffect(flsfilename,target[0])
env.SideEffect(syncfilename,target[0])
if Verbose:
print("side effect :",auxfilename,logfilename,flsfilename,syncfilename)
env.Clean(target[0],auxfilename)
env.Clean(target[0],logfilename)
env.Clean(target[0],flsfilename)
env.Clean(target[0],syncfilename)
content = source[0].get_text_contents()
# set up list with the regular expressions
# we use to find features used
file_tests_search = [auxfile_re,
makeindex_re,
bibliography_re,
bibunit_re,
multibib_re,
addbibresource_re,
tableofcontents_re,
listoffigures_re,
listoftables_re,
hyperref_re,
makenomenclature_re,
makeglossary_re,
makeglossaries_re,
makeacronyms_re,
beamer_re,
newglossary_re,
biblatex_re ]
# set up list with the file suffixes that need emitting
# when a feature is found
file_tests_suff = [['.aux','aux_file'],
['.idx', '.ind', '.ilg','makeindex'],
['.bbl', '.blg','bibliography'],
['.bbl', '.blg','bibunit'],
['.bbl', '.blg','multibib'],
['.bbl', '.blg','.bcf','addbibresource'],
['.toc','contents'],
['.lof','figures'],
['.lot','tables'],
['.out','hyperref'],
['.nlo', '.nls', '.nlg','nomenclature'],
['.glo', '.gls', '.glg','glossary'],
['.glo', '.gls', '.glg','glossaries'],
['.acn', '.acr', '.alg','acronyms'],
['.nav', '.snm', '.out', '.toc','beamer'],
['newglossary',],
['.bcf', '.blg','biblatex'] ]
# for newglossary the suffixes are added as we find the command
# build the list of lists
file_tests = []
for i in range(len(file_tests_search)):
file_tests.append( [None, file_tests_suff[i]] )
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print("search path ",paths)
# scan all sources for side effect files
aux_files = []
file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
for (theSearch,suffix_list) in file_tests:
# add side effects if feature is present.If file is to be generated,add all side effects
if Verbose and theSearch:
print("check side effects for ",suffix_list[-1])
if (theSearch != None) or (not source[0].exists() ):
file_list = [targetbase,]
# for bibunit we need a list of files
if suffix_list[-1] == 'bibunit':
file_basename = os.path.join(targetdir, 'bu*.aux')
file_list = glob.glob(file_basename)
# remove the suffix '.aux'
for i in range(len(file_list)):
file_list.append(SCons.Util.splitext(file_list[i])[0])
# for multibib we need a list of files
if suffix_list[-1] == 'multibib':
for multibibmatch in multibib_re.finditer(content):
if Verbose:
print("multibib match ",multibibmatch.group(1))
if multibibmatch != None:
baselist = multibibmatch.group(1).split(',')
if Verbose:
print("multibib list ", baselist)
for i in range(len(baselist)):
file_list.append(os.path.join(targetdir, baselist[i]))
# now define the side effects
for file_name in file_list:
for suffix in suffix_list[:-1]:
env.SideEffect(file_name + suffix,target[0])
if Verbose:
print("side effect tst :",file_name + suffix, " target is ",str(target[0]))
env.Clean(target[0],file_name + suffix)
for aFile in aux_files:
aFile_base = SCons.Util.splitext(aFile)[0]
env.SideEffect(aFile_base + '.aux',target[0])
if Verbose:
print("side effect aux :",aFile_base + '.aux')
env.Clean(target[0],aFile_base + '.aux')
# read fls file to get all other files that latex creates and will read on the next pass
# remove files from list that we explicitly dealt with above
if os.path.isfile(flsfilename):
content = open(flsfilename, "r").read()
out_files = openout_re.findall(content)
myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf']
for filename in out_files[:]:
if filename in myfiles:
out_files.remove(filename)
env.SideEffect(out_files,target[0])
if Verbose:
print("side effect fls :",out_files)
env.Clean(target[0],out_files)
return (target, source) | An emitter for TeX and LaTeX sources.
For LaTeX sources we try and find the common created files that
are needed on subsequent runs of latex to finish tables of contents,
bibliographies, indices, lists of figures, and hyperlink references. | Below is the the instruction that describes the task:
### Input:
An emitter for TeX and LaTeX sources.
For LaTeX sources we try and find the common created files that
are needed on subsequent runs of latex to finish tables of contents,
bibliographies, indices, lists of figures, and hyperlink references.
### Response:
def tex_emitter_core(target, source, env, graphics_extensions):
"""An emitter for TeX and LaTeX sources.
For LaTeX sources we try and find the common created files that
are needed on subsequent runs of latex to finish tables of contents,
bibliographies, indices, lists of figures, and hyperlink references.
"""
basename = SCons.Util.splitext(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
targetdir = os.path.split(str(target[0]))[0]
targetbase = os.path.join(targetdir, basefile)
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
target[0].attributes.path = abspath
#
# file names we will make use of in searching the sources and log file
#
emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes
auxfilename = targetbase + '.aux'
logfilename = targetbase + '.log'
flsfilename = targetbase + '.fls'
syncfilename = targetbase + '.synctex.gz'
env.SideEffect(auxfilename,target[0])
env.SideEffect(logfilename,target[0])
env.SideEffect(flsfilename,target[0])
env.SideEffect(syncfilename,target[0])
if Verbose:
print("side effect :",auxfilename,logfilename,flsfilename,syncfilename)
env.Clean(target[0],auxfilename)
env.Clean(target[0],logfilename)
env.Clean(target[0],flsfilename)
env.Clean(target[0],syncfilename)
content = source[0].get_text_contents()
# set up list with the regular expressions
# we use to find features used
file_tests_search = [auxfile_re,
makeindex_re,
bibliography_re,
bibunit_re,
multibib_re,
addbibresource_re,
tableofcontents_re,
listoffigures_re,
listoftables_re,
hyperref_re,
makenomenclature_re,
makeglossary_re,
makeglossaries_re,
makeacronyms_re,
beamer_re,
newglossary_re,
biblatex_re ]
# set up list with the file suffixes that need emitting
# when a feature is found
file_tests_suff = [['.aux','aux_file'],
['.idx', '.ind', '.ilg','makeindex'],
['.bbl', '.blg','bibliography'],
['.bbl', '.blg','bibunit'],
['.bbl', '.blg','multibib'],
['.bbl', '.blg','.bcf','addbibresource'],
['.toc','contents'],
['.lof','figures'],
['.lot','tables'],
['.out','hyperref'],
['.nlo', '.nls', '.nlg','nomenclature'],
['.glo', '.gls', '.glg','glossary'],
['.glo', '.gls', '.glg','glossaries'],
['.acn', '.acr', '.alg','acronyms'],
['.nav', '.snm', '.out', '.toc','beamer'],
['newglossary',],
['.bcf', '.blg','biblatex'] ]
# for newglossary the suffixes are added as we find the command
# build the list of lists
file_tests = []
for i in range(len(file_tests_search)):
file_tests.append( [None, file_tests_suff[i]] )
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print("search path ",paths)
# scan all sources for side effect files
aux_files = []
file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
for (theSearch,suffix_list) in file_tests:
# add side effects if feature is present.If file is to be generated,add all side effects
if Verbose and theSearch:
print("check side effects for ",suffix_list[-1])
if (theSearch != None) or (not source[0].exists() ):
file_list = [targetbase,]
# for bibunit we need a list of files
if suffix_list[-1] == 'bibunit':
file_basename = os.path.join(targetdir, 'bu*.aux')
file_list = glob.glob(file_basename)
# remove the suffix '.aux'
for i in range(len(file_list)):
file_list.append(SCons.Util.splitext(file_list[i])[0])
# for multibib we need a list of files
if suffix_list[-1] == 'multibib':
for multibibmatch in multibib_re.finditer(content):
if Verbose:
print("multibib match ",multibibmatch.group(1))
if multibibmatch != None:
baselist = multibibmatch.group(1).split(',')
if Verbose:
print("multibib list ", baselist)
for i in range(len(baselist)):
file_list.append(os.path.join(targetdir, baselist[i]))
# now define the side effects
for file_name in file_list:
for suffix in suffix_list[:-1]:
env.SideEffect(file_name + suffix,target[0])
if Verbose:
print("side effect tst :",file_name + suffix, " target is ",str(target[0]))
env.Clean(target[0],file_name + suffix)
for aFile in aux_files:
aFile_base = SCons.Util.splitext(aFile)[0]
env.SideEffect(aFile_base + '.aux',target[0])
if Verbose:
print("side effect aux :",aFile_base + '.aux')
env.Clean(target[0],aFile_base + '.aux')
# read fls file to get all other files that latex creates and will read on the next pass
# remove files from list that we explicitly dealt with above
if os.path.isfile(flsfilename):
content = open(flsfilename, "r").read()
out_files = openout_re.findall(content)
myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf']
for filename in out_files[:]:
if filename in myfiles:
out_files.remove(filename)
env.SideEffect(out_files,target[0])
if Verbose:
print("side effect fls :",out_files)
env.Clean(target[0],out_files)
return (target, source) |
def open_grindstone(self):
"""
Opens a grindstone file and populates the grindstone with it's
contents.
Returns an empty grindstone json object if a file does not exist.
"""
try:
with open(self.grindstone_path, 'r') as f:
# Try opening the file
return json.loads(f.read())
# If the file is empty
except json.decoder.JSONDecodeError:
# Default return empty object with empty tasks list
return {'tasks': []}
# The file does not yet exist
except FileNotFoundError:
# Default return empty object with empty tasks list
return {'tasks': []} | Opens a grindstone file and populates the grindstone with it's
contents.
Returns an empty grindstone json object if a file does not exist. | Below is the the instruction that describes the task:
### Input:
Opens a grindstone file and populates the grindstone with it's
contents.
Returns an empty grindstone json object if a file does not exist.
### Response:
def open_grindstone(self):
"""
Opens a grindstone file and populates the grindstone with it's
contents.
Returns an empty grindstone json object if a file does not exist.
"""
try:
with open(self.grindstone_path, 'r') as f:
# Try opening the file
return json.loads(f.read())
# If the file is empty
except json.decoder.JSONDecodeError:
# Default return empty object with empty tasks list
return {'tasks': []}
# The file does not yet exist
except FileNotFoundError:
# Default return empty object with empty tasks list
return {'tasks': []} |
def brightness(level=100, group=0):
""" Assumes level is out of 100 """
if level not in range(0,101):
raise Exception("Brightness must be value between 0 and 100")
b = int(floor(level / 4.0) + 2) #lights want values 2-27
return (COMMANDS['ON'][group], Command(0x4E, b)) | Assumes level is out of 100 | Below is the the instruction that describes the task:
### Input:
Assumes level is out of 100
### Response:
def brightness(level=100, group=0):
""" Assumes level is out of 100 """
if level not in range(0,101):
raise Exception("Brightness must be value between 0 and 100")
b = int(floor(level / 4.0) + 2) #lights want values 2-27
return (COMMANDS['ON'][group], Command(0x4E, b)) |
def process_tsuite(tsuite):
"""Goes through the tsuite and processes "*.log" """
# scoop of output from all run-logs
tsuite["log_content"] = runlogs_to_html(tsuite["res_root"])
tsuite["aux_list"] = aux_listing(tsuite["aux_root"])
tsuite["hnames"] = extract_hook_names(tsuite)
return True | Goes through the tsuite and processes "*.log" | Below is the the instruction that describes the task:
### Input:
Goes through the tsuite and processes "*.log"
### Response:
def process_tsuite(tsuite):
"""Goes through the tsuite and processes "*.log" """
# scoop of output from all run-logs
tsuite["log_content"] = runlogs_to_html(tsuite["res_root"])
tsuite["aux_list"] = aux_listing(tsuite["aux_root"])
tsuite["hnames"] = extract_hook_names(tsuite)
return True |
def fire(self, name, operation, args=None, **kwargs):
"""Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
"""
if args:
if kwargs:
raise TypeError("specify args dict or keyword arguments, not both")
else:
args = kwargs
d = dict(op=operation, args=args)
headers = {'sender': self.add_sysname(self.name)}
dest = self.add_sysname(name)
def _fire(channel):
with Producer(channel) as producer:
producer.publish(d, routing_key=dest,
headers=headers, serializer=self._serializer,
exchange=self._exchange, declare=[self._exchange])
log.debug("sending message to %s", dest)
with connections[self._pool_conn].acquire(block=True) as conn:
_, channel = self.ensure(conn, _fire)
conn.maybe_close_channel(channel) | Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation | Below is the the instruction that describes the task:
### Input:
Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
### Response:
def fire(self, name, operation, args=None, **kwargs):
"""Send a message without waiting for a reply
@param name: name of destination service queue
@param operation: name of service operation to invoke
@param args: dictionary of keyword args to pass to operation.
Use this OR kwargs.
@param kwargs: additional args to pass to operation
"""
if args:
if kwargs:
raise TypeError("specify args dict or keyword arguments, not both")
else:
args = kwargs
d = dict(op=operation, args=args)
headers = {'sender': self.add_sysname(self.name)}
dest = self.add_sysname(name)
def _fire(channel):
with Producer(channel) as producer:
producer.publish(d, routing_key=dest,
headers=headers, serializer=self._serializer,
exchange=self._exchange, declare=[self._exchange])
log.debug("sending message to %s", dest)
with connections[self._pool_conn].acquire(block=True) as conn:
_, channel = self.ensure(conn, _fire)
conn.maybe_close_channel(channel) |
def output(self, resource):
"""Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
rv = resource(*args, **kwargs)
rv = self.responder(rv)
return rv
return wrapper | Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function | Below is the the instruction that describes the task:
### Input:
Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function
### Response:
def output(self, resource):
"""Wrap a resource (as a flask view function).
This is for cases where the resource does not directly return
a response object. Now everything should be a Response object.
:param resource: The resource as a flask view function
"""
@wraps(resource)
def wrapper(*args, **kwargs):
rv = resource(*args, **kwargs)
rv = self.responder(rv)
return rv
return wrapper |
def batch(data, batch_size, batch_size_fn=None):
"""Yield elements from data in chunks of batch_size."""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)
if minibatch:
yield minibatch | Yield elements from data in chunks of batch_size. | Below is the the instruction that describes the task:
### Input:
Yield elements from data in chunks of batch_size.
### Response:
def batch(data, batch_size, batch_size_fn=None):
"""Yield elements from data in chunks of batch_size."""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)
if minibatch:
yield minibatch |
def parse_coach_ec_df(infile):
"""Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions
"""
ec_df = pd.read_table(infile, delim_whitespace=True,
names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage',
'c_score', 'ec_number', 'binding_residues'])
ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',
'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]
ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
return ec_df | Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions | Below is the the instruction that describes the task:
### Input:
Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions
### Response:
def parse_coach_ec_df(infile):
"""Parse the EC.dat output file of COACH and return a dataframe of results
EC.dat contains the predicted EC number and active residues.
The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
Coverage, Confidence score, EC number, and Active site residues
Args:
infile (str): Path to EC.dat
Returns:
DataFrame: Pandas DataFrame summarizing EC number predictions
"""
ec_df = pd.read_table(infile, delim_whitespace=True,
names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident', 'seq_coverage',
'c_score', 'ec_number', 'binding_residues'])
ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',
'seq_ident', 'seq_coverage', 'c_score', 'ec_number', 'binding_residues']]
ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
return ec_df |
def _tool_to_dict(tool):
"""Parse a tool definition into a cwl2wdl style dictionary.
"""
out = {"name": _id_to_name(tool.tool["id"]),
"baseCommand": " ".join(tool.tool["baseCommand"]),
"arguments": [],
"inputs": [_input_to_dict(i) for i in tool.tool["inputs"]],
"outputs": [_output_to_dict(o) for o in tool.tool["outputs"]],
"requirements": _requirements_to_dict(tool.requirements + tool.hints),
"stdin": None, "stdout": None}
return out | Parse a tool definition into a cwl2wdl style dictionary. | Below is the the instruction that describes the task:
### Input:
Parse a tool definition into a cwl2wdl style dictionary.
### Response:
def _tool_to_dict(tool):
"""Parse a tool definition into a cwl2wdl style dictionary.
"""
out = {"name": _id_to_name(tool.tool["id"]),
"baseCommand": " ".join(tool.tool["baseCommand"]),
"arguments": [],
"inputs": [_input_to_dict(i) for i in tool.tool["inputs"]],
"outputs": [_output_to_dict(o) for o in tool.tool["outputs"]],
"requirements": _requirements_to_dict(tool.requirements + tool.hints),
"stdin": None, "stdout": None}
return out |
def feature_info(self):
"""
Returns information about the features available for this CPC.
Authorization requirements:
* Object-access permission to this CPC.
Returns:
:term:`iterable`:
An iterable where each item represents one feature that is
available for this CPC.
Each item is a dictionary with the following items:
* `name` (:term:`unicode string`): Name of the feature.
* `description` (:term:`unicode string`): Short description of
the feature.
* `state` (bool): Enablement state of the feature (`True` if the
enabled, `False` if disabled).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on CPC %s" %
self.name)
return feature_list | Returns information about the features available for this CPC.
Authorization requirements:
* Object-access permission to this CPC.
Returns:
:term:`iterable`:
An iterable where each item represents one feature that is
available for this CPC.
Each item is a dictionary with the following items:
* `name` (:term:`unicode string`): Name of the feature.
* `description` (:term:`unicode string`): Short description of
the feature.
* `state` (bool): Enablement state of the feature (`True` if the
enabled, `False` if disabled).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | Below is the the instruction that describes the task:
### Input:
Returns information about the features available for this CPC.
Authorization requirements:
* Object-access permission to this CPC.
Returns:
:term:`iterable`:
An iterable where each item represents one feature that is
available for this CPC.
Each item is a dictionary with the following items:
* `name` (:term:`unicode string`): Name of the feature.
* `description` (:term:`unicode string`): Short description of
the feature.
* `state` (bool): Enablement state of the feature (`True` if the
enabled, `False` if disabled).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
def feature_info(self):
"""
Returns information about the features available for this CPC.
Authorization requirements:
* Object-access permission to this CPC.
Returns:
:term:`iterable`:
An iterable where each item represents one feature that is
available for this CPC.
Each item is a dictionary with the following items:
* `name` (:term:`unicode string`): Name of the feature.
* `description` (:term:`unicode string`): Short description of
the feature.
* `state` (bool): Enablement state of the feature (`True` if the
enabled, `False` if disabled).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on CPC %s" %
self.name)
return feature_list |
def add_routes(fapp, routes, prefix=""):
"""Batch routes registering
Register routes to a blueprint/flask_app previously collected
with :func:`routes_collector`.
:param fapp: bluprint or flask_app to whom attach new routes.
:param routes: dict of routes collected by :func:`routes_collector`
:param prefix: url prefix under which register all routes
"""
for r in routes:
r['rule'] = prefix + r['rule']
fapp.add_url_rule(**r) | Batch routes registering
Register routes to a blueprint/flask_app previously collected
with :func:`routes_collector`.
:param fapp: bluprint or flask_app to whom attach new routes.
:param routes: dict of routes collected by :func:`routes_collector`
:param prefix: url prefix under which register all routes | Below is the the instruction that describes the task:
### Input:
Batch routes registering
Register routes to a blueprint/flask_app previously collected
with :func:`routes_collector`.
:param fapp: bluprint or flask_app to whom attach new routes.
:param routes: dict of routes collected by :func:`routes_collector`
:param prefix: url prefix under which register all routes
### Response:
def add_routes(fapp, routes, prefix=""):
"""Batch routes registering
Register routes to a blueprint/flask_app previously collected
with :func:`routes_collector`.
:param fapp: bluprint or flask_app to whom attach new routes.
:param routes: dict of routes collected by :func:`routes_collector`
:param prefix: url prefix under which register all routes
"""
for r in routes:
r['rule'] = prefix + r['rule']
fapp.add_url_rule(**r) |
def theme(name='readthedocs'):
"""set name to 'bootstrap' in case you want to use bootstrap.
This also requires the template sto be in the main dir"""
os.environ['SPHINX_THEME'] = name
if os.environ['SPHINX_THEME'] == 'bootstrap':
local('cp docs/source/_templates/layout_bootstrap.html docs/source/_templates/layout.html')
elif name is 'readthedocs':
return
else:
local('cp docs/source/_templates/layout_simple.html docs/source/_templates/layout.html') | set name to 'bootstrap' in case you want to use bootstrap.
This also requires the template sto be in the main dir | Below is the the instruction that describes the task:
### Input:
set name to 'bootstrap' in case you want to use bootstrap.
This also requires the template sto be in the main dir
### Response:
def theme(name='readthedocs'):
"""set name to 'bootstrap' in case you want to use bootstrap.
This also requires the template sto be in the main dir"""
os.environ['SPHINX_THEME'] = name
if os.environ['SPHINX_THEME'] == 'bootstrap':
local('cp docs/source/_templates/layout_bootstrap.html docs/source/_templates/layout.html')
elif name is 'readthedocs':
return
else:
local('cp docs/source/_templates/layout_simple.html docs/source/_templates/layout.html') |
def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
postinfo=rec,
kwd=kwd,
author=rec.user_name,
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | View the page. | Below is the the instruction that describes the task:
### Input:
View the page.
### Response:
def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
postinfo=rec,
kwd=kwd,
author=rec.user_name,
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) |
def QA_SU_save_future_min(engine, client=DATABASE):
"""save future_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_min(client=client) | save future_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE}) | Below is the the instruction that describes the task:
### Input:
save future_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
### Response:
def QA_SU_save_future_min(engine, client=DATABASE):
"""save future_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_future_min(client=client) |
def _remove_fronts_that_are_too_small(fronts, size):
"""
Removes all fronts from `fronts` which are strictly smaller than
`size` consecutive frequencies in length.
"""
ids = np.unique(fronts)
for id in ids:
if id == 0 or id == -1:
continue
front = _get_front_idxs_from_id(fronts, id)
if len(front) < size:
indexes = ([f for f, _ in front], [s for _, s in front])
fronts[indexes] = 0 | Removes all fronts from `fronts` which are strictly smaller than
`size` consecutive frequencies in length. | Below is the the instruction that describes the task:
### Input:
Removes all fronts from `fronts` which are strictly smaller than
`size` consecutive frequencies in length.
### Response:
def _remove_fronts_that_are_too_small(fronts, size):
"""
Removes all fronts from `fronts` which are strictly smaller than
`size` consecutive frequencies in length.
"""
ids = np.unique(fronts)
for id in ids:
if id == 0 or id == -1:
continue
front = _get_front_idxs_from_id(fronts, id)
if len(front) < size:
indexes = ([f for f, _ in front], [s for _, s in front])
fronts[indexes] = 0 |
def reset(self, params, repetition):
"""
Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Generate objects used by the experiment
- Learn all objects used by the experiment
"""
print params["name"], ":", repetition
self.debug = params.get("debug", False)
L2Params = json.loads('{' + params["l2_params"] + '}')
L4Params = json.loads('{' + params["l4_params"] + '}')
L6aParams = json.loads('{' + params["l6a_params"] + '}')
# Make sure random seed is different for each repetition
seed = params.get("seed", 42)
np.random.seed(seed + repetition)
random.seed(seed + repetition)
L2Params["seed"] = seed + repetition
L4Params["seed"] = seed + repetition
L6aParams["seed"] = seed + repetition
# Configure L6a params
numModules = params["num_modules"]
L6aParams["scale"] = [params["scale"]] * numModules
angle = params["angle"] / numModules
orientation = range(angle / 2, angle * numModules, angle)
L6aParams["orientation"] = np.radians(orientation).tolist()
# Create multi-column L2-L4-L6a network
self.numColumns = params["num_cortical_columns"]
network = Network()
network = createMultipleL246aLocationColumn(network=network,
numberOfColumns=self.numColumns,
L2Params=L2Params,
L4Params=L4Params,
L6aParams=L6aParams)
network.initialize()
self.network = network
self.sensorInput = []
self.motorInput = []
self.L2Regions = []
self.L4Regions = []
self.L6aRegions = []
for i in xrange(self.numColumns):
col = str(i)
self.sensorInput.append(network.regions["sensorInput_" + col].getSelf())
self.motorInput.append(network.regions["motorInput_" + col].getSelf())
self.L2Regions.append(network.regions["L2_" + col])
self.L4Regions.append(network.regions["L4_" + col])
self.L6aRegions.append(network.regions["L6a_" + col])
# Use the number of iterations as the number of objects. This will allow us
# to execute one iteration per object and use the "iteration" parameter as
# the object index
numObjects = params["iterations"]
# Generate feature SDRs
numFeatures = params["num_features"]
numOfMinicolumns = L4Params["columnCount"]
numOfActiveMinicolumns = params["num_active_minicolumns"]
self.featureSDR = [{
str(f): sorted(np.random.choice(numOfMinicolumns, numOfActiveMinicolumns))
for f in xrange(numFeatures)
} for _ in xrange(self.numColumns)]
# Generate objects used in the experiment
self.objects = generateObjects(numObjects=numObjects,
featuresPerObject=params["features_per_object"],
objectWidth=params["object_width"],
numFeatures=numFeatures,
distribution=params["feature_distribution"])
# Make sure the objects are unique
uniqueObjs = np.unique([{"features": obj["features"]}
for obj in self.objects])
assert len(uniqueObjs) == len(self.objects)
self.sdrSize = L2Params["sdrSize"]
# Learn objects
self.numLearningPoints = params["num_learning_points"]
self.numOfSensations = params["num_sensations"]
self.learnedObjects = {}
self.learn() | Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Generate objects used by the experiment
- Learn all objects used by the experiment | Below is the the instruction that describes the task:
### Input:
Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Generate objects used by the experiment
- Learn all objects used by the experiment
### Response:
def reset(self, params, repetition):
"""
Take the steps necessary to reset the experiment before each repetition:
- Make sure random seed is different for each repetition
- Create the L2-L4-L6a network
- Generate objects used by the experiment
- Learn all objects used by the experiment
"""
print params["name"], ":", repetition
self.debug = params.get("debug", False)
L2Params = json.loads('{' + params["l2_params"] + '}')
L4Params = json.loads('{' + params["l4_params"] + '}')
L6aParams = json.loads('{' + params["l6a_params"] + '}')
# Make sure random seed is different for each repetition
seed = params.get("seed", 42)
np.random.seed(seed + repetition)
random.seed(seed + repetition)
L2Params["seed"] = seed + repetition
L4Params["seed"] = seed + repetition
L6aParams["seed"] = seed + repetition
# Configure L6a params
numModules = params["num_modules"]
L6aParams["scale"] = [params["scale"]] * numModules
angle = params["angle"] / numModules
orientation = range(angle / 2, angle * numModules, angle)
L6aParams["orientation"] = np.radians(orientation).tolist()
# Create multi-column L2-L4-L6a network
self.numColumns = params["num_cortical_columns"]
network = Network()
network = createMultipleL246aLocationColumn(network=network,
numberOfColumns=self.numColumns,
L2Params=L2Params,
L4Params=L4Params,
L6aParams=L6aParams)
network.initialize()
self.network = network
self.sensorInput = []
self.motorInput = []
self.L2Regions = []
self.L4Regions = []
self.L6aRegions = []
for i in xrange(self.numColumns):
col = str(i)
self.sensorInput.append(network.regions["sensorInput_" + col].getSelf())
self.motorInput.append(network.regions["motorInput_" + col].getSelf())
self.L2Regions.append(network.regions["L2_" + col])
self.L4Regions.append(network.regions["L4_" + col])
self.L6aRegions.append(network.regions["L6a_" + col])
# Use the number of iterations as the number of objects. This will allow us
# to execute one iteration per object and use the "iteration" parameter as
# the object index
numObjects = params["iterations"]
# Generate feature SDRs
numFeatures = params["num_features"]
numOfMinicolumns = L4Params["columnCount"]
numOfActiveMinicolumns = params["num_active_minicolumns"]
self.featureSDR = [{
str(f): sorted(np.random.choice(numOfMinicolumns, numOfActiveMinicolumns))
for f in xrange(numFeatures)
} for _ in xrange(self.numColumns)]
# Generate objects used in the experiment
self.objects = generateObjects(numObjects=numObjects,
featuresPerObject=params["features_per_object"],
objectWidth=params["object_width"],
numFeatures=numFeatures,
distribution=params["feature_distribution"])
# Make sure the objects are unique
uniqueObjs = np.unique([{"features": obj["features"]}
for obj in self.objects])
assert len(uniqueObjs) == len(self.objects)
self.sdrSize = L2Params["sdrSize"]
# Learn objects
self.numLearningPoints = params["num_learning_points"]
self.numOfSensations = params["num_sensations"]
self.learnedObjects = {}
self.learn() |
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding) | Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_). | Below is the the instruction that describes the task:
### Input:
Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
### Response:
def raw_html(self) -> _RawHTML:
"""Bytes representation of the HTML content.
(`learn more <http://www.diveintopython3.net/strings.html>`_).
"""
if self._html:
return self._html
else:
return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding) |
def delete_namespace(self, namespace):
"""
Delete the specified CIM namespace in the WBEM server and
update this WBEMServer object to reflect the removed namespace
there.
The specified namespace must be empty (i.e. must not contain any
classes, instances, or qualifier types.
This method attempts the following approaches for deleting the
namespace, in order, until an approach succeeds:
1. Issuing the `DeleteInstance` operation using the CIM class
representing namespaces ('PG_Namespace' for OpenPegasus,
and 'CIM_Namespace' otherwise), against the Interop namespace.
This approach is typically supported in WBEM servers that
support the creation of CIM namespaces. This approach is
similar to the approach described in :term:`DSP0200`.
The approach described in the WBEM Server profile (:term:`DSP1092`) via
deleting the `CIM_WBEMServerNamespace` instance is not implemented
because that would also delete any classes, instances, and
qualifier types in the namespace.
Deleting namespaces using the `__Namespace` pseudo-class has been
deprecated already in DSP0200 1.1.0 (released in 01/2003), and pywbem
does not implement that approach.
Parameters:
namespace (:term:`string`): CIM namespace name. Must not be `None`.
The namespace may contain leading and a trailing slash, both of
which will be ignored.
Returns:
:term:`unicode string`: The specified CIM namespace name in its
standard format (i.e. without leading or trailing slash characters).
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
ModelError: An issue with the model implemented by the WBEM server.
CIMError: CIM_ERR_NOT_FOUND, Specified namespace does not exist.
CIMError: CIM_ERR_NAMESPACE_NOT_EMPTY, Specified namespace is not
empty.
Additional CIM errors.
"""
std_namespace = _ensure_unicode(namespace.strip('/'))
# Use approach 1: DeleteInstance of CIM class for namespaces
# Refresh the list of namespaces in this object to make sure
# it is up to date.
self._determine_namespaces()
if std_namespace not in self.namespaces:
raise CIMError(
CIM_ERR_NOT_FOUND,
_format("Specified namespace does not exist: {0!A}",
std_namespace),
conn_id=self.conn.conn_id)
ns_path = None
for p in self.namespace_paths:
if p.keybindings['Name'] == std_namespace:
ns_path = p
assert ns_path is not None
# Ensure the namespace is empty. We do not check for instances, because
# classes are a prerequisite for instances, so if no classes exist,
# no instances will exist.
# WBEM servers that do not support class operations (e.g. SFCB) will
# raise a CIMError with status CIM_ERR_NOT_SUPPORTED.
class_paths = self.conn.EnumerateClassNames(
namespace=std_namespace, ClassName=None, DeepInheritance=False)
quals = self.conn.EnumerateQualifiers(namespace=std_namespace)
if class_paths or quals:
raise CIMError(
CIM_ERR_NAMESPACE_NOT_EMPTY,
_format("Specified namespace {0!A} is not empty; it contains "
"{1} top-level classes and {2} qualifier types",
std_namespace, len(class_paths), len(quals)),
conn_id=self.conn.conn_id)
self.conn.DeleteInstance(ns_path)
# Refresh the list of namespaces in this object to remove the one
# we just deleted.
self._determine_namespaces()
return std_namespace | Delete the specified CIM namespace in the WBEM server and
update this WBEMServer object to reflect the removed namespace
there.
The specified namespace must be empty (i.e. must not contain any
classes, instances, or qualifier types.
This method attempts the following approaches for deleting the
namespace, in order, until an approach succeeds:
1. Issuing the `DeleteInstance` operation using the CIM class
representing namespaces ('PG_Namespace' for OpenPegasus,
and 'CIM_Namespace' otherwise), against the Interop namespace.
This approach is typically supported in WBEM servers that
support the creation of CIM namespaces. This approach is
similar to the approach described in :term:`DSP0200`.
The approach described in the WBEM Server profile (:term:`DSP1092`) via
deleting the `CIM_WBEMServerNamespace` instance is not implemented
because that would also delete any classes, instances, and
qualifier types in the namespace.
Deleting namespaces using the `__Namespace` pseudo-class has been
deprecated already in DSP0200 1.1.0 (released in 01/2003), and pywbem
does not implement that approach.
Parameters:
namespace (:term:`string`): CIM namespace name. Must not be `None`.
The namespace may contain leading and a trailing slash, both of
which will be ignored.
Returns:
:term:`unicode string`: The specified CIM namespace name in its
standard format (i.e. without leading or trailing slash characters).
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
ModelError: An issue with the model implemented by the WBEM server.
CIMError: CIM_ERR_NOT_FOUND, Specified namespace does not exist.
CIMError: CIM_ERR_NAMESPACE_NOT_EMPTY, Specified namespace is not
empty.
Additional CIM errors. | Below is the the instruction that describes the task:
### Input:
Delete the specified CIM namespace in the WBEM server and
update this WBEMServer object to reflect the removed namespace
there.
The specified namespace must be empty (i.e. must not contain any
classes, instances, or qualifier types.
This method attempts the following approaches for deleting the
namespace, in order, until an approach succeeds:
1. Issuing the `DeleteInstance` operation using the CIM class
representing namespaces ('PG_Namespace' for OpenPegasus,
and 'CIM_Namespace' otherwise), against the Interop namespace.
This approach is typically supported in WBEM servers that
support the creation of CIM namespaces. This approach is
similar to the approach described in :term:`DSP0200`.
The approach described in the WBEM Server profile (:term:`DSP1092`) via
deleting the `CIM_WBEMServerNamespace` instance is not implemented
because that would also delete any classes, instances, and
qualifier types in the namespace.
Deleting namespaces using the `__Namespace` pseudo-class has been
deprecated already in DSP0200 1.1.0 (released in 01/2003), and pywbem
does not implement that approach.
Parameters:
namespace (:term:`string`): CIM namespace name. Must not be `None`.
The namespace may contain leading and a trailing slash, both of
which will be ignored.
Returns:
:term:`unicode string`: The specified CIM namespace name in its
standard format (i.e. without leading or trailing slash characters).
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
ModelError: An issue with the model implemented by the WBEM server.
CIMError: CIM_ERR_NOT_FOUND, Specified namespace does not exist.
CIMError: CIM_ERR_NAMESPACE_NOT_EMPTY, Specified namespace is not
empty.
Additional CIM errors.
### Response:
def delete_namespace(self, namespace):
"""
Delete the specified CIM namespace in the WBEM server and
update this WBEMServer object to reflect the removed namespace
there.
The specified namespace must be empty (i.e. must not contain any
classes, instances, or qualifier types.
This method attempts the following approaches for deleting the
namespace, in order, until an approach succeeds:
1. Issuing the `DeleteInstance` operation using the CIM class
representing namespaces ('PG_Namespace' for OpenPegasus,
and 'CIM_Namespace' otherwise), against the Interop namespace.
This approach is typically supported in WBEM servers that
support the creation of CIM namespaces. This approach is
similar to the approach described in :term:`DSP0200`.
The approach described in the WBEM Server profile (:term:`DSP1092`) via
deleting the `CIM_WBEMServerNamespace` instance is not implemented
because that would also delete any classes, instances, and
qualifier types in the namespace.
Deleting namespaces using the `__Namespace` pseudo-class has been
deprecated already in DSP0200 1.1.0 (released in 01/2003), and pywbem
does not implement that approach.
Parameters:
namespace (:term:`string`): CIM namespace name. Must not be `None`.
The namespace may contain leading and a trailing slash, both of
which will be ignored.
Returns:
:term:`unicode string`: The specified CIM namespace name in its
standard format (i.e. without leading or trailing slash characters).
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
ModelError: An issue with the model implemented by the WBEM server.
CIMError: CIM_ERR_NOT_FOUND, Specified namespace does not exist.
CIMError: CIM_ERR_NAMESPACE_NOT_EMPTY, Specified namespace is not
empty.
Additional CIM errors.
"""
std_namespace = _ensure_unicode(namespace.strip('/'))
# Use approach 1: DeleteInstance of CIM class for namespaces
# Refresh the list of namespaces in this object to make sure
# it is up to date.
self._determine_namespaces()
if std_namespace not in self.namespaces:
raise CIMError(
CIM_ERR_NOT_FOUND,
_format("Specified namespace does not exist: {0!A}",
std_namespace),
conn_id=self.conn.conn_id)
ns_path = None
for p in self.namespace_paths:
if p.keybindings['Name'] == std_namespace:
ns_path = p
assert ns_path is not None
# Ensure the namespace is empty. We do not check for instances, because
# classes are a prerequisite for instances, so if no classes exist,
# no instances will exist.
# WBEM servers that do not support class operations (e.g. SFCB) will
# raise a CIMError with status CIM_ERR_NOT_SUPPORTED.
class_paths = self.conn.EnumerateClassNames(
namespace=std_namespace, ClassName=None, DeepInheritance=False)
quals = self.conn.EnumerateQualifiers(namespace=std_namespace)
if class_paths or quals:
raise CIMError(
CIM_ERR_NAMESPACE_NOT_EMPTY,
_format("Specified namespace {0!A} is not empty; it contains "
"{1} top-level classes and {2} qualifier types",
std_namespace, len(class_paths), len(quals)),
conn_id=self.conn.conn_id)
self.conn.DeleteInstance(ns_path)
# Refresh the list of namespaces in this object to remove the one
# we just deleted.
self._determine_namespaces()
return std_namespace |
def v(*args, **kwargs):
'''
print the name = values of any passed in variables
this prints out the passed in name, the value, and the file:line where the v()
method was called so you can easily find it and remove it later
example --
foo = 1
bar = [1, 2, 3]
out.v(foo, bar)
""" prints out:
foo = 1
bar =
[
0: 1,
1: 2,
2: 3
]
(/file:line)
"""
*args -- list -- the variables you want to see pretty printed for humans
'''
if not args:
raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
instance() | print the name = values of any passed in variables
this prints out the passed in name, the value, and the file:line where the v()
method was called so you can easily find it and remove it later
example --
foo = 1
bar = [1, 2, 3]
out.v(foo, bar)
""" prints out:
foo = 1
bar =
[
0: 1,
1: 2,
2: 3
]
(/file:line)
"""
*args -- list -- the variables you want to see pretty printed for humans | Below is the the instruction that describes the task:
### Input:
print the name = values of any passed in variables
this prints out the passed in name, the value, and the file:line where the v()
method was called so you can easily find it and remove it later
example --
foo = 1
bar = [1, 2, 3]
out.v(foo, bar)
""" prints out:
foo = 1
bar =
[
0: 1,
1: 2,
2: 3
]
(/file:line)
"""
*args -- list -- the variables you want to see pretty printed for humans
### Response:
def v(*args, **kwargs):
'''
print the name = values of any passed in variables
this prints out the passed in name, the value, and the file:line where the v()
method was called so you can easily find it and remove it later
example --
foo = 1
bar = [1, 2, 3]
out.v(foo, bar)
""" prints out:
foo = 1
bar =
[
0: 1,
1: 2,
2: 3
]
(/file:line)
"""
*args -- list -- the variables you want to see pretty printed for humans
'''
if not args:
raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
instance() |
def sendEmail(self, subject, body, toAddress=False):
    """
    sends an email using the agrcpythonemailer@gmail.com account
    """
    # Fall back to the configured default recipients when none are given;
    # recipients are a semicolon-separated string either way.
    recipients = (toAddress or self.toAddress).split(';')

    message = MIMEText(body)
    message['Subject'] = subject
    message['From'] = self.fromAddress
    message['To'] = ','.join(recipients)

    if self.testing:
        # Test mode: dump the message instead of talking to the server.
        print('***Begin Test Email Message***')
        print(message)
        print('***End Test Email Message***')
    else:
        smtp = SMTP(self.server, self.port)
        smtp.sendmail(self.fromAddress, recipients, message.as_string())
        smtp.quit()
        print('email sent')
### Input:
sends an email using the agrcpythonemailer@gmail.com account
### Response:
def sendEmail(self, subject, body, toAddress=False):
"""
sends an email using the agrcpythonemailer@gmail.com account
"""
if not toAddress:
toAddress = self.toAddress
toAddress = toAddress.split(';')
message = MIMEText(body)
message['Subject'] = subject
message['From'] = self.fromAddress
message['To'] = ','.join(toAddress)
if not self.testing:
s = SMTP(self.server, self.port)
s.sendmail(self.fromAddress, toAddress, message.as_string())
s.quit()
print('email sent')
else:
print('***Begin Test Email Message***')
print(message)
print('***End Test Email Message***') |
def validate_adapter_class(validate_class, adapter_class):
    """
    Raises an exception if validate_class is not a
    subclass of adapter_class.
    :param validate_class: The class to be validated.
    :type validate_class: class
    :param adapter_class: The class type to check against.
    :type adapter_class: class
    :raises: Adapter.InvalidAdapterTypeException
    """
    from chatterbot.adapters import Adapter

    # A dictionary is acceptable as long as it names an import path.
    if isinstance(validate_class, dict):
        if 'import_path' not in validate_class:
            message = 'The dictionary {} must contain a value for "import_path"'.format(
                str(validate_class)
            )
            raise Adapter.InvalidAdapterTypeException(message)

        # Continue validation against the dotted import path itself.
        validate_class = validate_class.get('import_path')

    if not issubclass(import_module(validate_class), adapter_class):
        message = '{} must be a subclass of {}'.format(
            validate_class,
            adapter_class.__name__
        )
        raise Adapter.InvalidAdapterTypeException(message)
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException | Below is the the instruction that describes the task:
### Input:
Raises an exception if validate_class is not a
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException
### Response:
def validate_adapter_class(validate_class, adapter_class):
"""
Raises an exception if validate_class is not a
subclass of adapter_class.
:param validate_class: The class to be validated.
:type validate_class: class
:param adapter_class: The class type to check against.
:type adapter_class: class
:raises: Adapter.InvalidAdapterTypeException
"""
from chatterbot.adapters import Adapter
# If a dictionary was passed in, check if it has an import_path attribute
if isinstance(validate_class, dict):
if 'import_path' not in validate_class:
raise Adapter.InvalidAdapterTypeException(
'The dictionary {} must contain a value for "import_path"'.format(
str(validate_class)
)
)
# Set the class to the import path for the next check
validate_class = validate_class.get('import_path')
if not issubclass(import_module(validate_class), adapter_class):
raise Adapter.InvalidAdapterTypeException(
'{} must be a subclass of {}'.format(
validate_class,
adapter_class.__name__
)
) |
def export(self, timestamp=None):
    """
    Get the current notebook data and export.
    """
    # A timestamp must already have been recorded before the export can
    # be named consistently.
    if self._timestamp is None:
        raise Exception("No timestamp set. Has the archive been initialized?")
    if self.skip_notebook_export:
        # Export only the captured objects, skipping the notebook itself.
        super(NotebookArchive, self).export(timestamp=self._timestamp,
                                            info={'notebook':self.notebook_name})
        return
    self.export_success = None
    name = self.get_namespace()
    # Unfortunate javascript hacks to get at notebook data
    capture_cmd = ((r"var capture = '%s._notebook_data=r\"\"\"'" % name)
                   + r"+json_string+'\"\"\"'; ")
    # Build JS that serializes the notebook to JSON, assigns it to this
    # archive's _notebook_data attribute via the kernel, then triggers
    # the actual HTML export back on the Python side.
    cmd = (r'var kernel = IPython.notebook.kernel; '
           + r'var json_data = IPython.notebook.toJSON(); '
           + r'var json_string = JSON.stringify(json_data); '
           + capture_cmd
           + "var pycmd = capture + ';%s._export_with_html()'; " % name
           + r"kernel.execute(pycmd)")
    tstamp = time.strftime(self.timestamp_format, self._timestamp)
    export_name = self._format(self.export_name, {'timestamp':tstamp, 'notebook':self.notebook_name})
    # NOTE(review): os.path.join with a single argument is a no-op here;
    # presumably os.path.abspath(self.root) alone was intended — confirm.
    print(('Export name: %r\nDirectory %r' % (export_name,
                                              os.path.join(os.path.abspath(self.root))))
          + '\n\nIf no output appears, please check holoviews.archive.last_export_status()')
    # The export runs asynchronously via the displayed Javascript; success
    # is only known later through last_export_status().
    display(Javascript(cmd)) | Get the current notebook data and export. | Below is the the instruction that describes the task:
### Input:
Get the current notebook data and export.
### Response:
def export(self, timestamp=None):
"""
Get the current notebook data and export.
"""
if self._timestamp is None:
raise Exception("No timestamp set. Has the archive been initialized?")
if self.skip_notebook_export:
super(NotebookArchive, self).export(timestamp=self._timestamp,
info={'notebook':self.notebook_name})
return
self.export_success = None
name = self.get_namespace()
# Unfortunate javascript hacks to get at notebook data
capture_cmd = ((r"var capture = '%s._notebook_data=r\"\"\"'" % name)
+ r"+json_string+'\"\"\"'; ")
cmd = (r'var kernel = IPython.notebook.kernel; '
+ r'var json_data = IPython.notebook.toJSON(); '
+ r'var json_string = JSON.stringify(json_data); '
+ capture_cmd
+ "var pycmd = capture + ';%s._export_with_html()'; " % name
+ r"kernel.execute(pycmd)")
tstamp = time.strftime(self.timestamp_format, self._timestamp)
export_name = self._format(self.export_name, {'timestamp':tstamp, 'notebook':self.notebook_name})
print(('Export name: %r\nDirectory %r' % (export_name,
os.path.join(os.path.abspath(self.root))))
+ '\n\nIf no output appears, please check holoviews.archive.last_export_status()')
display(Javascript(cmd)) |
def parse_annotations(
        self, annotation_file, genes, db_sel='UniProtKB',
        select_evidence=None, exclude_evidence=None,
        exclude_ref=None, strip_species=False, ignore_case=False):
    """Parse a GO annotation file (in GAF 2.0 format).

    GO annotation files can be downloaded from the
    `UniProt-GOA download site`__ or from their `FTP server`__.
    __ goa_download_
    __ goa_ftp_
    .. _goa_download: http://www.ebi.ac.uk/GOA/downloads
    .. _goa_ftp: ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/

    Parameters
    ----------
    annotation_file: str
        Path of the annotation file (in GAF 2.0 format).
    genes: List (tuple, set) of str
        List of valid gene names.
    db_sel: str, optional
        Select only annotations with this ``DB`` (column 1) value.
        If empty, disable filtering based on the ``DB`` value.
    select_evidence: list of str, optional
        Only include annotations with the given evidence codes.
        If not specified, allow all evidence codes, except for those listed
        in ``exclude_evidence``.
    exclude_evidence: list of str, optional
        Exclude all annotations with any of the given evidence codes.
        If ``select_evidence`` is specified, this parameter is ignored.
        If not specified, allow all evidence codes.
    exclude_ref: list of str, optional
        Exclude all annotations with the given DB:reference (column 6).
        Example: ``["PMID:2676709"]``. Note: This filter is currently
        ignored if an annotation has more than one reference.
    strip_species: bool, optional
        Undocumented.
    ignore_case: bool, optional
        Undocumented.

    Returns
    -------
    None
    """
    assert isinstance(annotation_file, str)
    # Bug fix: the docstring documents sets as valid for `genes`, but the
    # original assertion only allowed lists and tuples.
    assert isinstance(genes, (list, tuple, set))

    if not self.terms:
        raise ValueError('You need to first parse an OBO file!')

    if select_evidence is None:
        select_evidence = []
    if exclude_evidence is None:
        exclude_evidence = []
    if exclude_ref is None:
        exclude_ref = []

    # always overwrite all previously parsed annotations
    self.clear_annotation_data()

    # store genes
    self.genes = set(genes)  # store the list of genes for later use
    genes_upper = dict((g.upper(), g) for g in genes)
    logger.info('Read %d genes.', len(genes))

    # read annotations
    self.term_annotations = dict((id_, []) for id_ in self.terms)
    self.gene_annotations = dict((g, []) for g in self.genes)
    # gene_terms is used for statistics
    gene_terms = dict((g, set()) for g in self.genes)

    unknown_gene_names = Counter()
    unknown_gene_annotations = 0
    unknown_term_ids = Counter()
    unknown_term_annotations = 0

    # Parsing!
    logger.info('Parsing annotations...')
    n = 0
    excluded_evidence_annotations = 0
    excluded_reference_annotations = 0
    valid_annotations = 0
    # NOTE(review): csv.reader with an "encoding" kwarg implies the
    # unicodecsv package rather than the stdlib csv — confirm the import.
    with misc.smart_open_read(annotation_file, mode='rb',
                              try_gzip=True) as fh:
        reader = csv.reader(fh, dialect='excel-tab', encoding='UTF-8')
        for i, l in enumerate(reader):
            if not l:
                continue
            # keep only positive ("NOT"-free) annotations from the
            # selected database, if a database filter is set
            if ((not db_sel) or l[0] == db_sel) and l[3] != 'NOT':
                n += 1
                # test if evidence code is excluded
                if (select_evidence and l[6] not in select_evidence) \
                        or l[6] in exclude_evidence:
                    excluded_evidence_annotations += 1
                    continue
                # test if reference is excluded
                db_ref = []
                if l[5]:
                    db_ref = l[5].split('|')
                if len(db_ref) == 1 and db_ref[0] in exclude_ref:
                    excluded_reference_annotations += 1
                    continue
                # determine target gene
                if not l[2]:
                    raise Exception('Missing target gene in line %d:\n%s'
                                    % (i+1, '\t'.join(l)))
                gene = l[2]
                db_id = l[1]
                if strip_species:
                    # drop a trailing "_SPECIES" suffix, if present
                    try:
                        gene = gene[:gene.rindex('_')]
                    except ValueError:
                        pass
                term_id = l[4]
                evidence = l[6]
                invalid = False
                if (ignore_case and gene.upper() not in genes_upper) \
                        or ((not ignore_case) and gene not in self.genes):
                    unknown_gene_annotations += 1
                    unknown_gene_names[l[2]] += 1
                    invalid = True
                if term_id not in self.terms:
                    unknown_term_annotations += 1
                    unknown_term_ids[term_id] += 1
                    invalid = True
                if not invalid:
                    valid_annotations += 1
                    # if ignore_case, convert gene to "original" name
                    if ignore_case:
                        gene = genes_upper[gene.upper()]
                    term = self.terms[term_id]
                    # parse secondary information ("with" column)
                    with_ = []
                    if l[7]:
                        with_ = l[7].split('|')
                    # generate annotation
                    ann = GOAnnotation(
                        gene=gene, term=term,
                        evidence=evidence, db_id=db_id,
                        db_ref=db_ref, with_=with_)
                    # add annotation to global list
                    self.annotations.append(ann)
                    # add annotation under term ID
                    self.term_annotations[term_id].append(ann)
                    # add annotation under gene
                    self.gene_annotations[gene].append(ann)
                    gene_terms[gene].add(term_id)

    # output some statistics
    if n > 0:
        logger.info('Parsed %d positive GO annotations '
                    '(%d = %.1f%% excluded based on evidence type).',
                    n, excluded_evidence_annotations,
                    100*(excluded_evidence_annotations/float(n)))
    if unknown_gene_annotations > 0:
        # typo fix in log message: "unkonwn" -> "unknown"
        logger.warning('Warning: %d annotations with %d unknown gene '
                       'names.',
                       unknown_gene_annotations, len(unknown_gene_names))
    if unknown_term_annotations > 0:
        logger.warning('Warning: %d annotations with %d unknown term IDs.',
                       unknown_term_annotations, len(unknown_term_ids))
    logger.info('Found a total of %d valid annotations.',
                valid_annotations)
    logger.info('%d unique Gene-Term associations.',
                sum(len(gene_terms[g]) for g in genes))
GO annotation files can be downloaded from the
`UniProt-GOA download site`__ or from their `FTP server`__.
__ goa_download_
__ goa_ftp_
.. _goa_download: http://www.ebi.ac.uk/GOA/downloads
.. _goa_ftp: ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/
Parameters
----------
annotation_file: str
Path of the annotation file (in GAF 2.0 format).
genes: List (tuple, set) of str
List of valid gene names.
db_sel: str, optional
Select only annotations with this ``DB`` (column 1) value.
If empty, disable filtering based on the ``DB`` value.
select_evidence: list of str, optional
Only include annotations with the given evidence codes.
It not specified, allow all evidence codes, except for those listed
in ``exclude_evidence``.
exclude_evidence: list of str, optional
Exclude all annotations with any of the given evidence codes.
If ``select_evidence`` is specified, this parameter is ignored.
If not specified, allow all evidence codes.
exclude_ref: list of str, optional
Exclude all annotations with the given DB:reference (column 6).
Example: ``["PMID:2676709"]``. Note: This filter is currently
ignored if an annotation has more than one reference.
strip_species: bool, optional
Undocumented.
ignore_case: bool, optional
Undocumented.
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Parse a GO annotation file (in GAF 2.0 format).
GO annotation files can be downloaded from the
`UniProt-GOA download site`__ or from their `FTP server`__.
__ goa_download_
__ goa_ftp_
.. _goa_download: http://www.ebi.ac.uk/GOA/downloads
.. _goa_ftp: ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/
Parameters
----------
annotation_file: str
Path of the annotation file (in GAF 2.0 format).
genes: List (tuple, set) of str
List of valid gene names.
db_sel: str, optional
Select only annotations with this ``DB`` (column 1) value.
If empty, disable filtering based on the ``DB`` value.
select_evidence: list of str, optional
Only include annotations with the given evidence codes.
It not specified, allow all evidence codes, except for those listed
in ``exclude_evidence``.
exclude_evidence: list of str, optional
Exclude all annotations with any of the given evidence codes.
If ``select_evidence`` is specified, this parameter is ignored.
If not specified, allow all evidence codes.
exclude_ref: list of str, optional
Exclude all annotations with the given DB:reference (column 6).
Example: ``["PMID:2676709"]``. Note: This filter is currently
ignored if an annotation has more than one reference.
strip_species: bool, optional
Undocumented.
ignore_case: bool, optional
Undocumented.
Returns
-------
None
### Response:
def parse_annotations(
self, annotation_file, genes, db_sel='UniProtKB',
select_evidence=None, exclude_evidence=None,
exclude_ref=None, strip_species=False, ignore_case=False):
"""Parse a GO annotation file (in GAF 2.0 format).
GO annotation files can be downloaded from the
`UniProt-GOA download site`__ or from their `FTP server`__.
__ goa_download_
__ goa_ftp_
.. _goa_download: http://www.ebi.ac.uk/GOA/downloads
.. _goa_ftp: ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/
Parameters
----------
annotation_file: str
Path of the annotation file (in GAF 2.0 format).
genes: List (tuple, set) of str
List of valid gene names.
db_sel: str, optional
Select only annotations with this ``DB`` (column 1) value.
If empty, disable filtering based on the ``DB`` value.
select_evidence: list of str, optional
Only include annotations with the given evidence codes.
It not specified, allow all evidence codes, except for those listed
in ``exclude_evidence``.
exclude_evidence: list of str, optional
Exclude all annotations with any of the given evidence codes.
If ``select_evidence`` is specified, this parameter is ignored.
If not specified, allow all evidence codes.
exclude_ref: list of str, optional
Exclude all annotations with the given DB:reference (column 6).
Example: ``["PMID:2676709"]``. Note: This filter is currently
ignored if an annotation has more than one reference.
strip_species: bool, optional
Undocumented.
ignore_case: bool, optional
Undocumented.
Returns
-------
None
"""
assert isinstance(annotation_file, str)
assert isinstance(genes, (list, tuple))
if not self.terms:
raise ValueError('You need to first parse an OBO file!')
if select_evidence is None:
select_evidence = []
if exclude_evidence is None:
exclude_evidence = []
if exclude_ref is None:
exclude_ref = []
# always overwrite all previously parsed annotations
self.clear_annotation_data()
# store genes
self.genes = set(genes) # store the list of genes for later use
genes_upper = dict((g.upper(), g) for g in genes)
logger.info('Read %d genes.', len(genes))
# read annotations
self.term_annotations = dict((id_, []) for id_ in self.terms)
self.gene_annotations = dict((g, []) for g in self.genes)
# gene_terms is used for statistics
gene_terms = dict((g, set()) for g in self.genes)
# isoform_pattern = re.compile(r"UniProtKB:([A-Z][0-9A-Z]{5}-\d+)")
# gene_pattern = re.compile(r"[a-zA-Z0-9]+\.\d+$")
# pmid_pattern = re.compile(r"(?:PMID:\d+|DOI:[^\s]+)")
# uniprot_pattern = re.compile(r"UniProtKB:([A-Z][0-9A-Z]{5}(?:-\d+)?)")
unknown_gene_names = Counter()
unknown_gene_annotations = 0
unknown_term_ids = Counter()
unknown_term_annotations = 0
# Parsing!
logger.info('Parsing annotations...')
n = 0
excluded_evidence_annotations = 0
excluded_reference_annotations = 0
valid_annotations = 0
with misc.smart_open_read(annotation_file, mode='rb',
try_gzip=True) as fh:
reader = csv.reader(fh, dialect='excel-tab', encoding='UTF-8')
for i, l in enumerate(reader):
# gene = None
if not l:
continue
if ((not db_sel) or l[0] == db_sel) and l[3] != 'NOT':
n += 1
# test if evidence code is excluded
if (select_evidence and l[6] not in select_evidence) \
or l[6] in exclude_evidence:
excluded_evidence_annotations += 1
continue
# test if reference is excluded
db_ref = []
if l[5]:
db_ref = l[5].split('|')
if len(db_ref) == 1 and db_ref[0] in exclude_ref:
excluded_reference_annotations += 1
continue
# determine target gene
if not l[2]:
raise Exception('Missing target gene in line %d:\n%s'
% (i+1, '\t'.join(l)))
gene = l[2]
# db = l[0]
db_id = l[1]
if strip_species:
try:
gene = gene[:gene.rindex('_')]
except ValueError:
pass
term_id = l[4]
evidence = l[6]
invalid = False
if (ignore_case and gene.upper() not in genes_upper) \
or ((not ignore_case) and gene not in self.genes):
unknown_gene_annotations += 1
unknown_gene_names[l[2]] += 1
invalid = True
if term_id not in self.terms:
unknown_term_annotations += 1
unknown_term_ids[term_id] += 1
invalid = True
if not invalid:
valid_annotations += 1
# if ignore_case, convert gene to "original" name
if ignore_case:
gene = genes_upper[gene.upper()]
term = self.terms[term_id]
# parse secondary information
# (associated UniProt and PubMed entries)
# pmid = pmid_pattern.search(l[5])
# if pmid is not None: pmid = pmid.group(0)
# uniprot = uniprot_pattern.search(l[7])
# if uniprot is not None: uniprot = uniprot.group(1)
with_ = []
if l[7]:
with_ = l[7].split('|')
# generate annotation
ann = GOAnnotation(
gene=gene, term=term,
evidence=evidence, db_id=db_id,
db_ref=db_ref, with_=with_)
# add annotation to global list
self.annotations.append(ann)
# add annotation under term ID
self.term_annotations[term_id].append(ann)
# add annotation under gene
self.gene_annotations[gene].append(ann)
gene_terms[gene].add(term_id)
# output some statistics
if n > 0:
logger.info('Parsed %d positive GO annotations '
'(%d = %.1f%% excluded based on evidence type).',
n, excluded_evidence_annotations,
100*(excluded_evidence_annotations/float(n)))
if unknown_gene_annotations > 0:
logger.warning('Warning: %d annotations with %d unkonwn gene '
'names.',
unknown_gene_annotations, len(unknown_gene_names))
if unknown_term_annotations > 0:
logger.warning('Warning: %d annotations with %d unkonwn term IDs.',
unknown_term_annotations, len(unknown_term_ids))
logger.info('Found a total of %d valid annotations.',
valid_annotations)
logger.info('%d unique Gene-Term associations.',
sum(len(gene_terms[g]) for g in genes)) |
def create_asset_class(self, item: AssetClass):
    """ Inserts the record """
    # Open a fresh session, stage the record, and persist it.
    db_session = self.open_session()
    db_session.add(item)
    db_session.commit()
### Input:
Inserts the record
### Response:
def create_asset_class(self, item: AssetClass):
""" Inserts the record """
session = self.open_session()
session.add(item)
session.commit() |
def search(self, search_string):
    '''Finds every occurrence (including overlapping ones) of the search_string, including on the reverse strand. Returns a list where each element is a tuple (position, strand) where strand is in ['-', '+']. Positions are zero-based'''
    reference = self.seq.upper()
    query = search_string.upper()
    hits = []

    def _scan(pattern, strand):
        # Record every match of pattern, advancing by one position each
        # time so that overlapping occurrences are also found.
        start = reference.find(pattern)
        while start != -1:
            hits.append((start, strand))
            start = reference.find(pattern, start + 1)

    # Forward-strand matches first.
    _scan(query, '+')

    # Reverse-complement the query and scan the forward strand again;
    # this is equivalent to searching the reverse strand.
    rc = Fasta('x', query)
    rc.revcomp()
    _scan(rc.seq, '-')
    return hits
### Input:
Finds every occurrence (including overlapping ones) of the search_string, including on the reverse strand. Returns a list where each element is a tuple (position, strand) where strand is in ['-', '+']. Positions are zero-based
### Response:
def search(self, search_string):
'''Finds every occurrence (including overlapping ones) of the search_string, including on the reverse strand. Returns a list where each element is a tuple (position, strand) where strand is in ['-', '+']. Positions are zero-based'''
seq = self.seq.upper()
search_string = search_string.upper()
pos = 0
found = seq.find(search_string, pos)
hits = []
while found != -1:
hits.append((found, '+'))
pos = found + 1
found = seq.find(search_string, pos)
pos = 0
search_string = Fasta('x', search_string)
search_string.revcomp()
search_string = search_string.seq
found = seq.find(search_string, pos)
while found != -1:
hits.append((found, '-'))
pos = found + 1
found = seq.find(search_string, pos)
return hits |
def _normalize_window(window, nfft, library, dtype):
    """Normalise a window specification for a PSD calculation

    Parameters
    ----------
    window : `str`, `numpy.ndarray`, `None`
        the input window specification
    nfft : `int`
        the length of the Fourier transform, in samples
    library : `str`
        the name of the library that provides the PSD routine
    dtype : `type`
        the required type of the window array, only used if
        `library='lal'` is given

    Returns
    -------
    window : `numpy.ndarray`, `lal.REAL8Window`
        a numpy-, or `LAL`-format window array
    """
    # LAL routines need a LAL-format window object.
    if library == '_lal':
        if isinstance(window, numpy.ndarray):
            from ._lal import window_from_array
            return window_from_array(window)
        from ._lal import generate_window
        return generate_window(nfft, window=window, dtype=dtype)

    # Other libraries take a numpy array built by scipy's get_window;
    # resolve window-name aliases first.
    if isinstance(window, string_types):
        window = canonical_name(window)
    if isinstance(window, string_types + (tuple,)):
        return get_window(window, nfft)
    return None
Parameters
----------
window : `str`, `numpy.ndarray`, `None`
the input window specification
nfft : `int`
the length of the Fourier transform, in samples
library : `str`
the name of the library that provides the PSD routine
dtype : `type`
the required type of the window array, only used if
`library='lal'` is given
Returns
-------
window : `numpy.ndarray`, `lal.REAL8Window`
a numpy-, or `LAL`-format window array | Below is the the instruction that describes the task:
### Input:
Normalise a window specification for a PSD calculation
Parameters
----------
window : `str`, `numpy.ndarray`, `None`
the input window specification
nfft : `int`
the length of the Fourier transform, in samples
library : `str`
the name of the library that provides the PSD routine
dtype : `type`
the required type of the window array, only used if
`library='lal'` is given
Returns
-------
window : `numpy.ndarray`, `lal.REAL8Window`
a numpy-, or `LAL`-format window array
### Response:
def _normalize_window(window, nfft, library, dtype):
"""Normalise a window specification for a PSD calculation
Parameters
----------
window : `str`, `numpy.ndarray`, `None`
the input window specification
nfft : `int`
the length of the Fourier transform, in samples
library : `str`
the name of the library that provides the PSD routine
dtype : `type`
the required type of the window array, only used if
`library='lal'` is given
Returns
-------
window : `numpy.ndarray`, `lal.REAL8Window`
a numpy-, or `LAL`-format window array
"""
if library == '_lal' and isinstance(window, numpy.ndarray):
from ._lal import window_from_array
return window_from_array(window)
if library == '_lal':
from ._lal import generate_window
return generate_window(nfft, window=window, dtype=dtype)
if isinstance(window, string_types):
window = canonical_name(window)
if isinstance(window, string_types + (tuple,)):
return get_window(window, nfft)
return None |
def ls(system, user, local, include_missing):
    """List configuration files detected (and/or examined paths)."""
    # When no scope flag is given, fall back to listing every
    # auto-detected file across all scopes.
    if not (system or user or local):
        system = user = local = True
    paths = get_configfile_paths(system=system, user=user, local=local,
                                 only_existing=not include_missing)
    for config_path in paths:
        click.echo(config_path)
### Input:
List configuration files detected (and/or examined paths).
### Response:
def ls(system, user, local, include_missing):
"""List configuration files detected (and/or examined paths)."""
# default action is to list *all* auto-detected files
if not (system or user or local):
system = user = local = True
for path in get_configfile_paths(system=system, user=user, local=local,
only_existing=not include_missing):
click.echo(path) |
def add(self, subcmd_cb):
    """Add subcmd to the available subcommands for this object.
    It will have the supplied docstring, and subcmd_cb will be called
    when we want to run the command. min_len is the minimum length
    allowed to abbreviate the command. in_list indicates with the
    show command will be run when giving a list of all sub commands
    of this object. Some commands have long output like "show commands"
    so we might not want to show that.
    """
    name = subcmd_cb.name
    # Register the callback under its name and remember the name in the
    # ordered list used for command completion.
    self.subcmds[name] = subcmd_cb
    self.cmdlist.append(name)
It will have the supplied docstring, and subcmd_cb will be called
when we want to run the command. min_len is the minimum length
allowed to abbreviate the command. in_list indicates with the
show command will be run when giving a list of all sub commands
of this object. Some commands have long output like "show commands"
so we might not want to show that. | Below is the the instruction that describes the task:
### Input:
Add subcmd to the available subcommands for this object.
It will have the supplied docstring, and subcmd_cb will be called
when we want to run the command. min_len is the minimum length
allowed to abbreviate the command. in_list indicates with the
show command will be run when giving a list of all sub commands
of this object. Some commands have long output like "show commands"
so we might not want to show that.
### Response:
def add(self, subcmd_cb):
"""Add subcmd to the available subcommands for this object.
It will have the supplied docstring, and subcmd_cb will be called
when we want to run the command. min_len is the minimum length
allowed to abbreviate the command. in_list indicates with the
show command will be run when giving a list of all sub commands
of this object. Some commands have long output like "show commands"
so we might not want to show that.
"""
subcmd_name = subcmd_cb.name
self.subcmds[subcmd_name] = subcmd_cb
# We keep a list of subcommands to assist command completion
self.cmdlist.append(subcmd_name) |
def connected_sites( self, site_labels=None ):
    """
    Searches the lattice to find sets of sites that are contiguously neighbouring.
    Mutually exclusive sets of contiguous sites are returned as Cluster objects.
    Args:
        site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search.
            This can be a list::
                [ 'A', 'B' ]
            a set::
                ( 'A', 'B' )
            or a string::
                'A'.
    Returns:
        (List(Cluster)): List of Cluster objects for groups of contiguous sites.
    """
    # Restrict the search to the requested labels, or use every site.
    if site_labels:
        selected_sites = self.select_sites( site_labels )
    else:
        selected_sites = self.sites
    # Start from one single-site cluster per selected site.
    initial_clusters = [ cluster.Cluster( [ site ] ) for site in selected_sites ]
    if site_labels:
        # Sites with other labels act as barriers: remove them from each
        # cluster's neighbour set so they cannot connect clusters.
        blocking_sites = self.site_labels - set( site_labels )
        for c in initial_clusters:
            c.remove_sites_from_neighbours( blocking_sites )
    final_clusters = []
    while initial_clusters: # loop until initial_clusters is empty
        this_cluster = initial_clusters.pop(0)
        # Greedily absorb every remaining cluster adjacent to this one;
        # each merge can create new adjacencies, hence the inner loop.
        while this_cluster.neighbours:
            neighbouring_clusters = [ c for c in initial_clusters if this_cluster.is_neighbouring( c ) ]
            for nc in neighbouring_clusters:
                initial_clusters.remove( nc )
                this_cluster = this_cluster.merge( nc )
        final_clusters.append( this_cluster )
    return final_clusters | Searches the lattice to find sets of sites that are contiguously neighbouring.
Mutually exclusive sets of contiguous sites are returned as Cluster objects.
Args:
site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search.
This can be a list::
[ 'A', 'B' ]
a set::
( 'A', 'B' )
or a string::
'A'.
Returns:
(List(Cluster)): List of Cluster objects for groups of contiguous sites. | Below is the the instruction that describes the task:
### Input:
Searches the lattice to find sets of sites that are contiguously neighbouring.
Mutually exclusive sets of contiguous sites are returned as Cluster objects.
Args:
site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search.
This can be a list::
[ 'A', 'B' ]
a set::
( 'A', 'B' )
or a string::
'A'.
Returns:
(List(Cluster)): List of Cluster objects for groups of contiguous sites.
### Response:
def connected_sites( self, site_labels=None ):
"""
Searches the lattice to find sets of sites that are contiguously neighbouring.
Mutually exclusive sets of contiguous sites are returned as Cluster objects.
Args:
site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search.
This can be a list::
[ 'A', 'B' ]
a set::
( 'A', 'B' )
or a string::
'A'.
Returns:
(List(Cluster)): List of Cluster objects for groups of contiguous sites.
"""
if site_labels:
selected_sites = self.select_sites( site_labels )
else:
selected_sites = self.sites
initial_clusters = [ cluster.Cluster( [ site ] ) for site in selected_sites ]
if site_labels:
blocking_sites = self.site_labels - set( site_labels )
for c in initial_clusters:
c.remove_sites_from_neighbours( blocking_sites )
final_clusters = []
while initial_clusters: # loop until initial_clusters is empty
this_cluster = initial_clusters.pop(0)
while this_cluster.neighbours:
neighbouring_clusters = [ c for c in initial_clusters if this_cluster.is_neighbouring( c ) ]
for nc in neighbouring_clusters:
initial_clusters.remove( nc )
this_cluster = this_cluster.merge( nc )
final_clusters.append( this_cluster )
return final_clusters |
def remove(self, iterable, data=None, index=0):
"""Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed
"""
if index == len(iterable):
if self.is_terminal:
if data:
self.data.remove(data)
if len(self.data) == 0:
self.is_terminal = False
else:
self.data.clear()
self.is_terminal = False
return True
else:
return False
elif iterable[index] in self.children:
return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
else:
return False | Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed | Below is the the instruction that describes the task:
### Input:
Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed
### Response:
def remove(self, iterable, data=None, index=0):
"""Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed
"""
if index == len(iterable):
if self.is_terminal:
if data:
self.data.remove(data)
if len(self.data) == 0:
self.is_terminal = False
else:
self.data.clear()
self.is_terminal = False
return True
else:
return False
elif iterable[index] in self.children:
return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
else:
return False |
def optimize_structure_handler(rule, handler):
"""
Produce an "optimized" version of handler for the dispatcher to
limit reference lookups.
"""
def runner(walk, dispatcher, node):
handler(dispatcher, node)
return
yield # pragma: no cover
return runner | Produce an "optimized" version of handler for the dispatcher to
limit reference lookups. | Below is the the instruction that describes the task:
### Input:
Produce an "optimized" version of handler for the dispatcher to
limit reference lookups.
### Response:
def optimize_structure_handler(rule, handler):
"""
Produce an "optimized" version of handler for the dispatcher to
limit reference lookups.
"""
def runner(walk, dispatcher, node):
handler(dispatcher, node)
return
yield # pragma: no cover
return runner |
def meid(number, separator=u' '):
'''
Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6
'''
if isinstance(number, six.string_types):
number = re.sub(r'[\s-]', '', number)
try:
number = '%014X' % int(number, 16)
except ValueError:
if len(number) < 18 and number.isdigit():
return meid('%014X' % int(number), separator)
else:
raise ValueError(_('Invalid MEID, size mismatch'))
else:
if len(number) not in (14, 15):
raise ValueError(_('Invalid MEID, size mismatch'))
elif isinstance(number, six.integer_types):
if number > 0xfffffffffffffff:
raise ValueError(_('Invalid MEID, size mismatch'))
return meid(('%014X' % number)[:14], separator)
else:
raise TypeError(_('Invalid MEID, input type invalid'))
number = number.upper()
region = number[:2]
manufacturer = number[2:8]
serial_number = number[8:14]
check_digit = number[14:]
if check_digit == '':
check_digit = luhn_calc(number, chars='0123456789ABCDEF')
groups = (region, manufacturer, serial_number, check_digit)
return separator.join(list(filter(None, groups))) | Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6 | Below is the the instruction that describes the task:
### Input:
Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6
### Response:
def meid(number, separator=u' '):
'''
Printable Mobile Equipment Identifier (MEID) number.
>>> print(meid(123456789012345678))
1B 69B4BA 630F34 6
>>> print(meid('1B69B4BA630F34'))
1B 69B4BA 630F34 6
'''
if isinstance(number, six.string_types):
number = re.sub(r'[\s-]', '', number)
try:
number = '%014X' % int(number, 16)
except ValueError:
if len(number) < 18 and number.isdigit():
return meid('%014X' % int(number), separator)
else:
raise ValueError(_('Invalid MEID, size mismatch'))
else:
if len(number) not in (14, 15):
raise ValueError(_('Invalid MEID, size mismatch'))
elif isinstance(number, six.integer_types):
if number > 0xfffffffffffffff:
raise ValueError(_('Invalid MEID, size mismatch'))
return meid(('%014X' % number)[:14], separator)
else:
raise TypeError(_('Invalid MEID, input type invalid'))
number = number.upper()
region = number[:2]
manufacturer = number[2:8]
serial_number = number[8:14]
check_digit = number[14:]
if check_digit == '':
check_digit = luhn_calc(number, chars='0123456789ABCDEF')
groups = (region, manufacturer, serial_number, check_digit)
return separator.join(list(filter(None, groups))) |
def set_provider_links(self, resource_ids=None):
"""Sets a provider chain in order from the most recent source to
the originating source.
:param resource_ids: the new source
:type resource_ids: ``osid.id.Id[]``
:raise: ``InvalidArgument`` -- ``resource_ids`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``resource_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if resource_ids is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['provider_link_ids'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(resource_ids, metadata, array=True):
self._my_map['providerLinkIds'] = []
for i in resource_ids:
self._my_map['providerLinkIds'].append(str(i))
else:
raise InvalidArgument() | Sets a provider chain in order from the most recent source to
the originating source.
:param resource_ids: the new source
:type resource_ids: ``osid.id.Id[]``
:raise: ``InvalidArgument`` -- ``resource_ids`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``resource_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Sets a provider chain in order from the most recent source to
the originating source.
:param resource_ids: the new source
:type resource_ids: ``osid.id.Id[]``
:raise: ``InvalidArgument`` -- ``resource_ids`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``resource_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_provider_links(self, resource_ids=None):
"""Sets a provider chain in order from the most recent source to
the originating source.
:param resource_ids: the new source
:type resource_ids: ``osid.id.Id[]``
:raise: ``InvalidArgument`` -- ``resource_ids`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``resource_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if resource_ids is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['provider_link_ids'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(resource_ids, metadata, array=True):
self._my_map['providerLinkIds'] = []
for i in resource_ids:
self._my_map['providerLinkIds'].append(str(i))
else:
raise InvalidArgument() |
def iflatten_dict_values(node, depth=0):
"""
>>> from utool.util_dict import * # NOQA
"""
if isinstance(node, dict):
_iter = (iflatten_dict_values(value) for value in six.itervalues(node))
return util_iter.iflatten(_iter)
else:
return node | >>> from utool.util_dict import * # NOQA | Below is the the instruction that describes the task:
### Input:
>>> from utool.util_dict import * # NOQA
### Response:
def iflatten_dict_values(node, depth=0):
"""
>>> from utool.util_dict import * # NOQA
"""
if isinstance(node, dict):
_iter = (iflatten_dict_values(value) for value in six.itervalues(node))
return util_iter.iflatten(_iter)
else:
return node |
def re_initiate_model_list(self, model_list_or_dict, core_objects_dict, model_name, model_class, model_key):
"""Recreate model list
The method re-initiate a handed list or dictionary of models with the new dictionary of core-objects.
:param model_list_or_dict: could be a list or dictionary of one model type
:param core_objects_dict: new dictionary of one type of core-elements (rafcon.core)
:param model_name: prop_name for the core-element hold by the model, this core-element is covered by the model
:param model_class: model-class of the elements that should be insert
:param model_key: if model_list_or_dict is a dictionary the key is the id of the respective element
(e.g. 'state_id')
:return:
"""
if model_name == "income":
if self.income.income != self.state.income:
self._add_model(self.income, self.state.income, IncomeModel)
return
for _ in range(len(model_list_or_dict)):
self.remove_additional_model(model_list_or_dict, core_objects_dict, model_name, model_key)
if core_objects_dict:
for _ in core_objects_dict:
self.add_missing_model(model_list_or_dict, core_objects_dict, model_name, model_class, model_key) | Recreate model list
The method re-initiate a handed list or dictionary of models with the new dictionary of core-objects.
:param model_list_or_dict: could be a list or dictionary of one model type
:param core_objects_dict: new dictionary of one type of core-elements (rafcon.core)
:param model_name: prop_name for the core-element hold by the model, this core-element is covered by the model
:param model_class: model-class of the elements that should be insert
:param model_key: if model_list_or_dict is a dictionary the key is the id of the respective element
(e.g. 'state_id')
:return: | Below is the the instruction that describes the task:
### Input:
Recreate model list
The method re-initiate a handed list or dictionary of models with the new dictionary of core-objects.
:param model_list_or_dict: could be a list or dictionary of one model type
:param core_objects_dict: new dictionary of one type of core-elements (rafcon.core)
:param model_name: prop_name for the core-element hold by the model, this core-element is covered by the model
:param model_class: model-class of the elements that should be insert
:param model_key: if model_list_or_dict is a dictionary the key is the id of the respective element
(e.g. 'state_id')
:return:
### Response:
def re_initiate_model_list(self, model_list_or_dict, core_objects_dict, model_name, model_class, model_key):
"""Recreate model list
The method re-initiate a handed list or dictionary of models with the new dictionary of core-objects.
:param model_list_or_dict: could be a list or dictionary of one model type
:param core_objects_dict: new dictionary of one type of core-elements (rafcon.core)
:param model_name: prop_name for the core-element hold by the model, this core-element is covered by the model
:param model_class: model-class of the elements that should be insert
:param model_key: if model_list_or_dict is a dictionary the key is the id of the respective element
(e.g. 'state_id')
:return:
"""
if model_name == "income":
if self.income.income != self.state.income:
self._add_model(self.income, self.state.income, IncomeModel)
return
for _ in range(len(model_list_or_dict)):
self.remove_additional_model(model_list_or_dict, core_objects_dict, model_name, model_key)
if core_objects_dict:
for _ in core_objects_dict:
self.add_missing_model(model_list_or_dict, core_objects_dict, model_name, model_class, model_key) |
def _ssh_state(chunks, st_kwargs,
kwargs, test=False):
'''
Function to run a state with the given chunk via salt-ssh
'''
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'])
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.data.decode(salt.utils.json.loads(stdout, object_hook=salt.utils.data.encode_dict))
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
return salt.utils.data.decode(stdout) | Function to run a state with the given chunk via salt-ssh | Below is the the instruction that describes the task:
### Input:
Function to run a state with the given chunk via salt-ssh
### Response:
def _ssh_state(chunks, st_kwargs,
kwargs, test=False):
'''
Function to run a state with the given chunk via salt-ssh
'''
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'])
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
__opts__['thin_dir'],
test,
trans_tar_sum,
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.data.decode(salt.utils.json.loads(stdout, object_hook=salt.utils.data.encode_dict))
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(str(e))
# If for some reason the json load fails, return the stdout
return salt.utils.data.decode(stdout) |
def create(self, environments):
"""
Method to create environments vip
:param environments vip: Dict containing environments vip desired
to be created on database
:return: None
"""
data = {'environments_vip': environments}
uri = 'api/v3/environment-vip/'
return super(ApiEnvironmentVip, self).post(uri, data) | Method to create environments vip
:param environments vip: Dict containing environments vip desired
to be created on database
:return: None | Below is the the instruction that describes the task:
### Input:
Method to create environments vip
:param environments vip: Dict containing environments vip desired
to be created on database
:return: None
### Response:
def create(self, environments):
"""
Method to create environments vip
:param environments vip: Dict containing environments vip desired
to be created on database
:return: None
"""
data = {'environments_vip': environments}
uri = 'api/v3/environment-vip/'
return super(ApiEnvironmentVip, self).post(uri, data) |
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch,
selectFieldNames):
""" Return a single matching row with the requested field values from the
the requested table or None if nothing matched.
tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance
conn: Owned connection acquired from ConnectionFactory.get()
fieldsToMatch: Dictionary of internal fieldName/value mappings that
identify the desired rows. If a value is an instance of
ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
operator 'IN' will be used in the corresponding SQL
predicate; if the value is bool: "IS TRUE/FALSE"; if the
value is None: "IS NULL"; '=' will be used for all other
cases.
selectFieldNames:
list of fields to return, using internal field names
retval: A sequence of field values of the matching row in the order
of the given field names; or None if there was no match.
"""
rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames, maxRows=1)
if rows:
assert len(rows) == 1, repr(len(rows))
result = rows[0]
else:
result = None
return result | Return a single matching row with the requested field values from the
the requested table or None if nothing matched.
tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance
conn: Owned connection acquired from ConnectionFactory.get()
fieldsToMatch: Dictionary of internal fieldName/value mappings that
identify the desired rows. If a value is an instance of
ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
operator 'IN' will be used in the corresponding SQL
predicate; if the value is bool: "IS TRUE/FALSE"; if the
value is None: "IS NULL"; '=' will be used for all other
cases.
selectFieldNames:
list of fields to return, using internal field names
retval: A sequence of field values of the matching row in the order
of the given field names; or None if there was no match. | Below is the the instruction that describes the task:
### Input:
Return a single matching row with the requested field values from the
the requested table or None if nothing matched.
tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance
conn: Owned connection acquired from ConnectionFactory.get()
fieldsToMatch: Dictionary of internal fieldName/value mappings that
identify the desired rows. If a value is an instance of
ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
operator 'IN' will be used in the corresponding SQL
predicate; if the value is bool: "IS TRUE/FALSE"; if the
value is None: "IS NULL"; '=' will be used for all other
cases.
selectFieldNames:
list of fields to return, using internal field names
retval: A sequence of field values of the matching row in the order
of the given field names; or None if there was no match.
### Response:
def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch,
selectFieldNames):
""" Return a single matching row with the requested field values from the
the requested table or None if nothing matched.
tableInfo: Table information: a ClientJobsDAO._TableInfoBase instance
conn: Owned connection acquired from ConnectionFactory.get()
fieldsToMatch: Dictionary of internal fieldName/value mappings that
identify the desired rows. If a value is an instance of
ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the
operator 'IN' will be used in the corresponding SQL
predicate; if the value is bool: "IS TRUE/FALSE"; if the
value is None: "IS NULL"; '=' will be used for all other
cases.
selectFieldNames:
list of fields to return, using internal field names
retval: A sequence of field values of the matching row in the order
of the given field names; or None if there was no match.
"""
rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,
selectFieldNames, maxRows=1)
if rows:
assert len(rows) == 1, repr(len(rows))
result = rows[0]
else:
result = None
return result |
def from_image(cls, img_filename, mipmap=False, **kwargs):
"""Uses Pyglet's image.load function to generate a Texture from an image file. If 'mipmap', then texture will
have mipmap layers calculated."""
img = pyglet.image.load(img_filename)
tex = img.get_mipmapped_texture() if mipmap else img.get_texture()
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
return cls(id=tex.id, data=tex, mipmap=mipmap, **kwargs) | Uses Pyglet's image.load function to generate a Texture from an image file. If 'mipmap', then texture will
have mipmap layers calculated. | Below is the the instruction that describes the task:
### Input:
Uses Pyglet's image.load function to generate a Texture from an image file. If 'mipmap', then texture will
have mipmap layers calculated.
### Response:
def from_image(cls, img_filename, mipmap=False, **kwargs):
"""Uses Pyglet's image.load function to generate a Texture from an image file. If 'mipmap', then texture will
have mipmap layers calculated."""
img = pyglet.image.load(img_filename)
tex = img.get_mipmapped_texture() if mipmap else img.get_texture()
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
return cls(id=tex.id, data=tex, mipmap=mipmap, **kwargs) |
def init(name, *args, **kwargs):
"""Instantiate a plugin from the catalog.
"""
if name in _PLUGIN_CATALOG:
if rapport.config.get_int("rapport", "verbosity") >= 2:
print("Initialize plugin {0}: {1} {2}".format(name, args, kwargs))
try:
return _PLUGIN_CATALOG[name](*args, **kwargs)
except (ValueError, TypeError) as e:
print("Failed to initialize plugin {0}: {1}!".format(name, e), file=sys.stderr)
else:
print("Failed to initialize plugin {0}: Not in catalog!".format(name), file=sys.stderr) | Instantiate a plugin from the catalog. | Below is the the instruction that describes the task:
### Input:
Instantiate a plugin from the catalog.
### Response:
def init(name, *args, **kwargs):
"""Instantiate a plugin from the catalog.
"""
if name in _PLUGIN_CATALOG:
if rapport.config.get_int("rapport", "verbosity") >= 2:
print("Initialize plugin {0}: {1} {2}".format(name, args, kwargs))
try:
return _PLUGIN_CATALOG[name](*args, **kwargs)
except (ValueError, TypeError) as e:
print("Failed to initialize plugin {0}: {1}!".format(name, e), file=sys.stderr)
else:
print("Failed to initialize plugin {0}: Not in catalog!".format(name), file=sys.stderr) |
def draw(self):
"""Draws ASCII canvas on the screen."""
if sys.stdout.isatty(): # pragma: no cover
from asciimatics.screen import Screen
Screen.wrapper(self._do_draw)
else:
for line in self.canvas:
print("".join(line)) | Draws ASCII canvas on the screen. | Below is the the instruction that describes the task:
### Input:
Draws ASCII canvas on the screen.
### Response:
def draw(self):
"""Draws ASCII canvas on the screen."""
if sys.stdout.isatty(): # pragma: no cover
from asciimatics.screen import Screen
Screen.wrapper(self._do_draw)
else:
for line in self.canvas:
print("".join(line)) |
def builder_from_source(source, filename, system_includes,
nonsystem_includes, quiet=False):
"""Utility method that returns an ASTBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
ASTBuilder
"""
return ASTBuilder(tokenize.get_tokens(source),
filename,
system_includes,
nonsystem_includes,
quiet=quiet) | Utility method that returns an ASTBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
ASTBuilder | Below is the the instruction that describes the task:
### Input:
Utility method that returns an ASTBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
ASTBuilder
### Response:
def builder_from_source(source, filename, system_includes,
nonsystem_includes, quiet=False):
"""Utility method that returns an ASTBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
ASTBuilder
"""
return ASTBuilder(tokenize.get_tokens(source),
filename,
system_includes,
nonsystem_includes,
quiet=quiet) |
def to_dict(self):
"""Converts the set of values into a dictionary. Unset values are excluded."""
return {value.label: value.value for value in self.__values if not value.unset} | Converts the set of values into a dictionary. Unset values are excluded. | Below is the the instruction that describes the task:
### Input:
Converts the set of values into a dictionary. Unset values are excluded.
### Response:
def to_dict(self):
"""Converts the set of values into a dictionary. Unset values are excluded."""
return {value.label: value.value for value in self.__values if not value.unset} |
def ip_bracket(addr):
'''
Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs.
'''
addr = ipaddress.ip_address(addr)
return ('[{}]' if addr.version == 6 else '{}').format(addr) | Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs. | Below is the the instruction that describes the task:
### Input:
Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs.
### Response:
def ip_bracket(addr):
'''
Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs.
'''
addr = ipaddress.ip_address(addr)
return ('[{}]' if addr.version == 6 else '{}').format(addr) |
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port']))) | Pre-fork we need to create the zmq router device | Below is the the instruction that describes the task:
### Input:
Pre-fork we need to create the zmq router device
### Response:
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the zmq router device
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port']))) |
def _get_connection(self):
"""Return native connection object."""
kwargs = {
'username': self.account,
'api_key': self.secret_key,
}
# Only add kwarg for servicenet if True because user could set
# environment variable 'RACKSPACE_SERVICENET' separately.
if self.servicenet:
kwargs['servicenet'] = True
if self.authurl:
kwargs['authurl'] = self.authurl
return cloudfiles.get_connection(**kwargs) | Return native connection object. | Below is the the instruction that describes the task:
### Input:
Return native connection object.
### Response:
def _get_connection(self):
"""Return native connection object."""
kwargs = {
'username': self.account,
'api_key': self.secret_key,
}
# Only add kwarg for servicenet if True because user could set
# environment variable 'RACKSPACE_SERVICENET' separately.
if self.servicenet:
kwargs['servicenet'] = True
if self.authurl:
kwargs['authurl'] = self.authurl
return cloudfiles.get_connection(**kwargs) |
def get_blobstore(layout):
"""Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout.
"""
if layout.is_s3:
from wal_e.blobstore import s3
blobstore = s3
elif layout.is_wabs:
from wal_e.blobstore import wabs
blobstore = wabs
elif layout.is_swift:
from wal_e.blobstore import swift
blobstore = swift
elif layout.is_gs:
from wal_e.blobstore import gs
blobstore = gs
elif layout.is_file:
from wal_e.blobstore import file
blobstore = file
return blobstore | Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout. | Below is the the instruction that describes the task:
### Input:
Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout.
### Response:
def get_blobstore(layout):
"""Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout.
"""
if layout.is_s3:
from wal_e.blobstore import s3
blobstore = s3
elif layout.is_wabs:
from wal_e.blobstore import wabs
blobstore = wabs
elif layout.is_swift:
from wal_e.blobstore import swift
blobstore = swift
elif layout.is_gs:
from wal_e.blobstore import gs
blobstore = gs
elif layout.is_file:
from wal_e.blobstore import file
blobstore = file
return blobstore |
def upgrade():
"""Upgrade database."""
op.create_table(
'records_metadata',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column(
'id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
sa.dialects.postgresql.JSON(
none_as_null=True), 'postgresql',
), nullable=True),
sa.Column('version_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'records_metadata_version',
sa.Column('created', sa.DateTime(),
autoincrement=False, nullable=True),
sa.Column('updated', sa.DateTime(),
autoincrement=False, nullable=True),
sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(),
autoincrement=False, nullable=False),
sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
sa.dialects.postgresql.JSON(
none_as_null=True), 'postgresql',
), autoincrement=False, nullable=True),
sa.Column('version_id', sa.Integer(),
autoincrement=False, nullable=True),
sa.Column('transaction_id', sa.BigInteger(),
autoincrement=False, nullable=False),
sa.Column('end_transaction_id',
sa.BigInteger(), nullable=True),
sa.Column('operation_type',
sa.SmallInteger(), nullable=False),
sa.PrimaryKeyConstraint('id', 'transaction_id')
)
op.create_index(
op.f('ix_records_metadata_version_end_transaction_id'),
'records_metadata_version', ['end_transaction_id'], unique=False
)
op.create_index(
op.f('ix_records_metadata_version_operation_type'),
'records_metadata_version', ['operation_type'], unique=False
)
op.create_index(
op.f('ix_records_metadata_version_transaction_id'),
'records_metadata_version', ['transaction_id'], unique=False
) | Upgrade database. | Below is the the instruction that describes the task:
### Input:
Upgrade database.
### Response:
def upgrade():
"""Upgrade database."""
op.create_table(
'records_metadata',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column(
'id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
sa.dialects.postgresql.JSON(
none_as_null=True), 'postgresql',
), nullable=True),
sa.Column('version_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'records_metadata_version',
sa.Column('created', sa.DateTime(),
autoincrement=False, nullable=True),
sa.Column('updated', sa.DateTime(),
autoincrement=False, nullable=True),
sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(),
autoincrement=False, nullable=False),
sa.Column('json', sqlalchemy_utils.JSONType().with_variant(
sa.dialects.postgresql.JSON(
none_as_null=True), 'postgresql',
), autoincrement=False, nullable=True),
sa.Column('version_id', sa.Integer(),
autoincrement=False, nullable=True),
sa.Column('transaction_id', sa.BigInteger(),
autoincrement=False, nullable=False),
sa.Column('end_transaction_id',
sa.BigInteger(), nullable=True),
sa.Column('operation_type',
sa.SmallInteger(), nullable=False),
sa.PrimaryKeyConstraint('id', 'transaction_id')
)
op.create_index(
op.f('ix_records_metadata_version_end_transaction_id'),
'records_metadata_version', ['end_transaction_id'], unique=False
)
op.create_index(
op.f('ix_records_metadata_version_operation_type'),
'records_metadata_version', ['operation_type'], unique=False
)
op.create_index(
op.f('ix_records_metadata_version_transaction_id'),
'records_metadata_version', ['transaction_id'], unique=False
) |
def merge_replacement_files(tmpdir, mergefile):
"""Merge all replacement files in a directory into a single file"""
# The fixes suggested by clang-tidy >= 4.0.0 are given under
# the top level key 'Diagnostics' in the output yaml files
mergekey="Diagnostics"
merged=[]
for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
content = yaml.safe_load(open(replacefile, 'r'))
if not content:
continue # Skip empty files.
merged.extend(content.get(mergekey, []))
if merged:
# MainSourceFile: The key is required by the definition inside
# include/clang/Tooling/ReplacementsYaml.h, but the value
# is actually never used inside clang-apply-replacements,
# so we set it to '' here.
output = { 'MainSourceFile': '', mergekey: merged }
with open(mergefile, 'w') as out:
yaml.safe_dump(output, out)
else:
# Empty the file:
open(mergefile, 'w').close() | Merge all replacement files in a directory into a single file | Below is the instruction that describes the task:
### Input:
Merge all replacement files in a directory into a single file
### Response:
def merge_replacement_files(tmpdir, mergefile):
"""Merge all replacement files in a directory into a single file"""
# The fixes suggested by clang-tidy >= 4.0.0 are given under
# the top level key 'Diagnostics' in the output yaml files
mergekey="Diagnostics"
merged=[]
for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
content = yaml.safe_load(open(replacefile, 'r'))
if not content:
continue # Skip empty files.
merged.extend(content.get(mergekey, []))
if merged:
# MainSourceFile: The key is required by the definition inside
# include/clang/Tooling/ReplacementsYaml.h, but the value
# is actually never used inside clang-apply-replacements,
# so we set it to '' here.
output = { 'MainSourceFile': '', mergekey: merged }
with open(mergefile, 'w') as out:
yaml.safe_dump(output, out)
else:
# Empty the file:
open(mergefile, 'w').close() |
def delete(self, force=False):
"""
Deletes this hosted zone. After this method is ran, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
self._halt_if_already_deleted()
if force:
# Forcing deletion by cleaning up all record sets first. We'll
# do it all in one change set.
cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
for rrset in self.record_sets:
# You can delete a HostedZone if there are only SOA and NS
# entries left. So delete everything but SOA/NS entries.
if rrset.rrset_type not in ['SOA', 'NS']:
cset.add_change('DELETE', rrset)
if cset.deletions or cset.creations:
# Bombs away.
self.connection._change_resource_record_sets(cset)
# Now delete the HostedZone.
retval = self.connection.delete_hosted_zone_by_id(self.id)
# Used to protect against modifying a deleted HostedZone.
self._is_deleted = True
return retval | Deletes this hosted zone. After this method is ran, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request. | Below is the instruction that describes the task:
### Input:
Deletes this hosted zone. After this method is ran, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
### Response:
def delete(self, force=False):
"""
Deletes this hosted zone. After this method is ran, you won't be able
to add records, or do anything else with the zone. You'd need to
re-create it, as zones are read-only after creation.
:keyword bool force: If ``True``, delete the
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
means nuking all associated record sets. If ``False``, an
exception is raised if this
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
has record sets.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
self._halt_if_already_deleted()
if force:
# Forcing deletion by cleaning up all record sets first. We'll
# do it all in one change set.
cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
for rrset in self.record_sets:
# You can delete a HostedZone if there are only SOA and NS
# entries left. So delete everything but SOA/NS entries.
if rrset.rrset_type not in ['SOA', 'NS']:
cset.add_change('DELETE', rrset)
if cset.deletions or cset.creations:
# Bombs away.
self.connection._change_resource_record_sets(cset)
# Now delete the HostedZone.
retval = self.connection.delete_hosted_zone_by_id(self.id)
# Used to protect against modifying a deleted HostedZone.
self._is_deleted = True
return retval |
def _configure_logger():
"""Configure the logging module."""
if not app.debug:
_configure_logger_for_production(logging.getLogger())
elif not app.testing:
        _configure_logger_for_debugging(logging.getLogger()) | Configure the logging module. | Below is the instruction that describes the task:
### Input:
Configure the logging module.
### Response:
def _configure_logger():
"""Configure the logging module."""
if not app.debug:
_configure_logger_for_production(logging.getLogger())
elif not app.testing:
_configure_logger_for_debugging(logging.getLogger()) |
def _compute_block_attributes(function):
"""
:param function: A normalized function object.
:returns: A dictionary of basic block addresses to tuples of attributes.
"""
# The attributes we use are the distance form function start, distance from function exit and whether
# or not it has a subfunction call
distances_from_start = FunctionDiff._distances_from_function_start(function)
distances_from_exit = FunctionDiff._distances_from_function_exit(function)
call_sites = function.call_sites
attributes = {}
for block in function.graph.nodes():
if block in call_sites:
number_of_subfunction_calls = len(call_sites[block])
else:
number_of_subfunction_calls = 0
# there really shouldn't be blocks that can't be reached from the start, but there are for now
dist_start = distances_from_start[block] if block in distances_from_start else 10000
dist_exit = distances_from_exit[block] if block in distances_from_exit else 10000
attributes[block] = (dist_start, dist_exit, number_of_subfunction_calls)
return attributes | :param function: A normalized function object.
    :returns: A dictionary of basic block addresses to tuples of attributes. | Below is the instruction that describes the task:
### Input:
:param function: A normalized function object.
:returns: A dictionary of basic block addresses to tuples of attributes.
### Response:
def _compute_block_attributes(function):
"""
:param function: A normalized function object.
:returns: A dictionary of basic block addresses to tuples of attributes.
"""
# The attributes we use are the distance form function start, distance from function exit and whether
# or not it has a subfunction call
distances_from_start = FunctionDiff._distances_from_function_start(function)
distances_from_exit = FunctionDiff._distances_from_function_exit(function)
call_sites = function.call_sites
attributes = {}
for block in function.graph.nodes():
if block in call_sites:
number_of_subfunction_calls = len(call_sites[block])
else:
number_of_subfunction_calls = 0
# there really shouldn't be blocks that can't be reached from the start, but there are for now
dist_start = distances_from_start[block] if block in distances_from_start else 10000
dist_exit = distances_from_exit[block] if block in distances_from_exit else 10000
attributes[block] = (dist_start, dist_exit, number_of_subfunction_calls)
return attributes |
def add_item(self, item):
"""
Add a new script or phrase to the folder.
"""
item.parent = self
#self.phrases[phrase.description] = phrase
        self.items.append(item) | Add a new script or phrase to the folder. | Below is the instruction that describes the task:
### Input:
Add a new script or phrase to the folder.
### Response:
def add_item(self, item):
"""
Add a new script or phrase to the folder.
"""
item.parent = self
#self.phrases[phrase.description] = phrase
self.items.append(item) |
def list_kadastrale_afdelingen(self):
'''
List all `kadastrale afdelingen` in Flanders.
:param integer sort: Field to sort on.
:rtype: A :class:`list` of :class:`Afdeling`.
'''
def creator():
gemeentes = self.list_gemeenten()
res = []
for g in gemeentes:
res += self.list_kadastrale_afdelingen_by_gemeente(g)
return res
if self.caches['permanent'].is_configured:
key = 'list_afdelingen_rest'
afdelingen = self.caches['permanent'].get_or_create(key, creator)
else:
afdelingen = creator()
return afdelingen | List all `kadastrale afdelingen` in Flanders.
:param integer sort: Field to sort on.
    :rtype: A :class:`list` of :class:`Afdeling`. | Below is the instruction that describes the task:
### Input:
List all `kadastrale afdelingen` in Flanders.
:param integer sort: Field to sort on.
:rtype: A :class:`list` of :class:`Afdeling`.
### Response:
def list_kadastrale_afdelingen(self):
'''
List all `kadastrale afdelingen` in Flanders.
:param integer sort: Field to sort on.
:rtype: A :class:`list` of :class:`Afdeling`.
'''
def creator():
gemeentes = self.list_gemeenten()
res = []
for g in gemeentes:
res += self.list_kadastrale_afdelingen_by_gemeente(g)
return res
if self.caches['permanent'].is_configured:
key = 'list_afdelingen_rest'
afdelingen = self.caches['permanent'].get_or_create(key, creator)
else:
afdelingen = creator()
return afdelingen |
def get_structure_by_id(self, cod_id, **kwargs):
"""
Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A Structure.
"""
r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
return Structure.from_str(r.text, fmt="cif", **kwargs) | Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
    A Structure. | Below is the instruction that describes the task:
### Input:
Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A Structure.
### Response:
def get_structure_by_id(self, cod_id, **kwargs):
"""
Queries the COD for a structure by id.
Args:
cod_id (int): COD id.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A Structure.
"""
r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
return Structure.from_str(r.text, fmt="cif", **kwargs) |
def setCurrentRecord(self, record, autoAdd=False):
"""
Sets the index for this combobox to the inputed record instance.
:param record <orb.Table>
:return <bool> success
"""
if record is not None and not Table.recordcheck(record):
return False
# don't reassign the current record
# clear the record
if record is None:
self._currentRecord = None
blocked = self.signalsBlocked()
self.blockSignals(True)
self.setCurrentIndex(-1)
self.blockSignals(blocked)
if not blocked:
self.currentRecordChanged.emit(None)
return True
elif record == self.currentRecord():
return False
self._currentRecord = record
found = False
blocked = self.signalsBlocked()
self.blockSignals(True)
for i in range(self.count()):
stored = unwrapVariant(self.itemData(i, Qt.UserRole))
if stored == record:
self.setCurrentIndex(i)
found = True
break
if not found and autoAdd:
self.addRecord(record)
self.setCurrentIndex(self.count() - 1)
self.blockSignals(blocked)
if not blocked:
self.currentRecordChanged.emit(record)
return False | Sets the index for this combobox to the inputed record instance.
:param record <orb.Table>
    :return <bool> success | Below is the instruction that describes the task:
### Input:
Sets the index for this combobox to the inputed record instance.
:param record <orb.Table>
:return <bool> success
### Response:
def setCurrentRecord(self, record, autoAdd=False):
"""
Sets the index for this combobox to the inputed record instance.
:param record <orb.Table>
:return <bool> success
"""
if record is not None and not Table.recordcheck(record):
return False
# don't reassign the current record
# clear the record
if record is None:
self._currentRecord = None
blocked = self.signalsBlocked()
self.blockSignals(True)
self.setCurrentIndex(-1)
self.blockSignals(blocked)
if not blocked:
self.currentRecordChanged.emit(None)
return True
elif record == self.currentRecord():
return False
self._currentRecord = record
found = False
blocked = self.signalsBlocked()
self.blockSignals(True)
for i in range(self.count()):
stored = unwrapVariant(self.itemData(i, Qt.UserRole))
if stored == record:
self.setCurrentIndex(i)
found = True
break
if not found and autoAdd:
self.addRecord(record)
self.setCurrentIndex(self.count() - 1)
self.blockSignals(blocked)
if not blocked:
self.currentRecordChanged.emit(record)
return False |
def find_attacker_slider(dest_list, occ_bb, piece_bb, target_bb, pos,
domain):
""" Find a slider attacker
Parameters
----------
dest_list : list
To store the results.
occ_bb : int, bitboard
Occupancy bitboard.
piece_bb : int, bitboard
Bitboard with the position of the attacker piece.
target_bb : int, bitboard
Occupancy bitboard without any of the sliders in question.
pos : int
Target position.
pos_map : function
Mapping between a board position and its position in a single
rotated/translated rank produced with domain_trans.
domain_trans : function
Transformation from a rank/file/diagonal/anti-diagonal containing pos
to a single rank
pos_inv_map : function
Inverse of pos_map
"""
pos_map, domain_trans, pos_inv_map = domain
r = reach[pos_map(pos)][domain_trans(target_bb, pos)]
m = r & domain_trans(piece_bb, pos)
while m:
r = m&-m
rpos = r.bit_length()-1
if not (ray[rpos][pos_map(pos)] & domain_trans(occ_bb, pos)):
dest_list.append(pos_inv_map(rpos, pos))
m ^= r | Find a slider attacker
Parameters
----------
dest_list : list
To store the results.
occ_bb : int, bitboard
Occupancy bitboard.
piece_bb : int, bitboard
Bitboard with the position of the attacker piece.
target_bb : int, bitboard
Occupancy bitboard without any of the sliders in question.
pos : int
Target position.
pos_map : function
Mapping between a board position and its position in a single
rotated/translated rank produced with domain_trans.
domain_trans : function
Transformation from a rank/file/diagonal/anti-diagonal containing pos
to a single rank
pos_inv_map : function
    Inverse of pos_map | Below is the instruction that describes the task:
### Input:
Find a slider attacker
Parameters
----------
dest_list : list
To store the results.
occ_bb : int, bitboard
Occupancy bitboard.
piece_bb : int, bitboard
Bitboard with the position of the attacker piece.
target_bb : int, bitboard
Occupancy bitboard without any of the sliders in question.
pos : int
Target position.
pos_map : function
Mapping between a board position and its position in a single
rotated/translated rank produced with domain_trans.
domain_trans : function
Transformation from a rank/file/diagonal/anti-diagonal containing pos
to a single rank
pos_inv_map : function
Inverse of pos_map
### Response:
def find_attacker_slider(dest_list, occ_bb, piece_bb, target_bb, pos,
domain):
""" Find a slider attacker
Parameters
----------
dest_list : list
To store the results.
occ_bb : int, bitboard
Occupancy bitboard.
piece_bb : int, bitboard
Bitboard with the position of the attacker piece.
target_bb : int, bitboard
Occupancy bitboard without any of the sliders in question.
pos : int
Target position.
pos_map : function
Mapping between a board position and its position in a single
rotated/translated rank produced with domain_trans.
domain_trans : function
Transformation from a rank/file/diagonal/anti-diagonal containing pos
to a single rank
pos_inv_map : function
Inverse of pos_map
"""
pos_map, domain_trans, pos_inv_map = domain
r = reach[pos_map(pos)][domain_trans(target_bb, pos)]
m = r & domain_trans(piece_bb, pos)
while m:
r = m&-m
rpos = r.bit_length()-1
if not (ray[rpos][pos_map(pos)] & domain_trans(occ_bb, pos)):
dest_list.append(pos_inv_map(rpos, pos))
m ^= r |
async def forward_to(self, *args, **kwargs):
"""
Forwards the message. Shorthand for
`telethon.client.messages.MessageMethods.forward_messages`
with both ``messages`` and ``from_peer`` already set.
If you need to forward more than one message at once, don't use
this `forward_to` method. Use a
`telethon.client.telegramclient.TelegramClient` instance directly.
"""
kwargs['messages'] = self.id
kwargs['from_peer'] = await self.get_input_chat()
return await self._client.forward_messages(*args, **kwargs) | Forwards the message. Shorthand for
`telethon.client.messages.MessageMethods.forward_messages`
with both ``messages`` and ``from_peer`` already set.
If you need to forward more than one message at once, don't use
this `forward_to` method. Use a
    `telethon.client.telegramclient.TelegramClient` instance directly. | Below is the instruction that describes the task:
### Input:
Forwards the message. Shorthand for
`telethon.client.messages.MessageMethods.forward_messages`
with both ``messages`` and ``from_peer`` already set.
If you need to forward more than one message at once, don't use
this `forward_to` method. Use a
`telethon.client.telegramclient.TelegramClient` instance directly.
### Response:
async def forward_to(self, *args, **kwargs):
"""
Forwards the message. Shorthand for
`telethon.client.messages.MessageMethods.forward_messages`
with both ``messages`` and ``from_peer`` already set.
If you need to forward more than one message at once, don't use
this `forward_to` method. Use a
`telethon.client.telegramclient.TelegramClient` instance directly.
"""
kwargs['messages'] = self.id
kwargs['from_peer'] = await self.get_input_chat()
return await self._client.forward_messages(*args, **kwargs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.