code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def great_circle_dist(lat1, lon1, lat2, lon2):
"""
Get the distance (in meters) between two lat/lon points
via the Haversine formula.
Parameters
----------
lat1, lon1, lat2, lon2 : float
Latitude and longitude in degrees.
Returns
-------
dist : float
Distance in meters.
"""
radius = 6372795 # meters
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
dlat = lat2 - lat1
dlon = lon2 - lon1
# formula from:
# http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
a = math.pow(math.sin(dlat / 2), 2)
b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2)
d = 2 * radius * math.asin(math.sqrt(a + b))
return d | Get the distance (in meters) between two lat/lon points
via the Haversine formula.
Parameters
----------
lat1, lon1, lat2, lon2 : float
Latitude and longitude in degrees.
Returns
-------
dist : float
Distance in meters. | Below is the the instruction that describes the task:
### Input:
Get the distance (in meters) between two lat/lon points
via the Haversine formula.
Parameters
----------
lat1, lon1, lat2, lon2 : float
Latitude and longitude in degrees.
Returns
-------
dist : float
Distance in meters.
### Response:
def great_circle_dist(lat1, lon1, lat2, lon2):
"""
Get the distance (in meters) between two lat/lon points
via the Haversine formula.
Parameters
----------
lat1, lon1, lat2, lon2 : float
Latitude and longitude in degrees.
Returns
-------
dist : float
Distance in meters.
"""
radius = 6372795 # meters
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
dlat = lat2 - lat1
dlon = lon2 - lon1
# formula from:
# http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
a = math.pow(math.sin(dlat / 2), 2)
b = math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2)
d = 2 * radius * math.asin(math.sqrt(a + b))
return d |
def parse_agi_result(line):
"""Parse AGI results using Regular expression.
AGI Result examples::
100 result=0 Trying...
200 result=0
200 result=-1
200 result=132456
200 result= (timeout)
510 Invalid or unknown command
520-Invalid command syntax. Proper usage follows:
int() argument must be a string, a bytes-like object or a number, not
'NoneType'
HANGUP
"""
# print("--------------\n", line)
if line == 'HANGUP':
return {'error': 'AGIResultHangup',
'msg': 'User hungup during execution'}
kwargs = dict(code=0, response="", line=line)
m = re_code.search(line)
try:
kwargs.update(m.groupdict())
except AttributeError:
# None has no attribute groupdict
pass
return agi_code_check(**kwargs) | Parse AGI results using Regular expression.
AGI Result examples::
100 result=0 Trying...
200 result=0
200 result=-1
200 result=132456
200 result= (timeout)
510 Invalid or unknown command
520-Invalid command syntax. Proper usage follows:
int() argument must be a string, a bytes-like object or a number, not
'NoneType'
HANGUP | Below is the the instruction that describes the task:
### Input:
Parse AGI results using Regular expression.
AGI Result examples::
100 result=0 Trying...
200 result=0
200 result=-1
200 result=132456
200 result= (timeout)
510 Invalid or unknown command
520-Invalid command syntax. Proper usage follows:
int() argument must be a string, a bytes-like object or a number, not
'NoneType'
HANGUP
### Response:
def parse_agi_result(line):
"""Parse AGI results using Regular expression.
AGI Result examples::
100 result=0 Trying...
200 result=0
200 result=-1
200 result=132456
200 result= (timeout)
510 Invalid or unknown command
520-Invalid command syntax. Proper usage follows:
int() argument must be a string, a bytes-like object or a number, not
'NoneType'
HANGUP
"""
# print("--------------\n", line)
if line == 'HANGUP':
return {'error': 'AGIResultHangup',
'msg': 'User hungup during execution'}
kwargs = dict(code=0, response="", line=line)
m = re_code.search(line)
try:
kwargs.update(m.groupdict())
except AttributeError:
# None has no attribute groupdict
pass
return agi_code_check(**kwargs) |
def addhash(frame,**kw):
'''
helper function to add hashes to the given frame
given in the dictionary d returned from firsthash.
Parameters:
-----------
frame : frame to hash.
Keywords:
---------
same as genhash
Returns frame with added hashes, although it will be added in
place.
'''
hashes = genhash(frame,**kw);
frame['data'] = rfn.rec_append_fields(
frame['data'],'hash',hashes);
return frame; | helper function to add hashes to the given frame
given in the dictionary d returned from firsthash.
Parameters:
-----------
frame : frame to hash.
Keywords:
---------
same as genhash
Returns frame with added hashes, although it will be added in
place. | Below is the the instruction that describes the task:
### Input:
helper function to add hashes to the given frame
given in the dictionary d returned from firsthash.
Parameters:
-----------
frame : frame to hash.
Keywords:
---------
same as genhash
Returns frame with added hashes, although it will be added in
place.
### Response:
def addhash(frame,**kw):
'''
helper function to add hashes to the given frame
given in the dictionary d returned from firsthash.
Parameters:
-----------
frame : frame to hash.
Keywords:
---------
same as genhash
Returns frame with added hashes, although it will be added in
place.
'''
hashes = genhash(frame,**kw);
frame['data'] = rfn.rec_append_fields(
frame['data'],'hash',hashes);
return frame; |
def should_execute(self, workload):
"""
If we have been suspended by i3bar, only execute those modules that set the keep_alive flag to a truthy
value. See the docs on the suspend_signal_handler method of the io module for more information.
"""
if not self._suspended.is_set():
return True
workload = unwrap_workload(workload)
return hasattr(workload, 'keep_alive') and getattr(workload, 'keep_alive') | If we have been suspended by i3bar, only execute those modules that set the keep_alive flag to a truthy
value. See the docs on the suspend_signal_handler method of the io module for more information. | Below is the the instruction that describes the task:
### Input:
If we have been suspended by i3bar, only execute those modules that set the keep_alive flag to a truthy
value. See the docs on the suspend_signal_handler method of the io module for more information.
### Response:
def should_execute(self, workload):
"""
If we have been suspended by i3bar, only execute those modules that set the keep_alive flag to a truthy
value. See the docs on the suspend_signal_handler method of the io module for more information.
"""
if not self._suspended.is_set():
return True
workload = unwrap_workload(workload)
return hasattr(workload, 'keep_alive') and getattr(workload, 'keep_alive') |
def add_store(name, store, saltenv='base'):
'''
Store a certificate to the given store
name
The certificate to store, this can use local paths
or salt:// paths
store
The store to add the certificate to
saltenv
The salt environment to use, this is ignored if a local
path is specified
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
cert_file = __salt__['cp.cache_file'](name, saltenv)
if cert_file is False:
ret['result'] = False
ret['comment'] += 'Certificate file not found.'
else:
cert_serial = __salt__['certutil.get_cert_serial'](cert_file)
serials = __salt__['certutil.get_stored_cert_serials'](store)
if cert_serial not in serials:
out = __salt__['certutil.add_store'](name, store)
if "successfully" in out:
ret['changes']['added'] = name
else:
ret['result'] = False
ret['comment'] += "Failed to store certificate {0}".format(name)
else:
ret['comment'] += "{0} already stored.".format(name)
return ret | Store a certificate to the given store
name
The certificate to store, this can use local paths
or salt:// paths
store
The store to add the certificate to
saltenv
The salt environment to use, this is ignored if a local
path is specified | Below is the the instruction that describes the task:
### Input:
Store a certificate to the given store
name
The certificate to store, this can use local paths
or salt:// paths
store
The store to add the certificate to
saltenv
The salt environment to use, this is ignored if a local
path is specified
### Response:
def add_store(name, store, saltenv='base'):
'''
Store a certificate to the given store
name
The certificate to store, this can use local paths
or salt:// paths
store
The store to add the certificate to
saltenv
The salt environment to use, this is ignored if a local
path is specified
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
cert_file = __salt__['cp.cache_file'](name, saltenv)
if cert_file is False:
ret['result'] = False
ret['comment'] += 'Certificate file not found.'
else:
cert_serial = __salt__['certutil.get_cert_serial'](cert_file)
serials = __salt__['certutil.get_stored_cert_serials'](store)
if cert_serial not in serials:
out = __salt__['certutil.add_store'](name, store)
if "successfully" in out:
ret['changes']['added'] = name
else:
ret['result'] = False
ret['comment'] += "Failed to store certificate {0}".format(name)
else:
ret['comment'] += "{0} already stored.".format(name)
return ret |
def _data_frame(content):
"""
Helper funcation that converts text-based get response
to a pandas dataframe for additional manipulation.
"""
response = loads(content)
key = [x for x in response.keys() if x in c.response_data][0]
frame = DataFrame(response[key])
final_frame = _convert(frame)
return final_frame | Helper funcation that converts text-based get response
to a pandas dataframe for additional manipulation. | Below is the the instruction that describes the task:
### Input:
Helper funcation that converts text-based get response
to a pandas dataframe for additional manipulation.
### Response:
def _data_frame(content):
"""
Helper funcation that converts text-based get response
to a pandas dataframe for additional manipulation.
"""
response = loads(content)
key = [x for x in response.keys() if x in c.response_data][0]
frame = DataFrame(response[key])
final_frame = _convert(frame)
return final_frame |
def from_spec(spec, kwargs=None):
"""
Creates a baseline from a specification dict.
"""
baseline = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.baselines.baselines,
kwargs=kwargs
)
assert isinstance(baseline, Baseline)
return baseline | Creates a baseline from a specification dict. | Below is the the instruction that describes the task:
### Input:
Creates a baseline from a specification dict.
### Response:
def from_spec(spec, kwargs=None):
"""
Creates a baseline from a specification dict.
"""
baseline = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.baselines.baselines,
kwargs=kwargs
)
assert isinstance(baseline, Baseline)
return baseline |
def get_payment_token_by_id(cls, payment_token_id, **kwargs):
"""Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
else:
(data) = cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
return data | Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread.
### Response:
def get_payment_token_by_id(cls, payment_token_id, **kwargs):
"""Find PaymentToken
Return single instance of PaymentToken by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_payment_token_by_id(payment_token_id, async=True)
>>> result = thread.get()
:param async bool
:param str payment_token_id: ID of paymentToken to return (required)
:return: PaymentToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
else:
(data) = cls._get_payment_token_by_id_with_http_info(payment_token_id, **kwargs)
return data |
def from_edges(edges, source_key='source', target_key='target', weight_key='weight', directed=True):
"""
Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph.
"""
raw = list(map(lambda x: [x[source_key], x[target_key], int(x[weight_key])], edges))
g = IGraph.TupleList(raw, weights=True, directed=directed)
g.vs['indegree'] = g.degree(mode="in")
g.vs['outdegree'] = g.degree(mode="out")
g.vs['label'] = g.vs['name']
if 'group' not in g.vs.attributes():
g.vs['group'] = labels_to_groups(g.vs['label'])
return g | Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph. | Below is the the instruction that describes the task:
### Input:
Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph.
### Response:
def from_edges(edges, source_key='source', target_key='target', weight_key='weight', directed=True):
"""
Given a List of Dictionaries with source, target, and weight attributes, return a weighted, directed graph.
"""
raw = list(map(lambda x: [x[source_key], x[target_key], int(x[weight_key])], edges))
g = IGraph.TupleList(raw, weights=True, directed=directed)
g.vs['indegree'] = g.degree(mode="in")
g.vs['outdegree'] = g.degree(mode="out")
g.vs['label'] = g.vs['name']
if 'group' not in g.vs.attributes():
g.vs['group'] = labels_to_groups(g.vs['label'])
return g |
def create_volume(self, volume_name: str, driver_spec: str = None):
"""Create new docker volumes.
Only the manager nodes can create a volume
Args:
volume_name (string): Name for the new docker volume
driver_spec (string): Driver for the docker volume
"""
# Default values
if driver_spec:
driver = driver_spec
else:
driver = 'local'
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be deleted '
'on swarm manager nodes')
self._client.volumes.create(name=volume_name, driver=driver) | Create new docker volumes.
Only the manager nodes can create a volume
Args:
volume_name (string): Name for the new docker volume
driver_spec (string): Driver for the docker volume | Below is the the instruction that describes the task:
### Input:
Create new docker volumes.
Only the manager nodes can create a volume
Args:
volume_name (string): Name for the new docker volume
driver_spec (string): Driver for the docker volume
### Response:
def create_volume(self, volume_name: str, driver_spec: str = None):
"""Create new docker volumes.
Only the manager nodes can create a volume
Args:
volume_name (string): Name for the new docker volume
driver_spec (string): Driver for the docker volume
"""
# Default values
if driver_spec:
driver = driver_spec
else:
driver = 'local'
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Services can only be deleted '
'on swarm manager nodes')
self._client.volumes.create(name=volume_name, driver=driver) |
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False | Stores a password in a keychain for a particular environment and
configuration parameter pair. | Below is the the instruction that describes the task:
### Input:
Stores a password in a keychain for a particular environment and
configuration parameter pair.
### Response:
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False |
def _apply_worksheet_template_duplicate_analyses(self, wst):
"""Add duplicate analyses to worksheet according to the worksheet template
layout passed in w/o overwrite slots that are already filled.
If the slot where the duplicate must be located is available, but the
slot where the routine analysis should be found is empty, no duplicate
will be generated for that given slot.
:param wst: worksheet template used as the layout
:returns: None
"""
wst_layout = wst.getLayout()
for row in wst_layout:
if row['type'] != 'd':
continue
src_pos = to_int(row['dup'])
dest_pos = to_int(row['pos'])
self.addDuplicateAnalyses(src_pos, dest_pos) | Add duplicate analyses to worksheet according to the worksheet template
layout passed in w/o overwrite slots that are already filled.
If the slot where the duplicate must be located is available, but the
slot where the routine analysis should be found is empty, no duplicate
will be generated for that given slot.
:param wst: worksheet template used as the layout
:returns: None | Below is the the instruction that describes the task:
### Input:
Add duplicate analyses to worksheet according to the worksheet template
layout passed in w/o overwrite slots that are already filled.
If the slot where the duplicate must be located is available, but the
slot where the routine analysis should be found is empty, no duplicate
will be generated for that given slot.
:param wst: worksheet template used as the layout
:returns: None
### Response:
def _apply_worksheet_template_duplicate_analyses(self, wst):
"""Add duplicate analyses to worksheet according to the worksheet template
layout passed in w/o overwrite slots that are already filled.
If the slot where the duplicate must be located is available, but the
slot where the routine analysis should be found is empty, no duplicate
will be generated for that given slot.
:param wst: worksheet template used as the layout
:returns: None
"""
wst_layout = wst.getLayout()
for row in wst_layout:
if row['type'] != 'd':
continue
src_pos = to_int(row['dup'])
dest_pos = to_int(row['pos'])
self.addDuplicateAnalyses(src_pos, dest_pos) |
def orderedclasses(self, set_uri_or_id=None, nestedhierarchy=False):
"""Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder`"""
classes = self.classes(set_uri_or_id, nestedhierarchy)
for classid in self.classorder(classes):
yield classes[classid] | Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder` | Below is the the instruction that describes the task:
### Input:
Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder`
### Response:
def orderedclasses(self, set_uri_or_id=None, nestedhierarchy=False):
"""Higher-order generator function that yields class information in the right order, combines calls to :meth:`SetDefinition.classes` and :meth:`SetDefinition.classorder`"""
classes = self.classes(set_uri_or_id, nestedhierarchy)
for classid in self.classorder(classes):
yield classes[classid] |
def onscroll(self, event):
"""Action to be taken when an event is triggered.
Event is scroll of the mouse's wheel. This leads to changing the temporal frame displayed.
:param event: Scroll of mouse wheel
"""
if event.button == 'up':
self.ind = (self.ind + 1) % self.slices
else:
self.ind = (self.ind - 1) % self.slices
self.update() | Action to be taken when an event is triggered.
Event is scroll of the mouse's wheel. This leads to changing the temporal frame displayed.
:param event: Scroll of mouse wheel | Below is the the instruction that describes the task:
### Input:
Action to be taken when an event is triggered.
Event is scroll of the mouse's wheel. This leads to changing the temporal frame displayed.
:param event: Scroll of mouse wheel
### Response:
def onscroll(self, event):
"""Action to be taken when an event is triggered.
Event is scroll of the mouse's wheel. This leads to changing the temporal frame displayed.
:param event: Scroll of mouse wheel
"""
if event.button == 'up':
self.ind = (self.ind + 1) % self.slices
else:
self.ind = (self.ind - 1) % self.slices
self.update() |
def get_verified_jwt(
providers, audiences,
check_authorization_header=True, check_query_arg=True,
request=None, cache=memcache):
"""
This function will extract, verify, and parse a JWT token from the
Authorization header or access_token query argument.
The JWT is assumed to contain an issuer and audience claim, as well
as issued-at and expiration timestamps. The signature will be
cryptographically verified, the claims and timestamps will be
checked, and the resulting parsed JWT body is returned.
If at any point the JWT is missing or found to be invalid, the
return result will be None.
Arguments:
providers - An iterable of dicts each containing 'issuer' and 'cert_uri' keys
audiences - An iterable of valid audiences
check_authorization_header - Boolean; check 'Authorization: Bearer' header
check_query_arg - Boolean; check 'access_token' query arg
request - Must be the request object if check_query_arg is true; otherwise ignored.
cache - In testing, override the certificate cache
"""
if not (check_authorization_header or check_query_arg):
raise ValueError(
'Either check_authorization_header or check_query_arg must be True.')
if check_query_arg and request is None:
raise ValueError(
'Cannot check query arg without request object.')
schemes = ('Bearer',) if check_authorization_header else ()
keys = ('access_token',) if check_query_arg else ()
token = _get_token(
request=request, allowed_auth_schemes=schemes, allowed_query_keys=keys)
if token is None:
return None
time_now = long(time.time())
for provider in providers:
parsed_token = _parse_and_verify_jwt(
token, time_now, (provider['issuer'],), audiences, provider['cert_uri'], cache)
if parsed_token is not None:
return parsed_token
return None | This function will extract, verify, and parse a JWT token from the
Authorization header or access_token query argument.
The JWT is assumed to contain an issuer and audience claim, as well
as issued-at and expiration timestamps. The signature will be
cryptographically verified, the claims and timestamps will be
checked, and the resulting parsed JWT body is returned.
If at any point the JWT is missing or found to be invalid, the
return result will be None.
Arguments:
providers - An iterable of dicts each containing 'issuer' and 'cert_uri' keys
audiences - An iterable of valid audiences
check_authorization_header - Boolean; check 'Authorization: Bearer' header
check_query_arg - Boolean; check 'access_token' query arg
request - Must be the request object if check_query_arg is true; otherwise ignored.
cache - In testing, override the certificate cache | Below is the the instruction that describes the task:
### Input:
This function will extract, verify, and parse a JWT token from the
Authorization header or access_token query argument.
The JWT is assumed to contain an issuer and audience claim, as well
as issued-at and expiration timestamps. The signature will be
cryptographically verified, the claims and timestamps will be
checked, and the resulting parsed JWT body is returned.
If at any point the JWT is missing or found to be invalid, the
return result will be None.
Arguments:
providers - An iterable of dicts each containing 'issuer' and 'cert_uri' keys
audiences - An iterable of valid audiences
check_authorization_header - Boolean; check 'Authorization: Bearer' header
check_query_arg - Boolean; check 'access_token' query arg
request - Must be the request object if check_query_arg is true; otherwise ignored.
cache - In testing, override the certificate cache
### Response:
def get_verified_jwt(
providers, audiences,
check_authorization_header=True, check_query_arg=True,
request=None, cache=memcache):
"""
This function will extract, verify, and parse a JWT token from the
Authorization header or access_token query argument.
The JWT is assumed to contain an issuer and audience claim, as well
as issued-at and expiration timestamps. The signature will be
cryptographically verified, the claims and timestamps will be
checked, and the resulting parsed JWT body is returned.
If at any point the JWT is missing or found to be invalid, the
return result will be None.
Arguments:
providers - An iterable of dicts each containing 'issuer' and 'cert_uri' keys
audiences - An iterable of valid audiences
check_authorization_header - Boolean; check 'Authorization: Bearer' header
check_query_arg - Boolean; check 'access_token' query arg
request - Must be the request object if check_query_arg is true; otherwise ignored.
cache - In testing, override the certificate cache
"""
if not (check_authorization_header or check_query_arg):
raise ValueError(
'Either check_authorization_header or check_query_arg must be True.')
if check_query_arg and request is None:
raise ValueError(
'Cannot check query arg without request object.')
schemes = ('Bearer',) if check_authorization_header else ()
keys = ('access_token',) if check_query_arg else ()
token = _get_token(
request=request, allowed_auth_schemes=schemes, allowed_query_keys=keys)
if token is None:
return None
time_now = long(time.time())
for provider in providers:
parsed_token = _parse_and_verify_jwt(
token, time_now, (provider['issuer'],), audiences, provider['cert_uri'], cache)
if parsed_token is not None:
return parsed_token
return None |
def find_tags_from_xml(
self, xml_name, tag_name, **attribute_filter
):
"""
Return a list of all the matched tags in a specific xml
w
:param str xml_name: specify from which xml to pick the tag from
:param str tag_name: specify the tag name
"""
xml = self.xml[xml_name]
if xml is None:
return []
if xml.tag == tag_name:
if self.is_tag_matched(
xml.tag, **attribute_filter
):
return [xml]
return []
tags = xml.findall(".//" + tag_name)
return [
tag for tag in tags if self.is_tag_matched(
tag, **attribute_filter
)
] | Return a list of all the matched tags in a specific xml
w
:param str xml_name: specify from which xml to pick the tag from
:param str tag_name: specify the tag name | Below is the the instruction that describes the task:
### Input:
Return a list of all the matched tags in a specific xml
w
:param str xml_name: specify from which xml to pick the tag from
:param str tag_name: specify the tag name
### Response:
def find_tags_from_xml(
self, xml_name, tag_name, **attribute_filter
):
"""
Return a list of all the matched tags in a specific xml
w
:param str xml_name: specify from which xml to pick the tag from
:param str tag_name: specify the tag name
"""
xml = self.xml[xml_name]
if xml is None:
return []
if xml.tag == tag_name:
if self.is_tag_matched(
xml.tag, **attribute_filter
):
return [xml]
return []
tags = xml.findall(".//" + tag_name)
return [
tag for tag in tags if self.is_tag_matched(
tag, **attribute_filter
)
] |
def get_versions(name,
default_string=DEFAULT_STRING_NOT_FOUND,
default_tuple=DEFAULT_TUPLE_NOT_FOUND,
allow_ambiguous=True):
"""
Get string and tuple versions from installed package information
It will return :attr:`default_string` and :attr:`default_tuple` values when
the named package is not installed.
Parameters
-----------
name : string
An application name used to install via setuptools.
default : string
A default returning value used when the named application is not
installed yet
default_tuple : tuple
A default returning value used when the named application is not
installed yet
allow_ambiguous : boolean
``True`` for allowing ambiguous version information.
Returns
--------
tuple
A version string and version tuple
Examples
--------
>>> import re
>>> v1, v2 = get_versions('app_version', allow_ambiguous=True)
>>> isinstance(v1, str)
True
>>> isinstance(v2, tuple)
True
>>> get_versions('distribution_which_is_not_installed')
('Please install this application with setup.py', (0, 0, 0))
"""
version_string = get_string_version(name, default_string, allow_ambiguous)
version_tuple = get_tuple_version(name, default_tuple, allow_ambiguous)
return version_string, version_tuple | Get string and tuple versions from installed package information
It will return :attr:`default_string` and :attr:`default_tuple` values when
the named package is not installed.
Parameters
-----------
name : string
An application name used to install via setuptools.
default : string
A default returning value used when the named application is not
installed yet
default_tuple : tuple
A default returning value used when the named application is not
installed yet
allow_ambiguous : boolean
``True`` for allowing ambiguous version information.
Returns
--------
tuple
A version string and version tuple
Examples
--------
>>> import re
>>> v1, v2 = get_versions('app_version', allow_ambiguous=True)
>>> isinstance(v1, str)
True
>>> isinstance(v2, tuple)
True
>>> get_versions('distribution_which_is_not_installed')
('Please install this application with setup.py', (0, 0, 0)) | Below is the the instruction that describes the task:
### Input:
Get string and tuple versions from installed package information
It will return :attr:`default_string` and :attr:`default_tuple` values when
the named package is not installed.
Parameters
-----------
name : string
An application name used to install via setuptools.
default : string
A default returning value used when the named application is not
installed yet
default_tuple : tuple
A default returning value used when the named application is not
installed yet
allow_ambiguous : boolean
``True`` for allowing ambiguous version information.
Returns
--------
tuple
A version string and version tuple
Examples
--------
>>> import re
>>> v1, v2 = get_versions('app_version', allow_ambiguous=True)
>>> isinstance(v1, str)
True
>>> isinstance(v2, tuple)
True
>>> get_versions('distribution_which_is_not_installed')
('Please install this application with setup.py', (0, 0, 0))
### Response:
def get_versions(name,
default_string=DEFAULT_STRING_NOT_FOUND,
default_tuple=DEFAULT_TUPLE_NOT_FOUND,
allow_ambiguous=True):
"""
Get string and tuple versions from installed package information
It will return :attr:`default_string` and :attr:`default_tuple` values when
the named package is not installed.
Parameters
-----------
name : string
An application name used to install via setuptools.
default : string
A default returning value used when the named application is not
installed yet
default_tuple : tuple
A default returning value used when the named application is not
installed yet
allow_ambiguous : boolean
``True`` for allowing ambiguous version information.
Returns
--------
tuple
A version string and version tuple
Examples
--------
>>> import re
>>> v1, v2 = get_versions('app_version', allow_ambiguous=True)
>>> isinstance(v1, str)
True
>>> isinstance(v2, tuple)
True
>>> get_versions('distribution_which_is_not_installed')
('Please install this application with setup.py', (0, 0, 0))
"""
version_string = get_string_version(name, default_string, allow_ambiguous)
version_tuple = get_tuple_version(name, default_tuple, allow_ambiguous)
return version_string, version_tuple |
def reference_preprocessing(job, samples, config):
"""
Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
"""
job.fileStore.logToMaster('Processed reference files')
config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
job.addFollowOnJobFn(map_job, download_sample, samples, config) | Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information | Below is the the instruction that describes the task:
### Input:
Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
### Response:
def reference_preprocessing(job, samples, config):
"""
Spawn the jobs that create index and dict file for reference
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param list[list] samples: A nested list of samples containing sample information
"""
job.fileStore.logToMaster('Processed reference files')
config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
job.addFollowOnJobFn(map_job, download_sample, samples, config) |
def get_state_variable_from_storage(
self, address: str, params: Optional[List[str]] = None
) -> str:
"""
Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value
"""
params = params or []
(position, length, mappings) = (0, 1, [])
try:
if params[0] == "mapping":
if len(params) < 3:
raise CriticalError("Invalid number of parameters.")
position = int(params[1])
position_formatted = utils.zpad(utils.int_to_big_endian(position), 32)
for i in range(2, len(params)):
key = bytes(params[i], "utf8")
key_formatted = utils.rzpad(key, 32)
mappings.append(
int.from_bytes(
utils.sha3(key_formatted + position_formatted),
byteorder="big",
)
)
length = len(mappings)
if length == 1:
position = mappings[0]
else:
if len(params) >= 4:
raise CriticalError("Invalid number of parameters.")
if len(params) >= 1:
position = int(params[0])
if len(params) >= 2:
length = int(params[1])
if len(params) == 3 and params[2] == "array":
position_formatted = utils.zpad(
utils.int_to_big_endian(position), 32
)
position = int.from_bytes(
utils.sha3(position_formatted), byteorder="big"
)
except ValueError:
raise CriticalError(
"Invalid storage index. Please provide a numeric value."
)
outtxt = []
try:
if length == 1:
outtxt.append(
"{}: {}".format(
position, self.eth.eth_getStorageAt(address, position)
)
)
else:
if len(mappings) > 0:
for i in range(0, len(mappings)):
position = mappings[i]
outtxt.append(
"{}: {}".format(
hex(position),
self.eth.eth_getStorageAt(address, position),
)
)
else:
for i in range(position, position + length):
outtxt.append(
"{}: {}".format(
hex(i), self.eth.eth_getStorageAt(address, i)
)
)
except FileNotFoundError as e:
raise CriticalError("IPC error: " + str(e))
except ConnectionError:
raise CriticalError(
"Could not connect to RPC server. "
"Make sure that your node is running and that RPC parameters are set correctly."
)
return "\n".join(outtxt) | Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value | Below is the the instruction that describes the task:
### Input:
Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value
### Response:
def get_state_variable_from_storage(
self, address: str, params: Optional[List[str]] = None
) -> str:
"""
Get variables from the storage
:param address: The contract address
:param params: The list of parameters
param types: [position, length] or ["mapping", position, key1, key2, ... ]
or [position, length, array]
:return: The corresponding storage slot and its value
"""
params = params or []
(position, length, mappings) = (0, 1, [])
try:
if params[0] == "mapping":
if len(params) < 3:
raise CriticalError("Invalid number of parameters.")
position = int(params[1])
position_formatted = utils.zpad(utils.int_to_big_endian(position), 32)
for i in range(2, len(params)):
key = bytes(params[i], "utf8")
key_formatted = utils.rzpad(key, 32)
mappings.append(
int.from_bytes(
utils.sha3(key_formatted + position_formatted),
byteorder="big",
)
)
length = len(mappings)
if length == 1:
position = mappings[0]
else:
if len(params) >= 4:
raise CriticalError("Invalid number of parameters.")
if len(params) >= 1:
position = int(params[0])
if len(params) >= 2:
length = int(params[1])
if len(params) == 3 and params[2] == "array":
position_formatted = utils.zpad(
utils.int_to_big_endian(position), 32
)
position = int.from_bytes(
utils.sha3(position_formatted), byteorder="big"
)
except ValueError:
raise CriticalError(
"Invalid storage index. Please provide a numeric value."
)
outtxt = []
try:
if length == 1:
outtxt.append(
"{}: {}".format(
position, self.eth.eth_getStorageAt(address, position)
)
)
else:
if len(mappings) > 0:
for i in range(0, len(mappings)):
position = mappings[i]
outtxt.append(
"{}: {}".format(
hex(position),
self.eth.eth_getStorageAt(address, position),
)
)
else:
for i in range(position, position + length):
outtxt.append(
"{}: {}".format(
hex(i), self.eth.eth_getStorageAt(address, i)
)
)
except FileNotFoundError as e:
raise CriticalError("IPC error: " + str(e))
except ConnectionError:
raise CriticalError(
"Could not connect to RPC server. "
"Make sure that your node is running and that RPC parameters are set correctly."
)
return "\n".join(outtxt) |
def set_irc_targets(self, bot, *targets):
"""Add a irc Handler using bot and log to targets (can be nicks or
channels:
..
>>> bot = None
.. code-block:: python
>>> log = logging.getLogger('irc.mymodule')
>>> log.set_irc_targets(bot, '#chan', 'admin')
"""
# get formatter initialized by config (usualy on a NullHandler)
ll = logging.getLogger('irc')
formatter = ll.handlers[0].formatter
# add a handler for the sub logger
handler = Handler(bot, *targets)
handler.setFormatter(formatter)
self.addHandler(handler) | Add a irc Handler using bot and log to targets (can be nicks or
channels:
..
>>> bot = None
.. code-block:: python
>>> log = logging.getLogger('irc.mymodule')
>>> log.set_irc_targets(bot, '#chan', 'admin') | Below is the the instruction that describes the task:
### Input:
Add a irc Handler using bot and log to targets (can be nicks or
channels:
..
>>> bot = None
.. code-block:: python
>>> log = logging.getLogger('irc.mymodule')
>>> log.set_irc_targets(bot, '#chan', 'admin')
### Response:
def set_irc_targets(self, bot, *targets):
"""Add a irc Handler using bot and log to targets (can be nicks or
channels:
..
>>> bot = None
.. code-block:: python
>>> log = logging.getLogger('irc.mymodule')
>>> log.set_irc_targets(bot, '#chan', 'admin')
"""
# get formatter initialized by config (usualy on a NullHandler)
ll = logging.getLogger('irc')
formatter = ll.handlers[0].formatter
# add a handler for the sub logger
handler = Handler(bot, *targets)
handler.setFormatter(formatter)
self.addHandler(handler) |
def load(self, client, webpy_app, course_factory, task_factory, database, user_manager, submission_manager, config):
""" Loads the plugin manager. Must be done after the initialisation of the client """
self._app = webpy_app
self._task_factory = task_factory
self._database = database
self._user_manager = user_manager
self._submission_manager = submission_manager
self._loaded = True
for entry in config:
module = importlib.import_module(entry["plugin_module"])
module.init(self, course_factory, client, entry) | Loads the plugin manager. Must be done after the initialisation of the client | Below is the the instruction that describes the task:
### Input:
Loads the plugin manager. Must be done after the initialisation of the client
### Response:
def load(self, client, webpy_app, course_factory, task_factory, database, user_manager, submission_manager, config):
""" Loads the plugin manager. Must be done after the initialisation of the client """
self._app = webpy_app
self._task_factory = task_factory
self._database = database
self._user_manager = user_manager
self._submission_manager = submission_manager
self._loaded = True
for entry in config:
module = importlib.import_module(entry["plugin_module"])
module.init(self, course_factory, client, entry) |
def temperature_effectiveness_TEMA_J(R1, NTU1, Ntp):
r'''Returns temperature effectiveness `P1` of a TEMA J type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. The supported cases are as follows:
* One tube pass (shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Four tube passes (shell fluid mixed, tube pass mixed between passes)
For 1-1 TEMA J shell and tube exchangers, shell and tube fluids mixed:
.. math::
P_1 = \frac{1}{R_1}\left[1- \frac{(2-R_1)(2E + R_1 B)}{(2+R_1)
(2E - R_1/B)}\right]
For 1-2 TEMA J, shell and tube fluids mixed. There are two possible
arrangements for the flow and the number of tube passes, but the equation
is the same in both:
.. math::
P_1 = \left[1 + \frac{R_1}{2} + \lambda B - 2\lambda C D\right]^{-1}
B = \frac{(A^\lambda +1)}{A^\lambda -1}
C = \frac{A^{(1 + \lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
\lambda = (1 + R_1^2/4)^{0.5}
For 1-4 TEMA J, shell and tube exchanger with both sides mixed:
.. math::
P_1 = \left[1 + \frac{R_1}{4}\left(\frac{1+3E}{1+E}\right) + \lambda B
- 2 \lambda C D\right]^{-1}
B = \frac{A^\lambda +1}{A^\lambda -1}
C = \frac{A^{(1+\lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
E = \exp(R_1 NTU_1/2)
\lambda = (1 + R_1^2/16)^{0.5}
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, 2, or 4, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes that are not 1, 2, or 4, an exception is raised.
The convention for the formulas in [1]_ and [3]_ are with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=1)
0.5699085193651295
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
if Ntp == 1:
A = exp(NTU1)
B = exp(-NTU1*R1/2.)
if R1 != 2:
P1 = 1./R1*(1. - (2. - R1)*(2.*A + R1*B)/(2. + R1)/(2.*A - R1/B))
else:
P1 = 0.5*(1. - (1. + A**-2)/2./(1. + NTU1))
elif Ntp == 2:
lambda1 = (1. + R1*R1/4.)**0.5
A = exp(NTU1)
D = 1. + lambda1*A**((lambda1 - 1.)/2.)/(A**lambda1 - 1.)
C = A**((1+lambda1)/2.)/(lambda1 - 1. + (1. + lambda1)*A**lambda1)
B = (A**lambda1 + 1.)/(A**lambda1 - 1.)
P1 = 1./(1. + R1/2. + lambda1*B - 2.*lambda1*C*D)
elif Ntp == 4:
lambda1 = (1. + R1**2/16.)**0.5
E = exp(R1*NTU1/2.)
A = exp(NTU1)
D = 1. + lambda1*A**((lambda1-1)/2.)/(A**lambda1-1.)
C = A**((1+lambda1)/2.)/(lambda1 - 1. + (1. + lambda1)*A**lambda1)
B = (A**lambda1 + 1.)/(A**lambda1-1)
P1 = 1./(1. + R1/4.*(1. + 3.*E)/(1. + E) + lambda1*B - 2.*lambda1*C*D)
else:
raise Exception('Supported numbers of tube passes are 1, 2, and 4.')
return P1 | r'''Returns temperature effectiveness `P1` of a TEMA J type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. The supported cases are as follows:
* One tube pass (shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Four tube passes (shell fluid mixed, tube pass mixed between passes)
For 1-1 TEMA J shell and tube exchangers, shell and tube fluids mixed:
.. math::
P_1 = \frac{1}{R_1}\left[1- \frac{(2-R_1)(2E + R_1 B)}{(2+R_1)
(2E - R_1/B)}\right]
For 1-2 TEMA J, shell and tube fluids mixed. There are two possible
arrangements for the flow and the number of tube passes, but the equation
is the same in both:
.. math::
P_1 = \left[1 + \frac{R_1}{2} + \lambda B - 2\lambda C D\right]^{-1}
B = \frac{(A^\lambda +1)}{A^\lambda -1}
C = \frac{A^{(1 + \lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
\lambda = (1 + R_1^2/4)^{0.5}
For 1-4 TEMA J, shell and tube exchanger with both sides mixed:
.. math::
P_1 = \left[1 + \frac{R_1}{4}\left(\frac{1+3E}{1+E}\right) + \lambda B
- 2 \lambda C D\right]^{-1}
B = \frac{A^\lambda +1}{A^\lambda -1}
C = \frac{A^{(1+\lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
E = \exp(R_1 NTU_1/2)
\lambda = (1 + R_1^2/16)^{0.5}
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, 2, or 4, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes that are not 1, 2, or 4, an exception is raised.
The convention for the formulas in [1]_ and [3]_ are with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=1)
0.5699085193651295
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998. | Below is the the instruction that describes the task:
### Input:
r'''Returns temperature effectiveness `P1` of a TEMA J type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. The supported cases are as follows:
* One tube pass (shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Four tube passes (shell fluid mixed, tube pass mixed between passes)
For 1-1 TEMA J shell and tube exchangers, shell and tube fluids mixed:
.. math::
P_1 = \frac{1}{R_1}\left[1- \frac{(2-R_1)(2E + R_1 B)}{(2+R_1)
(2E - R_1/B)}\right]
For 1-2 TEMA J, shell and tube fluids mixed. There are two possible
arrangements for the flow and the number of tube passes, but the equation
is the same in both:
.. math::
P_1 = \left[1 + \frac{R_1}{2} + \lambda B - 2\lambda C D\right]^{-1}
B = \frac{(A^\lambda +1)}{A^\lambda -1}
C = \frac{A^{(1 + \lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
\lambda = (1 + R_1^2/4)^{0.5}
For 1-4 TEMA J, shell and tube exchanger with both sides mixed:
.. math::
P_1 = \left[1 + \frac{R_1}{4}\left(\frac{1+3E}{1+E}\right) + \lambda B
- 2 \lambda C D\right]^{-1}
B = \frac{A^\lambda +1}{A^\lambda -1}
C = \frac{A^{(1+\lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
E = \exp(R_1 NTU_1/2)
\lambda = (1 + R_1^2/16)^{0.5}
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, 2, or 4, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes that are not 1, 2, or 4, an exception is raised.
The convention for the formulas in [1]_ and [3]_ are with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=1)
0.5699085193651295
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
### Response:
def temperature_effectiveness_TEMA_J(R1, NTU1, Ntp):
r'''Returns temperature effectiveness `P1` of a TEMA J type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and of number of tube passes `Ntp`. The supported cases are as follows:
* One tube pass (shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Four tube passes (shell fluid mixed, tube pass mixed between passes)
For 1-1 TEMA J shell and tube exchangers, shell and tube fluids mixed:
.. math::
P_1 = \frac{1}{R_1}\left[1- \frac{(2-R_1)(2E + R_1 B)}{(2+R_1)
(2E - R_1/B)}\right]
For 1-2 TEMA J, shell and tube fluids mixed. There are two possible
arrangements for the flow and the number of tube passes, but the equation
is the same in both:
.. math::
P_1 = \left[1 + \frac{R_1}{2} + \lambda B - 2\lambda C D\right]^{-1}
B = \frac{(A^\lambda +1)}{A^\lambda -1}
C = \frac{A^{(1 + \lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
\lambda = (1 + R_1^2/4)^{0.5}
For 1-4 TEMA J, shell and tube exchanger with both sides mixed:
.. math::
P_1 = \left[1 + \frac{R_1}{4}\left(\frac{1+3E}{1+E}\right) + \lambda B
- 2 \lambda C D\right]^{-1}
B = \frac{A^\lambda +1}{A^\lambda -1}
C = \frac{A^{(1+\lambda)/2}}{\lambda - 1 + (1 + \lambda)A^\lambda}
D = 1 + \frac{\lambda A^{(\lambda-1)/2}}{A^\lambda -1}
A = \exp(NTU_1)
E = \exp(R_1 NTU_1/2)
\lambda = (1 + R_1^2/16)^{0.5}
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, 2, or 4, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes that are not 1, 2, or 4, an exception is raised.
The convention for the formulas in [1]_ and [3]_ are with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_J(R1=1/3., NTU1=1., Ntp=1)
0.5699085193651295
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
if Ntp == 1:
A = exp(NTU1)
B = exp(-NTU1*R1/2.)
if R1 != 2:
P1 = 1./R1*(1. - (2. - R1)*(2.*A + R1*B)/(2. + R1)/(2.*A - R1/B))
else:
P1 = 0.5*(1. - (1. + A**-2)/2./(1. + NTU1))
elif Ntp == 2:
lambda1 = (1. + R1*R1/4.)**0.5
A = exp(NTU1)
D = 1. + lambda1*A**((lambda1 - 1.)/2.)/(A**lambda1 - 1.)
C = A**((1+lambda1)/2.)/(lambda1 - 1. + (1. + lambda1)*A**lambda1)
B = (A**lambda1 + 1.)/(A**lambda1 - 1.)
P1 = 1./(1. + R1/2. + lambda1*B - 2.*lambda1*C*D)
elif Ntp == 4:
lambda1 = (1. + R1**2/16.)**0.5
E = exp(R1*NTU1/2.)
A = exp(NTU1)
D = 1. + lambda1*A**((lambda1-1)/2.)/(A**lambda1-1.)
C = A**((1+lambda1)/2.)/(lambda1 - 1. + (1. + lambda1)*A**lambda1)
B = (A**lambda1 + 1.)/(A**lambda1-1)
P1 = 1./(1. + R1/4.*(1. + 3.*E)/(1. + E) + lambda1*B - 2.*lambda1*C*D)
else:
raise Exception('Supported numbers of tube passes are 1, 2, and 4.')
return P1 |
def associated_parts(self, *args, **kwargs):
"""Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated to this task it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts()
"""
return (
self.parts(category=Category.MODEL, *args, **kwargs),
self.parts(category=Category.INSTANCE, *args, **kwargs)
) | Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated to this task it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts() | Below is the the instruction that describes the task:
### Input:
Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated to this task it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts()
### Response:
def associated_parts(self, *args, **kwargs):
"""Retrieve models and instances belonging to this activity.
This is a convenience method for the :func:`Activity.parts()` method, which is used to retrieve both the
`Category.MODEL` as well as the `Category.INSTANCE` in a tuple.
This call only returns the configured properties in an activity. So properties that are not configured
are not in the returned parts.
If you want to retrieve only the models associated to this task it is better to use:
`task.parts(category=Category.MODEL)`.
See :func:`pykechain.Client.parts` for additional available parameters.
:returns: a tuple(models of :class:`PartSet`, instances of :class:`PartSet`)
Example
-------
>>> task = project.activity('Specify Wheel Diameter')
>>> all_models, all_instances = task.associated_parts()
"""
return (
self.parts(category=Category.MODEL, *args, **kwargs),
self.parts(category=Category.INSTANCE, *args, **kwargs)
) |
def find(self, sub, start=0, end=None):
"""Test if elements contain substring.
Parameters
----------
sub : str
start : int, optional
Index to start searching from.
end : int, optional
Index to stop searching from.
Returns
-------
Series
"""
check_type(sub, str)
check_type(start, int)
check_type(end, int)
if end is not None and start >= end:
raise ValueError('End must be greater than start')
return Series(weld_str_find(self._data.values, sub, start, end),
self._data.index,
weld_to_numpy_dtype(WeldLong()),
self._data.name) | Test if elements contain substring.
Parameters
----------
sub : str
start : int, optional
Index to start searching from.
end : int, optional
Index to stop searching from.
Returns
-------
Series | Below is the the instruction that describes the task:
### Input:
Test if elements contain substring.
Parameters
----------
sub : str
start : int, optional
Index to start searching from.
end : int, optional
Index to stop searching from.
Returns
-------
Series
### Response:
def find(self, sub, start=0, end=None):
"""Test if elements contain substring.
Parameters
----------
sub : str
start : int, optional
Index to start searching from.
end : int, optional
Index to stop searching from.
Returns
-------
Series
"""
check_type(sub, str)
check_type(start, int)
check_type(end, int)
if end is not None and start >= end:
raise ValueError('End must be greater than start')
return Series(weld_str_find(self._data.values, sub, start, end),
self._data.index,
weld_to_numpy_dtype(WeldLong()),
self._data.name) |
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Read HMET WES from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into HmetRecords
with open(path, 'r') as hmetFile:
for line in hmetFile:
sline = line.strip().split()
try:
# Extract data time from record
dateTime = datetime(int(sline[0]), int(sline[1]), int(sline[2]), int(sline[3]))
# Intitialize GSSHAPY HmetRecord object
hmetRecord = HmetRecord(hmetDateTime=dateTime,
barometricPress=sline[4],
relHumidity=sline[5],
totalSkyCover=sline[6],
windSpeed=sline[7],
dryBulbTemp=sline[8],
directRad=sline[9],
globalRad=sline[10])
# Associate HmetRecord with HmetFile
hmetRecord.hmetFile = self
except:
pass | Read HMET WES from File Method | Below is the the instruction that describes the task:
### Input:
Read HMET WES from File Method
### Response:
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Read HMET WES from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into HmetRecords
with open(path, 'r') as hmetFile:
for line in hmetFile:
sline = line.strip().split()
try:
# Extract data time from record
dateTime = datetime(int(sline[0]), int(sline[1]), int(sline[2]), int(sline[3]))
# Intitialize GSSHAPY HmetRecord object
hmetRecord = HmetRecord(hmetDateTime=dateTime,
barometricPress=sline[4],
relHumidity=sline[5],
totalSkyCover=sline[6],
windSpeed=sline[7],
dryBulbTemp=sline[8],
directRad=sline[9],
globalRad=sline[10])
# Associate HmetRecord with HmetFile
hmetRecord.hmetFile = self
except:
pass |
def is_endpoint(G, node, strict=True):
"""
Return True if the node is a "real" endpoint of an edge in the network, \
otherwise False. OSM data includes lots of nodes that exist only as points \
to help streets bend around curves. An end point is a node that either: \
1) is its own neighbor, ie, it self-loops. \
2) or, has no incoming edges or no outgoing edges, ie, all its incident \
edges point inward or all its incident edges point outward. \
3) or, it does not have exactly two neighbors and degree of 2 or 4. \
4) or, if strict mode is false, if its edges have different OSM IDs. \
Parameters
----------
G : networkx multidigraph
node : int
the node to examine
strict : bool
if False, allow nodes to be end points even if they fail all other rules \
but have edges with different OSM IDs
Returns
-------
bool
"""
neighbors = set(list(G.predecessors(node)) + list(G.successors(node)))
n = len(neighbors)
d = G.degree(node)
if node in neighbors:
# if the node appears in its list of neighbors, it self-loops. this is
# always an endpoint.
return True
# if node has no incoming edges or no outgoing edges, it must be an endpoint
elif G.out_degree(node)==0 or G.in_degree(node)==0:
return True
elif not (n==2 and (d==2 or d==4)):
# else, if it does NOT have 2 neighbors AND either 2 or 4 directed
# edges, it is an endpoint. either it has 1 or 3+ neighbors, in which
# case it is a dead-end or an intersection of multiple streets or it has
# 2 neighbors but 3 degree (indicating a change from oneway to twoway)
# or more than 4 degree (indicating a parallel edge) and thus is an
# endpoint
return True
elif not strict:
# non-strict mode
osmids = []
# add all the edge OSM IDs for incoming edges
for u in G.predecessors(node):
for key in G[u][node]:
osmids.append(G.edges[u, node, key]['osmid'])
# add all the edge OSM IDs for outgoing edges
for v in G.successors(node):
for key in G[node][v]:
osmids.append(G.edges[node, v, key]['osmid'])
# if there is more than 1 OSM ID in the list of edge OSM IDs then it is
# an endpoint, if not, it isn't
return len(set(osmids)) > 1
else:
# if none of the preceding rules returned true, then it is not an endpoint
return False | Return True if the node is a "real" endpoint of an edge in the network, \
otherwise False. OSM data includes lots of nodes that exist only as points \
to help streets bend around curves. An end point is a node that either: \
1) is its own neighbor, ie, it self-loops. \
2) or, has no incoming edges or no outgoing edges, ie, all its incident \
edges point inward or all its incident edges point outward. \
3) or, it does not have exactly two neighbors and degree of 2 or 4. \
4) or, if strict mode is false, if its edges have different OSM IDs. \
Parameters
----------
G : networkx multidigraph
node : int
the node to examine
strict : bool
if False, allow nodes to be end points even if they fail all other rules \
but have edges with different OSM IDs
Returns
-------
bool | Below is the the instruction that describes the task:
### Input:
Return True if the node is a "real" endpoint of an edge in the network, \
otherwise False. OSM data includes lots of nodes that exist only as points \
to help streets bend around curves. An end point is a node that either: \
1) is its own neighbor, ie, it self-loops. \
2) or, has no incoming edges or no outgoing edges, ie, all its incident \
edges point inward or all its incident edges point outward. \
3) or, it does not have exactly two neighbors and degree of 2 or 4. \
4) or, if strict mode is false, if its edges have different OSM IDs. \
Parameters
----------
G : networkx multidigraph
node : int
the node to examine
strict : bool
if False, allow nodes to be end points even if they fail all other rules \
but have edges with different OSM IDs
Returns
-------
bool
### Response:
def is_endpoint(G, node, strict=True):
"""
Return True if the node is a "real" endpoint of an edge in the network, \
otherwise False. OSM data includes lots of nodes that exist only as points \
to help streets bend around curves. An end point is a node that either: \
1) is its own neighbor, ie, it self-loops. \
2) or, has no incoming edges or no outgoing edges, ie, all its incident \
edges point inward or all its incident edges point outward. \
3) or, it does not have exactly two neighbors and degree of 2 or 4. \
4) or, if strict mode is false, if its edges have different OSM IDs. \
Parameters
----------
G : networkx multidigraph
node : int
the node to examine
strict : bool
if False, allow nodes to be end points even if they fail all other rules \
but have edges with different OSM IDs
Returns
-------
bool
"""
neighbors = set(list(G.predecessors(node)) + list(G.successors(node)))
n = len(neighbors)
d = G.degree(node)
if node in neighbors:
# if the node appears in its list of neighbors, it self-loops. this is
# always an endpoint.
return True
# if node has no incoming edges or no outgoing edges, it must be an endpoint
elif G.out_degree(node)==0 or G.in_degree(node)==0:
return True
elif not (n==2 and (d==2 or d==4)):
# else, if it does NOT have 2 neighbors AND either 2 or 4 directed
# edges, it is an endpoint. either it has 1 or 3+ neighbors, in which
# case it is a dead-end or an intersection of multiple streets or it has
# 2 neighbors but 3 degree (indicating a change from oneway to twoway)
# or more than 4 degree (indicating a parallel edge) and thus is an
# endpoint
return True
elif not strict:
# non-strict mode
osmids = []
# add all the edge OSM IDs for incoming edges
for u in G.predecessors(node):
for key in G[u][node]:
osmids.append(G.edges[u, node, key]['osmid'])
# add all the edge OSM IDs for outgoing edges
for v in G.successors(node):
for key in G[node][v]:
osmids.append(G.edges[node, v, key]['osmid'])
# if there is more than 1 OSM ID in the list of edge OSM IDs then it is
# an endpoint, if not, it isn't
return len(set(osmids)) > 1
else:
# if none of the preceding rules returned true, then it is not an endpoint
return False |
def get_version(cls, name):
""" Checks a string for a possible version of an object (no prefix, no suffix).
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
# Dates can confuse th
# is stuff, so we'll check for that first and remove it from the string if found
try:
date = cls.get_date(name)
date = date['datetime'].strftime(date['format'])
except TypeError:
pass
return cls.get_version_naive(name, ignore=date or '') | Checks a string for a possible version of an object (no prefix, no suffix).
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches | Below is the the instruction that describes the task:
### Input:
Checks a string for a possible version of an object (no prefix, no suffix).
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
### Response:
def get_version(cls, name):
""" Checks a string for a possible version of an object (no prefix, no suffix).
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
# Dates can confuse th
# is stuff, so we'll check for that first and remove it from the string if found
try:
date = cls.get_date(name)
date = date['datetime'].strftime(date['format'])
except TypeError:
pass
return cls.get_version_naive(name, ignore=date or '') |
def load():
"""Load the active experiment."""
initialize_experiment_package(os.getcwd())
try:
try:
from dallinger_experiment import experiment
except ImportError:
from dallinger_experiment import dallinger_experiment as experiment
classes = inspect.getmembers(experiment, inspect.isclass)
for name, c in classes:
if "Experiment" in c.__bases__[0].__name__:
return c
else:
raise ImportError
except ImportError:
logger.error("Could not import experiment.")
raise | Load the active experiment. | Below is the the instruction that describes the task:
### Input:
Load the active experiment.
### Response:
def load():
"""Load the active experiment."""
initialize_experiment_package(os.getcwd())
try:
try:
from dallinger_experiment import experiment
except ImportError:
from dallinger_experiment import dallinger_experiment as experiment
classes = inspect.getmembers(experiment, inspect.isclass)
for name, c in classes:
if "Experiment" in c.__bases__[0].__name__:
return c
else:
raise ImportError
except ImportError:
logger.error("Could not import experiment.")
raise |
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose3_pbudget(self.glucose, budget) | Set limit on the number of propagations. | Below is the the instruction that describes the task:
### Input:
Set limit on the number of propagations.
### Response:
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose3_pbudget(self.glucose, budget) |
def get_recipes_in_node(node):
"""Gets the name of all recipes present in the run_list of a node"""
recipes = []
for elem in node.get('run_list', []):
if elem.startswith("recipe"):
recipe = elem.split('[')[1].split(']')[0]
recipes.append(recipe)
return recipes | Gets the name of all recipes present in the run_list of a node | Below is the the instruction that describes the task:
### Input:
Gets the name of all recipes present in the run_list of a node
### Response:
def get_recipes_in_node(node):
"""Gets the name of all recipes present in the run_list of a node"""
recipes = []
for elem in node.get('run_list', []):
if elem.startswith("recipe"):
recipe = elem.split('[')[1].split(']')[0]
recipes.append(recipe)
return recipes |
def createcommittee(ctx, url, account):
""" Setup a committee account for your account
"""
print_tx(ctx.bitshares.create_committee_member(url, account=account)) | Setup a committee account for your account | Below is the the instruction that describes the task:
### Input:
Setup a committee account for your account
### Response:
def createcommittee(ctx, url, account):
""" Setup a committee account for your account
"""
print_tx(ctx.bitshares.create_committee_member(url, account=account)) |
def do_heavy_work(self, block):
"""
Expects Compressor Block like objects
"""
cipher_key = self.gen_key(32)
in_file_path = block.latest_file_info.path
dst_file_path = block.processed_data_file_info.path + self.get_extension()
self.log.debug("Encrypting file '%s' with key '%s' to file '%s'",
in_file_path, cipher_key, dst_file_path)
self.encrypt_file(key=cipher_key,
in_filename=in_file_path,
out_filename=dst_file_path)
block.cipher_key = cipher_key
block.ciphered_file_info = FileInfo(dst_file_path)
block.latest_file_info = block.ciphered_file_info
return block | Expects Compressor Block like objects | Below is the the instruction that describes the task:
### Input:
Expects Compressor Block like objects
### Response:
def do_heavy_work(self, block):
"""
Expects Compressor Block like objects
"""
cipher_key = self.gen_key(32)
in_file_path = block.latest_file_info.path
dst_file_path = block.processed_data_file_info.path + self.get_extension()
self.log.debug("Encrypting file '%s' with key '%s' to file '%s'",
in_file_path, cipher_key, dst_file_path)
self.encrypt_file(key=cipher_key,
in_filename=in_file_path,
out_filename=dst_file_path)
block.cipher_key = cipher_key
block.ciphered_file_info = FileInfo(dst_file_path)
block.latest_file_info = block.ciphered_file_info
return block |
def expand(self, short):
"""Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
"""
data = dict(action='expand', shorturl=short)
jsondata = self._api_request(params=data)
return jsondata['longurl'] | Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error. | Below is the the instruction that describes the task:
### Input:
Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
### Response:
def expand(self, short):
"""Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
"""
data = dict(action='expand', shorturl=short)
jsondata = self._api_request(params=data)
return jsondata['longurl'] |
def _listen(sockets):
"""Main server loop. Listens for incoming events and dispatches them to appropriate chatroom"""
while True:
(i , o, e) = select.select(sockets.keys(),[],[],1)
for socket in i:
if isinstance(sockets[socket], Chatroom):
data_len = sockets[socket].client.Process(1)
if data_len is None or data_len == 0:
raise Exception('Disconnected from server')
#elif sockets[socket] == 'stdio':
# msg = sys.stdin.readline().rstrip('\r\n')
# logger.info('stdin: [%s]' % (msg,))
else:
raise Exception("Unknown socket type: %s" % repr(sockets[socket])) | Main server loop. Listens for incoming events and dispatches them to appropriate chatroom | Below is the the instruction that describes the task:
### Input:
Main server loop. Listens for incoming events and dispatches them to appropriate chatroom
### Response:
def _listen(sockets):
"""Main server loop. Listens for incoming events and dispatches them to appropriate chatroom"""
while True:
(i , o, e) = select.select(sockets.keys(),[],[],1)
for socket in i:
if isinstance(sockets[socket], Chatroom):
data_len = sockets[socket].client.Process(1)
if data_len is None or data_len == 0:
raise Exception('Disconnected from server')
#elif sockets[socket] == 'stdio':
# msg = sys.stdin.readline().rstrip('\r\n')
# logger.info('stdin: [%s]' % (msg,))
else:
raise Exception("Unknown socket type: %s" % repr(sockets[socket])) |
def install_deb(pkgname, url):
"""Install package from custom deb hosted on S3.
Return true if package was installed by this invocation."""
status = run("dpkg-query -W -f='${{Status}}' {p}; true".format(p=pkgname))
if ('installed' not in status) or ('not-installed' in status):
deb = url.rpartition('/')[2]
debtmp = '/tmp/{}'.format(deb)
run("wget --no-check-certificate -qc -O '{}' '{}'".format(debtmp, url))
sudo("dpkg -i '{0}' && rm -f '{0}'".format(debtmp))
return True
else:
return False | Install package from custom deb hosted on S3.
Return true if package was installed by this invocation. | Below is the the instruction that describes the task:
### Input:
Install package from custom deb hosted on S3.
Return true if package was installed by this invocation.
### Response:
def install_deb(pkgname, url):
"""Install package from custom deb hosted on S3.
Return true if package was installed by this invocation."""
status = run("dpkg-query -W -f='${{Status}}' {p}; true".format(p=pkgname))
if ('installed' not in status) or ('not-installed' in status):
deb = url.rpartition('/')[2]
debtmp = '/tmp/{}'.format(deb)
run("wget --no-check-certificate -qc -O '{}' '{}'".format(debtmp, url))
sudo("dpkg -i '{0}' && rm -f '{0}'".format(debtmp))
return True
else:
return False |
def _shift_cells(self, get_cells, get_deltas):
"""Handles cell shifting."""
# Don't do anything when there is an overlay.
if self.lost or self.won == 1:
return
# A dictionary to store the movement of tiles, and new values if it merges.
tile_moved = {}
for y, row in enumerate(self.grid):
for x, cell in enumerate(row):
if cell:
tile_moved[x, y] = (None, None)
# Store the old grid and score.
old_grid = [row[:] for row in self.grid]
old_score = self.score
self.old.append((old_grid, self.score))
if len(self.old) > 10:
self.old.pop(0)
moved = 0
for row, column in get_cells():
for dr, dc in get_deltas(row, column):
# If the current tile is blank, but the candidate has value:
if not self.grid[row][column] and self.grid[dr][dc]:
# Move the candidate to the current tile.
self.grid[row][column], self.grid[dr][dc] = self.grid[dr][dc], 0
moved += 1
tile_moved[dc, dr] = (column, row), None
if self.grid[dr][dc]:
# If the candidate can merge with the current tile:
if self.grid[row][column] == self.grid[dr][dc]:
self.grid[row][column] *= 2
self.grid[dr][dc] = 0
self.score += self.grid[row][column]
self.won += self.grid[row][column] == self.WIN_TILE
tile_moved[dc, dr] = (column, row), self.grid[row][column]
moved += 1
# When hitting a tile we stop trying.
break
# Submit the high score and get the change.
delta = self.manager.got_score(self.score)
free = self.free_cells()
new_tiles = set()
if moved:
# Spawn new tiles if there are holes.
if free:
x, y = random.choice(free)
value = self.grid[y][x] = random.randint(0, 10) and 2 or 4
new_tiles.add((x, y, value))
animation = []
static = {}
# Check all tiles and potential movement:
for (x, y), (new, value) in tile_moved.items():
# If not moved, store as static.
if new is None:
static[x, y] = old_grid[y][x]
else:
# Store the moving tile.
animation.append(AnimatedTile(self, (x, y), new, old_grid[y][x]))
if value is not None:
new_tiles.add(new + (value,))
self.animate(animation, static, self.score - old_score, delta, new_tiles)
else:
self.old.pop()
if not self.has_free_cells() and not self.has_free_moves():
self.lost = True | Handles cell shifting. | Below is the the instruction that describes the task:
### Input:
Handles cell shifting.
### Response:
def _shift_cells(self, get_cells, get_deltas):
"""Handles cell shifting."""
# Don't do anything when there is an overlay.
if self.lost or self.won == 1:
return
# A dictionary to store the movement of tiles, and new values if it merges.
tile_moved = {}
for y, row in enumerate(self.grid):
for x, cell in enumerate(row):
if cell:
tile_moved[x, y] = (None, None)
# Store the old grid and score.
old_grid = [row[:] for row in self.grid]
old_score = self.score
self.old.append((old_grid, self.score))
if len(self.old) > 10:
self.old.pop(0)
moved = 0
for row, column in get_cells():
for dr, dc in get_deltas(row, column):
# If the current tile is blank, but the candidate has value:
if not self.grid[row][column] and self.grid[dr][dc]:
# Move the candidate to the current tile.
self.grid[row][column], self.grid[dr][dc] = self.grid[dr][dc], 0
moved += 1
tile_moved[dc, dr] = (column, row), None
if self.grid[dr][dc]:
# If the candidate can merge with the current tile:
if self.grid[row][column] == self.grid[dr][dc]:
self.grid[row][column] *= 2
self.grid[dr][dc] = 0
self.score += self.grid[row][column]
self.won += self.grid[row][column] == self.WIN_TILE
tile_moved[dc, dr] = (column, row), self.grid[row][column]
moved += 1
# When hitting a tile we stop trying.
break
# Submit the high score and get the change.
delta = self.manager.got_score(self.score)
free = self.free_cells()
new_tiles = set()
if moved:
# Spawn new tiles if there are holes.
if free:
x, y = random.choice(free)
value = self.grid[y][x] = random.randint(0, 10) and 2 or 4
new_tiles.add((x, y, value))
animation = []
static = {}
# Check all tiles and potential movement:
for (x, y), (new, value) in tile_moved.items():
# If not moved, store as static.
if new is None:
static[x, y] = old_grid[y][x]
else:
# Store the moving tile.
animation.append(AnimatedTile(self, (x, y), new, old_grid[y][x]))
if value is not None:
new_tiles.add(new + (value,))
self.animate(animation, static, self.score - old_score, delta, new_tiles)
else:
self.old.pop()
if not self.has_free_cells() and not self.has_free_moves():
self.lost = True |
def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain) | Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on. | Below is the the instruction that describes the task:
### Input:
Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
### Response:
def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain) |
def choice(self, obj):
"""
Overloads the choice method to add the position
of the object in the tree for future sorting.
"""
tree_id = getattr(obj, self.queryset.model._mptt_meta.tree_id_attr, 0)
left = getattr(obj, self.queryset.model._mptt_meta.left_attr, 0)
return super(MPTTModelChoiceIterator,
self).choice(obj) + ((tree_id, left),) | Overloads the choice method to add the position
of the object in the tree for future sorting. | Below is the the instruction that describes the task:
### Input:
Overloads the choice method to add the position
of the object in the tree for future sorting.
### Response:
def choice(self, obj):
"""
Overloads the choice method to add the position
of the object in the tree for future sorting.
"""
tree_id = getattr(obj, self.queryset.model._mptt_meta.tree_id_attr, 0)
left = getattr(obj, self.queryset.model._mptt_meta.left_attr, 0)
return super(MPTTModelChoiceIterator,
self).choice(obj) + ((tree_id, left),) |
def override(base=ABSENT):
"""Mark a method as overriding a corresponding method from superclass.
:param base:
Optional base class from which this method is being overridden.
If provided, it can be a class itself, or its (qualified) name.
.. note::
When overriding a :class:`classmethod`, remember to place ``@override``
above the ``@classmethod`` decorator::
class Foo(Bar):
@override
@classmethod
def florb(cls):
pass
"""
arg = base # ``base`` is just for clean, user-facing argument name
# direct application of the modifier through ``@override``
if inspect.isfunction(arg) or isinstance(arg, NonInstanceMethod):
_OverrideDecorator.maybe_signal_classmethod(arg)
decorator = _OverrideDecorator(None)
return decorator(arg)
# indirect (but simple) application of the modifier through ``@override()``
if arg is ABSENT:
return _OverrideDecorator(None)
# full-blown application, with base class specified
if is_class(arg) or is_string(arg):
return _OverrideDecorator(arg)
raise TypeError("explicit base class for @override "
"must be either a string or a class object") | Mark a method as overriding a corresponding method from superclass.
:param base:
Optional base class from which this method is being overridden.
If provided, it can be a class itself, or its (qualified) name.
.. note::
When overriding a :class:`classmethod`, remember to place ``@override``
above the ``@classmethod`` decorator::
class Foo(Bar):
@override
@classmethod
def florb(cls):
pass | Below is the the instruction that describes the task:
### Input:
Mark a method as overriding a corresponding method from superclass.
:param base:
Optional base class from which this method is being overridden.
If provided, it can be a class itself, or its (qualified) name.
.. note::
When overriding a :class:`classmethod`, remember to place ``@override``
above the ``@classmethod`` decorator::
class Foo(Bar):
@override
@classmethod
def florb(cls):
pass
### Response:
def override(base=ABSENT):
"""Mark a method as overriding a corresponding method from superclass.
:param base:
Optional base class from which this method is being overridden.
If provided, it can be a class itself, or its (qualified) name.
.. note::
When overriding a :class:`classmethod`, remember to place ``@override``
above the ``@classmethod`` decorator::
class Foo(Bar):
@override
@classmethod
def florb(cls):
pass
"""
arg = base # ``base`` is just for clean, user-facing argument name
# direct application of the modifier through ``@override``
if inspect.isfunction(arg) or isinstance(arg, NonInstanceMethod):
_OverrideDecorator.maybe_signal_classmethod(arg)
decorator = _OverrideDecorator(None)
return decorator(arg)
# indirect (but simple) application of the modifier through ``@override()``
if arg is ABSENT:
return _OverrideDecorator(None)
# full-blown application, with base class specified
if is_class(arg) or is_string(arg):
return _OverrideDecorator(arg)
raise TypeError("explicit base class for @override "
"must be either a string or a class object") |
def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, wd:float=None):
"Explore lr from `start_lr` to `end_lr` over `num_it` iterations in `learn`. If `stop_div`, stops when loss diverges."
start_lr = learn.lr_range(start_lr)
start_lr = np.array(start_lr) if is_listy(start_lr) else start_lr
end_lr = learn.lr_range(end_lr)
end_lr = np.array(end_lr) if is_listy(end_lr) else end_lr
cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
learn.fit(epochs, start_lr, callbacks=[cb], wd=wd) | Explore lr from `start_lr` to `end_lr` over `num_it` iterations in `learn`. If `stop_div`, stops when loss diverges. | Below is the the instruction that describes the task:
### Input:
Explore lr from `start_lr` to `end_lr` over `num_it` iterations in `learn`. If `stop_div`, stops when loss diverges.
### Response:
def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, wd:float=None):
"Explore lr from `start_lr` to `end_lr` over `num_it` iterations in `learn`. If `stop_div`, stops when loss diverges."
start_lr = learn.lr_range(start_lr)
start_lr = np.array(start_lr) if is_listy(start_lr) else start_lr
end_lr = learn.lr_range(end_lr)
end_lr = np.array(end_lr) if is_listy(end_lr) else end_lr
cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
learn.fit(epochs, start_lr, callbacks=[cb], wd=wd) |
def ask_captcha(length=4):
"""Prompts the user for a random string."""
captcha = "".join(random.choice(string.ascii_lowercase) for _ in range(length))
ask_str('Enter the following letters, "%s"' % (captcha), vld=[captcha, captcha.upper()], blk=False) | Prompts the user for a random string. | Below is the the instruction that describes the task:
### Input:
Prompts the user for a random string.
### Response:
def ask_captcha(length=4):
"""Prompts the user for a random string."""
captcha = "".join(random.choice(string.ascii_lowercase) for _ in range(length))
ask_str('Enter the following letters, "%s"' % (captcha), vld=[captcha, captcha.upper()], blk=False) |
def star(n, alpha='faced', center=(1, 1)):
"""
Create the star points of various design matrices
Parameters
----------
n : int
The number of variables in the design
Optional
--------
alpha : str
Available values are 'faced' (default), 'orthogonal', or 'rotatable'
center : array
A 1-by-2 array of integers indicating the number of center points
assigned in each block of the response surface design. Default is
(1, 1).
Returns
-------
H : 2d-array
The star-point portion of the design matrix (i.e. at +/- alpha)
a : scalar
The alpha value to scale the star points with.
Example
-------
::
>>> star(3)
array([[-1., 0., 0.],
[ 1., 0., 0.],
[ 0., -1., 0.],
[ 0., 1., 0.],
[ 0., 0., -1.],
[ 0., 0., 1.]])
"""
# Star points at the center of each face of the factorial
if alpha=='faced':
a = 1
elif alpha=='orthogonal':
nc = 2**n # factorial points
nco = center[0] # center points to factorial
na = 2*n # axial points
nao = center[1] # center points to axial design
# value of alpha in orthogonal design
a = (n*(1 + nao/float(na))/(1 + nco/float(nc)))**0.5
elif alpha=='rotatable':
nc = 2**n # number of factorial points
a = nc**(0.25) # value of alpha in rotatable design
else:
raise ValueError('Invalid value for "alpha": {:}'.format(alpha))
# Create the actual matrix now.
H = np.zeros((2*n, n))
for i in range(n):
H[2*i:2*i+2, i] = [-1, 1]
H *= a
return H, a | Create the star points of various design matrices
Parameters
----------
n : int
The number of variables in the design
Optional
--------
alpha : str
Available values are 'faced' (default), 'orthogonal', or 'rotatable'
center : array
A 1-by-2 array of integers indicating the number of center points
assigned in each block of the response surface design. Default is
(1, 1).
Returns
-------
H : 2d-array
The star-point portion of the design matrix (i.e. at +/- alpha)
a : scalar
The alpha value to scale the star points with.
Example
-------
::
>>> star(3)
array([[-1., 0., 0.],
[ 1., 0., 0.],
[ 0., -1., 0.],
[ 0., 1., 0.],
[ 0., 0., -1.],
[ 0., 0., 1.]]) | Below is the the instruction that describes the task:
### Input:
Create the star points of various design matrices
Parameters
----------
n : int
The number of variables in the design
Optional
--------
alpha : str
Available values are 'faced' (default), 'orthogonal', or 'rotatable'
center : array
A 1-by-2 array of integers indicating the number of center points
assigned in each block of the response surface design. Default is
(1, 1).
Returns
-------
H : 2d-array
The star-point portion of the design matrix (i.e. at +/- alpha)
a : scalar
The alpha value to scale the star points with.
Example
-------
::
>>> star(3)
array([[-1., 0., 0.],
[ 1., 0., 0.],
[ 0., -1., 0.],
[ 0., 1., 0.],
[ 0., 0., -1.],
[ 0., 0., 1.]])
### Response:
def star(n, alpha='faced', center=(1, 1)):
"""
Create the star points of various design matrices
Parameters
----------
n : int
The number of variables in the design
Optional
--------
alpha : str
Available values are 'faced' (default), 'orthogonal', or 'rotatable'
center : array
A 1-by-2 array of integers indicating the number of center points
assigned in each block of the response surface design. Default is
(1, 1).
Returns
-------
H : 2d-array
The star-point portion of the design matrix (i.e. at +/- alpha)
a : scalar
The alpha value to scale the star points with.
Example
-------
::
>>> star(3)
array([[-1., 0., 0.],
[ 1., 0., 0.],
[ 0., -1., 0.],
[ 0., 1., 0.],
[ 0., 0., -1.],
[ 0., 0., 1.]])
"""
# Star points at the center of each face of the factorial
if alpha=='faced':
a = 1
elif alpha=='orthogonal':
nc = 2**n # factorial points
nco = center[0] # center points to factorial
na = 2*n # axial points
nao = center[1] # center points to axial design
# value of alpha in orthogonal design
a = (n*(1 + nao/float(na))/(1 + nco/float(nc)))**0.5
elif alpha=='rotatable':
nc = 2**n # number of factorial points
a = nc**(0.25) # value of alpha in rotatable design
else:
raise ValueError('Invalid value for "alpha": {:}'.format(alpha))
# Create the actual matrix now.
H = np.zeros((2*n, n))
for i in range(n):
H[2*i:2*i+2, i] = [-1, 1]
H *= a
return H, a |
def fit(model, data, n_epochs, opt, crit, metrics=None, callbacks=None, stepper=Stepper,
swa_model=None, swa_start=None, swa_eval_freq=None, visualize=False, **kwargs):
""" Fits a model
Arguments:
model (model): any pytorch module
net = to_gpu(net)
data (ModelData): see ModelData class and subclasses (can be a list)
opts: an optimizer. Example: optim.Adam.
If n_epochs is a list, it needs to be the layer_optimizer to get the optimizer as it changes.
n_epochs(int or list): number of epochs (or list of number of epochs)
crit: loss function to optimize. Example: F.cross_entropy
"""
seq_first = kwargs.pop('seq_first', False)
all_val = kwargs.pop('all_val', False)
get_ep_vals = kwargs.pop('get_ep_vals', False)
validate_skip = kwargs.pop('validate_skip', 0)
metrics = metrics or []
callbacks = callbacks or []
avg_mom=0.98
batch_num,avg_loss=0,0.
for cb in callbacks: cb.on_train_begin()
names = ["epoch", "trn_loss", "val_loss"] + [f.__name__ for f in metrics]
if swa_model is not None:
swa_names = ['swa_loss'] + [f'swa_{f.__name__}' for f in metrics]
names += swa_names
# will use this to call evaluate later
swa_stepper = stepper(swa_model, None, crit, **kwargs)
layout = "{!s:10} " * len(names)
if not isinstance(n_epochs, Iterable): n_epochs=[n_epochs]
if not isinstance(data, Iterable): data = [data]
if len(data) == 1: data = data * len(n_epochs)
for cb in callbacks: cb.on_phase_begin()
model_stepper = stepper(model, opt.opt if hasattr(opt,'opt') else opt, crit, **kwargs)
ep_vals = collections.OrderedDict()
tot_epochs = int(np.ceil(np.array(n_epochs).sum()))
cnt_phases = np.array([ep * len(dat.trn_dl) for (ep,dat) in zip(n_epochs,data)]).cumsum()
phase = 0
for epoch in tnrange(tot_epochs, desc='Epoch'):
if phase >= len(n_epochs): break #Sometimes cumulated errors make this append.
model_stepper.reset(True)
cur_data = data[phase]
if hasattr(cur_data, 'trn_sampler'): cur_data.trn_sampler.set_epoch(epoch)
if hasattr(cur_data, 'val_sampler'): cur_data.val_sampler.set_epoch(epoch)
num_batch = len(cur_data.trn_dl)
t = tqdm(iter(cur_data.trn_dl), leave=False, total=num_batch, miniters=0)
if all_val: val_iter = IterBatch(cur_data.val_dl)
for (*x,y) in t:
batch_num += 1
for cb in callbacks: cb.on_batch_begin()
loss = model_stepper.step(V(x),V(y), epoch)
avg_loss = avg_loss * avg_mom + loss * (1-avg_mom)
debias_loss = avg_loss / (1 - avg_mom**batch_num)
t.set_postfix(loss=debias_loss, refresh=False)
stop=False
los = debias_loss if not all_val else [debias_loss] + validate_next(model_stepper,metrics, val_iter)
for cb in callbacks: stop = stop or cb.on_batch_end(los)
if stop: return
if batch_num >= cnt_phases[phase]:
for cb in callbacks: cb.on_phase_end()
phase += 1
if phase >= len(n_epochs):
t.close()
break
for cb in callbacks: cb.on_phase_begin()
if isinstance(opt, LayerOptimizer): model_stepper.opt = opt.opt
if cur_data != data[phase]:
t.close()
break
if not all_val:
vals = validate(model_stepper, cur_data.val_dl, metrics, epoch, seq_first=seq_first, validate_skip = validate_skip)
stop=False
for cb in callbacks: stop = stop or cb.on_epoch_end(vals)
if swa_model is not None:
if (epoch + 1) >= swa_start and ((epoch + 1 - swa_start) % swa_eval_freq == 0 or epoch == tot_epochs - 1):
fix_batchnorm(swa_model, cur_data.trn_dl)
swa_vals = validate(swa_stepper, cur_data.val_dl, metrics, epoch, validate_skip = validate_skip)
vals += swa_vals
if epoch > 0:
print_stats(epoch, [debias_loss] + vals, visualize, prev_val)
else:
print(layout.format(*names))
print_stats(epoch, [debias_loss] + vals, visualize)
prev_val = [debias_loss] + vals
ep_vals = append_stats(ep_vals, epoch, [debias_loss] + vals)
if stop: break
for cb in callbacks: cb.on_train_end()
if get_ep_vals: return vals, ep_vals
else: return vals | Fits a model
Arguments:
model (model): any pytorch module
net = to_gpu(net)
data (ModelData): see ModelData class and subclasses (can be a list)
opts: an optimizer. Example: optim.Adam.
If n_epochs is a list, it needs to be the layer_optimizer to get the optimizer as it changes.
n_epochs(int or list): number of epochs (or list of number of epochs)
crit: loss function to optimize. Example: F.cross_entropy | Below is the the instruction that describes the task:
### Input:
Fits a model
Arguments:
model (model): any pytorch module
net = to_gpu(net)
data (ModelData): see ModelData class and subclasses (can be a list)
opts: an optimizer. Example: optim.Adam.
If n_epochs is a list, it needs to be the layer_optimizer to get the optimizer as it changes.
n_epochs(int or list): number of epochs (or list of number of epochs)
crit: loss function to optimize. Example: F.cross_entropy
### Response:
def fit(model, data, n_epochs, opt, crit, metrics=None, callbacks=None, stepper=Stepper,
        swa_model=None, swa_start=None, swa_eval_freq=None, visualize=False, **kwargs):
    """ Fits a model, optionally over several phases with stochastic weight averaging.

    Arguments:
        model (model): any pytorch module
        data (ModelData): see ModelData class and subclasses (can be a list,
            one entry per phase; a single entry is reused for every phase)
        n_epochs (int or list): number of epochs, or a list with one count per phase
        opt: an optimizer. Example: optim.Adam.
            If n_epochs is a list, it needs to be a layer_optimizer so the
            underlying optimizer can be refreshed when the phase changes.
        crit: loss function to optimize. Example: F.cross_entropy
        metrics: optional list of metric functions evaluated on the validation set
        callbacks: optional list of callback objects; their on_train_begin /
            on_phase_begin / on_batch_begin / on_batch_end / on_phase_end /
            on_epoch_end / on_train_end hooks are invoked here
        stepper: class wrapping a single forward/backward step (default Stepper)
        swa_model: optional model for stochastic weight averaging; evaluated
            starting at epoch ``swa_start`` every ``swa_eval_freq`` epochs
        visualize: forwarded to print_stats
    Returns:
        The last validation stats list (plus the per-epoch dict if the
        ``get_ep_vals`` kwarg is True).
    """
    # Optional keyword-only flags, popped so they are not forwarded to the stepper.
    seq_first = kwargs.pop('seq_first', False)
    all_val = kwargs.pop('all_val', False)
    get_ep_vals = kwargs.pop('get_ep_vals', False)
    validate_skip = kwargs.pop('validate_skip', 0)
    metrics = metrics or []
    callbacks = callbacks or []
    # Momentum for the exponential moving average of the training loss.
    avg_mom=0.98
    batch_num,avg_loss=0,0.
    for cb in callbacks: cb.on_train_begin()
    names = ["epoch", "trn_loss", "val_loss"] + [f.__name__ for f in metrics]
    if swa_model is not None:
        swa_names = ['swa_loss'] + [f'swa_{f.__name__}' for f in metrics]
        names += swa_names
        # will use this to call evaluate later
        swa_stepper = stepper(swa_model, None, crit, **kwargs)

    # Fixed-width column layout for the stats header printed on the first epoch.
    layout = "{!s:10} " * len(names)
    if not isinstance(n_epochs, Iterable): n_epochs=[n_epochs]
    if not isinstance(data, Iterable): data = [data]
    if len(data) == 1: data = data * len(n_epochs)
    for cb in callbacks: cb.on_phase_begin()
    model_stepper = stepper(model, opt.opt if hasattr(opt,'opt') else opt, crit, **kwargs)
    ep_vals = collections.OrderedDict()
    tot_epochs = int(np.ceil(np.array(n_epochs).sum()))
    # Cumulative batch counts marking where each training phase ends.
    cnt_phases = np.array([ep * len(dat.trn_dl) for (ep,dat) in zip(n_epochs,data)]).cumsum()
    phase = 0
    for epoch in tnrange(tot_epochs, desc='Epoch'):
        if phase >= len(n_epochs): break #Sometimes cumulated errors make this append.
        model_stepper.reset(True)
        cur_data = data[phase]
        # Distributed samplers (if present) need the epoch to reshuffle deterministically.
        if hasattr(cur_data, 'trn_sampler'): cur_data.trn_sampler.set_epoch(epoch)
        if hasattr(cur_data, 'val_sampler'): cur_data.val_sampler.set_epoch(epoch)
        num_batch = len(cur_data.trn_dl)
        t = tqdm(iter(cur_data.trn_dl), leave=False, total=num_batch, miniters=0)
        if all_val: val_iter = IterBatch(cur_data.val_dl)

        for (*x,y) in t:
            batch_num += 1
            for cb in callbacks: cb.on_batch_begin()
            loss = model_stepper.step(V(x),V(y), epoch)
            # Debiased moving average of the loss (division corrects EMA startup bias).
            avg_loss = avg_loss * avg_mom + loss * (1-avg_mom)
            debias_loss = avg_loss / (1 - avg_mom**batch_num)
            t.set_postfix(loss=debias_loss, refresh=False)
            stop=False
            los = debias_loss if not all_val else [debias_loss] + validate_next(model_stepper,metrics, val_iter)
            for cb in callbacks: stop = stop or cb.on_batch_end(los)
            if stop: return
            # Phase boundary reached: fire callbacks and move to the next phase.
            if batch_num >= cnt_phases[phase]:
                for cb in callbacks: cb.on_phase_end()
                phase += 1
                if phase >= len(n_epochs):
                    t.close()
                    break
                for cb in callbacks: cb.on_phase_begin()
                if isinstance(opt, LayerOptimizer): model_stepper.opt = opt.opt
                # A new phase with different data restarts the epoch loop.
                if cur_data != data[phase]:
                    t.close()
                    break

        if not all_val:
            vals = validate(model_stepper, cur_data.val_dl, metrics, epoch, seq_first=seq_first, validate_skip = validate_skip)
            stop=False
            for cb in callbacks: stop = stop or cb.on_epoch_end(vals)
        # Periodic SWA evaluation; fix_batchnorm refreshes its batchnorm statistics first.
        if swa_model is not None:
            if (epoch + 1) >= swa_start and ((epoch + 1 - swa_start) % swa_eval_freq == 0 or epoch == tot_epochs - 1):
                fix_batchnorm(swa_model, cur_data.trn_dl)
                swa_vals = validate(swa_stepper, cur_data.val_dl, metrics, epoch, validate_skip = validate_skip)
                vals += swa_vals

        # First epoch prints the header row; later epochs can compare against prev_val.
        if epoch > 0:
            print_stats(epoch, [debias_loss] + vals, visualize, prev_val)
        else:
            print(layout.format(*names))
            print_stats(epoch, [debias_loss] + vals, visualize)
        prev_val = [debias_loss] + vals
        ep_vals = append_stats(ep_vals, epoch, [debias_loss] + vals)
        if stop: break
    for cb in callbacks: cb.on_train_end()
    if get_ep_vals: return vals, ep_vals
    else: return vals
def save_protein_pickles_and_reset_protein(self):
"""Save all Proteins as pickle files -- currently development code for parallelization purposes. Also clears the
protein attribute in all genes!"""
self.gene_protein_pickles = {}
for g in tqdm(self.genes):
if g.protein.representative_sequence:
initproteinpickle = op.join(g.protein.protein_dir, '{}_protein.pckl'.format(g.id))
g.protein.save_pickle(initproteinpickle)
self.gene_protein_pickles[g.id] = initproteinpickle
g.reset_protein()
else:
g.reset_protein() | Save all Proteins as pickle files -- currently development code for parallelization purposes. Also clears the
protein attribute in all genes! | Below is the the instruction that describes the task:
### Input:
Save all Proteins as pickle files -- currently development code for parallelization purposes. Also clears the
protein attribute in all genes!
### Response:
def save_protein_pickles_and_reset_protein(self):
    """Save all Proteins as pickle files -- currently development code for
    parallelization purposes. Also clears the protein attribute in all genes!

    Side effects:
        - Writes ``<protein_dir>/<gene_id>_protein.pckl`` for every gene whose
          protein has a representative sequence.
        - Populates ``self.gene_protein_pickles`` mapping gene id -> pickle path.
        - Calls ``g.reset_protein()`` on every gene, pickled or not.
    """
    self.gene_protein_pickles = {}
    for g in tqdm(self.genes):
        if g.protein.representative_sequence:
            pickle_path = op.join(g.protein.protein_dir, '{}_protein.pckl'.format(g.id))
            g.protein.save_pickle(pickle_path)
            self.gene_protein_pickles[g.id] = pickle_path
        # Hoisted out of the if/else: the protein is cleared in every case
        # (the original duplicated this call in both branches).
        g.reset_protein()
def takewhile(self, func=None):
"""
Return a new Collection with the last few items removed.
Parameters:
func : function(Node) -> Node
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
"""
func = _make_callable(func)
return Collection(takewhile(func, self._items)) | Return a new Collection with the last few items removed.
Parameters:
func : function(Node) -> Node
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3) | Below is the the instruction that describes the task:
### Input:
Return a new Collection with the last few items removed.
Parameters:
func : function(Node) -> Node
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
### Response:
def takewhile(self, func=None):
    """
    Return a new Collection containing only the leading items.

    Items are kept up to (but not including) the first item for which
    ``bool(func(item))`` is False; that item and everything after it is
    discarded.

    Parameters:

      func : function(Node) -> Node

    Examples:

        node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
    """
    predicate = _make_callable(func)
    leading = takewhile(predicate, self._items)
    return Collection(leading)
def hide_routemap_holder_route_map_content_set_metric_type_external(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
metric_type = ET.SubElement(set, "metric-type")
external = ET.SubElement(metric_type, "external")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_routemap_holder_route_map_content_set_metric_type_external(self, **kwargs):
    """Auto Generated Code

    Build the NETCONF ``config`` element that selects
    ``route-map/content/set/metric-type/external`` in the
    ``brocade-ip-policy`` namespace and hand it to the callback.

    Required kwargs: ``name``, ``action_rm``, ``instance`` (route-map keys).
    Optional kwarg ``callback`` overrides ``self._callback``.
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    # Renamed from `set` so the builtin is not shadowed.
    set_el = ET.SubElement(content, "set")
    metric_type = ET.SubElement(set_el, "metric-type")
    external = ET.SubElement(metric_type, "external")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def newround(number, ndigits=None):
"""
See Python 3 documentation: uses Banker's Rounding.
Delegates to the __round__ method if for some reason this exists.
If not, rounds a number to a given precision in decimal digits (default
0 digits). This returns an int when called with one argument,
otherwise the same type as the number. ndigits may be negative.
See the test_round method in future/tests/test_builtins.py for
examples.
"""
return_int = False
if ndigits is None:
return_int = True
ndigits = 0
if hasattr(number, '__round__'):
return number.__round__(ndigits)
if ndigits < 0:
raise NotImplementedError('negative ndigits not supported yet')
exponent = Decimal('10') ** (-ndigits)
if PYPY:
# Work around issue #24: round() breaks on PyPy with NumPy's types
if 'numpy' in repr(type(number)):
number = float(number)
if not PY26:
d = Decimal.from_float(number).quantize(exponent,
rounding=ROUND_HALF_EVEN)
else:
d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
else:
return float(d) | See Python 3 documentation: uses Banker's Rounding.
Delegates to the __round__ method if for some reason this exists.
If not, rounds a number to a given precision in decimal digits (default
0 digits). This returns an int when called with one argument,
otherwise the same type as the number. ndigits may be negative.
See the test_round method in future/tests/test_builtins.py for
examples. | Below is the the instruction that describes the task:
### Input:
See Python 3 documentation: uses Banker's Rounding.
Delegates to the __round__ method if for some reason this exists.
If not, rounds a number to a given precision in decimal digits (default
0 digits). This returns an int when called with one argument,
otherwise the same type as the number. ndigits may be negative.
See the test_round method in future/tests/test_builtins.py for
examples.
### Response:
def newround(number, ndigits=None):
    """
    See Python 3 documentation: uses Banker's Rounding.

    Delegates to the __round__ method if for some reason this exists.

    If not, rounds a number to a given precision in decimal digits (default
    0 digits). This returns an int when called with one argument,
    otherwise the same type as the number. ndigits may be negative.

    See the test_round method in future/tests/test_builtins.py for
    examples.
    """
    return_int = False
    if ndigits is None:
        # Single-argument form: round to 0 digits and return an int.
        return_int = True
        ndigits = 0
    if hasattr(number, '__round__'):
        return number.__round__(ndigits)

    if ndigits < 0:
        raise NotImplementedError('negative ndigits not supported yet')
    # Quantum for Decimal.quantize, e.g. ndigits=2 -> Decimal('0.01').
    exponent = Decimal('10') ** (-ndigits)
    if PYPY:
        # Work around issue #24: round() breaks on PyPy with NumPy's types
        if 'numpy' in repr(type(number)):
            number = float(number)
    if not PY26:
        d = Decimal.from_float(number).quantize(exponent,
                                        rounding=ROUND_HALF_EVEN)
    else:
        # Python 2.6 lacks Decimal.from_float; use the backported helper.
        d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN)
    if return_int:
        return int(d)
    else:
        return float(d)
def dict(self):
'''Returns a dictionary representing this query.'''
d = dict()
d['key'] = str(self.key)
if self.limit is not None:
d['limit'] = self.limit
if self.offset > 0:
d['offset'] = self.offset
if self.offset_key:
d['offset_key'] = str(self.offset_key)
if len(self.filters) > 0:
d['filter'] = [[f.field, f.op, f.value] for f in self.filters]
if len(self.orders) > 0:
d['order'] = [str(o) for o in self.orders]
return d | Returns a dictionary representing this query. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary representing this query.
### Response:
def dict(self):
    '''Returns a dictionary representing this query.'''
    # Start with the mandatory key; optional fields are added only when set.
    representation = {'key': str(self.key)}

    if self.limit is not None:
        representation['limit'] = self.limit
    if self.offset > 0:
        representation['offset'] = self.offset
    if self.offset_key:
        representation['offset_key'] = str(self.offset_key)

    if self.filters:
        representation['filter'] = [[f.field, f.op, f.value] for f in self.filters]
    if self.orders:
        representation['order'] = [str(o) for o in self.orders]

    return representation
def toPIL(self, **attribs):
"""
Convert canvas to a PIL image
"""
import PIL.Image
bytes = self.convert("png")
sfile = io.BytesIO(bytes)
pil = PIL.Image.open(sfile)
return pil | Convert canvas to a PIL image | Below is the the instruction that describes the task:
### Input:
Convert canvas to a PIL image
### Response:
def toPIL(self, **attribs):
    """
    Convert the canvas to a PIL image by round-tripping through PNG bytes.
    """
    import PIL.Image
    png_data = self.convert("png")
    buffer = io.BytesIO(png_data)
    return PIL.Image.open(buffer)
def save(variable, filename):
"""Save variable on given path using Pickle
Args:
variable: what to save
path (str): path of the output
"""
fileObj = open(filename, 'wb')
pickle.dump(variable, fileObj)
fileObj.close() | Save variable on given path using Pickle
Args:
variable: what to save
path (str): path of the output | Below is the the instruction that describes the task:
### Input:
Save variable on given path using Pickle
Args:
variable: what to save
path (str): path of the output
### Response:
def save(variable, filename):
    """Save a variable to the given path using pickle.

    Args:
        variable: the (picklable) object to save
        filename (str): path of the output file
    """
    # Context manager guarantees the handle is closed even if pickling
    # raises (the original left the file open on error).
    with open(filename, 'wb') as file_obj:
        pickle.dump(variable, file_obj)
def _send_with_auth(values, secret_key, url):
"""Send dictionary of JSON serializable `values` as a POST body to `url`
along with `auth_token` that's generated from `secret_key` and `values`
scheduler.auth.create_token expects a JSON serializable payload, so we send
a dictionary. On the receiving end of the POST request, the Flask view will
have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest
and most surefire way to ensure that the payload sent to create_token will
be consistent on both ends is to generate an ImmutableMultiDict using the
werkzeug.Request.
"""
data = urllib.urlencode(values)
# Simulate a Flask request because that is what will be unpacked when the
# request is received on the other side
request = Request.from_values(
content_length=len(data),
input_stream=StringIO(data),
content_type='application/x-www-form-urlencoded',
method='POST')
# Add the auth_token, re-encode, and send
values['auth_token'] = create_token(secret_key, dict(request.form))
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
return json.loads(response.read()) | Send dictionary of JSON serializable `values` as a POST body to `url`
along with `auth_token` that's generated from `secret_key` and `values`
scheduler.auth.create_token expects a JSON serializable payload, so we send
a dictionary. On the receiving end of the POST request, the Flask view will
have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest
and most surefire way to ensure that the payload sent to create_token will
be consistent on both ends is to generate an ImmutableMultiDict using the
werkzeug.Request. | Below is the the instruction that describes the task:
### Input:
Send dictionary of JSON serializable `values` as a POST body to `url`
along with `auth_token` that's generated from `secret_key` and `values`
scheduler.auth.create_token expects a JSON serializable payload, so we send
a dictionary. On the receiving end of the POST request, the Flask view will
have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest
and most surefire way to ensure that the payload sent to create_token will
be consistent on both ends is to generate an ImmutableMultiDict using the
werkzeug.Request.
### Response:
def _send_with_auth(values, secret_key, url):
    """Send dictionary of JSON serializable `values` as a POST body to `url`
    along with `auth_token` that's generated from `secret_key` and `values`

    scheduler.auth.create_token expects a JSON serializable payload, so we send
    a dictionary. On the receiving end of the POST request, the Flask view will
    have access to a werkzeug.datastructures.ImmutableMultiDict. The easiest
    and most surefire way to ensure that the payload sent to create_token will
    be consistent on both ends is to generate an ImmutableMultiDict using the
    werkzeug.Request.

    Returns the decoded JSON response body as a dict.
    """
    # NOTE(review): Python 2-era code (urllib.urlencode / urllib2 / StringIO);
    # a Python 3 port would use urllib.parse and urllib.request.
    data = urllib.urlencode(values)
    # Simulate a Flask request because that is what will be unpacked when the
    # request is received on the other side
    request = Request.from_values(
        content_length=len(data),
        input_stream=StringIO(data),
        content_type='application/x-www-form-urlencoded',
        method='POST')
    # Add the auth_token, re-encode, and send
    values['auth_token'] = create_token(secret_key, dict(request.form))
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    return json.loads(response.read())
def resume_writing(self, exc=None):
'''Resume writing.
Successive calls to this method will fails unless
:meth:`pause_writing` is called first.
'''
assert self._paused
self._paused = False
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.done():
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
self.transport.resume_reading()
self._write_from_buffer() | Resume writing.
Successive calls to this method will fails unless
:meth:`pause_writing` is called first. | Below is the the instruction that describes the task:
### Input:
Resume writing.
Successive calls to this method will fails unless
:meth:`pause_writing` is called first.
### Response:
def resume_writing(self, exc=None):
    '''Resume writing after a previous :meth:`pause_writing` call.

    Successive calls to this method will fail unless
    :meth:`pause_writing` is called first. Wakes up any coroutine
    waiting on the drain waiter, resumes reading on the transport and
    flushes the internal write buffer.
    '''
    assert self._paused
    self._paused = False
    # Atomically take ownership of the pending drain waiter, if any.
    pending, self._waiter = self._waiter, None
    if pending is not None and not pending.done():
        if exc is None:
            pending.set_result(None)
        else:
            pending.set_exception(exc)
    self.transport.resume_reading()
    self._write_from_buffer()
def get_atoms(self, inc_alt_states=False):
"""Returns all atoms in the `Monomer`.
Parameters
----------
inc_alt_states : bool, optional
If `True`, will return `Atoms` for alternate states.
"""
if inc_alt_states:
return itertools.chain(*[x[1].values() for x in sorted(list(self.states.items()))])
return self.atoms.values() | Returns all atoms in the `Monomer`.
Parameters
----------
inc_alt_states : bool, optional
If `True`, will return `Atoms` for alternate states. | Below is the the instruction that describes the task:
### Input:
Returns all atoms in the `Monomer`.
Parameters
----------
inc_alt_states : bool, optional
If `True`, will return `Atoms` for alternate states.
### Response:
def get_atoms(self, inc_alt_states=False):
    """Return all atoms in the `Monomer`.

    Parameters
    ----------
    inc_alt_states : bool, optional
        If `True`, atoms from every alternate state are returned instead,
        chained in sorted state order.
    """
    if not inc_alt_states:
        return self.atoms.values()
    # Chain the atom dicts of every state, ordered by state key.
    state_atom_dicts = (atoms for _, atoms in sorted(self.states.items()))
    return itertools.chain.from_iterable(d.values() for d in state_atom_dicts)
def from_spec(spec):
"""
Creates an exploration object from a specification dict.
"""
exploration = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.explorations.explorations
)
assert isinstance(exploration, Exploration)
return exploration | Creates an exploration object from a specification dict. | Below is the the instruction that describes the task:
### Input:
Creates an exploration object from a specification dict.
### Response:
def from_spec(spec):
    """
    Creates an exploration object from a specification dict.
    """
    # Resolve the spec against the registry of known exploration classes.
    exploration = util.get_object(
        obj=spec,
        predefined_objects=tensorforce.core.explorations.explorations
    )
    # Sanity check: the factory must hand back an Exploration instance.
    assert isinstance(exploration, Exploration)
    return exploration
def AstroDrizzle(input=None, mdriztab=False, editpars=False, configobj=None,
wcsmap=None, **input_dict):
""" AstroDrizzle command-line interface """
# Support input of filenames from command-line without a parameter name
# then copy this into input_dict for merging with TEAL ConfigObj
# parameters.
# Load any user-specified configobj
if isinstance(configobj, (str, bytes)):
if configobj == 'defaults':
# load "TEAL"-defaults (from ~/.teal/):
configobj = teal.load(__taskname__)
else:
if not os.path.exists(configobj):
raise RuntimeError('Cannot find .cfg file: '+configobj)
configobj = teal.load(configobj, strict=False)
elif configobj is None:
# load 'astrodrizzle' parameter defaults as described in the docs:
configobj = teal.load(__taskname__, defaults=True)
if input and not util.is_blank(input):
input_dict['input'] = input
elif configobj is None:
raise TypeError("AstroDrizzle() needs either 'input' or "
"'configobj' arguments")
if 'updatewcs' in input_dict: # user trying to explicitly turn on updatewcs
configobj['updatewcs'] = input_dict['updatewcs']
del input_dict['updatewcs']
# If called from interactive user-interface, configObj will not be
# defined yet, so get defaults using EPAR/TEAL.
#
# Also insure that the input_dict (user-specified values) are folded in
# with a fully populated configObj instance.
try:
configObj = util.getDefaultConfigObj(__taskname__, configobj,
input_dict,
loadOnly=(not editpars))
log.debug('')
log.debug("INPUT_DICT:")
util.print_cfg(input_dict, log.debug)
log.debug('')
# If user specifies optional parameter for final_wcs specification in input_dict,
# insure that the final_wcs step gets turned on
util.applyUserPars_steps(configObj, input_dict, step='3a')
util.applyUserPars_steps(configObj, input_dict, step='7a')
except ValueError:
print("Problem with input parameters. Quitting...", file=sys.stderr)
return
if not configObj:
return
configObj['mdriztab'] = mdriztab
# If 'editpars' was set to True, util.getDefaultConfigObj() will have
# already called 'run()'.
if not editpars:
run(configObj, wcsmap=wcsmap) | AstroDrizzle command-line interface | Below is the the instruction that describes the task:
### Input:
AstroDrizzle command-line interface
### Response:
def AstroDrizzle(input=None, mdriztab=False, editpars=False, configobj=None,
                 wcsmap=None, **input_dict):
    """ AstroDrizzle command-line interface.

    Parameters
    ----------
    input : str, optional
        Input filename specification; folded into ``input_dict``.
    mdriztab : bool, optional
        Stored on the resulting configObj as ``configObj['mdriztab']``.
    editpars : bool, optional
        If True, TEAL opens the parameter editor (and runs the task itself)
        instead of this function calling ``run()``.
    configobj : str or ConfigObj or None, optional
        ``'defaults'`` loads TEAL defaults from ``~/.teal/``; any other
        string is treated as a ``.cfg`` file path; ``None`` loads the
        task's packaged defaults.
    wcsmap : optional
        Passed through to :func:`run`.
    **input_dict
        User-specified parameter overrides merged into the configObj.
    """
    # Support input of filenames from command-line without a parameter name
    # then copy this into input_dict for merging with TEAL ConfigObj
    # parameters.

    # Load any user-specified configobj
    if isinstance(configobj, (str, bytes)):
        if configobj == 'defaults':
            # load "TEAL"-defaults (from ~/.teal/):
            configobj = teal.load(__taskname__)
        else:
            if not os.path.exists(configobj):
                raise RuntimeError('Cannot find .cfg file: '+configobj)
            configobj = teal.load(configobj, strict=False)
    elif configobj is None:
        # load 'astrodrizzle' parameter defaults as described in the docs:
        configobj = teal.load(__taskname__, defaults=True)

    if input and not util.is_blank(input):
        input_dict['input'] = input
    elif configobj is None:
        raise TypeError("AstroDrizzle() needs either 'input' or "
                        "'configobj' arguments")

    if 'updatewcs' in input_dict: # user trying to explicitly turn on updatewcs
        configobj['updatewcs'] = input_dict['updatewcs']
        del input_dict['updatewcs']

    # If called from interactive user-interface, configObj will not be
    # defined yet, so get defaults using EPAR/TEAL.
    #
    # Also insure that the input_dict (user-specified values) are folded in
    # with a fully populated configObj instance.
    try:
        configObj = util.getDefaultConfigObj(__taskname__, configobj,
                                             input_dict,
                                             loadOnly=(not editpars))
        log.debug('')
        log.debug("INPUT_DICT:")
        util.print_cfg(input_dict, log.debug)
        log.debug('')
        # If user specifies optional parameter for final_wcs specification in input_dict,
        # insure that the final_wcs step gets turned on
        util.applyUserPars_steps(configObj, input_dict, step='3a')
        util.applyUserPars_steps(configObj, input_dict, step='7a')
    except ValueError:
        print("Problem with input parameters. Quitting...", file=sys.stderr)
        return

    if not configObj:
        return

    configObj['mdriztab'] = mdriztab

    # If 'editpars' was set to True, util.getDefaultConfigObj() will have
    # already called 'run()'.
    if not editpars:
        run(configObj, wcsmap=wcsmap)
def activate_cython(self):
"""
Activate Cython support.
We need to run this here because if the support is
active, we don't to run the UMR at all.
"""
run_cython = os.environ.get("SPY_RUN_CYTHON") == "True"
if run_cython:
try:
__import__('Cython')
self.has_cython = True
except Exception:
pass
if self.has_cython:
# Import pyximport to enable Cython files support for
# import statement
import pyximport
pyx_setup_args = {}
# Add Numpy include dir to pyximport/distutils
try:
import numpy
pyx_setup_args['include_dirs'] = numpy.get_include()
except Exception:
pass
# Setup pyximport and enable Cython files reload
pyximport.install(setup_args=pyx_setup_args,
reload_support=True) | Activate Cython support.
We need to run this here because if the support is
active, we don't to run the UMR at all. | Below is the the instruction that describes the task:
### Input:
Activate Cython support.
We need to run this here because if the support is
active, we don't to run the UMR at all.
### Response:
def activate_cython(self):
    """
    Activate Cython support.

    We need to run this here because if the support is
    active, we don't run the UMR at all.
    """
    if os.environ.get("SPY_RUN_CYTHON") == "True":
        try:
            __import__('Cython')
            self.has_cython = True
        except Exception:
            pass

    # Guard clause: nothing more to do without Cython available.
    if not self.has_cython:
        return

    # Import pyximport to enable Cython files support for the import statement.
    import pyximport
    pyx_setup_args = {}

    try:
        # Add the Numpy include dir to pyximport/distutils.
        import numpy
        pyx_setup_args['include_dirs'] = numpy.get_include()
    except Exception:
        pass

    # Setup pyximport and enable Cython files reload.
    pyximport.install(setup_args=pyx_setup_args, reload_support=True)
def gather_from_ingredients(wrapped, instance=None, args=None, kwargs=None):
"""
Decorator that calls `_gather` on the instance the wrapped function is
bound to (should be an `Ingredient`) and yields from the returned
generator.
This function is necessary, because `Ingredient._gather` cannot directly be
used as a decorator inside of `Ingredient`.
"""
for item in instance._gather(wrapped):
yield item | Decorator that calls `_gather` on the instance the wrapped function is
bound to (should be an `Ingredient`) and yields from the returned
generator.
This function is necessary, because `Ingredient._gather` cannot directly be
used as a decorator inside of `Ingredient`. | Below is the the instruction that describes the task:
### Input:
Decorator that calls `_gather` on the instance the wrapped function is
bound to (should be an `Ingredient`) and yields from the returned
generator.
This function is necessary, because `Ingredient._gather` cannot directly be
used as a decorator inside of `Ingredient`.
### Response:
def gather_from_ingredients(wrapped, instance=None, args=None, kwargs=None):
    """
    Decorator helper that delegates to ``instance._gather``.

    ``instance`` (expected to be an `Ingredient`) has its ``_gather`` method
    called with the wrapped function; each item of the resulting generator is
    yielded in turn.

    This function is necessary, because `Ingredient._gather` cannot directly be
    used as a decorator inside of `Ingredient`.
    """
    gathered = instance._gather(wrapped)
    for entry in gathered:
        yield entry
async def AddUnits(self, application, num_units, placement):
'''
application : str
num_units : int
placement : typing.Sequence[~Placement]
Returns -> typing.Sequence[str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddUnits',
version=5,
params=_params)
_params['application'] = application
_params['num-units'] = num_units
_params['placement'] = placement
reply = await self.rpc(msg)
return reply | application : str
num_units : int
placement : typing.Sequence[~Placement]
Returns -> typing.Sequence[str] | Below is the the instruction that describes the task:
### Input:
application : str
num_units : int
placement : typing.Sequence[~Placement]
Returns -> typing.Sequence[str]
### Response:
async def AddUnits(self, application, num_units, placement):
'''
application : str
num_units : int
placement : typing.Sequence[~Placement]
Returns -> typing.Sequence[str]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Application',
request='AddUnits',
version=5,
params=_params)
_params['application'] = application
_params['num-units'] = num_units
_params['placement'] = placement
reply = await self.rpc(msg)
return reply |
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
name = 'Contract'
try:
name = self.Name.decode('utf-8')
except Exception as e:
pass
jsn = {'version': self.StateVersion}
jsn_code = self.Code.ToJson()
jsn_contract = {
'name': name,
'code_version': self.CodeVersion.decode('utf-8'),
'author': self.Author.decode('utf-8'),
'email': self.Email.decode('utf-8'),
'description': self.Description.decode('utf-8'),
'properties': {
'storage': self.HasStorage,
'dynamic_invoke': self.HasDynamicInvoke,
'payable': self.Payable
}
}
jsn.update(jsn_code)
jsn.update(jsn_contract)
if self._nep_token:
jsn['token'] = self._nep_token.ToJson()
return jsn | Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict: | Below is the the instruction that describes the task:
### Input:
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
### Response:
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
name = 'Contract'
try:
name = self.Name.decode('utf-8')
except Exception as e:
pass
jsn = {'version': self.StateVersion}
jsn_code = self.Code.ToJson()
jsn_contract = {
'name': name,
'code_version': self.CodeVersion.decode('utf-8'),
'author': self.Author.decode('utf-8'),
'email': self.Email.decode('utf-8'),
'description': self.Description.decode('utf-8'),
'properties': {
'storage': self.HasStorage,
'dynamic_invoke': self.HasDynamicInvoke,
'payable': self.Payable
}
}
jsn.update(jsn_code)
jsn.update(jsn_contract)
if self._nep_token:
jsn['token'] = self._nep_token.ToJson()
return jsn |
def clean_all(G, settings):
"""
Removes all the output files from all targets. Takes
the graph as the only argument
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed
"""
quiet = settings["quiet"]
recon = settings["recon"]
sprint = settings["sprint"]
error = settings["error"]
all_outputs = []
for node in G.nodes(data=True):
if "output" in node[1]:
for item in get_all_outputs(node[1]):
all_outputs.append(item)
all_outputs.append(".shastore")
retcode = 0
for item in sorted(all_outputs):
if os.path.isfile(item):
if recon:
sprint("Would remove file: {}".format(item))
continue
sprint("Attempting to remove file '{}'", level="verbose")
try:
os.remove(item)
sprint("Removed file", level="verbose")
except:
errmes = "Error: file '{}' failed to be removed"
error(errmes.format(item))
retcode = 1
if not retcode and not recon:
sprint("All clean", color=True)
return retcode | Removes all the output files from all targets. Takes
the graph as the only argument
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed | Below is the the instruction that describes the task:
### Input:
Removes all the output files from all targets. Takes
the graph as the only argument
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed
### Response:
def clean_all(G, settings):
"""
Removes all the output files from all targets. Takes
the graph as the only argument
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed
"""
quiet = settings["quiet"]
recon = settings["recon"]
sprint = settings["sprint"]
error = settings["error"]
all_outputs = []
for node in G.nodes(data=True):
if "output" in node[1]:
for item in get_all_outputs(node[1]):
all_outputs.append(item)
all_outputs.append(".shastore")
retcode = 0
for item in sorted(all_outputs):
if os.path.isfile(item):
if recon:
sprint("Would remove file: {}".format(item))
continue
sprint("Attempting to remove file '{}'", level="verbose")
try:
os.remove(item)
sprint("Removed file", level="verbose")
except:
errmes = "Error: file '{}' failed to be removed"
error(errmes.format(item))
retcode = 1
if not retcode and not recon:
sprint("All clean", color=True)
return retcode |
def disable_constant(parameterized):
"""
Temporarily set parameters on Parameterized object to
constant=False.
"""
params = parameterized.params().values()
constants = [p.constant for p in params]
for p in params:
p.constant = False
try:
yield
except:
raise
finally:
for (p, const) in zip(params, constants):
p.constant = const | Temporarily set parameters on Parameterized object to
constant=False. | Below is the the instruction that describes the task:
### Input:
Temporarily set parameters on Parameterized object to
constant=False.
### Response:
def disable_constant(parameterized):
"""
Temporarily set parameters on Parameterized object to
constant=False.
"""
params = parameterized.params().values()
constants = [p.constant for p in params]
for p in params:
p.constant = False
try:
yield
except:
raise
finally:
for (p, const) in zip(params, constants):
p.constant = const |
def add_description(method):
"""Decorator adding the description to the output of a visitor method."""
@wraps(method)
def wrapped(self, node, *args):
return join([node.description, method(self, node, *args)], "\n")
return wrapped | Decorator adding the description to the output of a visitor method. | Below is the the instruction that describes the task:
### Input:
Decorator adding the description to the output of a visitor method.
### Response:
def add_description(method):
"""Decorator adding the description to the output of a visitor method."""
@wraps(method)
def wrapped(self, node, *args):
return join([node.description, method(self, node, *args)], "\n")
return wrapped |
def reset_to_bootloader1(self, cpu_id):
""" Reset to the bootloader
The parameter cpuid shall correspond to the device to reset.
Return true if the reset has been done and the contact with the
bootloader is established.
"""
# Send an echo request and wait for the answer
# Mainly aim to bypass a bug of the crazyflie firmware that prevents
# reset before normal CRTP communication
pk = CRTPPacket()
pk.port = CRTPPort.LINKCTRL
pk.data = (1, 2, 3) + cpu_id
self.link.send_packet(pk)
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if pk.port == CRTPPort.LINKCTRL:
break
# Send the reset to bootloader request
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (0xFF, 0xFE) + cpu_id
self.link.send_packet(pk)
# Wait to ack the reset ...
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if pk.port == 0xFF and tuple(pk.data) == (0xFF, 0xFE) + cpu_id:
pk.data = (0xFF, 0xF0) + cpu_id
self.link.send_packet(pk)
break
time.sleep(0.1)
self.link.close()
self.link = cflib.crtp.get_link_driver(self.clink_address)
# time.sleep(0.1)
return self._update_info() | Reset to the bootloader
The parameter cpuid shall correspond to the device to reset.
Return true if the reset has been done and the contact with the
bootloader is established. | Below is the the instruction that describes the task:
### Input:
Reset to the bootloader
The parameter cpuid shall correspond to the device to reset.
Return true if the reset has been done and the contact with the
bootloader is established.
### Response:
def reset_to_bootloader1(self, cpu_id):
""" Reset to the bootloader
The parameter cpuid shall correspond to the device to reset.
Return true if the reset has been done and the contact with the
bootloader is established.
"""
# Send an echo request and wait for the answer
# Mainly aim to bypass a bug of the crazyflie firmware that prevents
# reset before normal CRTP communication
pk = CRTPPacket()
pk.port = CRTPPort.LINKCTRL
pk.data = (1, 2, 3) + cpu_id
self.link.send_packet(pk)
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if pk.port == CRTPPort.LINKCTRL:
break
# Send the reset to bootloader request
pk = CRTPPacket()
pk.set_header(0xFF, 0xFF)
pk.data = (0xFF, 0xFE) + cpu_id
self.link.send_packet(pk)
# Wait to ack the reset ...
pk = None
while True:
pk = self.link.receive_packet(2)
if not pk:
return False
if pk.port == 0xFF and tuple(pk.data) == (0xFF, 0xFE) + cpu_id:
pk.data = (0xFF, 0xF0) + cpu_id
self.link.send_packet(pk)
break
time.sleep(0.1)
self.link.close()
self.link = cflib.crtp.get_link_driver(self.clink_address)
# time.sleep(0.1)
return self._update_info() |
def dump_key(engine, obj):
"""dump the hash (and range, if there is one) key(s) of an object into
a dynamo-friendly format.
returns {dynamo_name: {type: value} for dynamo_name in hash/range keys}
"""
key = {}
for key_column in obj.Meta.keys:
key_value = getattr(obj, key_column.name, missing)
if key_value is missing:
raise MissingKey("{!r} is missing {}: {!r}".format(
obj, "hash_key" if key_column.hash_key else "range_key",
key_column.name
))
# noinspection PyProtectedMember
key_value = engine._dump(key_column.typedef, key_value)
key[key_column.dynamo_name] = key_value
return key | dump the hash (and range, if there is one) key(s) of an object into
a dynamo-friendly format.
returns {dynamo_name: {type: value} for dynamo_name in hash/range keys} | Below is the the instruction that describes the task:
### Input:
dump the hash (and range, if there is one) key(s) of an object into
a dynamo-friendly format.
returns {dynamo_name: {type: value} for dynamo_name in hash/range keys}
### Response:
def dump_key(engine, obj):
"""dump the hash (and range, if there is one) key(s) of an object into
a dynamo-friendly format.
returns {dynamo_name: {type: value} for dynamo_name in hash/range keys}
"""
key = {}
for key_column in obj.Meta.keys:
key_value = getattr(obj, key_column.name, missing)
if key_value is missing:
raise MissingKey("{!r} is missing {}: {!r}".format(
obj, "hash_key" if key_column.hash_key else "range_key",
key_column.name
))
# noinspection PyProtectedMember
key_value = engine._dump(key_column.typedef, key_value)
key[key_column.dynamo_name] = key_value
return key |
def parse_card_transfer_metainfo(protobuf: bytes, deck_version: int) -> dict:
'''decode card_spawn protobuf message and validate it against deck.version
:protobuf - bytes from op_return message
:deck_version - integer
'''
card = CardTransferProto()
card.ParseFromString(protobuf)
if not card.version == deck_version:
raise CardVersionMismatch({'error': 'card version does not match deck version.'})
return {
"version": card.version,
"number_of_decimals": card.number_of_decimals,
"amount": list(card.amount),
"asset_specific_data": card.asset_specific_data
} | decode card_spawn protobuf message and validate it against deck.version
:protobuf - bytes from op_return message
:deck_version - integer | Below is the the instruction that describes the task:
### Input:
decode card_spawn protobuf message and validate it against deck.version
:protobuf - bytes from op_return message
:deck_version - integer
### Response:
def parse_card_transfer_metainfo(protobuf: bytes, deck_version: int) -> dict:
'''decode card_spawn protobuf message and validate it against deck.version
:protobuf - bytes from op_return message
:deck_version - integer
'''
card = CardTransferProto()
card.ParseFromString(protobuf)
if not card.version == deck_version:
raise CardVersionMismatch({'error': 'card version does not match deck version.'})
return {
"version": card.version,
"number_of_decimals": card.number_of_decimals,
"amount": list(card.amount),
"asset_specific_data": card.asset_specific_data
} |
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
data = {}
for idx, var_name in enumerate(self.var_names):
# Use emcee3 syntax, else use emcee2
data[var_name] = (
self.sampler.get_chain()[(..., idx)].T
if hasattr(self.sampler, "get_chain")
else self.sampler.chain[(..., idx)]
)
return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims) | Convert the posterior to an xarray dataset. | Below is the the instruction that describes the task:
### Input:
Convert the posterior to an xarray dataset.
### Response:
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
data = {}
for idx, var_name in enumerate(self.var_names):
# Use emcee3 syntax, else use emcee2
data[var_name] = (
self.sampler.get_chain()[(..., idx)].T
if hasattr(self.sampler, "get_chain")
else self.sampler.chain[(..., idx)]
)
return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims) |
def rename(self, name):
"""
Rename the element.
@param name: A new name for the element.
@type name: basestring
"""
if name is None:
raise Exception("name (%s) not-valid" % (name,))
self.prefix, self.name = splitPrefix(name) | Rename the element.
@param name: A new name for the element.
@type name: basestring | Below is the the instruction that describes the task:
### Input:
Rename the element.
@param name: A new name for the element.
@type name: basestring
### Response:
def rename(self, name):
"""
Rename the element.
@param name: A new name for the element.
@type name: basestring
"""
if name is None:
raise Exception("name (%s) not-valid" % (name,))
self.prefix, self.name = splitPrefix(name) |
def instantiate(self, value_of_n):
"""Instantiates the template"""
template = Cheetah.Template.Template(
self.content,
searchList={'n': value_of_n}
)
template.random_string = random_string
return str(template) | Instantiates the template | Below is the the instruction that describes the task:
### Input:
Instantiates the template
### Response:
def instantiate(self, value_of_n):
"""Instantiates the template"""
template = Cheetah.Template.Template(
self.content,
searchList={'n': value_of_n}
)
template.random_string = random_string
return str(template) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'sentence') and self.sentence is not None:
_dict['sentence'] = self.sentence
if hasattr(self, 'subject') and self.subject is not None:
_dict['subject'] = self.subject._to_dict()
if hasattr(self, 'action') and self.action is not None:
_dict['action'] = self.action._to_dict()
if hasattr(self, 'object') and self.object is not None:
_dict['object'] = self.object._to_dict()
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'sentence') and self.sentence is not None:
_dict['sentence'] = self.sentence
if hasattr(self, 'subject') and self.subject is not None:
_dict['subject'] = self.subject._to_dict()
if hasattr(self, 'action') and self.action is not None:
_dict['action'] = self.action._to_dict()
if hasattr(self, 'object') and self.object is not None:
_dict['object'] = self.object._to_dict()
return _dict |
def assess_fitting_results(basename, cormap_alpha=0.01):
"""Assess the results of a fit based on the .fit and .fir files created by
various programs from the ATSAS suite."""
plt.figure(figsize=(12, 4))
plt.subplot2grid((1, 4), (0, 0), colspan=2)
fir = np.loadtxt(basename + '.fir', skiprows=1) # q, Iexp, Errexp, Ifitted
# do a cormap test to compare the raw data and the model.
pvalf, Cf, cormapf = cormaptest(fir[:, 1], fir[:, 3])
cormapstatusf = ['Reject', 'Accept'][pvalf >= cormap_alpha]
plt.errorbar(fir[:, 0], fir[:, 1], fir[:, 2], None, 'bo-', label='Raw data')
plt.plot(fir[:, 0], fir[:, 3], 'r-', label='Fitted')
chi2 = calc_chi2(fir[:, 1], fir[:, 2], fir[:, 3])
R2 = calc_R2(fir[:, 1], fir[:, 3])
try:
skiprows = 0
while True:
try:
fit = np.loadtxt(basename + '.fit', skiprows=skiprows) # q, Ismoothed, Ifitted
break
except ValueError as ve:
if ve.args[0].startswith('could not convert string to float'):
skiprows += 1
continue
else:
raise
# do a cormap test to compare the raw data to the smoothed data
smoothed = fit[(fit[:, 0] >= fir[:, 0].min()) & (fit[:, 0] <= fir[:, 0].max()), 1]
pvals, Cs, cormaps = cormaptest(fir[:, 1], smoothed)
cormapstatuss = ['Reject', 'Accept'][pvals >= cormap_alpha]
plt.plot(fit[:, 0], fit[:, 1], 'g.-', label='Smoothed, extrapolated')
plt.plot(fit[:, 0], fit[:, 2], 'm-', label='Fitted to smoothed, extrapolated')
except ValueError as ve:
print('Error while loading file: {}.fit: {}'.format(basename, ve))
except FileNotFoundError:
fit = None
cormaps = cormapstatuss = pvals = Cs = None
plt.xscale('log')
plt.yscale('log')
plt.legend(loc='best')
plt.grid(which='both')
if fit is not None:
plt.subplot2grid((1, 4), (0, 2))
plt.imshow(cormaps, cmap='gray', interpolation='nearest')
plt.title('CorMap of the smoothing')
plt.subplot2grid((1, 4), (0, 3))
plt.imshow(cormapf, cmap='gray', interpolation='nearest')
plt.title('CorMap of the fitting')
print('R2: ', R2)
print('Chi2: ', chi2)
if fit is not None:
print('Cormap test of the smoothing: {} (p={}, C={}, N={})'.format(cormapstatuss, pvals, Cs, cormaps.shape[0]))
print('Cormap test of fit: {} (p={}, C={}, N={})'.format(cormapstatusf, pvalf, Cf, cormapf.shape[0])) | Assess the results of a fit based on the .fit and .fir files created by
various programs from the ATSAS suite. | Below is the the instruction that describes the task:
### Input:
Assess the results of a fit based on the .fit and .fir files created by
various programs from the ATSAS suite.
### Response:
def assess_fitting_results(basename, cormap_alpha=0.01):
"""Assess the results of a fit based on the .fit and .fir files created by
various programs from the ATSAS suite."""
plt.figure(figsize=(12, 4))
plt.subplot2grid((1, 4), (0, 0), colspan=2)
fir = np.loadtxt(basename + '.fir', skiprows=1) # q, Iexp, Errexp, Ifitted
# do a cormap test to compare the raw data and the model.
pvalf, Cf, cormapf = cormaptest(fir[:, 1], fir[:, 3])
cormapstatusf = ['Reject', 'Accept'][pvalf >= cormap_alpha]
plt.errorbar(fir[:, 0], fir[:, 1], fir[:, 2], None, 'bo-', label='Raw data')
plt.plot(fir[:, 0], fir[:, 3], 'r-', label='Fitted')
chi2 = calc_chi2(fir[:, 1], fir[:, 2], fir[:, 3])
R2 = calc_R2(fir[:, 1], fir[:, 3])
try:
skiprows = 0
while True:
try:
fit = np.loadtxt(basename + '.fit', skiprows=skiprows) # q, Ismoothed, Ifitted
break
except ValueError as ve:
if ve.args[0].startswith('could not convert string to float'):
skiprows += 1
continue
else:
raise
# do a cormap test to compare the raw data to the smoothed data
smoothed = fit[(fit[:, 0] >= fir[:, 0].min()) & (fit[:, 0] <= fir[:, 0].max()), 1]
pvals, Cs, cormaps = cormaptest(fir[:, 1], smoothed)
cormapstatuss = ['Reject', 'Accept'][pvals >= cormap_alpha]
plt.plot(fit[:, 0], fit[:, 1], 'g.-', label='Smoothed, extrapolated')
plt.plot(fit[:, 0], fit[:, 2], 'm-', label='Fitted to smoothed, extrapolated')
except ValueError as ve:
print('Error while loading file: {}.fit: {}'.format(basename, ve))
except FileNotFoundError:
fit = None
cormaps = cormapstatuss = pvals = Cs = None
plt.xscale('log')
plt.yscale('log')
plt.legend(loc='best')
plt.grid(which='both')
if fit is not None:
plt.subplot2grid((1, 4), (0, 2))
plt.imshow(cormaps, cmap='gray', interpolation='nearest')
plt.title('CorMap of the smoothing')
plt.subplot2grid((1, 4), (0, 3))
plt.imshow(cormapf, cmap='gray', interpolation='nearest')
plt.title('CorMap of the fitting')
print('R2: ', R2)
print('Chi2: ', chi2)
if fit is not None:
print('Cormap test of the smoothing: {} (p={}, C={}, N={})'.format(cormapstatuss, pvals, Cs, cormaps.shape[0]))
print('Cormap test of fit: {} (p={}, C={}, N={})'.format(cormapstatusf, pvalf, Cf, cormapf.shape[0])) |
def bot_config(player_config_path: Path, team: Team) -> 'PlayerConfig':
"""
A function to cover the common case of creating a config for a bot.
"""
bot_config = PlayerConfig()
bot_config.bot = True
bot_config.rlbot_controlled = True
bot_config.team = team.value
bot_config.config_path = str(player_config_path.absolute()) # TODO: Refactor to use Path's
config_bundle = get_bot_config_bundle(bot_config.config_path)
bot_config.name = config_bundle.name
bot_config.loadout_config = load_bot_appearance(config_bundle.get_looks_config(), bot_config.team)
return bot_config | A function to cover the common case of creating a config for a bot. | Below is the the instruction that describes the task:
### Input:
A function to cover the common case of creating a config for a bot.
### Response:
def bot_config(player_config_path: Path, team: Team) -> 'PlayerConfig':
"""
A function to cover the common case of creating a config for a bot.
"""
bot_config = PlayerConfig()
bot_config.bot = True
bot_config.rlbot_controlled = True
bot_config.team = team.value
bot_config.config_path = str(player_config_path.absolute()) # TODO: Refactor to use Path's
config_bundle = get_bot_config_bundle(bot_config.config_path)
bot_config.name = config_bundle.name
bot_config.loadout_config = load_bot_appearance(config_bundle.get_looks_config(), bot_config.team)
return bot_config |
def openXmlDocument(path=None, file_=None, data=None, url=None, mime_type=None):
"""**Factory function**
Will guess what document type is best suited and return the appropriate
document type.
User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter.
:param path: file path in the local filesystem to a document.
:param file_: a file (like) object to a document (must be opened in 'rb' mode')
:param data: the binary data of a document
:param url: the URL of a document
:param mime_type: mime type if known. One of the known MIME types from :mod:`openxmllib.contenttypes`.
Note that ``mime_tyype`` parameter **must** be provided if you provide the
Open XML document through the ``data`` parameter. Otherwise, if you don't
provide one, we'll try to guess which is the most appropriate using the file
extension.
:return: A subclass of :class:`openxmllib.document.Document`.
"""
if path is not None:
file_ = open(path, 'rb')
elif file_ is not None:
assert hasattr(file_, 'read')
elif url is not None:
file_ = urllib2.urlopen(url)
if mime_type is None:
mime_type = file_.headers.gettype()
elif data is not None:
file_ = cStringIO.StringIO(data)
assert mime_type is not None
else:
raise ValueError("Either path, file_, data, or url should be provided")
# Mime type based document
if mime_type is not None:
for class_ in _document_classes:
if class_.canProcessMime(mime_type):
return class_(file_, mime_type=mime_type)
raise ValueError("%s MIME type is unknown." % mime_type)
else:
assert hasattr(file_, 'name')
for class_ in _document_classes:
if class_.canProcessFilename(file_.name):
return class_(file_, mime_type=mime_type)
raise ValueError("Can't guess mime_type. You should set the mime_type param")
return | **Factory function**
Will guess what document type is best suited and return the appropriate
document type.
User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter.
:param path: file path in the local filesystem to a document.
:param file_: a file (like) object to a document (must be opened in 'rb' mode')
:param data: the binary data of a document
:param url: the URL of a document
:param mime_type: mime type if known. One of the known MIME types from :mod:`openxmllib.contenttypes`.
Note that ``mime_tyype`` parameter **must** be provided if you provide the
Open XML document through the ``data`` parameter. Otherwise, if you don't
provide one, we'll try to guess which is the most appropriate using the file
extension.
:return: A subclass of :class:`openxmllib.document.Document`. | Below is the the instruction that describes the task:
### Input:
**Factory function**
Will guess what document type is best suited and return the appropriate
document type.
User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter.
:param path: file path in the local filesystem to a document.
:param file_: a file (like) object to a document (must be opened in 'rb' mode')
:param data: the binary data of a document
:param url: the URL of a document
:param mime_type: mime type if known. One of the known MIME types from :mod:`openxmllib.contenttypes`.
Note that ``mime_tyype`` parameter **must** be provided if you provide the
Open XML document through the ``data`` parameter. Otherwise, if you don't
provide one, we'll try to guess which is the most appropriate using the file
extension.
:return: A subclass of :class:`openxmllib.document.Document`.
### Response:
def openXmlDocument(path=None, file_=None, data=None, url=None, mime_type=None):
"""**Factory function**
Will guess what document type is best suited and return the appropriate
document type.
User must provide either ``path``, ``file_``, ``data`` or ``url`` parameter.
:param path: file path in the local filesystem to a document.
:param file_: a file (like) object to a document (must be opened in 'rb' mode')
:param data: the binary data of a document
:param url: the URL of a document
:param mime_type: mime type if known. One of the known MIME types from :mod:`openxmllib.contenttypes`.
Note that ``mime_tyype`` parameter **must** be provided if you provide the
Open XML document through the ``data`` parameter. Otherwise, if you don't
provide one, we'll try to guess which is the most appropriate using the file
extension.
:return: A subclass of :class:`openxmllib.document.Document`.
"""
if path is not None:
file_ = open(path, 'rb')
elif file_ is not None:
assert hasattr(file_, 'read')
elif url is not None:
file_ = urllib2.urlopen(url)
if mime_type is None:
mime_type = file_.headers.gettype()
elif data is not None:
file_ = cStringIO.StringIO(data)
assert mime_type is not None
else:
raise ValueError("Either path, file_, data, or url should be provided")
# Mime type based document
if mime_type is not None:
for class_ in _document_classes:
if class_.canProcessMime(mime_type):
return class_(file_, mime_type=mime_type)
raise ValueError("%s MIME type is unknown." % mime_type)
else:
assert hasattr(file_, 'name')
for class_ in _document_classes:
if class_.canProcessFilename(file_.name):
return class_(file_, mime_type=mime_type)
raise ValueError("Can't guess mime_type. You should set the mime_type param")
return |
def get_input_shape(self):
"""
Return a list of shape tuples if there are multiple inputs.
Return one shape tuple otherwise.
"""
input = callBigDlFunc(self.bigdl_type, "getInputShape",
self.value)
return self.__process_shape(input) | Return a list of shape tuples if there are multiple inputs.
Return one shape tuple otherwise. | Below is the the instruction that describes the task:
### Input:
Return a list of shape tuples if there are multiple inputs.
Return one shape tuple otherwise.
### Response:
def get_input_shape(self):
"""
Return a list of shape tuples if there are multiple inputs.
Return one shape tuple otherwise.
"""
input = callBigDlFunc(self.bigdl_type, "getInputShape",
self.value)
return self.__process_shape(input) |
def write_end_of_directory(fp, dir_size, dir_offset, count):
"""
Write zip file end of directory header at the current file position
:param fp: the file point to which to write the header
:param dir_size: the total size of the directory
:param dir_offset: the start of the first directory header
:param count: the count of files
"""
fp.write(struct.pack('I', 0x06054b50)) # central directory header
fp.write(struct.pack('H', 0)) # disk number
fp.write(struct.pack('H', 0)) # disk number
fp.write(struct.pack('H', count)) # number of files
fp.write(struct.pack('H', count)) # number of files
fp.write(struct.pack('I', dir_size)) # central directory size
fp.write(struct.pack('I', dir_offset)) # central directory offset
fp.write(struct.pack('H', 0)) | Write zip file end of directory header at the current file position
:param fp: the file point to which to write the header
:param dir_size: the total size of the directory
:param dir_offset: the start of the first directory header
:param count: the count of files | Below is the the instruction that describes the task:
### Input:
Write zip file end of directory header at the current file position
:param fp: the file point to which to write the header
:param dir_size: the total size of the directory
:param dir_offset: the start of the first directory header
:param count: the count of files
### Response:
def write_end_of_directory(fp, dir_size, dir_offset, count):
"""
Write zip file end of directory header at the current file position
:param fp: the file point to which to write the header
:param dir_size: the total size of the directory
:param dir_offset: the start of the first directory header
:param count: the count of files
"""
fp.write(struct.pack('I', 0x06054b50)) # central directory header
fp.write(struct.pack('H', 0)) # disk number
fp.write(struct.pack('H', 0)) # disk number
fp.write(struct.pack('H', count)) # number of files
fp.write(struct.pack('H', count)) # number of files
fp.write(struct.pack('I', dir_size)) # central directory size
fp.write(struct.pack('I', dir_offset)) # central directory offset
fp.write(struct.pack('H', 0)) |
def all_sharded_cluster_links(cluster_id, shard_id=None,
router_id=None, rel_to=None):
"""Get a list of all links to be included with ShardedClusters."""
return [
sharded_cluster_link(rel, cluster_id, shard_id, router_id,
self_rel=(rel == rel_to))
for rel in (
'get-sharded-clusters', 'get-sharded-cluster-info',
'sharded-cluster-command', 'delete-sharded-cluster',
'add-shard', 'get-shards', 'get-configsvrs',
'get-routers', 'add-router'
)
] | Get a list of all links to be included with ShardedClusters. | Below is the the instruction that describes the task:
### Input:
Get a list of all links to be included with ShardedClusters.
### Response:
def all_sharded_cluster_links(cluster_id, shard_id=None,
router_id=None, rel_to=None):
"""Get a list of all links to be included with ShardedClusters."""
return [
sharded_cluster_link(rel, cluster_id, shard_id, router_id,
self_rel=(rel == rel_to))
for rel in (
'get-sharded-clusters', 'get-sharded-cluster-info',
'sharded-cluster-command', 'delete-sharded-cluster',
'add-shard', 'get-shards', 'get-configsvrs',
'get-routers', 'add-router'
)
] |
def on_open(self, callback, timeout):
"""
Initialize a new timeout.
:param callback: The callback to execute when the timeout reaches the
end of its life. May be a coroutine function.
:param timeout: The maximum time to wait for, in seconds.
"""
super().on_open()
self.callback = callback
self.timeout = timeout
self.revive_event = asyncio.Event(loop=self.loop) | Initialize a new timeout.
:param callback: The callback to execute when the timeout reaches the
end of its life. May be a coroutine function.
:param timeout: The maximum time to wait for, in seconds. | Below is the the instruction that describes the task:
### Input:
Initialize a new timeout.
:param callback: The callback to execute when the timeout reaches the
end of its life. May be a coroutine function.
:param timeout: The maximum time to wait for, in seconds.
### Response:
def on_open(self, callback, timeout):
"""
Initialize a new timeout.
:param callback: The callback to execute when the timeout reaches the
end of its life. May be a coroutine function.
:param timeout: The maximum time to wait for, in seconds.
"""
super().on_open()
self.callback = callback
self.timeout = timeout
self.revive_event = asyncio.Event(loop=self.loop) |
def check_imts(self, imts):
"""
Make sure the IMTs are recognized by all GSIMs in the logic tree
"""
for trt in self.values:
for gsim in self.values[trt]:
for attr in dir(gsim):
coeffs = getattr(gsim, attr)
if not isinstance(coeffs, CoeffsTable):
continue
for imt in imts:
if imt.startswith('SA'):
try:
coeffs[from_string(imt)]
except KeyError:
raise ValueError(
'%s is out of the period range defined '
'for %s' % (imt, gsim)) | Make sure the IMTs are recognized by all GSIMs in the logic tree | Below is the the instruction that describes the task:
### Input:
Make sure the IMTs are recognized by all GSIMs in the logic tree
### Response:
def check_imts(self, imts):
"""
Make sure the IMTs are recognized by all GSIMs in the logic tree
"""
for trt in self.values:
for gsim in self.values[trt]:
for attr in dir(gsim):
coeffs = getattr(gsim, attr)
if not isinstance(coeffs, CoeffsTable):
continue
for imt in imts:
if imt.startswith('SA'):
try:
coeffs[from_string(imt)]
except KeyError:
raise ValueError(
'%s is out of the period range defined '
'for %s' % (imt, gsim)) |
def assert_requirements(self):
""""Asserts PEP 508 specifiers."""
# Support for 508's implementation_version.
if hasattr(sys, 'implementation'):
implementation_version = format_full_version(sys.implementation.version)
else:
implementation_version = "0"
# Default to cpython for 2.7.
if hasattr(sys, 'implementation'):
implementation_name = sys.implementation.name
else:
implementation_name = 'cpython'
lookup = {
'os_name': os.name,
'sys_platform': sys.platform,
'platform_machine': platform.machine(),
'platform_python_implementation': platform.python_implementation(),
'platform_release': platform.release(),
'platform_system': platform.system(),
'platform_version': platform.version(),
'python_version': platform.python_version()[:3],
'python_full_version': platform.python_version(),
'implementation_name': implementation_name,
'implementation_version': implementation_version
}
# Assert each specified requirement.
for marker, specifier in self.data['_meta']['requires'].items():
if marker in lookup:
try:
assert lookup[marker] == specifier
except AssertionError:
raise AssertionError('Specifier {!r} does not match {!r}.'.format(marker, specifier)) | Asserts PEP 508 specifiers. | Below is the the instruction that describes the task:
### Input:
Asserts PEP 508 specifiers.
### Response:
def assert_requirements(self):
""""Asserts PEP 508 specifiers."""
# Support for 508's implementation_version.
if hasattr(sys, 'implementation'):
implementation_version = format_full_version(sys.implementation.version)
else:
implementation_version = "0"
# Default to cpython for 2.7.
if hasattr(sys, 'implementation'):
implementation_name = sys.implementation.name
else:
implementation_name = 'cpython'
lookup = {
'os_name': os.name,
'sys_platform': sys.platform,
'platform_machine': platform.machine(),
'platform_python_implementation': platform.python_implementation(),
'platform_release': platform.release(),
'platform_system': platform.system(),
'platform_version': platform.version(),
'python_version': platform.python_version()[:3],
'python_full_version': platform.python_version(),
'implementation_name': implementation_name,
'implementation_version': implementation_version
}
# Assert each specified requirement.
for marker, specifier in self.data['_meta']['requires'].items():
if marker in lookup:
try:
assert lookup[marker] == specifier
except AssertionError:
raise AssertionError('Specifier {!r} does not match {!r}.'.format(marker, specifier)) |
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object)) | Return true if the object is any kind of function or method. | Below is the the instruction that describes the task:
### Input:
Return true if the object is any kind of function or method.
### Response:
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object)) |
def OnCellBorderColor(self, event):
"""Cell border color event handler"""
with undo.group(_("Border color")):
self.grid.actions.set_border_attr("bordercolor",
event.color, event.borders)
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
event.Skip() | Cell border color event handler | Below is the the instruction that describes the task:
### Input:
Cell border color event handler
### Response:
def OnCellBorderColor(self, event):
"""Cell border color event handler"""
with undo.group(_("Border color")):
self.grid.actions.set_border_attr("bordercolor",
event.color, event.borders)
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
event.Skip() |
def dumps(self, blob):
"""
Call json.dumps with the attributes of this instance as
arguments.
"""
return json.dumps(
blob, indent=self.indent, sort_keys=True,
separators=self.separators,
) | Call json.dumps with the attributes of this instance as
arguments. | Below is the the instruction that describes the task:
### Input:
Call json.dumps with the attributes of this instance as
arguments.
### Response:
def dumps(self, blob):
"""
Call json.dumps with the attributes of this instance as
arguments.
"""
return json.dumps(
blob, indent=self.indent, sort_keys=True,
separators=self.separators,
) |
def authorize_guest(self, guest_mac, minutes, up_bandwidth=None, down_bandwidth=None, byte_quota=None, ap_mac=None):
"""
Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwith -- up speed allowed in kbps (optional)
down_bandwith -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
"""
cmd = 'authorize-guest'
js = {'mac': guest_mac, 'minutes': minutes}
if up_bandwidth:
js['up'] = up_bandwidth
if down_bandwidth:
js['down'] = down_bandwidth
if byte_quota:
js['bytes'] = byte_quota
if ap_mac and self.version != 'v2':
js['ap_mac'] = ap_mac
return self._run_command(cmd, params=js) | Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwith -- up speed allowed in kbps (optional)
down_bandwith -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional) | Below is the the instruction that describes the task:
### Input:
Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwith -- up speed allowed in kbps (optional)
down_bandwith -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
### Response:
def authorize_guest(self, guest_mac, minutes, up_bandwidth=None, down_bandwidth=None, byte_quota=None, ap_mac=None):
"""
Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwith -- up speed allowed in kbps (optional)
down_bandwith -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
"""
cmd = 'authorize-guest'
js = {'mac': guest_mac, 'minutes': minutes}
if up_bandwidth:
js['up'] = up_bandwidth
if down_bandwidth:
js['down'] = down_bandwidth
if byte_quota:
js['bytes'] = byte_quota
if ap_mac and self.version != 'v2':
js['ap_mac'] = ap_mac
return self._run_command(cmd, params=js) |
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the sample was captured
:returns: Updated SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, ) | Update the SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the sample was captured
:returns: Updated SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance | Below is the the instruction that describes the task:
### Input:
Update the SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the sample was captured
:returns: Updated SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
### Response:
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: The ISO language-country string that specifies the language used for the sample
:param unicode tagged_text: The text example of how end users might express the task
:param unicode source_channel: The communication channel from which the sample was captured
:returns: Updated SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, ) |
def toArray(self):
"""
Return an numpy.ndarray
"""
A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
for k in xrange(self.colPtrs.size - 1):
startptr = self.colPtrs[k]
endptr = self.colPtrs[k + 1]
if self.isTransposed:
A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
else:
A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
return A | Return an numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Return an numpy.ndarray
### Response:
def toArray(self):
"""
Return an numpy.ndarray
"""
A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
for k in xrange(self.colPtrs.size - 1):
startptr = self.colPtrs[k]
endptr = self.colPtrs[k + 1]
if self.isTransposed:
A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
else:
A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
return A |
def append_annotation(self, annotation: str, values: Set[str]) -> 'Seeding':
"""Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep
:returns: self for fluid API
"""
return self._append_seed(SEED_TYPE_ANNOTATION, {
'annotations': {
annotation: values,
}
}) | Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep
:returns: self for fluid API | Below is the the instruction that describes the task:
### Input:
Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep
:returns: self for fluid API
### Response:
def append_annotation(self, annotation: str, values: Set[str]) -> 'Seeding':
"""Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep
:returns: self for fluid API
"""
return self._append_seed(SEED_TYPE_ANNOTATION, {
'annotations': {
annotation: values,
}
}) |
def install_deps(self, virtualenv, skip_cached=True, always_exit=False, exit_if_failed=True,
stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
"""
:type virtualenv: VirtualEnv
"""
config = self.registered_venvs[virtualenv.venv_name]
configed_deps = config['deps']
for dep in configed_deps:
if dep.startswith(FILE_PREFIX):
dep = self._full_relative_path(dep.replace(FILE_PREFIX, ''))
virtualenv.install_requirements_file(dep, skip_cached=skip_cached,
always_exit=always_exit,
exit_if_failed=exit_if_failed, stdin=stdin,
stdout=stdout, stderr=stderr)
else:
virtualenv.install_deps([dep], skip_cached=skip_cached, always_exit=always_exit,
exit_if_failed=exit_if_failed, stdin=stdin,
stdout=stdout, stderr=stderr) | :type virtualenv: VirtualEnv | Below is the the instruction that describes the task:
### Input:
:type virtualenv: VirtualEnv
### Response:
def install_deps(self, virtualenv, skip_cached=True, always_exit=False, exit_if_failed=True,
stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
"""
:type virtualenv: VirtualEnv
"""
config = self.registered_venvs[virtualenv.venv_name]
configed_deps = config['deps']
for dep in configed_deps:
if dep.startswith(FILE_PREFIX):
dep = self._full_relative_path(dep.replace(FILE_PREFIX, ''))
virtualenv.install_requirements_file(dep, skip_cached=skip_cached,
always_exit=always_exit,
exit_if_failed=exit_if_failed, stdin=stdin,
stdout=stdout, stderr=stderr)
else:
virtualenv.install_deps([dep], skip_cached=skip_cached, always_exit=always_exit,
exit_if_failed=exit_if_failed, stdin=stdin,
stdout=stdout, stderr=stderr) |
def samba():
'''Install smb server samba and create a share (common read-write-access).
More infos:
* https://wiki.ubuntuusers.de/Samba%20Server/
'''
username = env.user
install_packages(['samba'])
run(flo('sudo smbpasswd -a {username}'))
path = '$HOME/shared'
sharename = 'shared'
comment = '"smb share; everyone has full access (read/write)"'
acl = flo('Everyone:F,{username}:F guest_ok=y')
with warn_only():
run(flo('mkdir {path}'))
run(flo('sudo net usershare add {sharename} {path} {comment} {acl}'))
run(flo('sudo net usershare info {sharename}')) | Install smb server samba and create a share (common read-write-access).
More infos:
* https://wiki.ubuntuusers.de/Samba%20Server/ | Below is the the instruction that describes the task:
### Input:
Install smb server samba and create a share (common read-write-access).
More infos:
* https://wiki.ubuntuusers.de/Samba%20Server/
### Response:
def samba():
'''Install smb server samba and create a share (common read-write-access).
More infos:
* https://wiki.ubuntuusers.de/Samba%20Server/
'''
username = env.user
install_packages(['samba'])
run(flo('sudo smbpasswd -a {username}'))
path = '$HOME/shared'
sharename = 'shared'
comment = '"smb share; everyone has full access (read/write)"'
acl = flo('Everyone:F,{username}:F guest_ok=y')
with warn_only():
run(flo('mkdir {path}'))
run(flo('sudo net usershare add {sharename} {path} {comment} {acl}'))
run(flo('sudo net usershare info {sharename}')) |
def _get_forums_for_user(self, user, perm_codenames, use_tree_hierarchy=False):
""" Returns all the forums that satisfy the given list of permission codenames.
User and group forum permissions are used.
If the ``use_tree_hierarchy`` keyword argument is set the granted forums will be filtered so
that a forum which has an ancestor which is not in the granted forums set will not be
returned.
"""
granted_forums_cache_key = '{}__{}'.format(
':'.join(perm_codenames), user.id if not user.is_anonymous else 'anonymous',
)
if granted_forums_cache_key in self._granted_forums_cache:
return self._granted_forums_cache[granted_forums_cache_key]
forums = self._get_all_forums()
# First check if the user is a superuser and if so, returns the forum queryset immediately.
if user.is_superuser: # pragma: no cover
forum_objects = forums
else:
# Generates the appropriate queryset filter in order to handle both authenticated users
# and anonymous users.
user_kwargs_filter = {'anonymous_user': True} if user.is_anonymous else {'user': user}
# Get all the user permissions for the considered user.
user_perms = (
UserForumPermission.objects
.filter(**user_kwargs_filter)
.filter(permission__codename__in=perm_codenames)
)
# The first thing to do is to compute three lists of permissions: one containing only
# globally granted permissions, one containing granted permissions (these permissions
# are associated with specific forums) and one containing non granted permissions (the
# latest are also associated with specific forums).
globally_granted_user_perms = list(
filter(lambda p: p.has_perm and p.forum_id is None, user_perms)
)
per_forum_granted_user_perms = list(
filter(lambda p: p.has_perm and p.forum_id is not None, user_perms)
)
per_forum_nongranted_user_perms = list(
filter(lambda p: not p.has_perm and p.forum_id is not None, user_perms)
)
# Using the previous lists we are able to compute a list of forums ids for which
# permissions are explicitly not granted. It should be noted that any permission that is
# explicitely set for a user will not be considered as non granted if a "non granted"
# permission also exists. The explicitly granted permissions always win precedence.
granted_user_forum_ids = [p.forum_id for p in per_forum_granted_user_perms]
nongranted_forum_ids = [
p.forum_id for p in per_forum_nongranted_user_perms
if p.forum_id not in granted_user_forum_ids
]
required_perm_codenames_count = len(perm_codenames)
initial_forum_ids = [f.id for f in forums]
# Now we build a dictionary allowing to associate each forum ID of the initial queryset
# with a set of permissions that are granted for the considered forum.
granted_permissions_per_forum = collections.defaultdict(set)
for perm in per_forum_granted_user_perms:
granted_permissions_per_forum[perm.forum_id].add(perm.permission_id)
for forum_id in initial_forum_ids:
granted_permissions_per_forum[forum_id].update(
[perm.permission_id for perm in globally_granted_user_perms]
)
if not user.is_anonymous:
user_model = get_user_model()
# Get all the group permissions for the considered user.
group_perms = (
GroupForumPermission.objects
.filter(
**{
'group__{0}'.format(user_model.groups.field.related_query_name()): user
}
)
.filter(permission__codename__in=perm_codenames)
)
# Again, we compute three lists of permissions. But this time we are considering
# group permissions. The first list contains only globally granted permissions. The
# second one contains only granted permissions that are associated with specific
# forums. The third list contains non granted permissions.
globally_granted_group_perms = list(
filter(lambda p: p.has_perm and p.forum_id is None, group_perms)
)
per_forum_granted_group_perms = list(
filter(lambda p: p.has_perm and p.forum_id is not None, group_perms)
)
per_forum_nongranted_group_perms = list(
filter(lambda p: not p.has_perm and p.forum_id is not None, group_perms)
)
# Now we can update the list of forums ids for which permissions are explicitly not
# granted.
granted_group_forum_ids = [p.forum_id for p in per_forum_granted_group_perms]
nongranted_forum_ids += [
p.forum_id for p in per_forum_nongranted_group_perms
if p.forum_id not in granted_group_forum_ids
]
# Now we will update our previous dictionary that associated each forum ID with a
# set of granted permissions (at the user level). We will update it with the new
# permissions we've computed for the user's groups.
for perm in per_forum_granted_group_perms:
granted_permissions_per_forum[perm.forum_id].add(perm.permission_id)
for forum_id in initial_forum_ids:
granted_permissions_per_forum[forum_id].update(
[perm.permission_id for perm in globally_granted_group_perms]
)
# We keep only the forum IDs for which the length of the set containing the granted
# permissions is equal to the number of initial permission codenames. The other forums
# have not all the required granted permissions, so we just throw them away.
for forum_id in list(granted_permissions_per_forum):
if len(granted_permissions_per_forum[forum_id]) < required_perm_codenames_count:
del granted_permissions_per_forum[forum_id]
# Alright! It is now possible to filter the initial queryset using the forums associated
# with the granted permissions and the list of forums for which permissions are
# explicitly not granted.
forum_objects = [
f for f in forums
if f.id in granted_permissions_per_forum and f.id not in nongranted_forum_ids
]
if (
not user.is_anonymous and
set(perm_codenames).issubset(
set(machina_settings.DEFAULT_AUTHENTICATED_USER_FORUM_PERMISSIONS)
)
):
forum_objects += [
f for f in forums if f.id not in nongranted_forum_ids and f not in forum_objects
]
if use_tree_hierarchy:
forum_objects = self._filter_granted_forums_using_tree(forum_objects)
self._granted_forums_cache[granted_forums_cache_key] = forum_objects
return forum_objects | Returns all the forums that satisfy the given list of permission codenames.
User and group forum permissions are used.
If the ``use_tree_hierarchy`` keyword argument is set the granted forums will be filtered so
that a forum which has an ancestor which is not in the granted forums set will not be
returned. | Below is the the instruction that describes the task:
### Input:
Returns all the forums that satisfy the given list of permission codenames.
User and group forum permissions are used.
If the ``use_tree_hierarchy`` keyword argument is set the granted forums will be filtered so
that a forum which has an ancestor which is not in the granted forums set will not be
returned.
### Response:
def _get_forums_for_user(self, user, perm_codenames, use_tree_hierarchy=False):
""" Returns all the forums that satisfy the given list of permission codenames.
User and group forum permissions are used.
If the ``use_tree_hierarchy`` keyword argument is set the granted forums will be filtered so
that a forum which has an ancestor which is not in the granted forums set will not be
returned.
"""
granted_forums_cache_key = '{}__{}'.format(
':'.join(perm_codenames), user.id if not user.is_anonymous else 'anonymous',
)
if granted_forums_cache_key in self._granted_forums_cache:
return self._granted_forums_cache[granted_forums_cache_key]
forums = self._get_all_forums()
# First check if the user is a superuser and if so, returns the forum queryset immediately.
if user.is_superuser: # pragma: no cover
forum_objects = forums
else:
# Generates the appropriate queryset filter in order to handle both authenticated users
# and anonymous users.
user_kwargs_filter = {'anonymous_user': True} if user.is_anonymous else {'user': user}
# Get all the user permissions for the considered user.
user_perms = (
UserForumPermission.objects
.filter(**user_kwargs_filter)
.filter(permission__codename__in=perm_codenames)
)
# The first thing to do is to compute three lists of permissions: one containing only
# globally granted permissions, one containing granted permissions (these permissions
# are associated with specific forums) and one containing non granted permissions (the
# latest are also associated with specific forums).
globally_granted_user_perms = list(
filter(lambda p: p.has_perm and p.forum_id is None, user_perms)
)
per_forum_granted_user_perms = list(
filter(lambda p: p.has_perm and p.forum_id is not None, user_perms)
)
per_forum_nongranted_user_perms = list(
filter(lambda p: not p.has_perm and p.forum_id is not None, user_perms)
)
# Using the previous lists we are able to compute a list of forums ids for which
# permissions are explicitly not granted. It should be noted that any permission that is
# explicitely set for a user will not be considered as non granted if a "non granted"
# permission also exists. The explicitly granted permissions always win precedence.
granted_user_forum_ids = [p.forum_id for p in per_forum_granted_user_perms]
nongranted_forum_ids = [
p.forum_id for p in per_forum_nongranted_user_perms
if p.forum_id not in granted_user_forum_ids
]
required_perm_codenames_count = len(perm_codenames)
initial_forum_ids = [f.id for f in forums]
# Now we build a dictionary allowing to associate each forum ID of the initial queryset
# with a set of permissions that are granted for the considered forum.
granted_permissions_per_forum = collections.defaultdict(set)
for perm in per_forum_granted_user_perms:
granted_permissions_per_forum[perm.forum_id].add(perm.permission_id)
for forum_id in initial_forum_ids:
granted_permissions_per_forum[forum_id].update(
[perm.permission_id for perm in globally_granted_user_perms]
)
if not user.is_anonymous:
user_model = get_user_model()
# Get all the group permissions for the considered user.
group_perms = (
GroupForumPermission.objects
.filter(
**{
'group__{0}'.format(user_model.groups.field.related_query_name()): user
}
)
.filter(permission__codename__in=perm_codenames)
)
# Again, we compute three lists of permissions. But this time we are considering
# group permissions. The first list contains only globally granted permissions. The
# second one contains only granted permissions that are associated with specific
# forums. The third list contains non granted permissions.
globally_granted_group_perms = list(
filter(lambda p: p.has_perm and p.forum_id is None, group_perms)
)
per_forum_granted_group_perms = list(
filter(lambda p: p.has_perm and p.forum_id is not None, group_perms)
)
per_forum_nongranted_group_perms = list(
filter(lambda p: not p.has_perm and p.forum_id is not None, group_perms)
)
# Now we can update the list of forums ids for which permissions are explicitly not
# granted.
granted_group_forum_ids = [p.forum_id for p in per_forum_granted_group_perms]
nongranted_forum_ids += [
p.forum_id for p in per_forum_nongranted_group_perms
if p.forum_id not in granted_group_forum_ids
]
# Now we will update our previous dictionary that associated each forum ID with a
# set of granted permissions (at the user level). We will update it with the new
# permissions we've computed for the user's groups.
for perm in per_forum_granted_group_perms:
granted_permissions_per_forum[perm.forum_id].add(perm.permission_id)
for forum_id in initial_forum_ids:
granted_permissions_per_forum[forum_id].update(
[perm.permission_id for perm in globally_granted_group_perms]
)
# We keep only the forum IDs for which the length of the set containing the granted
# permissions is equal to the number of initial permission codenames. The other forums
# have not all the required granted permissions, so we just throw them away.
for forum_id in list(granted_permissions_per_forum):
if len(granted_permissions_per_forum[forum_id]) < required_perm_codenames_count:
del granted_permissions_per_forum[forum_id]
# Alright! It is now possible to filter the initial queryset using the forums associated
# with the granted permissions and the list of forums for which permissions are
# explicitly not granted.
forum_objects = [
f for f in forums
if f.id in granted_permissions_per_forum and f.id not in nongranted_forum_ids
]
if (
not user.is_anonymous and
set(perm_codenames).issubset(
set(machina_settings.DEFAULT_AUTHENTICATED_USER_FORUM_PERMISSIONS)
)
):
forum_objects += [
f for f in forums if f.id not in nongranted_forum_ids and f not in forum_objects
]
if use_tree_hierarchy:
forum_objects = self._filter_granted_forums_using_tree(forum_objects)
self._granted_forums_cache[granted_forums_cache_key] = forum_objects
return forum_objects |
def uninstall(pkgs=None,
requirements=None,
bin_env=None,
log=None,
proxy=None,
timeout=None,
user=None,
cwd=None,
saltenv='base',
use_vt=False):
'''
Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to install
requirements
Path to requirements file
bin_env
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
cwd
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
CLI Example:
.. code-block:: bash
salt '*' pip.uninstall <package name>,<package2 name>
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
'''
cmd = _get_pip_bin(bin_env)
cmd.extend(['uninstall', '-y'])
cleanup_requirements, error = _process_requirements(
requirements=requirements, cmd=cmd, saltenv=saltenv, user=user,
cwd=cwd
)
if error:
return error
if log:
try:
# TODO make this check if writeable
os.path.exists(log)
except IOError:
raise IOError('\'{0}\' is not writeable'.format(log))
cmd.extend(['--log', log])
if proxy:
cmd.extend(['--proxy', proxy])
if timeout:
try:
if isinstance(timeout, float):
# Catch floating point input, exception will be caught in
# exception class below.
raise ValueError('Timeout cannot be a float')
int(timeout)
except ValueError:
raise ValueError(
'\'{0}\' is not a valid timeout, must be an integer'
.format(timeout)
)
cmd.extend(['--timeout', timeout])
if pkgs:
if isinstance(pkgs, six.string_types):
pkgs = [p.strip() for p in pkgs.split(',')]
if requirements:
for requirement in requirements:
with salt.utils.files.fopen(requirement) as rq_:
for req in rq_:
req = salt.utils.stringutils.to_unicode(req)
try:
req_pkg, _ = req.split('==')
if req_pkg in pkgs:
pkgs.remove(req_pkg)
except ValueError:
pass
cmd.extend(pkgs)
cmd_kwargs = dict(python_shell=False, runas=user,
cwd=cwd, saltenv=saltenv, use_vt=use_vt)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
try:
return __salt__['cmd.run_all'](cmd, **cmd_kwargs)
finally:
_clear_context(bin_env)
for requirement in cleanup_requirements:
if requirement:
try:
os.remove(requirement)
except OSError:
pass | Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to install
requirements
Path to requirements file
bin_env
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
cwd
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
CLI Example:
.. code-block:: bash
salt '*' pip.uninstall <package name>,<package2 name>
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin | Below is the the instruction that describes the task:
### Input:
Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to install
requirements
Path to requirements file
bin_env
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
cwd
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
CLI Example:
.. code-block:: bash
salt '*' pip.uninstall <package name>,<package2 name>
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
### Response:
def uninstall(pkgs=None,
requirements=None,
bin_env=None,
log=None,
proxy=None,
timeout=None,
user=None,
cwd=None,
saltenv='base',
use_vt=False):
'''
Uninstall packages individually or from a pip requirements file
pkgs
comma separated list of packages to install
requirements
Path to requirements file
bin_env
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the format ``user:passwd@proxy.server:port``. Note
that the ``user:password@`` is optional and required only if you are
behind an authenticated proxy. If you provide
``user@proxy.server:port`` then you will be prompted for a password.
timeout
Set the socket timeout (default 15 seconds)
user
The user under which to run pip
cwd
Directory from which to run pip
use_vt
Use VT terminal emulation (see output while installing)
CLI Example:
.. code-block:: bash
salt '*' pip.uninstall <package name>,<package2 name>
salt '*' pip.uninstall requirements=/path/to/requirements.txt
salt '*' pip.uninstall <package name> bin_env=/path/to/virtualenv
salt '*' pip.uninstall <package name> bin_env=/path/to/pip_bin
'''
cmd = _get_pip_bin(bin_env)
cmd.extend(['uninstall', '-y'])
cleanup_requirements, error = _process_requirements(
requirements=requirements, cmd=cmd, saltenv=saltenv, user=user,
cwd=cwd
)
if error:
return error
if log:
try:
# TODO make this check if writeable
os.path.exists(log)
except IOError:
raise IOError('\'{0}\' is not writeable'.format(log))
cmd.extend(['--log', log])
if proxy:
cmd.extend(['--proxy', proxy])
if timeout:
try:
if isinstance(timeout, float):
# Catch floating point input, exception will be caught in
# exception class below.
raise ValueError('Timeout cannot be a float')
int(timeout)
except ValueError:
raise ValueError(
'\'{0}\' is not a valid timeout, must be an integer'
.format(timeout)
)
cmd.extend(['--timeout', timeout])
if pkgs:
if isinstance(pkgs, six.string_types):
pkgs = [p.strip() for p in pkgs.split(',')]
if requirements:
for requirement in requirements:
with salt.utils.files.fopen(requirement) as rq_:
for req in rq_:
req = salt.utils.stringutils.to_unicode(req)
try:
req_pkg, _ = req.split('==')
if req_pkg in pkgs:
pkgs.remove(req_pkg)
except ValueError:
pass
cmd.extend(pkgs)
cmd_kwargs = dict(python_shell=False, runas=user,
cwd=cwd, saltenv=saltenv, use_vt=use_vt)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
try:
return __salt__['cmd.run_all'](cmd, **cmd_kwargs)
finally:
_clear_context(bin_env)
for requirement in cleanup_requirements:
if requirement:
try:
os.remove(requirement)
except OSError:
pass |
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj | When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly. | Below is the the instruction that describes the task:
### Input:
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly.
### Response:
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj |
def list(cls, vrf=None):
""" List VRFs.
Maps to the function :py:func:`nipap.backend.Nipap.list_vrf` in the
backend. Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if vrf is None:
vrf = {}
xmlrpc = XMLRPCConnection()
try:
vrf_list = xmlrpc.connection.list_vrf(
{
'vrf': vrf,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for v in vrf_list:
res.append(VRF.from_dict(v))
return res | List VRFs.
Maps to the function :py:func:`nipap.backend.Nipap.list_vrf` in the
backend. Please see the documentation for the backend function for
information regarding input arguments and return values. | Below is the the instruction that describes the task:
### Input:
List VRFs.
Maps to the function :py:func:`nipap.backend.Nipap.list_vrf` in the
backend. Please see the documentation for the backend function for
information regarding input arguments and return values.
### Response:
def list(cls, vrf=None):
""" List VRFs.
Maps to the function :py:func:`nipap.backend.Nipap.list_vrf` in the
backend. Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if vrf is None:
vrf = {}
xmlrpc = XMLRPCConnection()
try:
vrf_list = xmlrpc.connection.list_vrf(
{
'vrf': vrf,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for v in vrf_list:
res.append(VRF.from_dict(v))
return res |
def _equally_weight_samples(samples, weights):
""" Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))`
"""
if len(weights) != len(samples):
raise ValueError("len(weights) = %i != len(samples) = %i" %
(len(weights), len(samples)))
if numpy.logical_or(weights < 0, weights > 1).any():
raise ValueError("weights must have probability between 0 and 1")
weights = numpy.array(weights)
samples = numpy.array(samples)
state = numpy.random.get_state()
numpy.random.seed(1)
n = len(weights)
choices = numpy.random.rand(n) < weights
new_samples = samples[choices]
numpy.random.set_state(state)
return new_samples.copy() | Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))` | Below is the the instruction that describes the task:
### Input:
Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))`
### Response:
def _equally_weight_samples(samples, weights):
""" Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))`
"""
if len(weights) != len(samples):
raise ValueError("len(weights) = %i != len(samples) = %i" %
(len(weights), len(samples)))
if numpy.logical_or(weights < 0, weights > 1).any():
raise ValueError("weights must have probability between 0 and 1")
weights = numpy.array(weights)
samples = numpy.array(samples)
state = numpy.random.get_state()
numpy.random.seed(1)
n = len(weights)
choices = numpy.random.rand(n) < weights
new_samples = samples[choices]
numpy.random.set_state(state)
return new_samples.copy() |
def request_pdu(self):
""" Build request PDU to write single coil.
:return: Byte array of 5 bytes with PDU.
"""
if None in [self.address, self.value]:
# TODO Raise proper exception.
raise Exception
return struct.pack('>BHH', self.function_code, self.address,
self._value) | Build request PDU to write single coil.
:return: Byte array of 5 bytes with PDU. | Below is the the instruction that describes the task:
### Input:
Build request PDU to write single coil.
:return: Byte array of 5 bytes with PDU.
### Response:
def request_pdu(self):
""" Build request PDU to write single coil.
:return: Byte array of 5 bytes with PDU.
"""
if None in [self.address, self.value]:
# TODO Raise proper exception.
raise Exception
return struct.pack('>BHH', self.function_code, self.address,
self._value) |
def get_alternative_full_names(cls, entry):
"""
get list of models.AlternativeFullName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeFullName` objects
"""
names = []
query = "./protein/alternativeName/fullName"
for name in entry.iterfind(query):
names.append(models.AlternativeFullName(name=name.text))
return names | get list of models.AlternativeFullName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeFullName` objects | Below is the the instruction that describes the task:
### Input:
get list of models.AlternativeFullName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeFullName` objects
### Response:
def get_alternative_full_names(cls, entry):
"""
get list of models.AlternativeFullName objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeFullName` objects
"""
names = []
query = "./protein/alternativeName/fullName"
for name in entry.iterfind(query):
names.append(models.AlternativeFullName(name=name.text))
return names |
def create(state, host, ctid, template=None):
'''
Create OpenVZ containers.
+ ctid: CTID of the container to create
'''
# Check we don't already have a container with this CTID
current_containers = host.fact.openvz_containers
if ctid in current_containers:
raise OperationError(
'An OpenVZ container with CTID {0} already exists'.format(ctid),
)
args = ['{0}'.format(ctid)]
if template:
args.append('--ostemplate {0}'.format(template))
yield 'vzctl create {0}'.format(' '.join(args)) | Create OpenVZ containers.
+ ctid: CTID of the container to create | Below is the the instruction that describes the task:
### Input:
Create OpenVZ containers.
+ ctid: CTID of the container to create
### Response:
def create(state, host, ctid, template=None):
'''
Create OpenVZ containers.
+ ctid: CTID of the container to create
'''
# Check we don't already have a container with this CTID
current_containers = host.fact.openvz_containers
if ctid in current_containers:
raise OperationError(
'An OpenVZ container with CTID {0} already exists'.format(ctid),
)
args = ['{0}'.format(ctid)]
if template:
args.append('--ostemplate {0}'.format(template))
yield 'vzctl create {0}'.format(' '.join(args)) |
def unembed_sampleset(target_sampleset, embedding, source_bqm,
chain_break_method=None, chain_break_fraction=False):
"""Unembed the samples set.
Construct a sample set for the source binary quadratic model (BQM) by
unembedding the given samples from the target BQM.
Args:
target_sampleset (:obj:`dimod.SampleSet`):
SampleSet from the target BQM.
embedding (dict):
Mapping from source graph to target graph as a dict of form
{s: {t, ...}, ...}, where s is a source variable and t is a target
variable.
source_bqm (:obj:`dimod.BinaryQuadraticModel`):
Source binary quadratic model.
chain_break_method (function, optional):
Method used to resolve chain breaks.
See :mod:`dwave.embedding.chain_breaks`.
chain_break_fraction (bool, optional, default=False):
If True, a 'chain_break_fraction' field is added to the unembedded
samples which report what fraction of the chains were broken before
unembedding.
Returns:
:obj:`.SampleSet`:
Examples:
>>> import dimod
...
>>> # say we have a bqm on a triangle and an embedding
>>> J = {('a', 'b'): -1, ('b', 'c'): -1, ('a', 'c'): -1}
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, J)
>>> embedding = {'a': [0, 1], 'b': [2], 'c': [3]}
...
>>> # and some samples from the embedding
>>> samples = [{0: -1, 1: -1, 2: -1, 3: -1}, # [0, 1] is unbroken
{0: -1, 1: +1, 2: +1, 3: +1}] # [0, 1] is broken
>>> energies = [-3, 1]
>>> embedded = dimod.SampleSet.from_samples(samples, dimod.SPIN, energies)
...
>>> # unembed
>>> samples = dwave.embedding.unembed_sampleset(embedded, embedding, bqm)
>>> samples.record.sample # doctest: +SKIP
array([[-1, -1, -1],
[ 1, 1, 1]], dtype=int8)
"""
if chain_break_method is None:
chain_break_method = majority_vote
variables = list(source_bqm)
try:
chains = [embedding[v] for v in variables]
except KeyError:
raise ValueError("given bqm does not match the embedding")
chain_idxs = [[target_sampleset.variables.index[v] for v in chain] for chain in chains]
record = target_sampleset.record
unembedded, idxs = chain_break_method(record.sample, chain_idxs)
# dev note: this is a bug in dimod that empty unembedded is not handled,
# in the future this try-except can be removed
try:
energies = source_bqm.energies((unembedded, variables))
except ValueError:
datatypes = [('sample', np.dtype(np.int8), (len(variables),)), ('energy', np.float)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names
if name not in {'sample', 'energy'})
if chain_break_fraction:
datatypes.append(('chain_break_fraction', np.float64))
# there are no samples so everything is empty
data = np.rec.array(np.empty(0, dtype=datatypes))
return dimod.SampleSet(data, variables, target_sampleset.info.copy(), target_sampleset.vartype)
reserved = {'sample', 'energy'}
vectors = {name: record[name][idxs]
for name in record.dtype.names if name not in reserved}
if chain_break_fraction:
vectors['chain_break_fraction'] = broken_chains(record.sample, chain_idxs).mean(axis=1)[idxs]
return dimod.SampleSet.from_samples((unembedded, variables),
target_sampleset.vartype,
energy=energies,
info=target_sampleset.info.copy(),
**vectors) | Unembed the samples set.
Construct a sample set for the source binary quadratic model (BQM) by
unembedding the given samples from the target BQM.
Args:
target_sampleset (:obj:`dimod.SampleSet`):
SampleSet from the target BQM.
embedding (dict):
Mapping from source graph to target graph as a dict of form
{s: {t, ...}, ...}, where s is a source variable and t is a target
variable.
source_bqm (:obj:`dimod.BinaryQuadraticModel`):
Source binary quadratic model.
chain_break_method (function, optional):
Method used to resolve chain breaks.
See :mod:`dwave.embedding.chain_breaks`.
chain_break_fraction (bool, optional, default=False):
If True, a 'chain_break_fraction' field is added to the unembedded
samples which report what fraction of the chains were broken before
unembedding.
Returns:
:obj:`.SampleSet`:
Examples:
>>> import dimod
...
>>> # say we have a bqm on a triangle and an embedding
>>> J = {('a', 'b'): -1, ('b', 'c'): -1, ('a', 'c'): -1}
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, J)
>>> embedding = {'a': [0, 1], 'b': [2], 'c': [3]}
...
>>> # and some samples from the embedding
>>> samples = [{0: -1, 1: -1, 2: -1, 3: -1}, # [0, 1] is unbroken
{0: -1, 1: +1, 2: +1, 3: +1}] # [0, 1] is broken
>>> energies = [-3, 1]
>>> embedded = dimod.SampleSet.from_samples(samples, dimod.SPIN, energies)
...
>>> # unembed
>>> samples = dwave.embedding.unembed_sampleset(embedded, embedding, bqm)
>>> samples.record.sample # doctest: +SKIP
array([[-1, -1, -1],
[ 1, 1, 1]], dtype=int8) | Below is the the instruction that describes the task:
### Input:
Unembed the samples set.
Construct a sample set for the source binary quadratic model (BQM) by
unembedding the given samples from the target BQM.
Args:
target_sampleset (:obj:`dimod.SampleSet`):
SampleSet from the target BQM.
embedding (dict):
Mapping from source graph to target graph as a dict of form
{s: {t, ...}, ...}, where s is a source variable and t is a target
variable.
source_bqm (:obj:`dimod.BinaryQuadraticModel`):
Source binary quadratic model.
chain_break_method (function, optional):
Method used to resolve chain breaks.
See :mod:`dwave.embedding.chain_breaks`.
chain_break_fraction (bool, optional, default=False):
If True, a 'chain_break_fraction' field is added to the unembedded
samples which report what fraction of the chains were broken before
unembedding.
Returns:
:obj:`.SampleSet`:
Examples:
>>> import dimod
...
>>> # say we have a bqm on a triangle and an embedding
>>> J = {('a', 'b'): -1, ('b', 'c'): -1, ('a', 'c'): -1}
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, J)
>>> embedding = {'a': [0, 1], 'b': [2], 'c': [3]}
...
>>> # and some samples from the embedding
>>> samples = [{0: -1, 1: -1, 2: -1, 3: -1}, # [0, 1] is unbroken
{0: -1, 1: +1, 2: +1, 3: +1}] # [0, 1] is broken
>>> energies = [-3, 1]
>>> embedded = dimod.SampleSet.from_samples(samples, dimod.SPIN, energies)
...
>>> # unembed
>>> samples = dwave.embedding.unembed_sampleset(embedded, embedding, bqm)
>>> samples.record.sample # doctest: +SKIP
array([[-1, -1, -1],
[ 1, 1, 1]], dtype=int8)
### Response:
def unembed_sampleset(target_sampleset, embedding, source_bqm,
chain_break_method=None, chain_break_fraction=False):
"""Unembed the samples set.
Construct a sample set for the source binary quadratic model (BQM) by
unembedding the given samples from the target BQM.
Args:
target_sampleset (:obj:`dimod.SampleSet`):
SampleSet from the target BQM.
embedding (dict):
Mapping from source graph to target graph as a dict of form
{s: {t, ...}, ...}, where s is a source variable and t is a target
variable.
source_bqm (:obj:`dimod.BinaryQuadraticModel`):
Source binary quadratic model.
chain_break_method (function, optional):
Method used to resolve chain breaks.
See :mod:`dwave.embedding.chain_breaks`.
chain_break_fraction (bool, optional, default=False):
If True, a 'chain_break_fraction' field is added to the unembedded
samples which report what fraction of the chains were broken before
unembedding.
Returns:
:obj:`.SampleSet`:
Examples:
>>> import dimod
...
>>> # say we have a bqm on a triangle and an embedding
>>> J = {('a', 'b'): -1, ('b', 'c'): -1, ('a', 'c'): -1}
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, J)
>>> embedding = {'a': [0, 1], 'b': [2], 'c': [3]}
...
>>> # and some samples from the embedding
>>> samples = [{0: -1, 1: -1, 2: -1, 3: -1}, # [0, 1] is unbroken
{0: -1, 1: +1, 2: +1, 3: +1}] # [0, 1] is broken
>>> energies = [-3, 1]
>>> embedded = dimod.SampleSet.from_samples(samples, dimod.SPIN, energies)
...
>>> # unembed
>>> samples = dwave.embedding.unembed_sampleset(embedded, embedding, bqm)
>>> samples.record.sample # doctest: +SKIP
array([[-1, -1, -1],
[ 1, 1, 1]], dtype=int8)
"""
if chain_break_method is None:
chain_break_method = majority_vote
variables = list(source_bqm)
try:
chains = [embedding[v] for v in variables]
except KeyError:
raise ValueError("given bqm does not match the embedding")
chain_idxs = [[target_sampleset.variables.index[v] for v in chain] for chain in chains]
record = target_sampleset.record
unembedded, idxs = chain_break_method(record.sample, chain_idxs)
# dev note: this is a bug in dimod that empty unembedded is not handled,
# in the future this try-except can be removed
try:
energies = source_bqm.energies((unembedded, variables))
except ValueError:
datatypes = [('sample', np.dtype(np.int8), (len(variables),)), ('energy', np.float)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names
if name not in {'sample', 'energy'})
if chain_break_fraction:
datatypes.append(('chain_break_fraction', np.float64))
# there are no samples so everything is empty
data = np.rec.array(np.empty(0, dtype=datatypes))
return dimod.SampleSet(data, variables, target_sampleset.info.copy(), target_sampleset.vartype)
reserved = {'sample', 'energy'}
vectors = {name: record[name][idxs]
for name in record.dtype.names if name not in reserved}
if chain_break_fraction:
vectors['chain_break_fraction'] = broken_chains(record.sample, chain_idxs).mean(axis=1)[idxs]
return dimod.SampleSet.from_samples((unembedded, variables),
target_sampleset.vartype,
energy=energies,
info=target_sampleset.info.copy(),
**vectors) |
def _get_def_class(self, class_obj, member_name):
"""
Return the class object in MRO order that defines a member.
class_obj: Class object that exposes (but not necessarily defines) the
member. I.e. starting point of the search.
member_name: Name of the member (method or attribute).
Returns:
Class object that defines the member.
"""
member_obj = getattr(class_obj, member_name)
for def_class_obj in inspect.getmro(class_obj):
if member_name in def_class_obj.__dict__:
if def_class_obj.__name__ in self._excluded_classes:
return class_obj # Fall back to input class
return def_class_obj
self._logger.warning(
"%s: Definition class not found for member %s.%s, "
"defaulting to class %s",
self._log_prefix, class_obj.__name__, member_name,
class_obj.__name__)
return class_obj | Return the class object in MRO order that defines a member.
class_obj: Class object that exposes (but not necessarily defines) the
member. I.e. starting point of the search.
member_name: Name of the member (method or attribute).
Returns:
Class object that defines the member. | Below is the the instruction that describes the task:
### Input:
Return the class object in MRO order that defines a member.
class_obj: Class object that exposes (but not necessarily defines) the
member. I.e. starting point of the search.
member_name: Name of the member (method or attribute).
Returns:
Class object that defines the member.
### Response:
def _get_def_class(self, class_obj, member_name):
"""
Return the class object in MRO order that defines a member.
class_obj: Class object that exposes (but not necessarily defines) the
member. I.e. starting point of the search.
member_name: Name of the member (method or attribute).
Returns:
Class object that defines the member.
"""
member_obj = getattr(class_obj, member_name)
for def_class_obj in inspect.getmro(class_obj):
if member_name in def_class_obj.__dict__:
if def_class_obj.__name__ in self._excluded_classes:
return class_obj # Fall back to input class
return def_class_obj
self._logger.warning(
"%s: Definition class not found for member %s.%s, "
"defaulting to class %s",
self._log_prefix, class_obj.__name__, member_name,
class_obj.__name__)
return class_obj |
def get_enterprise_customer_for_running_pipeline(request, pipeline): # pylint: disable=invalid-name
"""
Get the EnterpriseCustomer associated with a running pipeline.
"""
sso_provider_id = request.GET.get('tpa_hint')
if pipeline:
sso_provider_id = Registry.get_from_pipeline(pipeline).provider_id
return get_enterprise_customer_for_sso(sso_provider_id) | Get the EnterpriseCustomer associated with a running pipeline. | Below is the the instruction that describes the task:
### Input:
Get the EnterpriseCustomer associated with a running pipeline.
### Response:
def get_enterprise_customer_for_running_pipeline(request, pipeline): # pylint: disable=invalid-name
"""
Get the EnterpriseCustomer associated with a running pipeline.
"""
sso_provider_id = request.GET.get('tpa_hint')
if pipeline:
sso_provider_id = Registry.get_from_pipeline(pipeline).provider_id
return get_enterprise_customer_for_sso(sso_provider_id) |
def per_from_id_except(s, flavors=chat_flavors+inline_flavors):
"""
:param s:
a list or set of from id
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the from id is *not* in ``s``
and message flavor is in ``flavors``.
"""
return _wrap_none(lambda msg:
msg['from']['id']
if (flavors == 'all' or flavor(msg) in flavors) and msg['from']['id'] not in s
else None) | :param s:
a list or set of from id
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the from id is *not* in ``s``
and message flavor is in ``flavors``. | Below is the the instruction that describes the task:
### Input:
:param s:
a list or set of from id
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the from id is *not* in ``s``
and message flavor is in ``flavors``.
### Response:
def per_from_id_except(s, flavors=chat_flavors+inline_flavors):
"""
:param s:
a list or set of from id
:param flavors:
``all`` or a list of flavors
:return:
a seeder function that returns the from id only if the from id is *not* in ``s``
and message flavor is in ``flavors``.
"""
return _wrap_none(lambda msg:
msg['from']['id']
if (flavors == 'all' or flavor(msg) in flavors) and msg['from']['id'] not in s
else None) |
def get_by_path(path, first=False):
"""
Search for resources using colon-separated path notation.
E.g.::
path = 'deployments:production:servers:haproxy'
haproxies = get_by_path(path)
:param bool first: Always use the first returned match for all intermediate
searches along the path. If this is ``False`` and an intermediate
search returns multiple hits, an exception is raised.
"""
api = get_api()
cur_res = api
parts = path.split(':')
for part in parts:
res = getattr(cur_res, part, None)
if not res:
# probably the name of the res to find
res = find_by_name(cur_res, part)
cur_res = res
index = getattr(cur_res, 'index', None)
if index:
return index()
return cur_res | Search for resources using colon-separated path notation.
E.g.::
path = 'deployments:production:servers:haproxy'
haproxies = get_by_path(path)
:param bool first: Always use the first returned match for all intermediate
searches along the path. If this is ``False`` and an intermediate
search returns multiple hits, an exception is raised. | Below is the the instruction that describes the task:
### Input:
Search for resources using colon-separated path notation.
E.g.::
path = 'deployments:production:servers:haproxy'
haproxies = get_by_path(path)
:param bool first: Always use the first returned match for all intermediate
searches along the path. If this is ``False`` and an intermediate
search returns multiple hits, an exception is raised.
### Response:
def get_by_path(path, first=False):
"""
Search for resources using colon-separated path notation.
E.g.::
path = 'deployments:production:servers:haproxy'
haproxies = get_by_path(path)
:param bool first: Always use the first returned match for all intermediate
searches along the path. If this is ``False`` and an intermediate
search returns multiple hits, an exception is raised.
"""
api = get_api()
cur_res = api
parts = path.split(':')
for part in parts:
res = getattr(cur_res, part, None)
if not res:
# probably the name of the res to find
res = find_by_name(cur_res, part)
cur_res = res
index = getattr(cur_res, 'index', None)
if index:
return index()
return cur_res |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.