code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def load_all_assistants(cls, superassistants):
        """Return a mapping of assistant roles to loaded YamlAssistant hierarchies.

        Tries to use cache (updated/created if needed). If the cache is
        unusable, it falls back to loading all assistants from their files.

        Args:
            superassistants: list of top-level assistant instances; each
                instance's name is the role to load (e.g. 'crt')
        """
        # mapping of assistant roles to lists of top-level assistant instances
        _assistants = {}
        # {'crt': CreatorAssistant, ...}
        superas_dict = dict(map(lambda a: (a.name, a), superassistants))
        to_load = set(superas_dict.keys())
        for tl in to_load:
            # each configured assistants dir may contain a subdir for this role
            dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
            file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
            # load all if we're not using cache or if we fail to load it
            load_all = not settings.USE_CACHE
            if settings.USE_CACHE:
                try:
                    cch = cache.Cache()
                    cch.refresh_role(tl, file_hierarchy)
                    _assistants[tl] = cls.get_assistants_from_cache_hierarchy(cch.cache[tl],
                                                                              superas_dict[tl],
                                                                              role=tl)
                # NOTE(review): BaseException also swallows KeyboardInterrupt /
                # SystemExit; plain Exception would likely suffice -- confirm.
                except BaseException as e:
                    logger.debug('Failed to use DevAssistant cachefile {0}: {1}'.format(
                        settings.CACHE_FILE, e))
                    load_all = True
            if load_all:
                # cache disabled or unusable: parse the assistant files directly
                _assistants[tl] = cls.get_assistants_from_file_hierarchy(file_hierarchy,
                                                                         superas_dict[tl],
                                                                         role=tl)
return _assistants | Fills self._assistants with loaded YamlAssistant instances of requested roles.
Tries to use cache (updated/created if needed). If cache is unusable, it
falls back to loading all assistants.
Args:
        roles: list of required assistant roles | Below is the instruction that describes the task:
### Input:
Fills self._assistants with loaded YamlAssistant instances of requested roles.
Tries to use cache (updated/created if needed). If cache is unusable, it
falls back to loading all assistants.
Args:
roles: list of required assistant roles
### Response:
def load_all_assistants(cls, superassistants):
"""Fills self._assistants with loaded YamlAssistant instances of requested roles.
Tries to use cache (updated/created if needed). If cache is unusable, it
falls back to loading all assistants.
Args:
roles: list of required assistant roles
"""
# mapping of assistant roles to lists of top-level assistant instances
_assistants = {}
# {'crt': CreatorAssistant, ...}
superas_dict = dict(map(lambda a: (a.name, a), superassistants))
to_load = set(superas_dict.keys())
for tl in to_load:
dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
# load all if we're not using cache or if we fail to load it
load_all = not settings.USE_CACHE
if settings.USE_CACHE:
try:
cch = cache.Cache()
cch.refresh_role(tl, file_hierarchy)
_assistants[tl] = cls.get_assistants_from_cache_hierarchy(cch.cache[tl],
superas_dict[tl],
role=tl)
except BaseException as e:
logger.debug('Failed to use DevAssistant cachefile {0}: {1}'.format(
settings.CACHE_FILE, e))
load_all = True
if load_all:
_assistants[tl] = cls.get_assistants_from_file_hierarchy(file_hierarchy,
superas_dict[tl],
role=tl)
return _assistants |
def to_tuples(self, data):
        '''
        path_data : string, from an svg path tag's 'd' attribute, eg:
            'M 46,74 L 35,12 l 53,-13 z'
        returns the same data collected in a list of tuples, eg:
            [ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z',) ],
        The input data may have floats instead of ints, this will be reflected
        in the output. The input may have its whitespace stripped out, or its
        commas replaced by whitespace.
        Raises ParseError on a misplaced comma or a number with no preceding
        command letter.
        '''
        self.data = data
        self.pos = 0
        parsed = []
        command = []
        while self.pos < len(self.data):
            indicator = self.data[self.pos]
            if indicator == ' ':
                self.pos += 1
            elif indicator == ',':
                # commas are only legal between a command's numbers
                if len(command) >= 2:
                    self.pos += 1
                else:
                    msg = 'unexpected comma at %d in %r' % (self.pos, self.data)
                    raise ParseError(msg)
            elif indicator in '0123456789.-':
                if command:
                    command.append(self.get_number())
                else:
                    msg = 'missing command at %d in %r' % (self.pos, self.data)
                    raise ParseError(msg)
            else:
                # a new command letter: flush the previous command, start fresh
                if command:
                    parsed.append(tuple(command))
                command = [indicator]
                self.pos += 1
        if command:
            parsed.append(tuple(command))
        # BUG FIX: guard with `parsed and ...` -- empty or whitespace-only
        # input previously raised IndexError on parsed[0]. A trailing 'L'
        # back to the start point is collapsed into an explicit closepath.
        if parsed and parsed[0][0] == 'M' and parsed[-1][0] == 'L'\
                and parsed[0][1:] == parsed[-1][1:]:
            parsed[-1] = ('z',)
return parsed | path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
    commas replaced by whitespace. | Below is the instruction that describes the task:
### Input:
path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
commas replaced by whitespace.
### Response:
def to_tuples(self, data):
'''
path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
commas replaced by whitespace.
'''
self.data = data
self.pos = 0
parsed = []
command = []
while self.pos < len(self.data):
indicator = self.data[self.pos]
if indicator == ' ':
self.pos += 1
elif indicator == ',':
if len(command) >= 2:
self.pos += 1
else:
msg = 'unexpected comma at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
elif indicator in '0123456789.-':
if command:
command.append(self.get_number())
else:
msg = 'missing command at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
else:
if command:
parsed.append(tuple(command))
command = [indicator]
self.pos += 1
if command:
parsed.append(tuple(command))
if parsed[0][0] == 'M' and parsed[-1][0] == 'L'\
and parsed[0][1:] == parsed[-1][1:]:
parsed[-1] = ('z',)
return parsed |
def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False):
    '''Using "format" attempt to pull values from "string".

    The format must match the string contents exactly. If the value
    you're looking for is instead just a part of the string use
    search().
    If ``evaluate_result`` is True the return value will be a Result instance with two attributes:
     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string
    If ``evaluate_result`` is False the return value will be a Match instance with one method:
     .evaluate_result() - This will return a Result instance like you would get
     with ``evaluate_result`` set to True
    The default behaviour is to match strings case insensitively. You may match with
    case by specifying case_sensitive=True.
    If the format is invalid a ValueError will be raised.
    See the module documentation for the use of "extra_types".
    In the case there is no match parse() will return None.
    '''
    # NOTE: "format" shadows the builtin of the same name, but it is part of
    # this function's public signature and must stay unchanged.
    p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
return p.parse(string, evaluate_result=evaluate_result) | Using "format" attempt to pull values from "string".
The format must match the string contents exactly. If the value
you're looking for is instead just a part of the string use
search().
    If ``evaluate_result`` is True the return value will be a Result instance with two attributes:
.fixed - tuple of fixed-position values from the string
.named - dict of named values from the string
If ``evaluate_result`` is False the return value will be a Match instance with one method:
.evaluate_result() - This will return a Result instance like you would get
with ``evaluate_result`` set to True
The default behaviour is to match strings case insensitively. You may match with
case by specifying case_sensitive=True.
If the format is invalid a ValueError will be raised.
See the module documentation for the use of "extra_types".
    In the case there is no match parse() will return None. | Below is the instruction that describes the task:
### Input:
Using "format" attempt to pull values from "string".
The format must match the string contents exactly. If the value
you're looking for is instead just a part of the string use
search().
    If ``evaluate_result`` is True the return value will be a Result instance with two attributes:
.fixed - tuple of fixed-position values from the string
.named - dict of named values from the string
If ``evaluate_result`` is False the return value will be a Match instance with one method:
.evaluate_result() - This will return a Result instance like you would get
with ``evaluate_result`` set to True
The default behaviour is to match strings case insensitively. You may match with
case by specifying case_sensitive=True.
If the format is invalid a ValueError will be raised.
See the module documentation for the use of "extra_types".
In the case there is no match parse() will return None.
### Response:
def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False):
'''Using "format" attempt to pull values from "string".
The format must match the string contents exactly. If the value
you're looking for is instead just a part of the string use
search().
    If ``evaluate_result`` is True the return value will be a Result instance with two attributes:
.fixed - tuple of fixed-position values from the string
.named - dict of named values from the string
If ``evaluate_result`` is False the return value will be a Match instance with one method:
.evaluate_result() - This will return a Result instance like you would get
with ``evaluate_result`` set to True
The default behaviour is to match strings case insensitively. You may match with
case by specifying case_sensitive=True.
If the format is invalid a ValueError will be raised.
See the module documentation for the use of "extra_types".
In the case there is no match parse() will return None.
'''
p = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
return p.parse(string, evaluate_result=evaluate_result) |
def members(self):
        """Return the list of :class:`Member` objects currently connected to this voice channel."""
        ret = []
        for user_id, state in self.guild._voice_states.items():
            # skip voice states that belong to some other channel
            if state.channel.id != self.id:
                continue
            member = self.guild.get_member(user_id)
            if member is not None:
                ret.append(member)
        return ret | Returns a list of :class:`Member` that are currently inside this voice channel. | Below is the instruction that describes the task:
### Input:
Returns a list of :class:`Member` that are currently inside this voice channel.
### Response:
def members(self):
"""Returns a list of :class:`Member` that are currently inside this voice channel."""
ret = []
for user_id, state in self.guild._voice_states.items():
if state.channel.id == self.id:
member = self.guild.get_member(user_id)
if member is not None:
ret.append(member)
return ret |
def from_parmed(self, structure, coords_only=False):
        """Extract atoms and bonds from a pmd.Structure.

        Will create sub-compounds for every chain if there is more than one
        and sub-sub-compounds for every residue.

        Parameters
        ----------
        structure : pmd.Structure
            The structure to load.
        coords_only : bool
            Set preexisting atoms in compound to coordinates given by structure.
        """
        if coords_only:
            # Only update positions of the particles we already have;
            # the atom counts must match one-to-one.
            if len(structure.atoms) != self.n_particles:
                raise ValueError(
                    'Number of atoms in {structure} does not match'
                    ' {self}'.format(
                        **locals()))
            atoms_particles = zip(structure.atoms,
                                  self.particles(include_ports=False))
            if None in self._particles(include_ports=False):
                raise ValueError('Some particles are None')
            for parmed_atom, particle in atoms_particles:
                # Convert from A to nm (divide by 10)
                particle.pos = np.array([parmed_atom.xx,
                                         parmed_atom.xy,
                                         parmed_atom.xz]) / 10
            return
        atom_mapping = dict()
        # NOTE(review): chain_id is never reassigned, so every chain compound
        # below is added with a None label -- confirm whether the chain name
        # was intended here.
        chain_id = None
        chains = defaultdict(list)
        # group residues by their chain identifier
        for residue in structure.residues:
            chains[residue.chain].append(residue)
        for chain, residues in chains.items():
            if len(chains) > 1:
                # multiple chains: wrap each chain in its own sub-compound
                chain_compound = Compound()
                self.add(chain_compound, chain_id)
            else:
                chain_compound = self
            for residue in residues:
                for atom in residue.atoms:
                    # Convert from A to nm
                    pos = np.array([atom.xx, atom.xy, atom.xz]) / 10
                    new_atom = Particle(name=str(atom.name), pos=pos)
                    chain_compound.add(
                        new_atom, label='{0}[$]'.format(
                            atom.name))
                    atom_mapping[atom] = new_atom
        # recreate bonds via the source-atom -> new-Particle mapping
        for bond in structure.bonds:
            atom1 = atom_mapping[bond.atom1]
            atom2 = atom_mapping[bond.atom2]
            self.add_bond((atom1, atom2))
        if structure.box is not None:
            # Convert from A to nm
            self.periodicity = 0.1 * structure.box[0:3]
        else:
self.periodicity = np.array([0., 0., 0.]) | Extract atoms and bonds from a pmd.Structure.
Will create sub-compounds for every chain if there is more than one
and sub-sub-compounds for every residue.
Parameters
----------
structure : pmd.Structure
The structure to load.
coords_only : bool
        Set preexisting atoms in compound to coordinates given by structure. | Below is the instruction that describes the task:
### Input:
Extract atoms and bonds from a pmd.Structure.
Will create sub-compounds for every chain if there is more than one
and sub-sub-compounds for every residue.
Parameters
----------
structure : pmd.Structure
The structure to load.
coords_only : bool
Set preexisting atoms in compound to coordinates given by structure.
### Response:
def from_parmed(self, structure, coords_only=False):
"""Extract atoms and bonds from a pmd.Structure.
Will create sub-compounds for every chain if there is more than one
and sub-sub-compounds for every residue.
Parameters
----------
structure : pmd.Structure
The structure to load.
coords_only : bool
Set preexisting atoms in compound to coordinates given by structure.
"""
if coords_only:
if len(structure.atoms) != self.n_particles:
raise ValueError(
'Number of atoms in {structure} does not match'
' {self}'.format(
**locals()))
atoms_particles = zip(structure.atoms,
self.particles(include_ports=False))
if None in self._particles(include_ports=False):
raise ValueError('Some particles are None')
for parmed_atom, particle in atoms_particles:
particle.pos = np.array([parmed_atom.xx,
parmed_atom.xy,
parmed_atom.xz]) / 10
return
atom_mapping = dict()
chain_id = None
chains = defaultdict(list)
for residue in structure.residues:
chains[residue.chain].append(residue)
for chain, residues in chains.items():
if len(chains) > 1:
chain_compound = Compound()
self.add(chain_compound, chain_id)
else:
chain_compound = self
for residue in residues:
for atom in residue.atoms:
pos = np.array([atom.xx, atom.xy, atom.xz]) / 10
new_atom = Particle(name=str(atom.name), pos=pos)
chain_compound.add(
new_atom, label='{0}[$]'.format(
atom.name))
atom_mapping[atom] = new_atom
for bond in structure.bonds:
atom1 = atom_mapping[bond.atom1]
atom2 = atom_mapping[bond.atom2]
self.add_bond((atom1, atom2))
if structure.box is not None:
# Convert from A to nm
self.periodicity = 0.1 * structure.box[0:3]
else:
self.periodicity = np.array([0., 0., 0.]) |
def logposterior(self):
        """Returns the log of the posterior of the current parameter values.

        The logprior is calculated first. If the logprior returns ``-inf``
        (possibly indicating a non-physical point), then the ``loglikelihood``
        is not called.
        """
        logp = self.logprior
        # Short-circuit: a -inf prior makes the posterior -inf regardless of
        # the likelihood, so skip the (potentially expensive) likelihood call.
        if logp == -numpy.inf:
            return logp
        else:
return logp + self.loglikelihood | Returns the log of the posterior of the current parameter values.
The logprior is calculated first. If the logprior returns ``-inf``
(possibly indicating a non-physical point), then the ``loglikelihood``
    is not called. | Below is the instruction that describes the task:
### Input:
Returns the log of the posterior of the current parameter values.
The logprior is calculated first. If the logprior returns ``-inf``
(possibly indicating a non-physical point), then the ``loglikelihood``
is not called.
### Response:
def logposterior(self):
"""Returns the log of the posterior of the current parameter values.
The logprior is calculated first. If the logprior returns ``-inf``
(possibly indicating a non-physical point), then the ``loglikelihood``
is not called.
"""
logp = self.logprior
if logp == -numpy.inf:
return logp
else:
return logp + self.loglikelihood |
def date(start, end):
    """Pick a uniformly distributed random date between *start* and *end*."""
    start_ts = date_to_timestamp(start)
    end_ts = date_to_timestamp(end)
    # random.uniform(a, b) is defined as a + (b - a) * random.random(),
    # i.e. exactly the arithmetic the original spelled out by hand.
    ptime = random.uniform(start_ts, end_ts)
    return datetime.date.fromtimestamp(ptime) | Get a random date between two dates | Below is the instruction that describes the task:
### Input:
Get a random date between two dates
### Response:
def date(start, end):
"""Get a random date between two dates"""
stime = date_to_timestamp(start)
etime = date_to_timestamp(end)
ptime = stime + random.random() * (etime - stime)
return datetime.date.fromtimestamp(ptime) |
def destroy_cloudwatch_event(app='', env='dev', region=''):
    """Destroy Cloudwatch event subscription.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment (also used as the boto3 profile).
        region (str): AWS region.

    Returns:
        bool: True upon successful completion.
    """
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('events')
    event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region)
    for rule in event_rules:
        # Remove only this app's target; the rule itself is left in place.
        cloudwatch_client.remove_targets(Rule=rule, Ids=[app])
return True | Destroy Cloudwatch event subscription.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
        bool: True upon successful completion. | Below is the instruction that describes the task:
### Input:
Destroy Cloudwatch event subscription.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion.
### Response:
def destroy_cloudwatch_event(app='', env='dev', region=''):
"""Destroy Cloudwatch event subscription.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion.
"""
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('events')
event_rules = get_cloudwatch_event_rule(app_name=app, account=env, region=region)
for rule in event_rules:
cloudwatch_client.remove_targets(Rule=rule, Ids=[app])
return True |
def _format_info(data):
    '''
    Return user information in a pretty way

    Takes a pwd-style entry (pw_name, pw_gid, pw_dir, ...) and returns a
    dict of account fields, splitting the GECOS string into its five
    conventional comma-separated sub-fields.
    '''
    # Put GECOS info into a list (at most 5 fields: split on first 4 commas)
    gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 4)
    # Make sure our list has at least five elements
    while len(gecos_field) < 5:
        gecos_field.append('')
    return {'gid': data.pw_gid,
            'groups': list_groups(data.pw_name),
            'home': data.pw_dir,
            'name': data.pw_name,
            'passwd': data.pw_passwd,
            'shell': data.pw_shell,
            'uid': data.pw_uid,
            'fullname': gecos_field[0],
            'roomnumber': gecos_field[1],
            'workphone': gecos_field[2],
            'homephone': gecos_field[3],
'other': gecos_field[4]} | Return user information in a pretty way | Below is the the instruction that describes the task:
### Input:
Return user information in a pretty way
### Response:
def _format_info(data):
'''
Return user information in a pretty way
'''
# Put GECOS info into a list
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(',', 4)
# Make sure our list has at least five elements
while len(gecos_field) < 5:
gecos_field.append('')
return {'gid': data.pw_gid,
'groups': list_groups(data.pw_name),
'home': data.pw_dir,
'name': data.pw_name,
'passwd': data.pw_passwd,
'shell': data.pw_shell,
'uid': data.pw_uid,
'fullname': gecos_field[0],
'roomnumber': gecos_field[1],
'workphone': gecos_field[2],
'homephone': gecos_field[3],
'other': gecos_field[4]} |
def _enable_thread_pool(func):
    """
    Use thread pool for executing a task if self.enable_thread_pool is True.

    Return an instance of future when flag is_async is True, otherwise
    block waiting for the result until timeout and then return the result.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # The decorated callable is a method: args[0] is the instance.
        self = args[0]
        if self.enable_thread_pool and hasattr(self, 'thread_pool'):
            future = self.thread_pool.submit(func, *args, **kwargs)
            is_async = kwargs.get('is_async')
            if is_async is None or not is_async:
                # Synchronous path: block on the Future for the result.
                timeout = kwargs.get('timeout')
                if timeout is None:
                    # default wait of 2 seconds when the caller gave none
                    timeout = 2
                try:
                    result = future.result(timeout=timeout)
                # NOTE(review): this must be concurrent.futures.TimeoutError
                # (only an alias of the builtin on Python >= 3.11) -- confirm
                # which TimeoutError this module imports.
                except TimeoutError as e:
                    self.logger.exception(e)
                    result = None
                return result
            # Asynchronous path: the caller gets the Future itself.
            return future
        else:
            # Thread pool disabled or absent: call inline.
            return func(*args, **kwargs)
return wrapper | Use thread pool for executing a task if self.enable_thread_pool is True.
    Return an instance of future when flag is_async is True otherwise will
    block waiting for the result until timeout then returns the result. | Below is the instruction that describes the task:
### Input:
Use thread pool for executing a task if self.enable_thread_pool is True.
Return an instance of future when flag is_async is True otherwise will to
block waiting for the result until timeout then returns the result.
### Response:
def _enable_thread_pool(func):
"""
Use thread pool for executing a task if self.enable_thread_pool is True.
Return an instance of future when flag is_async is True otherwise will to
block waiting for the result until timeout then returns the result.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.enable_thread_pool and hasattr(self, 'thread_pool'):
future = self.thread_pool.submit(func, *args, **kwargs)
is_async = kwargs.get('is_async')
if is_async is None or not is_async:
timeout = kwargs.get('timeout')
if timeout is None:
timeout = 2
try:
result = future.result(timeout=timeout)
except TimeoutError as e:
self.logger.exception(e)
result = None
return result
return future
else:
return func(*args, **kwargs)
return wrapper |
def change_same_starting_points(flaglist):
    """Gets points at which changes begin.

    Builds two index lists: positions where a run of "changed" flags
    starts, and positions where a run of "same" flags starts (index 0 is
    a "same" start when the list opens unchanged).
    """
    change_points = []
    same_points = []
    in_change = False
    # an initial falsy flag means the list opens with a "same" run
    if flaglist and not flaglist[0]:
        same_points.append(0)
    for idx, flag in enumerate(flaglist):
        state = bool(flag)
        if state == in_change:
            continue
        # the state flipped here: record where the new run begins
        if state:
            change_points.append(idx)
        else:
            same_points.append(idx)
        in_change = state
    return (change_points, same_points) | Gets points at which changes begin | Below is the instruction that describes the task:
### Input:
Gets points at which changes begin
### Response:
def change_same_starting_points(flaglist):
"""Gets points at which changes begin"""
change_points = []
same_points = []
in_change = False
if flaglist and not flaglist[0]:
same_points.append(0)
for x, flag in enumerate(flaglist):
if flag and not in_change:
change_points.append(x)
in_change = True
elif not flag and in_change:
same_points.append(x)
in_change = False
return (change_points, same_points) |
def _start_handler_thread(self):
"""Called once to start the event handler thread."""
# Create handler thread
t = Thread(target=self._start_loop)
t.daemon = True
# Start handler thread
t.start()
        self.thread_started = True | Called once to start the event handler thread. | Below is the instruction that describes the task:
### Input:
Called once to start the event handler thread.
### Response:
def _start_handler_thread(self):
"""Called once to start the event handler thread."""
# Create handler thread
t = Thread(target=self._start_loop)
t.daemon = True
# Start handler thread
t.start()
self.thread_started = True |
def netlog(message,
           source=None,
           host='localhost',
           port=514,
           priority=syslog.LOG_DEBUG,
           facility=syslog.LOG_USER):
    """
    Send a syslog message to a remote server over UDP.

    Python's built in syslog module does not support networking, so
    this is the alternative.
    The source argument specifies the message source that is
    documented on the receiving server. It defaults to "scriptname[pid]",
    where "scriptname" is sys.argv[0], and pid is the current process id.
    The priority and facility arguments are equivalent to those of
    Python's built in syslog module.
    :type source: str
    :param source: The source address.
    :type host: str
    :param host: The IP address or hostname of the receiving server.
    :type port: int
    :param port: The UDP port number of the receiving server.
    :type priority: int
    :param priority: The message priority.
    :type facility: int
    :param facility: The message facility.
    """
    if not source:
        # BUG FIX: the original used '+' between a str and a tuple, which
        # raises TypeError; '%' is the intended formatting operator.
        source = '%s[%s]' % (sys.argv[0], os.getpid())
    # syslog PRI field: Python's facility constants are pre-shifted, so a
    # plain sum of priority + facility yields the wire value.
    data = '<%d>%s: %s' % (priority + facility, source, message)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # BUG FIX: sendto() requires bytes on Python 3; encode the datagram.
    sock.sendto(data.encode('utf-8'), (host, port))
sock.close() | Python's built in syslog module does not support networking, so
this is the alternative.
The source argument specifies the message source that is
documented on the receiving server. It defaults to "scriptname[pid]",
where "scriptname" is sys.argv[0], and pid is the current process id.
The priority and facility arguments are equivalent to those of
Python's built in syslog module.
:type source: str
:param source: The source address.
:type host: str
:param host: The IP address or hostname of the receiving server.
:type port: str
:param port: The TCP port number of the receiving server.
:type priority: int
:param priority: The message priority.
:type facility: int
    :param facility: The message facility. | Below is the instruction that describes the task:
### Input:
Python's built in syslog module does not support networking, so
this is the alternative.
The source argument specifies the message source that is
documented on the receiving server. It defaults to "scriptname[pid]",
where "scriptname" is sys.argv[0], and pid is the current process id.
The priority and facility arguments are equivalent to those of
Python's built in syslog module.
:type source: str
:param source: The source address.
:type host: str
:param host: The IP address or hostname of the receiving server.
:type port: str
:param port: The TCP port number of the receiving server.
:type priority: int
:param priority: The message priority.
:type facility: int
:param facility: The message facility.
### Response:
def netlog(message,
source=None,
host='localhost',
port=514,
priority=syslog.LOG_DEBUG,
facility=syslog.LOG_USER):
"""
Python's built in syslog module does not support networking, so
this is the alternative.
The source argument specifies the message source that is
documented on the receiving server. It defaults to "scriptname[pid]",
where "scriptname" is sys.argv[0], and pid is the current process id.
The priority and facility arguments are equivalent to those of
Python's built in syslog module.
:type source: str
:param source: The source address.
:type host: str
:param host: The IP address or hostname of the receiving server.
:type port: str
:param port: The TCP port number of the receiving server.
:type priority: int
:param priority: The message priority.
:type facility: int
:param facility: The message facility.
"""
if not source:
source = '%s[%s]' + (sys.argv[0], os.getpid())
data = '<%d>%s: %s' % (priority + facility, source, message)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(data, (host, port))
sock.close() |
def pin(self, disable_notification: bool = None) -> "Message":
        """Bound method *pin* of :obj:`Message <pyrogram.Message>`.
        Use as a shortcut for:
        .. code-block:: python
            client.pin_chat_message(
                chat_id=message.chat.id,
                message_id=message_id
            )
        Example:
        .. code-block:: python
            message.pin()
        Args:
            disable_notification (``bool``):
                Pass True, if it is not necessary to send a notification to all chat members about the new pinned
                message. Notifications are always disabled in channels.
        Returns:
            True on success.
        Raises:
            :class:`RPCError <pyrogram.RPCError>`
        """
        # NOTE(review): the annotation says "Message" but the docstring says
        # True is returned -- confirm which is correct before relying on it.
        # Pure delegation: all required state comes from this message object.
        return self._client.pin_chat_message(
            chat_id=self.chat.id,
            message_id=self.message_id,
            disable_notification=disable_notification
) | Bound method *pin* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.pin_chat_message(
chat_id=message.chat.id,
message_id=message_id
)
Example:
.. code-block:: python
message.pin()
Args:
disable_notification (``bool``):
Pass True, if it is not necessary to send a notification to all chat members about the new pinned
message. Notifications are always disabled in channels.
Returns:
True on success.
Raises:
        :class:`RPCError <pyrogram.RPCError>` | Below is the instruction that describes the task:
### Input:
Bound method *pin* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.pin_chat_message(
chat_id=message.chat.id,
message_id=message_id
)
Example:
.. code-block:: python
message.pin()
Args:
disable_notification (``bool``):
Pass True, if it is not necessary to send a notification to all chat members about the new pinned
message. Notifications are always disabled in channels.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>`
### Response:
def pin(self, disable_notification: bool = None) -> "Message":
"""Bound method *pin* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.pin_chat_message(
chat_id=message.chat.id,
message_id=message_id
)
Example:
.. code-block:: python
message.pin()
Args:
disable_notification (``bool``):
Pass True, if it is not necessary to send a notification to all chat members about the new pinned
message. Notifications are always disabled in channels.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>`
"""
return self._client.pin_chat_message(
chat_id=self.chat.id,
message_id=self.message_id,
disable_notification=disable_notification
) |
def get_scaled_font(self):
        """Return the current scaled font.

        :return:
            A new :class:`ScaledFont` object,
            wrapping an existing cairo object.
        """
        # incref=True: presumably the C getter does not hand us ownership,
        # so we take our own reference -- confirm against the cairo docs.
        return ScaledFont._from_pointer(
cairo.cairo_get_scaled_font(self._pointer), incref=True) | Return the current scaled font.
:return:
A new :class:`ScaledFont` object,
        wrapping an existing cairo object. | Below is the instruction that describes the task:
### Input:
Return the current scaled font.
:return:
A new :class:`ScaledFont` object,
wrapping an existing cairo object.
### Response:
def get_scaled_font(self):
"""Return the current scaled font.
:return:
A new :class:`ScaledFont` object,
wrapping an existing cairo object.
"""
return ScaledFont._from_pointer(
cairo.cairo_get_scaled_font(self._pointer), incref=True) |
def get_strike(self):
        """
        Compute strike of each surface element and return area-weighted average
        value (in range ``[0, 360]``) using formula from:
        http://en.wikipedia.org/wiki/Mean_of_circular_quantities
        Note that the original formula has been adapted to compute a weighted
        rather than arithmetic mean.
        """
        areas = self._get_areas()
        strikes = numpy.array([surf.get_strike() for surf in self.surfaces])
        # Area-weighted components of the mean unit vector:
        # v1 is the weighted mean sine, v2 the weighted mean cosine.
        v1 = (numpy.sum(areas * numpy.sin(numpy.radians(strikes))) /
              numpy.sum(areas))
        v2 = (numpy.sum(areas * numpy.cos(numpy.radians(strikes))) /
              numpy.sum(areas))
return numpy.degrees(numpy.arctan2(v1, v2)) % 360 | Compute strike of each surface element and return area-weighted average
value (in range ``[0, 360]``) using formula from:
http://en.wikipedia.org/wiki/Mean_of_circular_quantities
Note that the original formula has been adapted to compute a weighted
    rather than arithmetic mean. | Below is the instruction that describes the task:
### Input:
Compute strike of each surface element and return area-weighted average
value (in range ``[0, 360]``) using formula from:
http://en.wikipedia.org/wiki/Mean_of_circular_quantities
Note that the original formula has been adapted to compute a weighted
rather than arithmetic mean.
### Response:
def get_strike(self):
"""
Compute strike of each surface element and return area-weighted average
value (in range ``[0, 360]``) using formula from:
http://en.wikipedia.org/wiki/Mean_of_circular_quantities
Note that the original formula has been adapted to compute a weighted
rather than arithmetic mean.
"""
areas = self._get_areas()
strikes = numpy.array([surf.get_strike() for surf in self.surfaces])
v1 = (numpy.sum(areas * numpy.sin(numpy.radians(strikes))) /
numpy.sum(areas))
v2 = (numpy.sum(areas * numpy.cos(numpy.radians(strikes))) /
numpy.sum(areas))
return numpy.degrees(numpy.arctan2(v1, v2)) % 360 |
def discover(service, timeout=5, retries=5):
'''
Discovers services on a network using the SSDP Protocol.
'''
group = ('239.255.255.250', 1900)
message = '\r\n'.join([
'M-SEARCH * HTTP/1.1',
'HOST: {0}:{1}',
'MAN: "ssdp:discover"',
'ST: {st}', 'MX: 3', '', ''])
socket.setdefaulttimeout(timeout)
responses = {}
for _ in range(retries):
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
data = message.format(*group, st=service)
sock.sendto(data.encode('utf-8'), group)
while True:
try:
response = SSDPResponse(sock.recv(1024))
responses[response.location] = response
except socket.timeout:
break
if responses:
break
return responses.values() | Discovers services on a network using the SSDP Protocol. | Below is the instruction that describes the task:
### Input:
Discovers services on a network using the SSDP Protocol.
### Response:
def discover(service, timeout=5, retries=5):
'''
Discovers services on a network using the SSDP Protocol.
'''
group = ('239.255.255.250', 1900)
message = '\r\n'.join([
'M-SEARCH * HTTP/1.1',
'HOST: {0}:{1}',
'MAN: "ssdp:discover"',
'ST: {st}', 'MX: 3', '', ''])
socket.setdefaulttimeout(timeout)
responses = {}
for _ in range(retries):
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
data = message.format(*group, st=service)
sock.sendto(data.encode('utf-8'), group)
while True:
try:
response = SSDPResponse(sock.recv(1024))
responses[response.location] = response
except socket.timeout:
break
if responses:
break
return responses.values() |
def _param(self):
"""
Get/Set a parameter.
"""
class Parameters(object):
def __getitem__(_self, name):
return self.getParameter(name)
def __setitem__(_self, name, value):
if isinstance(value, (float, int, basestring)):
self.getParameter(name).set(value)
else:
self.getParameter(name).setValues(value)
def __iter__(_self):
return self.getParameters()
return Parameters() | Get/Set a parameter. | Below is the instruction that describes the task:
### Input:
Get/Set a parameter.
### Response:
def _param(self):
"""
Get/Set a parameter.
"""
class Parameters(object):
def __getitem__(_self, name):
return self.getParameter(name)
def __setitem__(_self, name, value):
if isinstance(value, (float, int, basestring)):
self.getParameter(name).set(value)
else:
self.getParameter(name).setValues(value)
def __iter__(_self):
return self.getParameters()
return Parameters() |
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs) | Called if no explicit visitor function exists for a node. | Below is the instruction that describes the task:
### Input:
Called if no explicit visitor function exists for a node.
### Response:
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs) |
def get_parent_ids(self):
"""Gets the parents of this node.
return: (osid.id.IdList) - the parents of this node
*compliance: mandatory -- This method must be implemented.*
"""
id_list = []
from ..id.objects import IdList
for parent_node in self._my_map['parentNodes']:
id_list.append(str(parent_node.ident))
return IdList(id_list) | Gets the parents of this node.
return: (osid.id.IdList) - the parents of this node
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the parents of this node.
return: (osid.id.IdList) - the parents of this node
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_parent_ids(self):
"""Gets the parents of this node.
return: (osid.id.IdList) - the parents of this node
*compliance: mandatory -- This method must be implemented.*
"""
id_list = []
from ..id.objects import IdList
for parent_node in self._my_map['parentNodes']:
id_list.append(str(parent_node.ident))
return IdList(id_list) |
def from_euler(self, roll, pitch, yaw):
'''fill the matrix from Euler angles in radians'''
cp = cos(pitch)
sp = sin(pitch)
sr = sin(roll)
cr = cos(roll)
sy = sin(yaw)
cy = cos(yaw)
self.a.x = cp * cy
self.a.y = (sr * sp * cy) - (cr * sy)
self.a.z = (cr * sp * cy) + (sr * sy)
self.b.x = cp * sy
self.b.y = (sr * sp * sy) + (cr * cy)
self.b.z = (cr * sp * sy) - (sr * cy)
self.c.x = -sp
self.c.y = sr * cp
self.c.z = cr * cp | fill the matrix from Euler angles in radians | Below is the instruction that describes the task:
### Input:
fill the matrix from Euler angles in radians
### Response:
def from_euler(self, roll, pitch, yaw):
'''fill the matrix from Euler angles in radians'''
cp = cos(pitch)
sp = sin(pitch)
sr = sin(roll)
cr = cos(roll)
sy = sin(yaw)
cy = cos(yaw)
self.a.x = cp * cy
self.a.y = (sr * sp * cy) - (cr * sy)
self.a.z = (cr * sp * cy) + (sr * sy)
self.b.x = cp * sy
self.b.y = (sr * sp * sy) + (cr * cy)
self.b.z = (cr * sp * sy) - (sr * cy)
self.c.x = -sp
self.c.y = sr * cp
self.c.z = cr * cp |
def _get_flux_bounds(self, r_id, model, flux_limits, equation):
"""Read reaction's limits to set up strings for limits in the output file.
"""
if r_id not in flux_limits or flux_limits[r_id][0] is None:
if equation.direction == Direction.Forward:
lower = 0
else:
lower = -model.default_flux_limit
else:
lower = flux_limits[r_id][0]
if r_id not in flux_limits or flux_limits[r_id][1] is None:
if equation.direction == Direction.Reverse:
upper = 0
else:
upper = model.default_flux_limit
else:
upper = flux_limits[r_id][1]
if lower % 1 == 0:
lower = int(lower)
if upper % 1 == 0:
upper = int(upper)
return text_type(lower), text_type(upper) | Read reaction's limits to set up strings for limits in the output file. | Below is the instruction that describes the task:
### Input:
Read reaction's limits to set up strings for limits in the output file.
### Response:
def _get_flux_bounds(self, r_id, model, flux_limits, equation):
"""Read reaction's limits to set up strings for limits in the output file.
"""
if r_id not in flux_limits or flux_limits[r_id][0] is None:
if equation.direction == Direction.Forward:
lower = 0
else:
lower = -model.default_flux_limit
else:
lower = flux_limits[r_id][0]
if r_id not in flux_limits or flux_limits[r_id][1] is None:
if equation.direction == Direction.Reverse:
upper = 0
else:
upper = model.default_flux_limit
else:
upper = flux_limits[r_id][1]
if lower % 1 == 0:
lower = int(lower)
if upper % 1 == 0:
upper = int(upper)
return text_type(lower), text_type(upper) |
def peek(self, n=1, raw=False):
"""Peek status queue
:param int n: number of messages to return as part of peek.
:param bool raw: should message content be returned as is (no parsing).
"""
def _peek_specific_q(_q, _n):
has_messages = False
for m in _q.service.peek_messages(_q.name, num_messages=_n):
if m is not None:
has_messages = True
result.append(m if raw else self._deserialize_message(m))
# short circuit to prevent unneeded work
if len(result) == n:
return True
return has_messages
q_services = self._get_q_services()
random.shuffle(q_services)
per_q = int(n / len(q_services)) + 1
result = []
non_empty_qs = []
for q in q_services:
if _peek_specific_q(q, per_q):
non_empty_qs.append(q)
if len(result) == n:
return result
# in case queues aren't balanced, and we didn't get enough messages, iterate again and this time get all that we can
for q in non_empty_qs:
_peek_specific_q(q, n)
if len(result) == n:
return result
# because we ask for n / len(qs) + 1, we might get more messages than requested
return result | Peek status queue
:param int n: number of messages to return as part of peek.
:param bool raw: should message content be returned as is (no parsing). | Below is the instruction that describes the task:
### Input:
Peek status queue
:param int n: number of messages to return as part of peek.
:param bool raw: should message content be returned as is (no parsing).
### Response:
def peek(self, n=1, raw=False):
"""Peek status queue
:param int n: number of messages to return as part of peek.
:param bool raw: should message content be returned as is (no parsing).
"""
def _peek_specific_q(_q, _n):
has_messages = False
for m in _q.service.peek_messages(_q.name, num_messages=_n):
if m is not None:
has_messages = True
result.append(m if raw else self._deserialize_message(m))
# short circuit to prevent unneeded work
if len(result) == n:
return True
return has_messages
q_services = self._get_q_services()
random.shuffle(q_services)
per_q = int(n / len(q_services)) + 1
result = []
non_empty_qs = []
for q in q_services:
if _peek_specific_q(q, per_q):
non_empty_qs.append(q)
if len(result) == n:
return result
# in case queues aren't balanced, and we didn't get enough messages, iterate again and this time get all that we can
for q in non_empty_qs:
_peek_specific_q(q, n)
if len(result) == n:
return result
# because we ask for n / len(qs) + 1, we might get more messages than requested
return result |
def _getNumberOfRequiredVerificationsVocabulary(self):
"""Returns a DisplayList with the available options for the
multi-verification list: 'system default', '1', '2', '3', '4'
:returns: DisplayList with the available options for the
multi-verification list
"""
bsve = self.bika_setup.getNumberOfRequiredVerifications()
bsval = "%s (%s)" % (_("System default"), str(bsve))
items = [(-1, bsval), (1, '1'), (2, '2'), (3, '3'), (4, '4')]
return IntDisplayList(list(items)) | Returns a DisplayList with the available options for the
multi-verification list: 'system default', '1', '2', '3', '4'
:returns: DisplayList with the available options for the
multi-verification list | Below is the instruction that describes the task:
### Input:
Returns a DisplayList with the available options for the
multi-verification list: 'system default', '1', '2', '3', '4'
:returns: DisplayList with the available options for the
multi-verification list
### Response:
def _getNumberOfRequiredVerificationsVocabulary(self):
"""Returns a DisplayList with the available options for the
multi-verification list: 'system default', '1', '2', '3', '4'
:returns: DisplayList with the available options for the
multi-verification list
"""
bsve = self.bika_setup.getNumberOfRequiredVerifications()
bsval = "%s (%s)" % (_("System default"), str(bsve))
items = [(-1, bsval), (1, '1'), (2, '2'), (3, '3'), (4, '4')]
return IntDisplayList(list(items)) |
def lookup_forward(name):
"""Perform a forward lookup of a hostname."""
ip_addresses = {}
addresses = list(set(str(ip[4][0]) for ip in socket.getaddrinfo(
name, None)))
if addresses is None:
return ip_addresses
for address in addresses:
if type(ipaddress.ip_address(address)) is ipaddress.IPv4Address:
ip_addresses['ipv4'] = address
if type(ipaddress.ip_address(address)) is ipaddress.IPv6Address:
ip_addresses['ipv6'] = address
return ip_addresses | Perform a forward lookup of a hostname. | Below is the instruction that describes the task:
### Input:
Perform a forward lookup of a hostname.
### Response:
def lookup_forward(name):
"""Perform a forward lookup of a hostname."""
ip_addresses = {}
addresses = list(set(str(ip[4][0]) for ip in socket.getaddrinfo(
name, None)))
if addresses is None:
return ip_addresses
for address in addresses:
if type(ipaddress.ip_address(address)) is ipaddress.IPv4Address:
ip_addresses['ipv4'] = address
if type(ipaddress.ip_address(address)) is ipaddress.IPv6Address:
ip_addresses['ipv6'] = address
return ip_addresses |
def define_user_analysis_extent(self, extent, crs):
"""Slot called when user has defined a custom analysis extent.
.. versionadded: 2.2.0
:param extent: Extent of the user's preferred analysis area.
:type extent: QgsRectangle
:param crs: Coordinate reference system for user defined analysis
extent.
:type crs: QgsCoordinateReferenceSystem
"""
extent = QgsGeometry.fromRect(extent)
self.extent.set_user_extent(extent, crs)
self.validate_impact_function() | Slot called when user has defined a custom analysis extent.
.. versionadded: 2.2.0
:param extent: Extent of the user's preferred analysis area.
:type extent: QgsRectangle
:param crs: Coordinate reference system for user defined analysis
extent.
:type crs: QgsCoordinateReferenceSystem | Below is the instruction that describes the task:
### Input:
Slot called when user has defined a custom analysis extent.
.. versionadded: 2.2.0
:param extent: Extent of the user's preferred analysis area.
:type extent: QgsRectangle
:param crs: Coordinate reference system for user defined analysis
extent.
:type crs: QgsCoordinateReferenceSystem
### Response:
def define_user_analysis_extent(self, extent, crs):
"""Slot called when user has defined a custom analysis extent.
.. versionadded: 2.2.0
:param extent: Extent of the user's preferred analysis area.
:type extent: QgsRectangle
:param crs: Coordinate reference system for user defined analysis
extent.
:type crs: QgsCoordinateReferenceSystem
"""
extent = QgsGeometry.fromRect(extent)
self.extent.set_user_extent(extent, crs)
self.validate_impact_function() |
def match_filtered_identities(self, fa, fb):
"""Determine if two filtered identities are the same.
This method compares the username and the source of each
identity to check if the given unique identities are the
same. Identities sources have to start with 'github' keyword
(uppercase or lowercase). When the given filtered identities
are the same object or share the same UUID, this will also
produce a positive match.
Identities which their usernames are in the blacklist will be
ignored and the result of the comparison will be false.
:param fa: filtered identity to match
:param fb: filtered identity to match
:returns: True when both filtered identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given filtered identities is not
an instance of EmailNameIdentity class.
"""
if not isinstance(fa, GitHubUsernameIdentity):
raise ValueError("<fa> is not an instance of GitHubUsernameIdentity")
if not isinstance(fb, GitHubUsernameIdentity):
raise ValueError("<fb> is not an instance of GitHubUsernameIdentity")
if fa.uuid and fb.uuid and fa.uuid == fb.uuid:
return True
if self._check_blacklist(fa):
return False
# Compare username
return fa.username and (fa.username == fb.username) | Determine if two filtered identities are the same.
This method compares the username and the source of each
identity to check if the given unique identities are the
same. Identities sources have to start with 'github' keyword
(uppercase or lowercase). When the given filtered identities
are the same object or share the same UUID, this will also
produce a positive match.
Identities which their usernames are in the blacklist will be
ignored and the result of the comparison will be false.
:param fa: filtered identity to match
:param fb: filtered identity to match
:returns: True when both filtered identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given filtered identities is not
an instance of EmailNameIdentity class. | Below is the instruction that describes the task:
### Input:
Determine if two filtered identities are the same.
This method compares the username and the source of each
identity to check if the given unique identities are the
same. Identities sources have to start with 'github' keyword
(uppercase or lowercase). When the given filtered identities
are the same object or share the same UUID, this will also
produce a positive match.
Identities which their usernames are in the blacklist will be
ignored and the result of the comparison will be false.
:param fa: filtered identity to match
:param fb: filtered identity to match
:returns: True when both filtered identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given filtered identities is not
an instance of EmailNameIdentity class.
### Response:
def match_filtered_identities(self, fa, fb):
"""Determine if two filtered identities are the same.
This method compares the username and the source of each
identity to check if the given unique identities are the
same. Identities sources have to start with 'github' keyword
(uppercase or lowercase). When the given filtered identities
are the same object or share the same UUID, this will also
produce a positive match.
Identities which their usernames are in the blacklist will be
ignored and the result of the comparison will be false.
:param fa: filtered identity to match
:param fb: filtered identity to match
:returns: True when both filtered identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given filtered identities is not
an instance of EmailNameIdentity class.
"""
if not isinstance(fa, GitHubUsernameIdentity):
raise ValueError("<fa> is not an instance of GitHubUsernameIdentity")
if not isinstance(fb, GitHubUsernameIdentity):
raise ValueError("<fb> is not an instance of GitHubUsernameIdentity")
if fa.uuid and fb.uuid and fa.uuid == fb.uuid:
return True
if self._check_blacklist(fa):
return False
# Compare username
return fa.username and (fa.username == fb.username) |
def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
"""
def retry_bad_nonce(f):
f.trap(ServerError)
# The current RFC draft defines the namespace as
# urn:ietf:params:acme:error:<code>, but earlier drafts (and some
# current implementations) use urn:acme:error:<code> instead. We
# don't really care about the namespace here, just the error code.
if f.value.message.typ.split(':')[-1] == 'badNonce':
# If one nonce is bad, others likely are too. Let's clear them
# and re-add the one we just got.
self._nonces.clear()
self._add_nonce(f.value.response)
return self._post(url, obj, content_type, **kwargs)
return f
return (
self._post(url, obj, content_type, **kwargs)
.addErrback(retry_bad_nonce)) | POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors. | Below is the instruction that describes the task:
### Input:
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
### Response:
def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
"""
def retry_bad_nonce(f):
f.trap(ServerError)
# The current RFC draft defines the namespace as
# urn:ietf:params:acme:error:<code>, but earlier drafts (and some
# current implementations) use urn:acme:error:<code> instead. We
# don't really care about the namespace here, just the error code.
if f.value.message.typ.split(':')[-1] == 'badNonce':
# If one nonce is bad, others likely are too. Let's clear them
# and re-add the one we just got.
self._nonces.clear()
self._add_nonce(f.value.response)
return self._post(url, obj, content_type, **kwargs)
return f
return (
self._post(url, obj, content_type, **kwargs)
.addErrback(retry_bad_nonce)) |
def synoname(
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
ret_name=False,
):
"""Return the Synoname similarity type of two words.
This is a wrapper for :py:meth:`Synoname.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx' match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test names
to perform (defaults to performing all tests)
ret_name : bool
If True, returns the match name rather than its integer equivalent
Returns
-------
int (or str if ret_name is True)
Synoname value
Examples
--------
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''))
2
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''),
... ret_name=True)
'omission'
>>> synoname(('Dore', 'Gustave', ''),
... ('Dore', 'Paul Gustave Louis Christophe', ''), ret_name=True)
'inclusion'
>>> synoname(('Pereira', 'I. R.', ''), ('Pereira', 'I. Smith', ''),
... ret_name=True)
'word_approx'
"""
return Synoname().dist_abs(
src, tar, word_approx_min, char_approx_min, tests, ret_name
) | Return the Synoname similarity type of two words.
This is a wrapper for :py:meth:`Synoname.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx' match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test names
to perform (defaults to performing all tests)
ret_name : bool
If True, returns the match name rather than its integer equivalent
Returns
-------
int (or str if ret_name is True)
Synoname value
Examples
--------
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''))
2
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''),
... ret_name=True)
'omission'
>>> synoname(('Dore', 'Gustave', ''),
... ('Dore', 'Paul Gustave Louis Christophe', ''), ret_name=True)
'inclusion'
>>> synoname(('Pereira', 'I. R.', ''), ('Pereira', 'I. Smith', ''),
... ret_name=True)
'word_approx' | Below is the instruction that describes the task:
### Input:
Return the Synoname similarity type of two words.
This is a wrapper for :py:meth:`Synoname.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx' match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test names
to perform (defaults to performing all tests)
ret_name : bool
If True, returns the match name rather than its integer equivalent
Returns
-------
int (or str if ret_name is True)
Synoname value
Examples
--------
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''))
2
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''),
... ret_name=True)
'omission'
>>> synoname(('Dore', 'Gustave', ''),
... ('Dore', 'Paul Gustave Louis Christophe', ''), ret_name=True)
'inclusion'
>>> synoname(('Pereira', 'I. R.', ''), ('Pereira', 'I. Smith', ''),
... ret_name=True)
'word_approx'
### Response:
def synoname(
src,
tar,
word_approx_min=0.3,
char_approx_min=0.73,
tests=2 ** 12 - 1,
ret_name=False,
):
"""Return the Synoname similarity type of two words.
This is a wrapper for :py:meth:`Synoname.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
word_approx_min : float
The minimum word approximation value to signal a 'word_approx' match
char_approx_min : float
The minimum character approximation value to signal a 'char_approx'
match
tests : int or Iterable
Either an integer indicating tests to perform or a list of test names
to perform (defaults to performing all tests)
ret_name : bool
If True, returns the match name rather than its integer equivalent
Returns
-------
int (or str if ret_name is True)
Synoname value
Examples
--------
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''))
2
>>> synoname(('Breghel', 'Pieter', ''), ('Brueghel', 'Pieter', ''),
... ret_name=True)
'omission'
>>> synoname(('Dore', 'Gustave', ''),
... ('Dore', 'Paul Gustave Louis Christophe', ''), ret_name=True)
'inclusion'
>>> synoname(('Pereira', 'I. R.', ''), ('Pereira', 'I. Smith', ''),
... ret_name=True)
'word_approx'
"""
return Synoname().dist_abs(
src, tar, word_approx_min, char_approx_min, tests, ret_name
) |
def spkssb(targ, et, ref):
"""
Return the state (position and velocity) of a target body
relative to the solar system barycenter.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkssb_c.html
:param targ: Target body.
:type targ: int
:param et: Target epoch.
:type et: float
:param ref: Target reference frame.
:type ref: str
:return: State of target.
:rtype: 6-Element Array of floats
"""
targ = ctypes.c_int(targ)
et = ctypes.c_double(et)
ref = stypes.stringToCharP(ref)
starg = stypes.emptyDoubleVector(6)
libspice.spkssb_c(targ, et, ref, starg)
return stypes.cVectorToPython(starg) | Return the state (position and velocity) of a target body
relative to the solar system barycenter.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkssb_c.html
:param targ: Target body.
:type targ: int
:param et: Target epoch.
:type et: float
:param ref: Target reference frame.
:type ref: str
:return: State of target.
:rtype: 6-Element Array of floats | Below is the instruction that describes the task:
### Input:
Return the state (position and velocity) of a target body
relative to the solar system barycenter.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkssb_c.html
:param targ: Target body.
:type targ: int
:param et: Target epoch.
:type et: float
:param ref: Target reference frame.
:type ref: str
:return: State of target.
:rtype: 6-Element Array of floats
### Response:
def spkssb(targ, et, ref):
"""
Return the state (position and velocity) of a target body
relative to the solar system barycenter.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkssb_c.html
:param targ: Target body.
:type targ: int
:param et: Target epoch.
:type et: float
:param ref: Target reference frame.
:type ref: str
:return: State of target.
:rtype: 6-Element Array of floats
"""
targ = ctypes.c_int(targ)
et = ctypes.c_double(et)
ref = stypes.stringToCharP(ref)
starg = stypes.emptyDoubleVector(6)
libspice.spkssb_c(targ, et, ref, starg)
return stypes.cVectorToPython(starg) |
def set_authoring_nodes(self, editor):
"""
Sets the Model authoring Nodes using given editor.
:param editor: Editor to set.
:type editor: Editor
:return: Method success.
:rtype: bool
"""
project_node = self.default_project_node
file_node = self.register_file(editor.file, project_node)
editor_node = self.register_editor(editor, file_node)
return True | Sets the Model authoring Nodes using given editor.
:param editor: Editor to set.
:type editor: Editor
:return: Method success.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Sets the Model authoring Nodes using given editor.
:param editor: Editor to set.
:type editor: Editor
:return: Method success.
:rtype: bool
### Response:
def set_authoring_nodes(self, editor):
"""
Sets the Model authoring Nodes using given editor.
:param editor: Editor to set.
:type editor: Editor
:return: Method success.
:rtype: bool
"""
project_node = self.default_project_node
file_node = self.register_file(editor.file, project_node)
editor_node = self.register_editor(editor, file_node)
return True |
def clip_by_extent(layer, extent):
"""Clip a raster using a bounding box using processing.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to clip.
:type layer: QgsRasterLayer
:param extent: The extent.
:type extent: QgsRectangle
:return: Clipped layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
"""
parameters = dict()
# noinspection PyBroadException
try:
output_layer_name = quick_clip_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
output_raster = unique_filename(suffix='.tif', dir=temp_dir())
# We make one pixel size buffer on the extent to cover every pixels.
# See https://github.com/inasafe/inasafe/issues/3655
pixel_size_x = layer.rasterUnitsPerPixelX()
pixel_size_y = layer.rasterUnitsPerPixelY()
buffer_size = max(pixel_size_x, pixel_size_y)
extent = extent.buffered(buffer_size)
if is_raster_y_inverted(layer):
# The raster is Y inverted. We need to switch Y min and Y max.
bbox = [
str(extent.xMinimum()),
str(extent.xMaximum()),
str(extent.yMaximum()),
str(extent.yMinimum())
]
else:
# The raster is normal.
bbox = [
str(extent.xMinimum()),
str(extent.xMaximum()),
str(extent.yMinimum()),
str(extent.yMaximum())
]
# These values are all from the processing algorithm.
# https://github.com/qgis/QGIS/blob/master/python/plugins/processing/
# algs/gdal/ClipByExtent.py
# Please read the file to know these parameters.
parameters['INPUT'] = layer.source()
parameters['NO_DATA'] = ''
parameters['PROJWIN'] = ','.join(bbox)
parameters['DATA_TYPE'] = 5
parameters['COMPRESS'] = 4
parameters['JPEGCOMPRESSION'] = 75
parameters['ZLEVEL'] = 6
parameters['PREDICTOR'] = 1
parameters['TILED'] = False
parameters['BIGTIFF'] = 0
parameters['TFW'] = False
parameters['EXTRA'] = ''
parameters['OUTPUT'] = output_raster
initialize_processing()
feedback = create_processing_feedback()
context = create_processing_context(feedback=feedback)
result = processing.run(
"gdal:cliprasterbyextent",
parameters,
context=context)
if result is None:
raise ProcessingInstallationError
clipped = QgsRasterLayer(result['OUTPUT'], output_layer_name)
# We transfer keywords to the output.
clipped.keywords = layer.keywords.copy()
clipped.keywords['title'] = output_layer_name
check_layer(clipped)
except Exception as e:
# This step clip_raster_by_extent was nice to speed up the analysis.
# As we got an exception because the layer is invalid, we are not going
# to stop the analysis. We will return the original raster layer.
# It will take more processing time until we clip the vector layer.
# Check https://github.com/inasafe/inasafe/issues/4026 why we got some
# exceptions with this step.
LOGGER.exception(parameters)
LOGGER.exception(
'Error from QGIS clip raster by extent. Please check the QGIS '
'logs too !')
LOGGER.info(
'Even if we got an exception, we are continuing the analysis. The '
'layer was not clipped.')
LOGGER.exception(str(e))
LOGGER.exception(get_error_message(e).to_text())
clipped = layer
return clipped | Clip a raster using a bounding box using processing.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to clip.
:type layer: QgsRasterLayer
:param extent: The extent.
:type extent: QgsRectangle
:return: Clipped layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0 | Below is the instruction that describes the task:
### Input:
Clip a raster using a bounding box using processing.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to clip.
:type layer: QgsRasterLayer
:param extent: The extent.
:type extent: QgsRectangle
:return: Clipped layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
### Response:
def clip_by_extent(layer, extent):
"""Clip a raster using a bounding box using processing.
Issue https://github.com/inasafe/inasafe/issues/3183
:param layer: The layer to clip.
:type layer: QgsRasterLayer
:param extent: The extent.
:type extent: QgsRectangle
:return: Clipped layer.
:rtype: QgsRasterLayer
.. versionadded:: 4.0
"""
parameters = dict()
# noinspection PyBroadException
try:
output_layer_name = quick_clip_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
output_raster = unique_filename(suffix='.tif', dir=temp_dir())
# We make one pixel size buffer on the extent to cover every pixels.
# See https://github.com/inasafe/inasafe/issues/3655
pixel_size_x = layer.rasterUnitsPerPixelX()
pixel_size_y = layer.rasterUnitsPerPixelY()
buffer_size = max(pixel_size_x, pixel_size_y)
extent = extent.buffered(buffer_size)
if is_raster_y_inverted(layer):
# The raster is Y inverted. We need to switch Y min and Y max.
bbox = [
str(extent.xMinimum()),
str(extent.xMaximum()),
str(extent.yMaximum()),
str(extent.yMinimum())
]
else:
# The raster is normal.
bbox = [
str(extent.xMinimum()),
str(extent.xMaximum()),
str(extent.yMinimum()),
str(extent.yMaximum())
]
# These values are all from the processing algorithm.
# https://github.com/qgis/QGIS/blob/master/python/plugins/processing/
# algs/gdal/ClipByExtent.py
# Please read the file to know these parameters.
parameters['INPUT'] = layer.source()
parameters['NO_DATA'] = ''
parameters['PROJWIN'] = ','.join(bbox)
parameters['DATA_TYPE'] = 5
parameters['COMPRESS'] = 4
parameters['JPEGCOMPRESSION'] = 75
parameters['ZLEVEL'] = 6
parameters['PREDICTOR'] = 1
parameters['TILED'] = False
parameters['BIGTIFF'] = 0
parameters['TFW'] = False
parameters['EXTRA'] = ''
parameters['OUTPUT'] = output_raster
initialize_processing()
feedback = create_processing_feedback()
context = create_processing_context(feedback=feedback)
result = processing.run(
"gdal:cliprasterbyextent",
parameters,
context=context)
if result is None:
raise ProcessingInstallationError
clipped = QgsRasterLayer(result['OUTPUT'], output_layer_name)
# We transfer keywords to the output.
clipped.keywords = layer.keywords.copy()
clipped.keywords['title'] = output_layer_name
check_layer(clipped)
except Exception as e:
# This step clip_raster_by_extent was nice to speedup the analysis.
# As we got an exception because the layer is invalid, we are not going
# to stop the analysis. We will return the original raster layer.
# It will take more processing time until we clip the vector layer.
# Check https://github.com/inasafe/inasafe/issues/4026 why we got some
# exceptions with this step.
LOGGER.exception(parameters)
LOGGER.exception(
'Error from QGIS clip raster by extent. Please check the QGIS '
'logs too !')
LOGGER.info(
'Even if we got an exception, we are continuing the analysis. The '
'layer was not clipped.')
LOGGER.exception(str(e))
LOGGER.exception(get_error_message(e).to_text())
clipped = layer
return clipped |
async def async_get_state(self, field: str) -> dict:
"""Get state of object in deCONZ.
Field is a string representing an API endpoint or lower
e.g. field='/lights'.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
session = self.session.get
url = self.api_url + field
response_dict = await async_request(session, url)
return response_dict | Get state of object in deCONZ.
Field is a string representing an API endpoint or lower
e.g. field='/lights'.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/ | Below is the the instruction that describes the task:
### Input:
Get state of object in deCONZ.
Field is a string representing an API endpoint or lower
e.g. field='/lights'.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
### Response:
async def async_get_state(self, field: str) -> dict:
"""Get state of object in deCONZ.
Field is a string representing an API endpoint or lower
e.g. field='/lights'.
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
session = self.session.get
url = self.api_url + field
response_dict = await async_request(session, url)
return response_dict |
def variance(arg, where=None, how='sample'):
"""
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
"""
expr = ops.Variance(arg, how, where).to_expr()
expr = expr.name('var')
return expr | Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar | Below is the the instruction that describes the task:
### Input:
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
### Response:
def variance(arg, where=None, how='sample'):
"""
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
"""
expr = ops.Variance(arg, how, where).to_expr()
expr = expr.name('var')
return expr |
def delete(community):
"""Delete a community."""
deleteform = DeleteCommunityForm(formdata=request.values)
ctx = mycommunities_ctx()
ctx.update({
'deleteform': deleteform,
'is_new': False,
'community': community,
})
if deleteform.validate_on_submit():
community.delete()
db.session.commit()
flash("Community was deleted.", category='success')
return redirect(url_for('.index'))
else:
flash("Community could not be deleted.", category='warning')
return redirect(url_for('.edit', community_id=community.id)) | Delete a community. | Below is the the instruction that describes the task:
### Input:
Delete a community.
### Response:
def delete(community):
"""Delete a community."""
deleteform = DeleteCommunityForm(formdata=request.values)
ctx = mycommunities_ctx()
ctx.update({
'deleteform': deleteform,
'is_new': False,
'community': community,
})
if deleteform.validate_on_submit():
community.delete()
db.session.commit()
flash("Community was deleted.", category='success')
return redirect(url_for('.index'))
else:
flash("Community could not be deleted.", category='warning')
return redirect(url_for('.edit', community_id=community.id)) |
def fuse_wheels(to_wheel, from_wheel, out_wheel):
""" Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
---------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
"""
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) | Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
---------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel` | Below is the the instruction that describes the task:
### Input:
Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
---------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
### Response:
def fuse_wheels(to_wheel, from_wheel, out_wheel):
""" Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
---------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
"""
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) |
def create_cluster_meta(cluster_groups):
"""Return a ClusterMeta instance with cluster group support."""
meta = ClusterMeta()
meta.add_field('group')
cluster_groups = cluster_groups or {}
data = {c: {'group': v} for c, v in cluster_groups.items()}
meta.from_dict(data)
return meta | Return a ClusterMeta instance with cluster group support. | Below is the the instruction that describes the task:
### Input:
Return a ClusterMeta instance with cluster group support.
### Response:
def create_cluster_meta(cluster_groups):
"""Return a ClusterMeta instance with cluster group support."""
meta = ClusterMeta()
meta.add_field('group')
cluster_groups = cluster_groups or {}
data = {c: {'group': v} for c, v in cluster_groups.items()}
meta.from_dict(data)
return meta |
def _xls2attributes(self, worksheet, tab):
"""Updates attributes in code_array"""
# Merged cells
for top, bottom, left, right in worksheet.merged_cells:
attrs = {"merge_area": (top, left, bottom - 1, right - 1)}
selection = Selection([(top, left)], [(bottom - 1, right - 1)],
[], [], [])
self.code_array.cell_attributes.append((selection, tab, attrs))
# Which cell comprise which format ids
xf2cell = dict((xfid, []) for xfid in xrange(self.workbook.xfcount))
rows, cols = worksheet.nrows, worksheet.ncols
for row, col in product(xrange(rows), xrange(cols)):
xfid = worksheet.cell_xf_index(row, col)
xf2cell[xfid].append((row, col))
for xfid, xf in enumerate(self.workbook.xf_list):
selection = Selection([], [], [], [], xf2cell[xfid])
selection_above = selection.shifted(-1, 0)
selection_left = selection.shifted(0, -1)
attributes = {}
# Alignment
xfalign2justification = {
0: "left",
1: "left",
2: "center",
3: "right",
4: "left",
5: "left",
6: "center",
7: "left",
}
xfalign2vertical_align = {
0: "top",
1: "middle",
2: "bottom",
3: "middle",
4: "middle",
}
def xfrotation2angle(xfrotation):
"""Returns angle from xlrotatation"""
# angle is counterclockwise
if 0 <= xfrotation <= 90:
return xfrotation
elif 90 < xfrotation <= 180:
return - (xfrotation - 90)
return 0
try:
attributes["justification"] = \
xfalign2justification[xf.alignment.hor_align]
attributes["vertical_align"] = \
xfalign2vertical_align[xf.alignment.vert_align]
attributes["angle"] = \
xfrotation2angle(xf.alignment.rotation)
except AttributeError:
pass
# Background
if xf.background.fill_pattern == 1:
color_idx = xf.background.pattern_colour_index
color = self.idx2colour(color_idx)
attributes["bgcolor"] = color.GetRGB()
# Border
__border_line_style2width = {
0: 1,
1: 1,
2: 4,
5: 7,
}
def constant_factory(value):
return repeat(value).next
border_line_style2width = defaultdict(constant_factory(1))
border_line_style2width.update(__border_line_style2width)
bottom_color_idx = xf.border.bottom_colour_index
if bottom_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[bottom_color_idx] is not None:
bottom_color = self.idx2colour(bottom_color_idx)
attributes["bordercolor_bottom"] = bottom_color.GetRGB()
right_color_idx = xf.border.right_colour_index
if right_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[right_color_idx] is not None:
right_color = self.idx2colour(right_color_idx)
attributes["bordercolor_right"] = right_color.GetRGB()
bottom_width = border_line_style2width[xf.border.bottom_line_style]
attributes["borderwidth_bottom"] = bottom_width
right_width = border_line_style2width[xf.border.right_line_style]
attributes["borderwidth_right"] = right_width
# Font
font = self.workbook.font_list[xf.font_index]
attributes["textfont"] = font.name
attributes["pointsize"] = font.height / 20.0
fontweight = wx.BOLD if font.weight == 700 else wx.NORMAL
attributes["fontweight"] = fontweight
if font.italic:
attributes["fontstyle"] = wx.ITALIC
if font.colour_index in self.workbook.colour_map and \
self.workbook.colour_map[font.colour_index] is not None:
attributes["textcolor"] = \
self.idx2colour(font.colour_index).GetRGB()
if font.underline_type:
attributes["underline"] = True
if font.struck_out:
attributes["strikethrough"] = True
# Handle top cells' top borders
attributes_above = {}
top_width = border_line_style2width[xf.border.top_line_style]
if top_width != 1:
attributes_above["borderwidth_bottom"] = top_width
top_color_idx = xf.border.top_colour_index
if top_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[top_color_idx] is not None:
top_color = self.idx2colour(top_color_idx)
attributes_above["bordercolor_bottom"] = top_color.GetRGB()
# Handle leftmost cells' left borders
attributes_left = {}
left_width = border_line_style2width[xf.border.left_line_style]
if left_width != 1:
attributes_left["borderwidth_right"] = left_width
left_color_idx = xf.border.left_colour_index
if left_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[left_color_idx] is not None:
left_color = self.idx2colour(left_color_idx)
attributes_above["bordercolor_right"] = left_color.GetRGB()
if attributes_above:
self._cell_attribute_append(selection_above, tab,
attributes_above)
if attributes_left:
self._cell_attribute_append(selection_left, tab,
attributes_left)
if attributes:
self._cell_attribute_append(selection, tab, attributes) | Updates attributes in code_array | Below is the the instruction that describes the task:
### Input:
Updates attributes in code_array
### Response:
def _xls2attributes(self, worksheet, tab):
"""Updates attributes in code_array"""
# Merged cells
for top, bottom, left, right in worksheet.merged_cells:
attrs = {"merge_area": (top, left, bottom - 1, right - 1)}
selection = Selection([(top, left)], [(bottom - 1, right - 1)],
[], [], [])
self.code_array.cell_attributes.append((selection, tab, attrs))
# Which cell comprise which format ids
xf2cell = dict((xfid, []) for xfid in xrange(self.workbook.xfcount))
rows, cols = worksheet.nrows, worksheet.ncols
for row, col in product(xrange(rows), xrange(cols)):
xfid = worksheet.cell_xf_index(row, col)
xf2cell[xfid].append((row, col))
for xfid, xf in enumerate(self.workbook.xf_list):
selection = Selection([], [], [], [], xf2cell[xfid])
selection_above = selection.shifted(-1, 0)
selection_left = selection.shifted(0, -1)
attributes = {}
# Alignment
xfalign2justification = {
0: "left",
1: "left",
2: "center",
3: "right",
4: "left",
5: "left",
6: "center",
7: "left",
}
xfalign2vertical_align = {
0: "top",
1: "middle",
2: "bottom",
3: "middle",
4: "middle",
}
def xfrotation2angle(xfrotation):
"""Returns angle from xlrotatation"""
# angle is counterclockwise
if 0 <= xfrotation <= 90:
return xfrotation
elif 90 < xfrotation <= 180:
return - (xfrotation - 90)
return 0
try:
attributes["justification"] = \
xfalign2justification[xf.alignment.hor_align]
attributes["vertical_align"] = \
xfalign2vertical_align[xf.alignment.vert_align]
attributes["angle"] = \
xfrotation2angle(xf.alignment.rotation)
except AttributeError:
pass
# Background
if xf.background.fill_pattern == 1:
color_idx = xf.background.pattern_colour_index
color = self.idx2colour(color_idx)
attributes["bgcolor"] = color.GetRGB()
# Border
__border_line_style2width = {
0: 1,
1: 1,
2: 4,
5: 7,
}
def constant_factory(value):
return repeat(value).next
border_line_style2width = defaultdict(constant_factory(1))
border_line_style2width.update(__border_line_style2width)
bottom_color_idx = xf.border.bottom_colour_index
if bottom_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[bottom_color_idx] is not None:
bottom_color = self.idx2colour(bottom_color_idx)
attributes["bordercolor_bottom"] = bottom_color.GetRGB()
right_color_idx = xf.border.right_colour_index
if right_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[right_color_idx] is not None:
right_color = self.idx2colour(right_color_idx)
attributes["bordercolor_right"] = right_color.GetRGB()
bottom_width = border_line_style2width[xf.border.bottom_line_style]
attributes["borderwidth_bottom"] = bottom_width
right_width = border_line_style2width[xf.border.right_line_style]
attributes["borderwidth_right"] = right_width
# Font
font = self.workbook.font_list[xf.font_index]
attributes["textfont"] = font.name
attributes["pointsize"] = font.height / 20.0
fontweight = wx.BOLD if font.weight == 700 else wx.NORMAL
attributes["fontweight"] = fontweight
if font.italic:
attributes["fontstyle"] = wx.ITALIC
if font.colour_index in self.workbook.colour_map and \
self.workbook.colour_map[font.colour_index] is not None:
attributes["textcolor"] = \
self.idx2colour(font.colour_index).GetRGB()
if font.underline_type:
attributes["underline"] = True
if font.struck_out:
attributes["strikethrough"] = True
# Handle top cells' top borders
attributes_above = {}
top_width = border_line_style2width[xf.border.top_line_style]
if top_width != 1:
attributes_above["borderwidth_bottom"] = top_width
top_color_idx = xf.border.top_colour_index
if top_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[top_color_idx] is not None:
top_color = self.idx2colour(top_color_idx)
attributes_above["bordercolor_bottom"] = top_color.GetRGB()
# Handle leftmost cells' left borders
attributes_left = {}
left_width = border_line_style2width[xf.border.left_line_style]
if left_width != 1:
attributes_left["borderwidth_right"] = left_width
left_color_idx = xf.border.left_colour_index
if left_color_idx in self.workbook.colour_map and \
self.workbook.colour_map[left_color_idx] is not None:
left_color = self.idx2colour(left_color_idx)
attributes_above["bordercolor_right"] = left_color.GetRGB()
if attributes_above:
self._cell_attribute_append(selection_above, tab,
attributes_above)
if attributes_left:
self._cell_attribute_append(selection_left, tab,
attributes_left)
if attributes:
self._cell_attribute_append(selection, tab, attributes) |
def match(pattern, path):
# type: (str, str) -> bool
"""Compare a glob pattern with a path (case sensitive).
Arguments:
pattern (str): A glob pattern.
path (str): A path.
Returns:
bool: ``True`` if the path matches the pattern.
Example:
>>> from fs.glob import match
>>> match("**/*.py", "/fs/glob.py")
True
"""
try:
levels, recursive, re_pattern = _PATTERN_CACHE[(pattern, True)]
except KeyError:
levels, recursive, re_pattern = _translate_glob(pattern, case_sensitive=True)
_PATTERN_CACHE[(pattern, True)] = (levels, recursive, re_pattern)
return bool(re_pattern.match(path)) | Compare a glob pattern with a path (case sensitive).
Arguments:
pattern (str): A glob pattern.
path (str): A path.
Returns:
bool: ``True`` if the path matches the pattern.
Example:
>>> from fs.glob import match
>>> match("**/*.py", "/fs/glob.py")
True | Below is the the instruction that describes the task:
### Input:
Compare a glob pattern with a path (case sensitive).
Arguments:
pattern (str): A glob pattern.
path (str): A path.
Returns:
bool: ``True`` if the path matches the pattern.
Example:
>>> from fs.glob import match
>>> match("**/*.py", "/fs/glob.py")
True
### Response:
def match(pattern, path):
# type: (str, str) -> bool
"""Compare a glob pattern with a path (case sensitive).
Arguments:
pattern (str): A glob pattern.
path (str): A path.
Returns:
bool: ``True`` if the path matches the pattern.
Example:
>>> from fs.glob import match
>>> match("**/*.py", "/fs/glob.py")
True
"""
try:
levels, recursive, re_pattern = _PATTERN_CACHE[(pattern, True)]
except KeyError:
levels, recursive, re_pattern = _translate_glob(pattern, case_sensitive=True)
_PATTERN_CACHE[(pattern, True)] = (levels, recursive, re_pattern)
return bool(re_pattern.match(path)) |
def _flush_wait(flush_future, write_future):
"""Pause briefly allowing any pending metric writes to complete before
shutting down.
:param tornado.concurrent.Future flush_future: The future to resolve
when the shutdown is complete.
:param tornado.concurrent.Future write_future: The future that is for the
current batch write operation.
"""
if write_future.done():
if not _pending_measurements():
flush_future.set_result(True)
return
else:
write_future = _write_measurements()
ioloop.IOLoop.current().add_timeout(
ioloop.IOLoop.current().time() + 0.25,
_flush_wait, flush_future, write_future) | Pause briefly allowing any pending metric writes to complete before
shutting down.
:param tornado.concurrent.Future flush_future: The future to resolve
when the shutdown is complete.
:param tornado.concurrent.Future write_future: The future that is for the
current batch write operation. | Below is the the instruction that describes the task:
### Input:
Pause briefly allowing any pending metric writes to complete before
shutting down.
:param tornado.concurrent.Future flush_future: The future to resolve
when the shutdown is complete.
:param tornado.concurrent.Future write_future: The future that is for the
current batch write operation.
### Response:
def _flush_wait(flush_future, write_future):
"""Pause briefly allowing any pending metric writes to complete before
shutting down.
:param tornado.concurrent.Future flush_future: The future to resolve
when the shutdown is complete.
:param tornado.concurrent.Future write_future: The future that is for the
current batch write operation.
"""
if write_future.done():
if not _pending_measurements():
flush_future.set_result(True)
return
else:
write_future = _write_measurements()
ioloop.IOLoop.current().add_timeout(
ioloop.IOLoop.current().time() + 0.25,
_flush_wait, flush_future, write_future) |
def fast_inhomo_sweep_time_evolution(Ep, epsilonp, gamma,
omega_level, rm, xi, theta,
inhomogeneity,
semi_analytic=True,
file_name=None, return_code=False):
r"""Return a spectrum of time evolutions of the density matrix.
We test a basic two-level system.
>>> from fast.bloch import phase_transformation
>>> from fast import PlaneWave, electric_field_amplitude_top, Atom
>>> Ne = 2
>>> Nl = 1
>>> a0 = physical_constants["Bohr radius"][0]
>>> rm = [np.array([[0, 0], [a0, 0]]),
... np.array([[0, 0], [0, 0]]),
... np.array([[0, 0], [0, 0]])]
>>> xi = np.array([[[0, 1], [1, 0]]])
>>> omega_level = [0, 2.4e15]
>>> gamma21 = 2*np.pi*6e6
>>> gamma = np.array([[0, -gamma21], [gamma21, 0]])
>>> theta = phase_transformation(Ne, Nl, rm, xi)
>>> Ep, omega_laser = define_laser_variables(Nl)
>>> laser = PlaneWave(0, 0, 0, 0)
>>> epsilonp = [laser.epsilonp]
>>> k = [laser.k]
>>> detuning_knob = [symbols("delta1", real=True)]
A map to unfold the density matrix.
>>> unfolding = Unfolding(Ne, True, True, True)
>>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI")
>>> Eps = [Eps]
>>> t = np.linspace(0, 1e-6, 11)
>>> rho0 = np.array([[1, 0], [0, 0]])
>>> rho0 = unfolding(rho0)
We define the Doppler broadening.
>>> Nvz = 15
>>> shape = [Nvz]
>>> stds = [[-4, 4]]
>>> T = 273.15+20
>>> mass = Atom("Rb", 87).mass
>>> aux = (shape, stds, T, mass, detuning_knob, k,
... omega_level, xi, theta, unfolding, ["z", "x", "y"],
... True)
>>> doppler_effect = DopplerBroadening(*aux)
We get a function for the frequency sweep of time evolution.
>>> aux = (Ep, epsilonp, gamma,
... omega_level, rm, xi, theta,
... doppler_effect,
... True,
... "eqs")
>>> inhomo_time_evolution = fast_inhomo_sweep_time_evolution(*aux)
>>> amp = 1000e6*2*np.pi
>>> Ndelta = 101
>>> detuning_knobs = [[-amp, amp, Ndelta]]
>>> deltas, rhot = inhomo_time_evolution(t, rho0, Eps, detuning_knobs)
>>> print rhot.shape
(101, 11, 15, 3)
"""
# We unpack variables.
if True:
Nl = xi.shape[0]
# We determine which arguments are constants.
if True:
try:
Ep = np.array([complex(Ep[l]) for l in range(Nl)])
variable_Ep = False
except:
variable_Ep = True
try:
epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)])
for l in range(Nl)]
variable_epsilonp = False
except:
variable_epsilonp = True
# We obtain code for the time evolution.
if True:
detuning_knob = symbols("delta1:"+str(Nl))
args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta,
file_name, True)
args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi,
theta, inhomogeneity, True, file_name, True)
inhomo_time_evolution = fast_inhomo_time_evolution(*args)
code = inhomo_time_evolution+"\n\n"
# We establish the arguments of the output function.
if True:
code += "def inhomo_sweep_time_evolution(t, rho0, "
if variable_Ep: code += "Ep, "
if variable_epsilonp: code += "epsilonp, "
code += "detuning_knob, "
code += "inhomo_time_evolution=inhomo_time_evolution):\n"
code += ' r"""A fast frequency sweep of the steady state."""\n'
# Code to determine the sweep range.
if True:
code += """ sweepN = -1\n"""
code += """ for i, delta in enumerate(detuning_knob):\n"""
code += """ if hasattr(delta, "__getitem__"):\n"""
code += """ sweepN = i\n"""
code += """ delta0 = delta[0]\n"""
code += """ deltaf = delta[1]\n"""
code += """ Ndelta = delta[2]\n"""
code += """ break\n\n"""
code += """ if sweepN == -1:\n"""
code += """ s = 'One of the detuning knobs '\n"""
code += """ s += 'must be of the form '\n"""
code += """ s += '(start, stop, Nsteps)'\n"""
code += """ raise ValueError(s)\n\n"""
code += """ deltas = np.linspace(delta0, deltaf, Ndelta)\n\n"""
# We call time_evolution.
if True:
code += " args = [[t, rho0, "
if variable_Ep: code += "Ep, "
if variable_epsilonp: code += "epsilonp, "
code += """list(detuning_knob[:sweepN]) +\n"""
code += """ [deltas[i]] +\n"""
code += """ list(detuning_knob[sweepN+1:])]\n"""
code += """ for i in range(Ndelta)]\n\n"""
code += " rho = np.array([inhomo_time_evolution(*argsi)\n"
code += " for argsi in args])\n\n"
# We finish the code.
if True:
code += " return deltas, rho\n"
# We write the code to file if provided, and execute it.
if True:
if file_name is not None:
f = file(file_name+".py", "w")
f.write(code)
f.close()
inhomo_sweep_time_evolution = code
if not return_code:
exec inhomo_sweep_time_evolution
return inhomo_sweep_time_evolution | r"""Return a spectrum of time evolutions of the density matrix.
We test a basic two-level system.
>>> from fast.bloch import phase_transformation
>>> from fast import PlaneWave, electric_field_amplitude_top, Atom
>>> Ne = 2
>>> Nl = 1
>>> a0 = physical_constants["Bohr radius"][0]
>>> rm = [np.array([[0, 0], [a0, 0]]),
... np.array([[0, 0], [0, 0]]),
... np.array([[0, 0], [0, 0]])]
>>> xi = np.array([[[0, 1], [1, 0]]])
>>> omega_level = [0, 2.4e15]
>>> gamma21 = 2*np.pi*6e6
>>> gamma = np.array([[0, -gamma21], [gamma21, 0]])
>>> theta = phase_transformation(Ne, Nl, rm, xi)
>>> Ep, omega_laser = define_laser_variables(Nl)
>>> laser = PlaneWave(0, 0, 0, 0)
>>> epsilonp = [laser.epsilonp]
>>> k = [laser.k]
>>> detuning_knob = [symbols("delta1", real=True)]
A map to unfold the density matrix.
>>> unfolding = Unfolding(Ne, True, True, True)
>>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI")
>>> Eps = [Eps]
>>> t = np.linspace(0, 1e-6, 11)
>>> rho0 = np.array([[1, 0], [0, 0]])
>>> rho0 = unfolding(rho0)
We define the Doppler broadening.
>>> Nvz = 15
>>> shape = [Nvz]
>>> stds = [[-4, 4]]
>>> T = 273.15+20
>>> mass = Atom("Rb", 87).mass
>>> aux = (shape, stds, T, mass, detuning_knob, k,
... omega_level, xi, theta, unfolding, ["z", "x", "y"],
... True)
>>> doppler_effect = DopplerBroadening(*aux)
We get a function for the frequency sweep of time evolution.
>>> aux = (Ep, epsilonp, gamma,
... omega_level, rm, xi, theta,
... doppler_effect,
... True,
... "eqs")
>>> inhomo_time_evolution = fast_inhomo_sweep_time_evolution(*aux)
>>> amp = 1000e6*2*np.pi
>>> Ndelta = 101
>>> detuning_knobs = [[-amp, amp, Ndelta]]
>>> deltas, rhot = inhomo_time_evolution(t, rho0, Eps, detuning_knobs)
>>> print rhot.shape
(101, 11, 15, 3) | Below is the the instruction that describes the task:
### Input:
r"""Return a spectrum of time evolutions of the density matrix.
We test a basic two-level system.
>>> from fast.bloch import phase_transformation
>>> from fast import PlaneWave, electric_field_amplitude_top, Atom
>>> Ne = 2
>>> Nl = 1
>>> a0 = physical_constants["Bohr radius"][0]
>>> rm = [np.array([[0, 0], [a0, 0]]),
... np.array([[0, 0], [0, 0]]),
... np.array([[0, 0], [0, 0]])]
>>> xi = np.array([[[0, 1], [1, 0]]])
>>> omega_level = [0, 2.4e15]
>>> gamma21 = 2*np.pi*6e6
>>> gamma = np.array([[0, -gamma21], [gamma21, 0]])
>>> theta = phase_transformation(Ne, Nl, rm, xi)
>>> Ep, omega_laser = define_laser_variables(Nl)
>>> laser = PlaneWave(0, 0, 0, 0)
>>> epsilonp = [laser.epsilonp]
>>> k = [laser.k]
>>> detuning_knob = [symbols("delta1", real=True)]
A map to unfold the density matrix.
>>> unfolding = Unfolding(Ne, True, True, True)
>>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI")
>>> Eps = [Eps]
>>> t = np.linspace(0, 1e-6, 11)
>>> rho0 = np.array([[1, 0], [0, 0]])
>>> rho0 = unfolding(rho0)
We define the Doppler broadening.
>>> Nvz = 15
>>> shape = [Nvz]
>>> stds = [[-4, 4]]
>>> T = 273.15+20
>>> mass = Atom("Rb", 87).mass
>>> aux = (shape, stds, T, mass, detuning_knob, k,
... omega_level, xi, theta, unfolding, ["z", "x", "y"],
... True)
>>> doppler_effect = DopplerBroadening(*aux)
We get a function for the frequency sweep of time evolution.
>>> aux = (Ep, epsilonp, gamma,
... omega_level, rm, xi, theta,
... doppler_effect,
... True,
... "eqs")
>>> inhomo_time_evolution = fast_inhomo_sweep_time_evolution(*aux)
>>> amp = 1000e6*2*np.pi
>>> Ndelta = 101
>>> detuning_knobs = [[-amp, amp, Ndelta]]
>>> deltas, rhot = inhomo_time_evolution(t, rho0, Eps, detuning_knobs)
>>> print rhot.shape
(101, 11, 15, 3)
### Response:
def fast_inhomo_sweep_time_evolution(Ep, epsilonp, gamma,
omega_level, rm, xi, theta,
inhomogeneity,
semi_analytic=True,
file_name=None, return_code=False):
r"""Return a spectrum of time evolutions of the density matrix.
We test a basic two-level system.
>>> from fast.bloch import phase_transformation
>>> from fast import PlaneWave, electric_field_amplitude_top, Atom
>>> Ne = 2
>>> Nl = 1
>>> a0 = physical_constants["Bohr radius"][0]
>>> rm = [np.array([[0, 0], [a0, 0]]),
... np.array([[0, 0], [0, 0]]),
... np.array([[0, 0], [0, 0]])]
>>> xi = np.array([[[0, 1], [1, 0]]])
>>> omega_level = [0, 2.4e15]
>>> gamma21 = 2*np.pi*6e6
>>> gamma = np.array([[0, -gamma21], [gamma21, 0]])
>>> theta = phase_transformation(Ne, Nl, rm, xi)
>>> Ep, omega_laser = define_laser_variables(Nl)
>>> laser = PlaneWave(0, 0, 0, 0)
>>> epsilonp = [laser.epsilonp]
>>> k = [laser.k]
>>> detuning_knob = [symbols("delta1", real=True)]
A map to unfold the density matrix.
>>> unfolding = Unfolding(Ne, True, True, True)
>>> Eps = electric_field_amplitude_top(1e-3, 1e-3, 1, "SI")
>>> Eps = [Eps]
>>> t = np.linspace(0, 1e-6, 11)
>>> rho0 = np.array([[1, 0], [0, 0]])
>>> rho0 = unfolding(rho0)
We define the Doppler broadening.
>>> Nvz = 15
>>> shape = [Nvz]
>>> stds = [[-4, 4]]
>>> T = 273.15+20
>>> mass = Atom("Rb", 87).mass
>>> aux = (shape, stds, T, mass, detuning_knob, k,
... omega_level, xi, theta, unfolding, ["z", "x", "y"],
... True)
>>> doppler_effect = DopplerBroadening(*aux)
We get a function for the frequency sweep of time evolution.
>>> aux = (Ep, epsilonp, gamma,
... omega_level, rm, xi, theta,
... doppler_effect,
... True,
... "eqs")
>>> inhomo_time_evolution = fast_inhomo_sweep_time_evolution(*aux)
>>> amp = 1000e6*2*np.pi
>>> Ndelta = 101
>>> detuning_knobs = [[-amp, amp, Ndelta]]
>>> deltas, rhot = inhomo_time_evolution(t, rho0, Eps, detuning_knobs)
>>> print rhot.shape
(101, 11, 15, 3)
"""
# We unpack variables.
if True:
Nl = xi.shape[0]
# We determine which arguments are constants.
if True:
try:
Ep = np.array([complex(Ep[l]) for l in range(Nl)])
variable_Ep = False
except:
variable_Ep = True
try:
epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)])
for l in range(Nl)]
variable_epsilonp = False
except:
variable_epsilonp = True
# We obtain code for the time evolution.
if True:
detuning_knob = symbols("delta1:"+str(Nl))
args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta,
file_name, True)
args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi,
theta, inhomogeneity, True, file_name, True)
inhomo_time_evolution = fast_inhomo_time_evolution(*args)
code = inhomo_time_evolution+"\n\n"
# We establish the arguments of the output function.
if True:
code += "def inhomo_sweep_time_evolution(t, rho0, "
if variable_Ep: code += "Ep, "
if variable_epsilonp: code += "epsilonp, "
code += "detuning_knob, "
code += "inhomo_time_evolution=inhomo_time_evolution):\n"
code += ' r"""A fast frequency sweep of the steady state."""\n'
# Code to determine the sweep range.
if True:
code += """ sweepN = -1\n"""
code += """ for i, delta in enumerate(detuning_knob):\n"""
code += """ if hasattr(delta, "__getitem__"):\n"""
code += """ sweepN = i\n"""
code += """ delta0 = delta[0]\n"""
code += """ deltaf = delta[1]\n"""
code += """ Ndelta = delta[2]\n"""
code += """ break\n\n"""
code += """ if sweepN == -1:\n"""
code += """ s = 'One of the detuning knobs '\n"""
code += """ s += 'must be of the form '\n"""
code += """ s += '(start, stop, Nsteps)'\n"""
code += """ raise ValueError(s)\n\n"""
code += """ deltas = np.linspace(delta0, deltaf, Ndelta)\n\n"""
# We call time_evolution.
if True:
code += " args = [[t, rho0, "
if variable_Ep: code += "Ep, "
if variable_epsilonp: code += "epsilonp, "
code += """list(detuning_knob[:sweepN]) +\n"""
code += """ [deltas[i]] +\n"""
code += """ list(detuning_knob[sweepN+1:])]\n"""
code += """ for i in range(Ndelta)]\n\n"""
code += " rho = np.array([inhomo_time_evolution(*argsi)\n"
code += " for argsi in args])\n\n"
# We finish the code.
if True:
code += " return deltas, rho\n"
# We write the code to file if provided, and execute it.
if True:
if file_name is not None:
f = file(file_name+".py", "w")
f.write(code)
f.close()
inhomo_sweep_time_evolution = code
if not return_code:
exec inhomo_sweep_time_evolution
return inhomo_sweep_time_evolution |
def get_config(data, *models, **kwargs):
'''
Return the native config.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.get_config {} models.openconfig_interfaces
Output Example:
.. code-block:: text
interface et1
ip address 192.168.1.1/24
description Uplink1
mtu 9000
interface et2
ip address 192.168.2.1/24
description Uplink2
mtu 9000
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
profiles = kwargs.pop('profiles', [])
if not profiles and hasattr(napalm_device, 'profile'): # pylint: disable=undefined-variable
profiles = napalm_device.profile # pylint: disable=undefined-variable
if not profiles:
profiles = [__grains__.get('os')]
parser_kwargs = {
'profile': profiles
}
root = _get_root_object(models)
root.load_dict(data)
native_config = root.translate_config(**parser_kwargs)
log.debug('Generated config')
log.debug(native_config)
return native_config | Return the native config.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.get_config {} models.openconfig_interfaces
Output Example:
.. code-block:: text
interface et1
ip address 192.168.1.1/24
description Uplink1
mtu 9000
interface et2
ip address 192.168.2.1/24
description Uplink2
mtu 9000 | Below is the instruction that describes the task:
### Input:
Return the native config.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.get_config {} models.openconfig_interfaces
Output Example:
.. code-block:: text
interface et1
ip address 192.168.1.1/24
description Uplink1
mtu 9000
interface et2
ip address 192.168.2.1/24
description Uplink2
mtu 9000
### Response:
def get_config(data, *models, **kwargs):
'''
Return the native config.
data
Dictionary structured with respect to the models referenced.
models
A list of models to be used when generating the config.
profiles: ``None``
Use certain profiles to generate the config.
If not specified, will use the platform default profile(s).
CLI Example:
.. code-block:: bash
salt '*' napalm_yang.get_config {} models.openconfig_interfaces
Output Example:
.. code-block:: text
interface et1
ip address 192.168.1.1/24
description Uplink1
mtu 9000
interface et2
ip address 192.168.2.1/24
description Uplink2
mtu 9000
'''
if isinstance(models, tuple) and isinstance(models[0], list):
models = models[0]
profiles = kwargs.pop('profiles', [])
if not profiles and hasattr(napalm_device, 'profile'): # pylint: disable=undefined-variable
profiles = napalm_device.profile # pylint: disable=undefined-variable
if not profiles:
profiles = [__grains__.get('os')]
parser_kwargs = {
'profile': profiles
}
root = _get_root_object(models)
root.load_dict(data)
native_config = root.translate_config(**parser_kwargs)
log.debug('Generated config')
log.debug(native_config)
return native_config |
def createRecordSensor(network, name, dataSource):
"""
Creates a RecordSensor region that allows us to specify a file record
stream as the input source.
"""
# Specific type of region. Possible options can be found in /nupic/regions/
regionType = "py.RecordSensor"
# Creates a json from specified dictionary.
regionParams = json.dumps({"verbosity": _VERBOSITY})
network.addRegion(name, regionType, regionParams)
# getSelf returns the actual region, instead of a region wrapper
sensorRegion = network.regions[name].getSelf()
# Specify how RecordSensor encodes input values
sensorRegion.encoder = createEncoder()
# Specify which sub-encoder should be used for "actValueOut"
network.regions[name].setParameter("predictedField", "consumption")
# Specify the dataSource as a file record stream instance
sensorRegion.dataSource = dataSource
return sensorRegion | Creates a RecordSensor region that allows us to specify a file record
stream as the input source. | Below is the instruction that describes the task:
### Input:
Creates a RecordSensor region that allows us to specify a file record
stream as the input source.
### Response:
def createRecordSensor(network, name, dataSource):
"""
Creates a RecordSensor region that allows us to specify a file record
stream as the input source.
"""
# Specific type of region. Possible options can be found in /nupic/regions/
regionType = "py.RecordSensor"
# Creates a json from specified dictionary.
regionParams = json.dumps({"verbosity": _VERBOSITY})
network.addRegion(name, regionType, regionParams)
# getSelf returns the actual region, instead of a region wrapper
sensorRegion = network.regions[name].getSelf()
# Specify how RecordSensor encodes input values
sensorRegion.encoder = createEncoder()
# Specify which sub-encoder should be used for "actValueOut"
network.regions[name].setParameter("predictedField", "consumption")
# Specify the dataSource as a file record stream instance
sensorRegion.dataSource = dataSource
return sensorRegion |
def rerun_process(self, pid, title=None, agent=None):
    '''
    rerun_process(self, pid, title=None, agent=None)
    Rerun an existing process, returning the id of the new run.
    :Parameters:
    * *pid* (`string`) -- Process id to rerun
    * *title* (`string`) -- Title for the process
    * *agent* (`string`) -- a valid value may be one of the following: agent identifier, agent identifiers (list) : ["agent_1", "agent_2"..], "all", "any"
    :return: process id
    '''
    # Build the request payload only from the options the caller supplied.
    payload = {}
    if title:
        payload['name'] = title
    if agent:
        payload['agents'] = agent
    # Attach the currently running process (if any) as the parent flow.
    if self.input.get('pid'):
        payload['pflow_id'] = self.input.get('pid')
    response = self._call_rest_api('post', '/processes/' + pid + '/rerun',
                                   data=payload,
                                   error='Failed to create a new process')
    # The API returns a list of new pids; anything else signals an error.
    if not isinstance(response, list):
        raise OperetoClientError(str(response))
    rerun_pid = response[0]
    self.logger.info('Re-executing process [%s] [new process pid = %s] ' % (pid, rerun_pid))
    return str(rerun_pid) | rerun_process(self, pid, title=None, agent=None)
Reruns a process
:Parameters:
* *pid* (`string`) -- Process id to rerun
* *title* (`string`) -- Title for the process
* *agent* (`string`) -- a valid value may be one of the following: agent identifier, agent identifiers (list) : ["agent_1", "agent_2"..], "all", "any"
:return: process id | Below is the instruction that describes the task:
### Input:
rerun_process(self, pid, title=None, agent=None)
Reruns a process
:Parameters:
* *pid* (`string`) -- Process id to rerun
* *title* (`string`) -- Title for the process
* *agent* (`string`) -- a valid value may be one of the following: agent identifier, agent identifiers (list) : ["agent_1", "agent_2"..], "all", "any"
:return: process id
### Response:
def rerun_process(self, pid, title=None, agent=None):
'''
rerun_process(self, pid, title=None, agent=None)
Reruns a process
:Parameters:
* *pid* (`string`) -- Process id to rerun
* *title* (`string`) -- Title for the process
* *agent* (`string`) -- a valid value may be one of the following: agent identifier, agent identifiers (list) : ["agent_1", "agent_2"..], "all", "any"
:return: process id
'''
request_data = {}
if title:
request_data['name']=title
if agent:
request_data['agents']=agent
if self.input.get('pid'):
request_data['pflow_id']=self.input.get('pid')
ret_data= self._call_rest_api('post', '/processes/'+pid+'/rerun', data=request_data, error='Failed to create a new process')
if not isinstance(ret_data, list):
raise OperetoClientError(str(ret_data))
new_pid = ret_data[0]
message = 'Re-executing process [%s] [new process pid = %s] '%(pid, new_pid)
self.logger.info(message)
return str(new_pid) |
def local_session(factory):
"""Cache a session thread local for up to 45m"""
factory_region = getattr(factory, 'region', 'global')
s = getattr(CONN_CACHE, factory_region, {}).get('session')
t = getattr(CONN_CACHE, factory_region, {}).get('time')
n = time.time()
if s is not None and t + (60 * 45) > n:
return s
s = factory()
setattr(CONN_CACHE, factory_region, {'session': s, 'time': n})
return s | Cache a session thread local for up to 45m | Below is the the instruction that describes the task:
### Input:
Cache a session thread local for up to 45m
### Response:
def local_session(factory):
"""Cache a session thread local for up to 45m"""
factory_region = getattr(factory, 'region', 'global')
s = getattr(CONN_CACHE, factory_region, {}).get('session')
t = getattr(CONN_CACHE, factory_region, {}).get('time')
n = time.time()
if s is not None and t + (60 * 45) > n:
return s
s = factory()
setattr(CONN_CACHE, factory_region, {'session': s, 'time': n})
return s |
def _find_usage_clusters(self):
"""
Find the ECS service usage for clusters. Calls
:py:meth:`~._find_usage_one_cluster` for each cluster.
"""
count = 0
fargate_task_count = 0
paginator = self.conn.get_paginator('list_clusters')
for page in paginator.paginate():
for cluster_arn in page['clusterArns']:
count += 1
resp = self.conn.describe_clusters(
clusters=[cluster_arn], include=['STATISTICS']
)
cluster = resp['clusters'][0]
self.limits[
'Container Instances per Cluster'
]._add_current_usage(
cluster['registeredContainerInstancesCount'],
aws_type='AWS::ECS::ContainerInstance',
resource_id=cluster['clusterName']
)
self.limits['Services per Cluster']._add_current_usage(
cluster['activeServicesCount'],
aws_type='AWS::ECS::Service',
resource_id=cluster['clusterName']
)
# Note: 'statistics' is not always present in API responses,
# even if requested. As far as I can tell, it's omitted if
# a cluster has no Fargate tasks.
for stat in cluster.get('statistics', []):
if stat['name'] != 'runningFargateTasksCount':
continue
logger.debug(
'Found %s Fargate tasks in cluster %s',
stat['value'], cluster_arn
)
fargate_task_count += int(stat['value'])
self._find_usage_one_cluster(cluster['clusterName'])
self.limits['Fargate Tasks']._add_current_usage(
fargate_task_count, aws_type='AWS::ECS::Task'
)
self.limits['Clusters']._add_current_usage(
count, aws_type='AWS::ECS::Cluster'
) | Find the ECS service usage for clusters. Calls
:py:meth:`~._find_usage_one_cluster` for each cluster. | Below is the instruction that describes the task:
### Input:
Find the ECS service usage for clusters. Calls
:py:meth:`~._find_usage_one_cluster` for each cluster.
### Response:
def _find_usage_clusters(self):
"""
Find the ECS service usage for clusters. Calls
:py:meth:`~._find_usage_one_cluster` for each cluster.
"""
count = 0
fargate_task_count = 0
paginator = self.conn.get_paginator('list_clusters')
for page in paginator.paginate():
for cluster_arn in page['clusterArns']:
count += 1
resp = self.conn.describe_clusters(
clusters=[cluster_arn], include=['STATISTICS']
)
cluster = resp['clusters'][0]
self.limits[
'Container Instances per Cluster'
]._add_current_usage(
cluster['registeredContainerInstancesCount'],
aws_type='AWS::ECS::ContainerInstance',
resource_id=cluster['clusterName']
)
self.limits['Services per Cluster']._add_current_usage(
cluster['activeServicesCount'],
aws_type='AWS::ECS::Service',
resource_id=cluster['clusterName']
)
# Note: 'statistics' is not always present in API responses,
# even if requested. As far as I can tell, it's omitted if
# a cluster has no Fargate tasks.
for stat in cluster.get('statistics', []):
if stat['name'] != 'runningFargateTasksCount':
continue
logger.debug(
'Found %s Fargate tasks in cluster %s',
stat['value'], cluster_arn
)
fargate_task_count += int(stat['value'])
self._find_usage_one_cluster(cluster['clusterName'])
self.limits['Fargate Tasks']._add_current_usage(
fargate_task_count, aws_type='AWS::ECS::Task'
)
self.limits['Clusters']._add_current_usage(
count, aws_type='AWS::ECS::Cluster'
) |
def _build_custom_rv(distribution, sample_shape, value, name):
"""RandomVariable constructor with a dummy name argument."""
# Program transformations (e.g., `make_log_joint_fn`) assume that
# the traced constructor has `name` and `value` kwargs, enabling
# them to override the value of an RV according to its name.
# User-defined RVs inherit their name from the provided
# distribution; this helper method exposes the name as a dummy kwarg
# so that it's visible to program transformations.
del name # unused
return RandomVariable(distribution=distribution,
sample_shape=sample_shape,
value=value) | RandomVariable constructor with a dummy name argument. | Below is the the instruction that describes the task:
### Input:
RandomVariable constructor with a dummy name argument.
### Response:
def _build_custom_rv(distribution, sample_shape, value, name):
"""RandomVariable constructor with a dummy name argument."""
# Program transformations (e.g., `make_log_joint_fn`) assume that
# the traced constructor has `name` and `value` kwargs, enabling
# them to override the value of an RV according to its name.
# User-defined RVs inherit their name from the provided
# distribution; this helper method exposes the name as a dummy kwarg
# so that it's visible to program transformations.
del name # unused
return RandomVariable(distribution=distribution,
sample_shape=sample_shape,
value=value) |
def to_multi_dim_timeseries(self):
"""Return a TimeSeries with the values of :py:obj:`self`
The index of the row is used for the timestamp
:return: Return a new MultiDimensionalTimeSeries with the values
of the Matrix
:rtype: MultiDimensionalTimeSeries
"""
ts = MultiDimensionalTimeSeries(dimensions=self.get_width())
for row in xrange(self.get_height()):
newEntry = []
for col in xrange(self.get_width()):
newEntry.append(self.get_value(col, row))
ts.add_entry(row, newEntry)
return ts | Return a TimeSeries with the values of :py:obj:`self`
The index of the row is used for the timestamp
:return: Return a new MultiDimensionalTimeSeries with the values
of the Matrix
:rtype: MultiDimensionalTimeSeries | Below is the instruction that describes the task:
### Input:
Return a TimeSeries with the values of :py:obj:`self`
The index of the row is used for the timestamp
:return: Return a new MultiDimensionalTimeSeries with the values
of the Matrix
:rtype: MultiDimensionalTimeSeries
### Response:
def to_multi_dim_timeseries(self):
"""Return a TimeSeries with the values of :py:obj:`self`
The index of the row is used for the timestamp
:return: Return a new MultiDimensionalTimeSeries with the values
of the Matrix
:rtype: MultiDimensionalTimeSeries
"""
ts = MultiDimensionalTimeSeries(dimensions=self.get_width())
for row in xrange(self.get_height()):
newEntry = []
for col in xrange(self.get_width()):
newEntry.append(self.get_value(col, row))
ts.add_entry(row, newEntry)
return ts |
def convert_ids2objects(self):
""" Convert object IDs from `self._json_params` to objects if needed.
Only IDs that belong to relationship field of `self.Model`
are converted.
"""
if not self.Model:
log.info("%s has no model defined" % self.__class__.__name__)
return
for field in self._json_params.keys():
if not engine.is_relationship_field(field, self.Model):
continue
rel_model_cls = engine.get_relationship_cls(field, self.Model)
self.id2obj(field, rel_model_cls) | Convert object IDs from `self._json_params` to objects if needed.
Only IDs that belong to relationship field of `self.Model`
are converted. | Below is the instruction that describes the task:
### Input:
Convert object IDs from `self._json_params` to objects if needed.
Only IDs that belong to relationship field of `self.Model`
are converted.
### Response:
def convert_ids2objects(self):
""" Convert object IDs from `self._json_params` to objects if needed.
Only IDs that belong to relationship field of `self.Model`
are converted.
"""
if not self.Model:
log.info("%s has no model defined" % self.__class__.__name__)
return
for field in self._json_params.keys():
if not engine.is_relationship_field(field, self.Model):
continue
rel_model_cls = engine.get_relationship_cls(field, self.Model)
self.id2obj(field, rel_model_cls) |
def fill_triangular_inverse(x, upper=False, name=None):
  """Creates a vector from a (batch of) triangular matrix.
  The vector is created from the lower-triangular or upper-triangular portion
  depending on the value of the parameter `upper`.
  If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
  `[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
  Example:
  ```python
  fill_triangular_inverse(
      [[4, 0, 0],
       [6, 5, 0],
       [3, 2, 1]])
  # ==> [1, 2, 3, 4, 5, 6]
  fill_triangular_inverse(
      [[1, 2, 3],
       [0, 5, 6],
       [0, 0, 4]], upper=True)
  # ==> [1, 2, 3, 4, 5, 6]
  ```
  Args:
    x: `Tensor` representing lower (or upper) triangular elements.
    upper: Python `bool` representing whether output matrix should be upper
      triangular (`True`) or lower triangular (`False`, default).
    name: Python `str`. The name to give this op.
  Returns:
    flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
      (or upper) triangular elements from `x`.
  """
  with tf.name_scope(name or "fill_triangular_inverse"):
    x = tf.convert_to_tensor(value=x, name="x")
    # Side length n of the (batch of) matrices; prefer a static Python int
    # when the shape is known at graph-construction time.
    n = tf.compat.dimension_value(
        tensorshape_util.with_rank_at_least(x.shape, 2)[-1])
    if n is not None:
      # Static path: one triangle holds m = n(n+1)/2 elements.
      n = np.int32(n)
      m = np.int32((n * (n + 1)) // 2)
      static_final_shape = x.shape[:-2].concatenate([m])
    else:
      # Dynamic path: n and m are computed as tensors at runtime; the static
      # output shape ends in an unknown dimension.
      n = tf.shape(input=x)[-1]
      m = (n * (n + 1)) // 2
      static_final_shape = tensorshape_util.with_rank_at_least(
          x.shape, 2)[:-2].concatenate([None])
    ndims = prefer_static_rank(x)
    # Split off the row that carries the first n vector elements; the
    # remaining (n-1) x n block is consolidated below. For the lower case the
    # last row is reversed so element order matches the upper case.
    if upper:
      initial_elements = x[..., 0, :]
      triangular_portion = x[..., 1:, :]
    else:
      initial_elements = tf.reverse(x[..., -1, :], axis=[ndims - 2])
      triangular_portion = x[..., :-1, :]
    # Add the block to its own 180-degree rotation: since the input is
    # triangular (zeros off the triangle, per the docstring contract), the
    # zero half of each row is filled in by the rotated copy, packing the
    # triangle's entries contiguously.
    rotated_triangular_portion = tf.reverse(
        tf.reverse(triangular_portion, axis=[ndims - 1]), axis=[ndims - 2])
    consolidated_matrix = triangular_portion + rotated_triangular_portion
    # Flatten the consolidated block to length n(n-1) per batch member and
    # keep the first m - n entries; with `initial_elements` prepended this is
    # the whole packed vector.
    end_sequence = tf.reshape(
        consolidated_matrix,
        tf.concat([tf.shape(input=x)[:-2], [n * (n - 1)]], axis=0))
    y = tf.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
    # Propagate whatever static shape information is available.
    tensorshape_util.set_shape(y, static_final_shape)
    return y | Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`. | Below is the instruction that describes the task:
### Input:
Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
### Response:
def fill_triangular_inverse(x, upper=False, name=None):
"""Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
"""
with tf.name_scope(name or "fill_triangular_inverse"):
x = tf.convert_to_tensor(value=x, name="x")
n = tf.compat.dimension_value(
tensorshape_util.with_rank_at_least(x.shape, 2)[-1])
if n is not None:
n = np.int32(n)
m = np.int32((n * (n + 1)) // 2)
static_final_shape = x.shape[:-2].concatenate([m])
else:
n = tf.shape(input=x)[-1]
m = (n * (n + 1)) // 2
static_final_shape = tensorshape_util.with_rank_at_least(
x.shape, 2)[:-2].concatenate([None])
ndims = prefer_static_rank(x)
if upper:
initial_elements = x[..., 0, :]
triangular_portion = x[..., 1:, :]
else:
initial_elements = tf.reverse(x[..., -1, :], axis=[ndims - 2])
triangular_portion = x[..., :-1, :]
rotated_triangular_portion = tf.reverse(
tf.reverse(triangular_portion, axis=[ndims - 1]), axis=[ndims - 2])
consolidated_matrix = triangular_portion + rotated_triangular_portion
end_sequence = tf.reshape(
consolidated_matrix,
tf.concat([tf.shape(input=x)[:-2], [n * (n - 1)]], axis=0))
y = tf.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
tensorshape_util.set_shape(y, static_final_shape)
return y |
def observed(obj=None, **kwds):
"""
Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions
"""
if obj is not None:
if isinstance(obj, Stochastic):
obj._observed = True
return obj
else:
p = stochastic(__func__=obj, observed=True, **kwds)
return p
kwds['observed'] = True
def instantiate_observed(func):
return stochastic(func, **kwds)
return instantiate_observed | Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions | Below is the instruction that describes the task:
### Input:
Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions
### Response:
def observed(obj=None, **kwds):
"""
Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions
"""
if obj is not None:
if isinstance(obj, Stochastic):
obj._observed = True
return obj
else:
p = stochastic(__func__=obj, observed=True, **kwds)
return p
kwds['observed'] = True
def instantiate_observed(func):
return stochastic(func, **kwds)
return instantiate_observed |
def from_gapic(operation, operations_client, result_type, **kwargs):
"""Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(operations_client.get_operation, operation.name)
cancel = functools.partial(operations_client.cancel_operation, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs) | Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation. | Below is the instruction that describes the task:
### Input:
Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
### Response:
def from_gapic(operation, operations_client, result_type, **kwargs):
"""Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(operations_client.get_operation, operation.name)
cancel = functools.partial(operations_client.cancel_operation, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs) |
def create_list_stories(
list_id_stories, number_of_stories, shuffle, max_threads
):
"""Show in a formatted way the stories for each item of the list."""
list_stories = []
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = {
executor.submit(get_story, new)
for new in list_id_stories[:number_of_stories]
}
for future in tqdm(
as_completed(futures),
desc='Getting results',
unit=' news',
):
list_stories.append(future.result())
if shuffle:
random.shuffle(list_stories)
return list_stories | Show in a formatted way the stories for each item of the list. | Below is the the instruction that describes the task:
### Input:
Show in a formatted way the stories for each item of the list.
### Response:
def create_list_stories(
list_id_stories, number_of_stories, shuffle, max_threads
):
"""Show in a formatted way the stories for each item of the list."""
list_stories = []
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = {
executor.submit(get_story, new)
for new in list_id_stories[:number_of_stories]
}
for future in tqdm(
as_completed(futures),
desc='Getting results',
unit=' news',
):
list_stories.append(future.result())
if shuffle:
random.shuffle(list_stories)
return list_stories |
def global_props(self):
''' Retrieves the global properties
used to determine rates used for calculations
in converting steempower to vests etc.
Stores these in the Utilities class as that
is where the conversions take place, however
SimpleSteem is the class that contains
steem_instance, so to to preserve heirarchy
and to avoid "spaghetti code", this method
exists in this class.
'''
if self.util.info is None:
self.util.info = self.steem_instance().get_dynamic_global_properties()
self.util.total_vesting_fund_steem = Amount(self.util.info["total_vesting_fund_steem"]).amount
self.util.total_vesting_shares = Amount(self.util.info["total_vesting_shares"]).amount
self.util.vote_power_reserve_rate = self.util.info["vote_power_reserve_rate"]
return self.util.info | Retrieves the global properties
used to determine rates used for calculations
in converting steempower to vests etc.
Stores these in the Utilities class as that
is where the conversions take place, however
SimpleSteem is the class that contains
steem_instance, so to to preserve heirarchy
and to avoid "spaghetti code", this method
exists in this class. | Below is the instruction that describes the task:
### Input:
Retrieves the global properties
used to determine rates used for calculations
in converting steempower to vests etc.
Stores these in the Utilities class as that
is where the conversions take place, however
SimpleSteem is the class that contains
steem_instance, so to to preserve heirarchy
and to avoid "spaghetti code", this method
exists in this class.
### Response:
def global_props(self):
''' Retrieves the global properties
used to determine rates used for calculations
in converting steempower to vests etc.
Stores these in the Utilities class as that
is where the conversions take place, however
SimpleSteem is the class that contains
steem_instance, so to to preserve heirarchy
and to avoid "spaghetti code", this method
exists in this class.
'''
if self.util.info is None:
self.util.info = self.steem_instance().get_dynamic_global_properties()
self.util.total_vesting_fund_steem = Amount(self.util.info["total_vesting_fund_steem"]).amount
self.util.total_vesting_shares = Amount(self.util.info["total_vesting_shares"]).amount
self.util.vote_power_reserve_rate = self.util.info["vote_power_reserve_rate"]
return self.util.info |
def unpack_messages(msgs):
import msgpack
""" Deserialize a message to python structures """
for key, msg in msgs:
record = msgpack.unpackb(msg)
record['_key'] = key
yield record | Deserialize a message to python structures | Below is the the instruction that describes the task:
### Input:
Deserialize a message to python structures
### Response:
def unpack_messages(msgs):
import msgpack
""" Deserialize a message to python structures """
for key, msg in msgs:
record = msgpack.unpackb(msg)
record['_key'] = key
yield record |
def _on_read_only_error(self, command, future):
"""Invoked when a Redis node returns an error indicating it's in
read-only mode. It will use the ``INFO REPLICATION`` command to
attempt to find the master server and failover to that, reissuing
the command to that server.
:param command: The command that was being executed
:type command: tredis.client.Command
:param future: The execution future
:type future: tornado.concurrent.Future
"""
failover_future = concurrent.TracebackFuture()
def on_replication_info(_):
common.maybe_raise_exception(failover_future)
LOGGER.debug('Failover closing current read-only connection')
self._closing = True
database = self._connection.database
self._connection.close()
self._connected.clear()
self._connect_future = concurrent.Future()
info = failover_future.result()
LOGGER.debug('Failover connecting to %s:%s', info['master_host'],
info['master_port'])
self._connection = _Connection(
info['master_host'], info['master_port'], database, self._read,
self._on_closed, self.io_loop, self._clustering)
# When the connection is re-established, re-run the command
self.io_loop.add_future(
self._connect_future,
lambda f: self._connection.execute(
command._replace(connection=self._connection), future))
# Use the normal connection processing flow when connecting
self.io_loop.add_future(self._connection.connect(),
self._on_connected)
if self._clustering:
command.connection.set_readonly(True)
LOGGER.debug('%s is read-only, need to failover to new master',
command.connection.name)
cmd = Command(
self._build_command(['INFO', 'REPLICATION']), self._connection,
None, common.format_info_response)
self.io_loop.add_future(failover_future, on_replication_info)
cmd.connection.execute(cmd, failover_future) | Invoked when a Redis node returns an error indicating it's in
read-only mode. It will use the ``INFO REPLICATION`` command to
attempt to find the master server and failover to that, reissuing
the command to that server.
:param command: The command that was being executed
:type command: tredis.client.Command
:param future: The execution future
:type future: tornado.concurrent.Future | Below is the the instruction that describes the task:
### Input:
Invoked when a Redis node returns an error indicating it's in
read-only mode. It will use the ``INFO REPLICATION`` command to
attempt to find the master server and failover to that, reissuing
the command to that server.
:param command: The command that was being executed
:type command: tredis.client.Command
:param future: The execution future
:type future: tornado.concurrent.Future
### Response:
def _on_read_only_error(self, command, future):
"""Invoked when a Redis node returns an error indicating it's in
read-only mode. It will use the ``INFO REPLICATION`` command to
attempt to find the master server and failover to that, reissuing
the command to that server.
:param command: The command that was being executed
:type command: tredis.client.Command
:param future: The execution future
:type future: tornado.concurrent.Future
"""
failover_future = concurrent.TracebackFuture()
def on_replication_info(_):
common.maybe_raise_exception(failover_future)
LOGGER.debug('Failover closing current read-only connection')
self._closing = True
database = self._connection.database
self._connection.close()
self._connected.clear()
self._connect_future = concurrent.Future()
info = failover_future.result()
LOGGER.debug('Failover connecting to %s:%s', info['master_host'],
info['master_port'])
self._connection = _Connection(
info['master_host'], info['master_port'], database, self._read,
self._on_closed, self.io_loop, self._clustering)
# When the connection is re-established, re-run the command
self.io_loop.add_future(
self._connect_future,
lambda f: self._connection.execute(
command._replace(connection=self._connection), future))
# Use the normal connection processing flow when connecting
self.io_loop.add_future(self._connection.connect(),
self._on_connected)
if self._clustering:
command.connection.set_readonly(True)
LOGGER.debug('%s is read-only, need to failover to new master',
command.connection.name)
cmd = Command(
self._build_command(['INFO', 'REPLICATION']), self._connection,
None, common.format_info_response)
self.io_loop.add_future(failover_future, on_replication_info)
cmd.connection.execute(cmd, failover_future) |
def search_metadata_sql_builder(search):
"""
Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
:class:`meteorpi_model.ObservatoryMetadataSearch`. This can then be used to retrieve the results of the search,
materialise them into :class:`meteorpi_model.ObservatoryMetadata` instances etc.
:param ObservatoryMetadataSearch search:
The search to realise
:return:
A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
"""
b = SQLBuilder(tables="""archive_metadata m
INNER JOIN archive_metadataFields f ON m.fieldId=f.uid
INNER JOIN archive_observatories l ON m.observatory=l.uid""", where_clauses=["m.observatory IS NOT NULL"])
b.add_set_membership(search.obstory_ids, 'l.publicId')
b.add_sql(search.field_name, 'f.metaKey = %s')
b.add_sql(search.time_min, 'm.time > %s')
b.add_sql(search.time_max, 'm.time < %s')
b.add_sql(search.lat_min, 'l.latitude >= %s')
b.add_sql(search.lat_max, 'l.latitude <= %s')
b.add_sql(search.long_min, 'l.longitude >= %s')
b.add_sql(search.long_max, 'l.longitude <= %s')
b.add_sql(search.item_id, 'm.publicId = %s')
# Check for import / export filters
if search.exclude_imported:
b.where_clauses.append('NOT EXISTS (SELECT * FROM archive_metadataImport i WHERE i.metadataId = m.uid')
if search.exclude_export_to is not None:
b.where_clauses.append("""
NOT EXISTS (SELECT * FROM archive_metadataExport ex
INNER JOIN archive_exportConfig c ON ex.exportConfig = c.uid
WHERE ex.metadataId = m.uid AND c.exportConfigID = %s)
""")
b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
return b | Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
:class:`meteorpi_model.ObservatoryMetadataSearch`. This can then be used to retrieve the results of the search,
materialise them into :class:`meteorpi_model.ObservatoryMetadata` instances etc.
:param ObservatoryMetadataSearch search:
The search to realise
:return:
A :class:`meteorpi_db.SQLBuilder` configured from the supplied search | Below is the the instruction that describes the task:
### Input:
Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
:class:`meteorpi_model.ObservatoryMetadataSearch`. This can then be used to retrieve the results of the search,
materialise them into :class:`meteorpi_model.ObservatoryMetadata` instances etc.
:param ObservatoryMetadataSearch search:
The search to realise
:return:
A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
### Response:
def search_metadata_sql_builder(search):
"""
Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
:class:`meteorpi_model.ObservatoryMetadataSearch`. This can then be used to retrieve the results of the search,
materialise them into :class:`meteorpi_model.ObservatoryMetadata` instances etc.
:param ObservatoryMetadataSearch search:
The search to realise
:return:
A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
"""
b = SQLBuilder(tables="""archive_metadata m
INNER JOIN archive_metadataFields f ON m.fieldId=f.uid
INNER JOIN archive_observatories l ON m.observatory=l.uid""", where_clauses=["m.observatory IS NOT NULL"])
b.add_set_membership(search.obstory_ids, 'l.publicId')
b.add_sql(search.field_name, 'f.metaKey = %s')
b.add_sql(search.time_min, 'm.time > %s')
b.add_sql(search.time_max, 'm.time < %s')
b.add_sql(search.lat_min, 'l.latitude >= %s')
b.add_sql(search.lat_max, 'l.latitude <= %s')
b.add_sql(search.long_min, 'l.longitude >= %s')
b.add_sql(search.long_max, 'l.longitude <= %s')
b.add_sql(search.item_id, 'm.publicId = %s')
# Check for import / export filters
if search.exclude_imported:
b.where_clauses.append('NOT EXISTS (SELECT * FROM archive_metadataImport i WHERE i.metadataId = m.uid')
if search.exclude_export_to is not None:
b.where_clauses.append("""
NOT EXISTS (SELECT * FROM archive_metadataExport ex
INNER JOIN archive_exportConfig c ON ex.exportConfig = c.uid
WHERE ex.metadataId = m.uid AND c.exportConfigID = %s)
""")
b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
return b |
def t_DOT(self, t):
r"\."
t.endlexpos = t.lexpos + len(t.value)
return t | r"\. | Below is the the instruction that describes the task:
### Input:
r"\.
### Response:
def t_DOT(self, t):
r"\."
t.endlexpos = t.lexpos + len(t.value)
return t |
def _stieltjes_analytical(dist, order, normed):
"""Stieltjes' method with analytical recurrence coefficients."""
dimensions = len(dist)
mom_order = numpy.arange(order+1).repeat(dimensions)
mom_order = mom_order.reshape(order+1, dimensions).T
coeff1, coeff2 = dist.ttr(mom_order)
coeff2[:, 0] = 1.
poly = chaospy.poly.collection.core.variable(dimensions)
if normed:
orth = [
poly**0*numpy.ones(dimensions),
(poly-coeff1[:, 0])/numpy.sqrt(coeff2[:, 1]),
]
for order_ in range(1, order):
orth.append(
(orth[-1]*(poly-coeff1[:, order_])
-orth[-2]*numpy.sqrt(coeff2[:, order_]))
/numpy.sqrt(coeff2[:, order_+1])
)
norms = numpy.ones(coeff2.shape)
else:
orth = [poly-poly, poly**0*numpy.ones(dimensions)]
for order_ in range(order):
orth.append(
orth[-1]*(poly-coeff1[:, order_])
- orth[-2]*coeff2[:, order_]
)
orth = orth[1:]
norms = numpy.cumprod(coeff2, 1)
return orth, norms, coeff1, coeff2 | Stieltjes' method with analytical recurrence coefficients. | Below is the the instruction that describes the task:
### Input:
Stieltjes' method with analytical recurrence coefficients.
### Response:
def _stieltjes_analytical(dist, order, normed):
"""Stieltjes' method with analytical recurrence coefficients."""
dimensions = len(dist)
mom_order = numpy.arange(order+1).repeat(dimensions)
mom_order = mom_order.reshape(order+1, dimensions).T
coeff1, coeff2 = dist.ttr(mom_order)
coeff2[:, 0] = 1.
poly = chaospy.poly.collection.core.variable(dimensions)
if normed:
orth = [
poly**0*numpy.ones(dimensions),
(poly-coeff1[:, 0])/numpy.sqrt(coeff2[:, 1]),
]
for order_ in range(1, order):
orth.append(
(orth[-1]*(poly-coeff1[:, order_])
-orth[-2]*numpy.sqrt(coeff2[:, order_]))
/numpy.sqrt(coeff2[:, order_+1])
)
norms = numpy.ones(coeff2.shape)
else:
orth = [poly-poly, poly**0*numpy.ones(dimensions)]
for order_ in range(order):
orth.append(
orth[-1]*(poly-coeff1[:, order_])
- orth[-2]*coeff2[:, order_]
)
orth = orth[1:]
norms = numpy.cumprod(coeff2, 1)
return orth, norms, coeff1, coeff2 |
def resultsperpage(self, value):
""" Set the number of results that will be retrieved by the request.
:param value: 'resultsperpage' parameter value for the rest api call
:type value: str
Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object
"""
instance = copy(self)
instance._filters.append({
'resultsperpage': value
})
return instance | Set the number of results that will be retrieved by the request.
:param value: 'resultsperpage' parameter value for the rest api call
:type value: str
Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object | Below is the the instruction that describes the task:
### Input:
Set the number of results that will be retrieved by the request.
:param value: 'resultsperpage' parameter value for the rest api call
:type value: str
Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object
### Response:
def resultsperpage(self, value):
""" Set the number of results that will be retrieved by the request.
:param value: 'resultsperpage' parameter value for the rest api call
:type value: str
Take a look at https://apihelp.surveygizmo.com/help/surveyresponse-sub-object
"""
instance = copy(self)
instance._filters.append({
'resultsperpage': value
})
return instance |
def set_configs(self, key, d):
"""Set the whole configuration for a key"""
if '_config' in self.proxy:
self.proxy['_config'][key] = d
else:
self.proxy['_config'] = {key: d} | Set the whole configuration for a key | Below is the the instruction that describes the task:
### Input:
Set the whole configuration for a key
### Response:
def set_configs(self, key, d):
"""Set the whole configuration for a key"""
if '_config' in self.proxy:
self.proxy['_config'][key] = d
else:
self.proxy['_config'] = {key: d} |
def import_module(mod_str):
"""
inspired by post on stackoverflow
:param name: import path string like 'netshowlib.linux.provider_discovery'
:return: module matching the import statement
"""
_module = __import__(mod_str)
_mod_parts = mod_str.split('.')
for _mod_part in _mod_parts[1:]:
_module = getattr(_module, _mod_part)
return _module | inspired by post on stackoverflow
:param name: import path string like 'netshowlib.linux.provider_discovery'
:return: module matching the import statement | Below is the the instruction that describes the task:
### Input:
inspired by post on stackoverflow
:param name: import path string like 'netshowlib.linux.provider_discovery'
:return: module matching the import statement
### Response:
def import_module(mod_str):
"""
inspired by post on stackoverflow
:param name: import path string like 'netshowlib.linux.provider_discovery'
:return: module matching the import statement
"""
_module = __import__(mod_str)
_mod_parts = mod_str.split('.')
for _mod_part in _mod_parts[1:]:
_module = getattr(_module, _mod_part)
return _module |
def main():
"""Main function."""
time_start = time.time()
logging.info('loading vocab file from dataset: %s', args.vocab)
vocab_obj = nlp.data.utils._load_pretrained_vocab(args.vocab)
tokenizer = BERTTokenizer(
vocab=vocab_obj, lower='uncased' in args.vocab)
input_files = []
for input_pattern in args.input_file.split(','):
input_files.extend(glob.glob(os.path.expanduser(input_pattern)))
logging.info('*** Reading from %d input files ***', len(input_files))
for input_file in input_files:
logging.info(' %s', input_file)
num_outputs = min(args.num_outputs, len(input_files))
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
rng = random.Random(args.random_seed)
nworker = args.num_workers
# calculate the number of splits
file_splits = []
split_size = (len(input_files) + num_outputs - 1) // num_outputs
for i in range(num_outputs - 1):
file_splits.append(input_files[i*split_size:(i+1)*split_size])
file_splits.append(input_files[(num_outputs-1)*split_size:])
# prepare workload
suffix = 'npz' if args.format == 'numpy' else 'rec'
count = 0
map_args = []
pool_args = (tokenizer, args.max_seq_length, args.dupe_factor,\
args.short_seq_prob, args.masked_lm_prob,
args.max_predictions_per_seq, rng)
for i, file_split in enumerate(file_splits):
out = os.path.join(output_dir, 'part-{}.{}'.format(str(i).zfill(3), suffix))
count += len(file_split)
map_args.append((file_split, out) + pool_args)
# sanity check
assert count == len(input_files)
# dispatch to workers
if nworker > 1:
pool = Pool(nworker)
pool.map(create_training_instances, map_args)
else:
for map_arg in map_args:
create_training_instances(map_arg)
time_end = time.time()
logging.info('Time cost=%.1f', time_end - time_start) | Main function. | Below is the the instruction that describes the task:
### Input:
Main function.
### Response:
def main():
"""Main function."""
time_start = time.time()
logging.info('loading vocab file from dataset: %s', args.vocab)
vocab_obj = nlp.data.utils._load_pretrained_vocab(args.vocab)
tokenizer = BERTTokenizer(
vocab=vocab_obj, lower='uncased' in args.vocab)
input_files = []
for input_pattern in args.input_file.split(','):
input_files.extend(glob.glob(os.path.expanduser(input_pattern)))
logging.info('*** Reading from %d input files ***', len(input_files))
for input_file in input_files:
logging.info(' %s', input_file)
num_outputs = min(args.num_outputs, len(input_files))
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
rng = random.Random(args.random_seed)
nworker = args.num_workers
# calculate the number of splits
file_splits = []
split_size = (len(input_files) + num_outputs - 1) // num_outputs
for i in range(num_outputs - 1):
file_splits.append(input_files[i*split_size:(i+1)*split_size])
file_splits.append(input_files[(num_outputs-1)*split_size:])
# prepare workload
suffix = 'npz' if args.format == 'numpy' else 'rec'
count = 0
map_args = []
pool_args = (tokenizer, args.max_seq_length, args.dupe_factor,\
args.short_seq_prob, args.masked_lm_prob,
args.max_predictions_per_seq, rng)
for i, file_split in enumerate(file_splits):
out = os.path.join(output_dir, 'part-{}.{}'.format(str(i).zfill(3), suffix))
count += len(file_split)
map_args.append((file_split, out) + pool_args)
# sanity check
assert count == len(input_files)
# dispatch to workers
if nworker > 1:
pool = Pool(nworker)
pool.map(create_training_instances, map_args)
else:
for map_arg in map_args:
create_training_instances(map_arg)
time_end = time.time()
logging.info('Time cost=%.1f', time_end - time_start) |
def exists_case_sensitive(path):
"""
Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python
can only import using the case of the real file.
"""
result = os.path.exists(path)
if sys.platform.startswith('win') and result:
directory, basename = os.path.split(path)
result = basename in os.listdir(directory)
return result | Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python
can only import using the case of the real file. | Below is the the instruction that describes the task:
### Input:
Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python
can only import using the case of the real file.
### Response:
def exists_case_sensitive(path):
"""
Returns if the given path exists and also matches the case on Windows.
When finding files that can be imported, it is important for the cases to match because while
file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python
can only import using the case of the real file.
"""
result = os.path.exists(path)
if sys.platform.startswith('win') and result:
directory, basename = os.path.split(path)
result = basename in os.listdir(directory)
return result |
def add_bundle(self, prov_bundle, identifier):
"""
Verbose method of adding a bundle.
Can also be done as:
>>> api = Api()
>>> document = api.document.get(148)
>>> document.bundles['identifier'] = prov_bundle
:param prov_bundle: The bundle to be added
:param str identifier: URI or QName for this bundle
:type prov_bundle: :py:class:`prov.model.ProvDocument` or :py:class:`str`
"""
if self.abstract:
raise AbstractDocumentException()
self._api.add_bundle(self.id, prov_bundle.serialize(), identifier) | Verbose method of adding a bundle.
Can also be done as:
>>> api = Api()
>>> document = api.document.get(148)
>>> document.bundles['identifier'] = prov_bundle
:param prov_bundle: The bundle to be added
:param str identifier: URI or QName for this bundle
:type prov_bundle: :py:class:`prov.model.ProvDocument` or :py:class:`str` | Below is the the instruction that describes the task:
### Input:
Verbose method of adding a bundle.
Can also be done as:
>>> api = Api()
>>> document = api.document.get(148)
>>> document.bundles['identifier'] = prov_bundle
:param prov_bundle: The bundle to be added
:param str identifier: URI or QName for this bundle
:type prov_bundle: :py:class:`prov.model.ProvDocument` or :py:class:`str`
### Response:
def add_bundle(self, prov_bundle, identifier):
"""
Verbose method of adding a bundle.
Can also be done as:
>>> api = Api()
>>> document = api.document.get(148)
>>> document.bundles['identifier'] = prov_bundle
:param prov_bundle: The bundle to be added
:param str identifier: URI or QName for this bundle
:type prov_bundle: :py:class:`prov.model.ProvDocument` or :py:class:`str`
"""
if self.abstract:
raise AbstractDocumentException()
self._api.add_bundle(self.id, prov_bundle.serialize(), identifier) |
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier() | Helper function to synchronize (barrier) among all processes when
using distributed training | Below is the the instruction that describes the task:
### Input:
Helper function to synchronize (barrier) among all processes when
using distributed training
### Response:
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier() |
def _split_dimension(text):
"""
Returns the number and unit from the given piece of text as a pair.
>>> _split_dimension('1pt')
(1, 'pt')
>>> _split_dimension('1 pt')
(1, 'pt')
>>> _split_dimension('1 \tpt')
(1, 'pt')
>>> _split_dimension('1 \tpt ')
(1, 'pt')
>>> _split_dimension(' 1 \tpt ')
(1, 'pt')
>>> _split_dimension('3')
(3, None)
>>> _split_dimension('-12.43mm')
(-12.43, 'mm')
>>> _split_dimension('-12.43"')
(-12.43, '"')
"""
match = _dimension_finder.match(text)
if not match:
raise DimensionError("Can't parse dimension '%s'." % text)
number = match.group(1)
unit = match.group(4)
if '.' in number:
return (float(number), unit)
else:
return (int(number), unit) | Returns the number and unit from the given piece of text as a pair.
>>> _split_dimension('1pt')
(1, 'pt')
>>> _split_dimension('1 pt')
(1, 'pt')
>>> _split_dimension('1 \tpt')
(1, 'pt')
>>> _split_dimension('1 \tpt ')
(1, 'pt')
>>> _split_dimension(' 1 \tpt ')
(1, 'pt')
>>> _split_dimension('3')
(3, None)
>>> _split_dimension('-12.43mm')
(-12.43, 'mm')
>>> _split_dimension('-12.43"')
(-12.43, '"') | Below is the the instruction that describes the task:
### Input:
Returns the number and unit from the given piece of text as a pair.
>>> _split_dimension('1pt')
(1, 'pt')
>>> _split_dimension('1 pt')
(1, 'pt')
>>> _split_dimension('1 \tpt')
(1, 'pt')
>>> _split_dimension('1 \tpt ')
(1, 'pt')
>>> _split_dimension(' 1 \tpt ')
(1, 'pt')
>>> _split_dimension('3')
(3, None)
>>> _split_dimension('-12.43mm')
(-12.43, 'mm')
>>> _split_dimension('-12.43"')
(-12.43, '"')
### Response:
def _split_dimension(text):
"""
Returns the number and unit from the given piece of text as a pair.
>>> _split_dimension('1pt')
(1, 'pt')
>>> _split_dimension('1 pt')
(1, 'pt')
>>> _split_dimension('1 \tpt')
(1, 'pt')
>>> _split_dimension('1 \tpt ')
(1, 'pt')
>>> _split_dimension(' 1 \tpt ')
(1, 'pt')
>>> _split_dimension('3')
(3, None)
>>> _split_dimension('-12.43mm')
(-12.43, 'mm')
>>> _split_dimension('-12.43"')
(-12.43, '"')
"""
match = _dimension_finder.match(text)
if not match:
raise DimensionError("Can't parse dimension '%s'." % text)
number = match.group(1)
unit = match.group(4)
if '.' in number:
return (float(number), unit)
else:
return (int(number), unit) |
def _find_supported(self, features, mechanism_classes):
"""
    Find the first mechanism class which supports a mechanism announced in
the given stream features.
:param features: Current XMPP stream features
:type features: :class:`~.nonza.StreamFeatures`
:param mechanism_classes: SASL mechanism classes to use
:type mechanism_classes: iterable of :class:`SASLMechanism`
sub\\ *classes*
:raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
SASL support
:return: the :class:`SASLMechanism` subclass to use and a token
:rtype: pair
Return a supported SASL mechanism class, by looking the given
stream features `features`.
If no matching mechanism is found, ``(None, None)`` is
returned. Otherwise, a pair consisting of the mechanism class and the
value returned by the respective
:meth:`~.sasl.SASLMechanism.any_supported` method is returned. The
latter is an opaque token which must be passed to the `token` argument
of :meth:`_execute` or :meth:`aiosasl.SASLMechanism.authenticate`.
"""
try:
mechanisms = features[SASLMechanisms]
except KeyError:
logger.error("No sasl mechanisms: %r", list(features))
raise errors.SASLUnavailable(
"Remote side does not support SASL") from None
remote_mechanism_list = mechanisms.get_mechanism_list()
for our_mechanism in mechanism_classes:
token = our_mechanism.any_supported(remote_mechanism_list)
if token is not None:
return our_mechanism, token
    return None, None | Find the first mechanism class which supports a mechanism announced in
the given stream features.
:param features: Current XMPP stream features
:type features: :class:`~.nonza.StreamFeatures`
:param mechanism_classes: SASL mechanism classes to use
:type mechanism_classes: iterable of :class:`SASLMechanism`
sub\\ *classes*
:raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
SASL support
:return: the :class:`SASLMechanism` subclass to use and a token
:rtype: pair
Return a supported SASL mechanism class, by looking the given
stream features `features`.
If no matching mechanism is found, ``(None, None)`` is
returned. Otherwise, a pair consisting of the mechanism class and the
value returned by the respective
:meth:`~.sasl.SASLMechanism.any_supported` method is returned. The
latter is an opaque token which must be passed to the `token` argument
of :meth:`_execute` or :meth:`aiosasl.SASLMechanism.authenticate`. | Below is the the instruction that describes the task:
### Input:
Find the first mechanism class which supports a mechanism announced in
the given stream features.
:param features: Current XMPP stream features
:type features: :class:`~.nonza.StreamFeatures`
:param mechanism_classes: SASL mechanism classes to use
:type mechanism_classes: iterable of :class:`SASLMechanism`
sub\\ *classes*
:raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
SASL support
:return: the :class:`SASLMechanism` subclass to use and a token
:rtype: pair
Return a supported SASL mechanism class, by looking the given
stream features `features`.
If no matching mechanism is found, ``(None, None)`` is
returned. Otherwise, a pair consisting of the mechanism class and the
value returned by the respective
:meth:`~.sasl.SASLMechanism.any_supported` method is returned. The
latter is an opaque token which must be passed to the `token` argument
of :meth:`_execute` or :meth:`aiosasl.SASLMechanism.authenticate`.
### Response:
def _find_supported(self, features, mechanism_classes):
"""
    Find the first mechanism class which supports a mechanism announced in
the given stream features.
:param features: Current XMPP stream features
:type features: :class:`~.nonza.StreamFeatures`
:param mechanism_classes: SASL mechanism classes to use
:type mechanism_classes: iterable of :class:`SASLMechanism`
sub\\ *classes*
:raises aioxmpp.errors.SASLUnavailable: if the peer does not announce
SASL support
:return: the :class:`SASLMechanism` subclass to use and a token
:rtype: pair
Return a supported SASL mechanism class, by looking the given
stream features `features`.
If no matching mechanism is found, ``(None, None)`` is
returned. Otherwise, a pair consisting of the mechanism class and the
value returned by the respective
:meth:`~.sasl.SASLMechanism.any_supported` method is returned. The
latter is an opaque token which must be passed to the `token` argument
of :meth:`_execute` or :meth:`aiosasl.SASLMechanism.authenticate`.
"""
try:
mechanisms = features[SASLMechanisms]
except KeyError:
logger.error("No sasl mechanisms: %r", list(features))
raise errors.SASLUnavailable(
"Remote side does not support SASL") from None
remote_mechanism_list = mechanisms.get_mechanism_list()
for our_mechanism in mechanism_classes:
token = our_mechanism.any_supported(remote_mechanism_list)
if token is not None:
return our_mechanism, token
return None, None |
def substitute_url_with_ref(self, txt):
"""
In the string `txt`, replace links to online docs with
corresponding sphinx cross-references.
"""
# Find links
mi = re.finditer(r'\[([^\]]+|\[[^\]]+\])\]\(([^\)]+)\)', txt)
if mi:
# Iterate over match objects in iterator returned by
# re.finditer
for mo in mi:
# Get components of current match: full matching text,
# the link label, and the postfix to the base url in the
# link url
mtxt = mo.group(0)
lbl = mo.group(1)
url = mo.group(2)
# Try to look up the current link url. Issue a warning if
# the lookup fails, and do the substitution if it succeeds.
try:
ref = self.get_sphinx_ref(url, lbl)
except KeyError as ex:
print('Warning: %s' % ex.args[0])
else:
txt = re.sub(re.escape(mtxt), ref, txt)
return txt | In the string `txt`, replace links to online docs with
corresponding sphinx cross-references. | Below is the the instruction that describes the task:
### Input:
In the string `txt`, replace links to online docs with
corresponding sphinx cross-references.
### Response:
def substitute_url_with_ref(self, txt):
"""
In the string `txt`, replace links to online docs with
corresponding sphinx cross-references.
"""
# Find links
mi = re.finditer(r'\[([^\]]+|\[[^\]]+\])\]\(([^\)]+)\)', txt)
if mi:
# Iterate over match objects in iterator returned by
# re.finditer
for mo in mi:
# Get components of current match: full matching text,
# the link label, and the postfix to the base url in the
# link url
mtxt = mo.group(0)
lbl = mo.group(1)
url = mo.group(2)
# Try to look up the current link url. Issue a warning if
# the lookup fails, and do the substitution if it succeeds.
try:
ref = self.get_sphinx_ref(url, lbl)
except KeyError as ex:
print('Warning: %s' % ex.args[0])
else:
txt = re.sub(re.escape(mtxt), ref, txt)
return txt |
def vrrp(self, **kwargs):
"""Enable or Disable VRRP.
Args:
ip_version (str): The IP version ('4' or '6') for which VRRP should
be enabled/disabled. Default: `4`.
enabled (bool): If VRRP should be enabled or disabled. Default:
``True``.
rbridge_id (str): The rbridge ID of the device on which VRRP will
be enabled/disabled. Default: `1`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(rbridge_id='225')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False)
... output = dev.bgp.local_asn(rbridge_id='225',
... ip_version='6')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False, ip_version='6')
... dev.services.vrrp() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ip_version = kwargs.pop('ip_version', '4')
enabled = kwargs.pop('enabled', True)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
vrrp_args = dict(rbridge_id=rbridge_id)
vrrp_method = 'rbridge_id_protocol_hide_vrrp_holder_vrrp'
if ip_version == '6':
vrrp_method = 'rbridge_id_ipv6_proto_vrrpv3_vrrp'
vrrp = getattr(self._rbridge, vrrp_method)
config = vrrp(**vrrp_args)
if not enabled:
config.find('.//*vrrp').set('operation', 'delete')
return callback(config) | Enable or Disable VRRP.
Args:
ip_version (str): The IP version ('4' or '6') for which VRRP should
be enabled/disabled. Default: `4`.
enabled (bool): If VRRP should be enabled or disabled. Default:
``True``.
rbridge_id (str): The rbridge ID of the device on which VRRP will
be enabled/disabled. Default: `1`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(rbridge_id='225')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False)
... output = dev.bgp.local_asn(rbridge_id='225',
... ip_version='6')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False, ip_version='6')
... dev.services.vrrp() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError | Below is the the instruction that describes the task:
### Input:
Enable or Disable VRRP.
Args:
ip_version (str): The IP version ('4' or '6') for which VRRP should
be enabled/disabled. Default: `4`.
enabled (bool): If VRRP should be enabled or disabled. Default:
``True``.
rbridge_id (str): The rbridge ID of the device on which VRRP will
be enabled/disabled. Default: `1`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(rbridge_id='225')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False)
... output = dev.bgp.local_asn(rbridge_id='225',
... ip_version='6')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False, ip_version='6')
... dev.services.vrrp() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
### Response:
def vrrp(self, **kwargs):
"""Enable or Disable VRRP.
Args:
ip_version (str): The IP version ('4' or '6') for which VRRP should
be enabled/disabled. Default: `4`.
enabled (bool): If VRRP should be enabled or disabled. Default:
``True``.
rbridge_id (str): The rbridge ID of the device on which VRRP will
be enabled/disabled. Default: `1`.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(rbridge_id='225')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False)
... output = dev.bgp.local_asn(rbridge_id='225',
... ip_version='6')
... output = dev.bgp.local_asn(rbridge_id='225',
... enabled=False, ip_version='6')
... dev.services.vrrp() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
ip_version = kwargs.pop('ip_version', '4')
enabled = kwargs.pop('enabled', True)
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
vrrp_args = dict(rbridge_id=rbridge_id)
vrrp_method = 'rbridge_id_protocol_hide_vrrp_holder_vrrp'
if ip_version == '6':
vrrp_method = 'rbridge_id_ipv6_proto_vrrpv3_vrrp'
vrrp = getattr(self._rbridge, vrrp_method)
config = vrrp(**vrrp_args)
if not enabled:
config.find('.//*vrrp').set('operation', 'delete')
return callback(config) |
def rpush(self, key, *values):
"""
Insert all the specified values at the tail of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
tail of the list.
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before performing
the push operation. When `key` holds a value that is not a list, an
error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the tail of the list,
from the leftmost element to the rightmost element. So for instance
the command ``client.rpush('mylist', 'a', 'b', 'c')`` will result
in a list containing ``a`` as first element, ``b`` as second element
and ``c`` as third element.
.. note::
**Time complexity**: ``O(1)``
"""
return self._execute([b'RPUSH', key] + list(values)) | Insert all the specified values at the tail of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
tail of the list.
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before performing
the push operation. When `key` holds a value that is not a list, an
error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the tail of the list,
from the leftmost element to the rightmost element. So for instance
the command ``client.rpush('mylist', 'a', 'b', 'c')`` will result
in a list containing ``a`` as first element, ``b`` as second element
and ``c`` as third element.
.. note::
**Time complexity**: ``O(1)`` | Below is the the instruction that describes the task:
### Input:
Insert all the specified values at the tail of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
tail of the list.
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before performing
the push operation. When `key` holds a value that is not a list, an
error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the tail of the list,
from the leftmost element to the rightmost element. So for instance
the command ``client.rpush('mylist', 'a', 'b', 'c')`` will result
in a list containing ``a`` as first element, ``b`` as second element
and ``c`` as third element.
.. note::
**Time complexity**: ``O(1)``
### Response:
def rpush(self, key, *values):
"""
Insert all the specified values at the tail of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
tail of the list.
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before performing
the push operation. When `key` holds a value that is not a list, an
error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the tail of the list,
from the leftmost element to the rightmost element. So for instance
the command ``client.rpush('mylist', 'a', 'b', 'c')`` will result
in a list containing ``a`` as first element, ``b`` as second element
and ``c`` as third element.
.. note::
**Time complexity**: ``O(1)``
"""
return self._execute([b'RPUSH', key] + list(values)) |
def isInside(self, pts, In='(X,Y,Z)', log='any'):
""" Return a 2D array of bool
Equivalent to applying isInside to each Struct
Check self.lStruct[0].isInside? for details
Arg log determines how Struct with multiple Limits are treated
- 'all' : True only if pts belong to all elements
- 'any' : True if pts belong to any element
"""
msg = "Arg pts must be a 1D or 2D np.ndarray !"
assert isinstance(pts,np.ndarray) and pts.ndim in [1,2], msg
msg = "Arg log must be in ['any','all']"
assert log in ['any','all'], msg
if pts.ndim==1:
msg = "Arg pts must contain the coordinates of a point !"
assert pts.size in [2,3], msg
pts = pts.reshape((pts.size,1)).astype(float)
else:
msg = "Arg pts must contain the coordinates of points !"
assert pts.shape[0] in [2,3], pts
nP = pts.shape[1]
ind = np.zeros((self._dStruct['nObj'],nP), dtype=bool)
lStruct = self.lStruct
for ii in range(0,self._dStruct['nObj']):
indi = _GG._Ves_isInside(pts,
lStruct[ii].Poly,
Lim=lStruct[ii].Lim,
nLim=lStruct[ii].noccur,
VType=lStruct[ii].Id.Type,
In=In, Test=True)
if lStruct[ii].noccur>1:
if log=='any':
indi = np.any(indi,axis=0)
else:
indi = np.all(indi,axis=0)
ind[ii,:] = indi
return ind | Return a 2D array of bool
Equivalent to applying isInside to each Struct
Check self.lStruct[0].isInside? for details
Arg log determines how Struct with multiple Limits are treated
- 'all' : True only if pts belong to all elements
- 'any' : True if pts belong to any element | Below is the the instruction that describes the task:
### Input:
Return a 2D array of bool
Equivalent to applying isInside to each Struct
Check self.lStruct[0].isInside? for details
Arg log determines how Struct with multiple Limits are treated
- 'all' : True only if pts belong to all elements
- 'any' : True if pts belong to any element
### Response:
def isInside(self, pts, In='(X,Y,Z)', log='any'):
""" Return a 2D array of bool
Equivalent to applying isInside to each Struct
Check self.lStruct[0].isInside? for details
Arg log determines how Struct with multiple Limits are treated
- 'all' : True only if pts belong to all elements
- 'any' : True if pts belong to any element
"""
msg = "Arg pts must be a 1D or 2D np.ndarray !"
assert isinstance(pts,np.ndarray) and pts.ndim in [1,2], msg
msg = "Arg log must be in ['any','all']"
assert log in ['any','all'], msg
if pts.ndim==1:
msg = "Arg pts must contain the coordinates of a point !"
assert pts.size in [2,3], msg
pts = pts.reshape((pts.size,1)).astype(float)
else:
msg = "Arg pts must contain the coordinates of points !"
assert pts.shape[0] in [2,3], pts
nP = pts.shape[1]
ind = np.zeros((self._dStruct['nObj'],nP), dtype=bool)
lStruct = self.lStruct
for ii in range(0,self._dStruct['nObj']):
indi = _GG._Ves_isInside(pts,
lStruct[ii].Poly,
Lim=lStruct[ii].Lim,
nLim=lStruct[ii].noccur,
VType=lStruct[ii].Id.Type,
In=In, Test=True)
if lStruct[ii].noccur>1:
if log=='any':
indi = np.any(indi,axis=0)
else:
indi = np.all(indi,axis=0)
ind[ii,:] = indi
return ind |
def console(gandi, resource):
"""Open a console to virtual machine.
Resource can be a Hostname or an ID
"""
gandi.echo('/!\ Please be aware that if you didn\'t provide a password '
'during creation, console service will be unavailable.')
gandi.echo('/!\ You can use "gandi vm update" command to set a password.')
gandi.echo('/!\ Use ~. ssh escape key to exit.')
gandi.iaas.console(resource) | Open a console to virtual machine.
Resource can be a Hostname or an ID | Below is the the instruction that describes the task:
### Input:
Open a console to virtual machine.
Resource can be a Hostname or an ID
### Response:
def console(gandi, resource):
"""Open a console to virtual machine.
Resource can be a Hostname or an ID
"""
gandi.echo('/!\ Please be aware that if you didn\'t provide a password '
'during creation, console service will be unavailable.')
gandi.echo('/!\ You can use "gandi vm update" command to set a password.')
gandi.echo('/!\ Use ~. ssh escape key to exit.')
gandi.iaas.console(resource) |
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True | Reset all ID fields. | Below is the the instruction that describes the task:
### Input:
Reset all ID fields.
### Response:
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True |
def acquire(self, block=True):
"""Acquire lock. Blocks until acquired if `block` is `True`, otherwise returns `False` if the lock could not be acquired."""
while True:
# Try to set the lock
if self.redis.set(self.name, self.value, px=self.timeout, nx=True):
# It's ours until the timeout now
return True
# Lock is taken
if not block:
return False
# If blocking, try again in a bit
time.sleep(self.sleep) | Acquire lock. Blocks until acquired if `block` is `True`, otherwise returns `False` if the lock could not be acquired. | Below is the the instruction that describes the task:
### Input:
Acquire lock. Blocks until acquired if `block` is `True`, otherwise returns `False` if the lock could not be acquired.
### Response:
def acquire(self, block=True):
"""Acquire lock. Blocks until acquired if `block` is `True`, otherwise returns `False` if the lock could not be acquired."""
while True:
# Try to set the lock
if self.redis.set(self.name, self.value, px=self.timeout, nx=True):
# It's ours until the timeout now
return True
# Lock is taken
if not block:
return False
# If blocking, try again in a bit
time.sleep(self.sleep) |
def endpointlist_post_save(instance, *args, **kwargs):
"""
Used to process the lines of the endpoint list.
"""
with open(instance.upload.file.name, mode='rb') as f:
lines = f.readlines()
for url in lines:
if len(url) > 255:
LOGGER.debug('Skipping this endpoint, as it is more than 255 characters: %s' % url)
else:
if Endpoint.objects.filter(url=url, catalog=instance.catalog).count() == 0:
endpoint = Endpoint(url=url, endpoint_list=instance)
endpoint.catalog = instance.catalog
endpoint.save()
if not settings.REGISTRY_SKIP_CELERY:
update_endpoints.delay(instance.id)
else:
update_endpoints(instance.id) | Used to process the lines of the endpoint list. | Below is the the instruction that describes the task:
### Input:
Used to process the lines of the endpoint list.
### Response:
def endpointlist_post_save(instance, *args, **kwargs):
"""
Used to process the lines of the endpoint list.
"""
with open(instance.upload.file.name, mode='rb') as f:
lines = f.readlines()
for url in lines:
if len(url) > 255:
LOGGER.debug('Skipping this endpoint, as it is more than 255 characters: %s' % url)
else:
if Endpoint.objects.filter(url=url, catalog=instance.catalog).count() == 0:
endpoint = Endpoint(url=url, endpoint_list=instance)
endpoint.catalog = instance.catalog
endpoint.save()
if not settings.REGISTRY_SKIP_CELERY:
update_endpoints.delay(instance.id)
else:
update_endpoints(instance.id) |
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units | Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`) | Below is the the instruction that describes the task:
### Input:
Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
### Response:
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units |
def _build_primitive_cell(self):
"""
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
"""
inv_supercell_matrix = np.linalg.inv(self._supercell_matrix)
if self._primitive_matrix is None:
trans_mat = inv_supercell_matrix
else:
trans_mat = np.dot(inv_supercell_matrix, self._primitive_matrix)
try:
self._primitive = get_primitive(
self._supercell, trans_mat, self._symprec)
except ValueError:
msg = ("Creating primitive cell is failed. "
"PRIMITIVE_AXIS may be incorrectly specified.")
raise RuntimeError(msg) | primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T | Below is the the instruction that describes the task:
### Input:
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
### Response:
def _build_primitive_cell(self):
"""
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
"""
inv_supercell_matrix = np.linalg.inv(self._supercell_matrix)
if self._primitive_matrix is None:
trans_mat = inv_supercell_matrix
else:
trans_mat = np.dot(inv_supercell_matrix, self._primitive_matrix)
try:
self._primitive = get_primitive(
self._supercell, trans_mat, self._symprec)
except ValueError:
msg = ("Creating primitive cell is failed. "
"PRIMITIVE_AXIS may be incorrectly specified.")
raise RuntimeError(msg) |
def sort2groups(array, gpat=['_R1','_R2']):
""" Sort an array of strings to groups by patterns """
groups = [REGroup(gp) for gp in gpat]
unmatched = []
for item in array:
matched = False
for m in groups:
if m.match(item):
matched = True
break
if not matched: unmatched.append(item)
return [sorted(m.list) for m in groups], sorted(unmatched) | Sort an array of strings to groups by patterns | Below is the the instruction that describes the task:
### Input:
Sort an array of strings to groups by patterns
### Response:
def sort2groups(array, gpat=['_R1','_R2']):
""" Sort an array of strings to groups by patterns """
groups = [REGroup(gp) for gp in gpat]
unmatched = []
for item in array:
matched = False
for m in groups:
if m.match(item):
matched = True
break
if not matched: unmatched.append(item)
return [sorted(m.list) for m in groups], sorted(unmatched) |
def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
"""
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret | r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user | Below is the the instruction that describes the task:
### Input:
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
### Response:
def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to using the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must provided by user.
verbose : bool, optional
Enables verbose output. Default is verbose.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
"""
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret |
def cmd_reindex():
"""Uses CREATE INDEX CONCURRENTLY to create a duplicate index, then tries to swap the new index for the original.
The index swap is done using a short lock timeout to prevent it from interfering with running queries. Retries until
the rename succeeds.
"""
db = connect(args.database)
for idx in args.indexes:
pg_reindex(db, idx) | Uses CREATE INDEX CONCURRENTLY to create a duplicate index, then tries to swap the new index for the original.
The index swap is done using a short lock timeout to prevent it from interfering with running queries. Retries until
the rename succeeds. | Below is the instruction that describes the task:
### Input:
Uses CREATE INDEX CONCURRENTLY to create a duplicate index, then tries to swap the new index for the original.
The index swap is done using a short lock timeout to prevent it from interfering with running queries. Retries until
the rename succeeds.
### Response:
def cmd_reindex():
"""Uses CREATE INDEX CONCURRENTLY to create a duplicate index, then tries to swap the new index for the original.
The index swap is done using a short lock timeout to prevent it from interfering with running queries. Retries until
the rename succeeds.
"""
db = connect(args.database)
for idx in args.indexes:
pg_reindex(db, idx) |
def parse(self, data):
"""Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict
"""
self.validate_packet(data)
id_ = self.dump_hex(data[4:6])
# channel = data[5] TBC
humidity = data[6]
humidity_status = self._extract_humidity_status(data[7])
sensor_specific = {
'id': id_,
# 'channel': channel, TBC
'humidity': humidity,
'humidity_status': humidity_status
}
results = self.parse_header_part(data)
results.update(RfxPacketUtils.parse_signal_and_battery(data[8]))
results.update(sensor_specific)
return results | Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict | Below is the instruction that describes the task:
### Input:
Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict
### Response:
def parse(self, data):
"""Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict
"""
self.validate_packet(data)
id_ = self.dump_hex(data[4:6])
# channel = data[5] TBC
humidity = data[6]
humidity_status = self._extract_humidity_status(data[7])
sensor_specific = {
'id': id_,
# 'channel': channel, TBC
'humidity': humidity,
'humidity_status': humidity_status
}
results = self.parse_header_part(data)
results.update(RfxPacketUtils.parse_signal_and_battery(data[8]))
results.update(sensor_specific)
return results |
def unpack_rsp(cls, rsp_pb):
"""Convert from PLS response to user response"""
if rsp_pb.retType != RET_OK:
return RET_ERROR, rsp_pb.retMsg, None
raw_acc_list = rsp_pb.s2c.accList
acc_list = [{
'acc_id': record.accID,
'trd_env': TRADE.REV_TRD_ENV_MAP[record.trdEnv] if record.trdEnv in TRADE.REV_TRD_ENV_MAP else "",
'trdMarket_list': [(TRADE.REV_TRD_MKT_MAP[trdMkt] if trdMkt in TRADE.REV_TRD_MKT_MAP else TrdMarket.NONE) for trdMkt in record.trdMarketAuthList]
} for record in raw_acc_list]
return RET_OK, "", acc_list | Convert from PLS response to user response | Below is the instruction that describes the task:
### Input:
Convert from PLS response to user response
### Response:
def unpack_rsp(cls, rsp_pb):
"""Convert from PLS response to user response"""
if rsp_pb.retType != RET_OK:
return RET_ERROR, rsp_pb.retMsg, None
raw_acc_list = rsp_pb.s2c.accList
acc_list = [{
'acc_id': record.accID,
'trd_env': TRADE.REV_TRD_ENV_MAP[record.trdEnv] if record.trdEnv in TRADE.REV_TRD_ENV_MAP else "",
'trdMarket_list': [(TRADE.REV_TRD_MKT_MAP[trdMkt] if trdMkt in TRADE.REV_TRD_MKT_MAP else TrdMarket.NONE) for trdMkt in record.trdMarketAuthList]
} for record in raw_acc_list]
return RET_OK, "", acc_list |
def get_asset_admin_session(self, proxy=None):
"""Gets an asset administration session for creating, updating and deleting assets.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetAdminSession) - an
``AssetAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_admin()`` is ``true``.*
"""
asset_lookup_session = self._provider_manager.get_asset_lookup_session(proxy)
return AssetAdminSession(
self._provider_manager.get_asset_admin_session(proxy),
self._config_map,
asset_lookup_session) | Gets an asset administration session for creating, updating and deleting assets.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetAdminSession) - an
``AssetAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_admin()`` is ``true``.* | Below is the instruction that describes the task:
### Input:
Gets an asset administration session for creating, updating and deleting assets.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetAdminSession) - an
``AssetAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_admin()`` is ``true``.*
### Response:
def get_asset_admin_session(self, proxy=None):
"""Gets an asset administration session for creating, updating and deleting assets.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetAdminSession) - an
``AssetAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_admin()`` is ``true``.*
"""
asset_lookup_session = self._provider_manager.get_asset_lookup_session(proxy)
return AssetAdminSession(
self._provider_manager.get_asset_admin_session(proxy),
self._config_map,
asset_lookup_session) |
def labels(self):
"""
Provide labels without the need of dockerd. Instead skopeo is being used.
:return: dict
"""
if self._labels is None:
cmd = ["skopeo", "inspect", self.skopeo_target]
self._labels = json.loads(subprocess.check_output(cmd))["Labels"]
return self._labels | Provide labels without the need of dockerd. Instead skopeo is being used.
:return: dict | Below is the instruction that describes the task:
### Input:
Provide labels without the need of dockerd. Instead skopeo is being used.
:return: dict
### Response:
def labels(self):
"""
Provide labels without the need of dockerd. Instead skopeo is being used.
:return: dict
"""
if self._labels is None:
cmd = ["skopeo", "inspect", self.skopeo_target]
self._labels = json.loads(subprocess.check_output(cmd))["Labels"]
return self._labels |
def _is_definition_section(source):
"""Determine if the source is a definition section.
Args:
source: The usage string source that may be a section.
Returns:
True if the source describes a definition section; otherwise, False.
"""
try:
definitions = textwrap.dedent(source).split('\n', 1)[1].splitlines()
return all(
re.match(r'\s\s+((?!\s\s).+)\s\s+.+', s) for s in definitions)
except IndexError:
return False | Determine if the source is a definition section.
Args:
source: The usage string source that may be a section.
Returns:
True if the source describes a definition section; otherwise, False. | Below is the instruction that describes the task:
### Input:
Determine if the source is a definition section.
Args:
source: The usage string source that may be a section.
Returns:
True if the source describes a definition section; otherwise, False.
### Response:
def _is_definition_section(source):
"""Determine if the source is a definition section.
Args:
source: The usage string source that may be a section.
Returns:
True if the source describes a definition section; otherwise, False.
"""
try:
definitions = textwrap.dedent(source).split('\n', 1)[1].splitlines()
return all(
re.match(r'\s\s+((?!\s\s).+)\s\s+.+', s) for s in definitions)
except IndexError:
return False |
def upload_rpm(rpm_path, repoid, connector, callback=None):
"""upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded
"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
info = rpm_info(rpm_path)
pkg_name = info['name']
nvrea = info['nvrea']
cksum = info['cksum']
size = info['size']
package_basename = info['package_basename']
juicer.utils.Log.log_notice("Expected amount to seek: %s (package size by os.path.getsize)" % size)
# initiate upload
upload = juicer.utils.Upload.Upload(package_basename, cksum, size, repoid, connector)
#create a statusbar
pbar = ProgressBar(size)
# read in rpm
total_seeked = 0
rpm_fd = open(rpm_path, 'rb')
rpm_fd.seek(0)
while total_seeked < size:
rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE)
last_offset = total_seeked
total_seeked += len(rpm_data)
juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" % (len(rpm_data), total_seeked))
upload_code = upload.append(fdata=rpm_data, offset=last_offset)
if upload_code != Constants.PULP_PUT_OK:
juicer.utils.Log.log_error("Upload failed.")
pbar.update(len(rpm_data))
pbar.finish()
rpm_fd.close()
juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked)
# finalize upload
rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name)
juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id)
# clean up working dir
upload.clean_upload()
# Run callbacks?
if callback:
try:
juicer.utils.Log.log_debug("Calling upload callack: %s" % str(callback))
callback(pkg_name)
except Exception:
juicer.utils.Log.log_error("Exception raised in callback: %s", str(callback))
pass
return rpm_id | upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded | Below is the instruction that describes the task:
### Input:
upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded
### Response:
def upload_rpm(rpm_path, repoid, connector, callback=None):
"""upload an rpm into pulp
rpm_path: path to an rpm
connector: the connector to use for interacting with pulp
callback: Optional callback to call after an RPM is
uploaded. Callback should accept one argument, the name of the RPM
which was uploaded
"""
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
info = rpm_info(rpm_path)
pkg_name = info['name']
nvrea = info['nvrea']
cksum = info['cksum']
size = info['size']
package_basename = info['package_basename']
juicer.utils.Log.log_notice("Expected amount to seek: %s (package size by os.path.getsize)" % size)
# initiate upload
upload = juicer.utils.Upload.Upload(package_basename, cksum, size, repoid, connector)
#create a statusbar
pbar = ProgressBar(size)
# read in rpm
total_seeked = 0
rpm_fd = open(rpm_path, 'rb')
rpm_fd.seek(0)
while total_seeked < size:
rpm_data = rpm_fd.read(Constants.UPLOAD_AT_ONCE)
last_offset = total_seeked
total_seeked += len(rpm_data)
juicer.utils.Log.log_notice("Seeked %s data... (total seeked: %s)" % (len(rpm_data), total_seeked))
upload_code = upload.append(fdata=rpm_data, offset=last_offset)
if upload_code != Constants.PULP_PUT_OK:
juicer.utils.Log.log_error("Upload failed.")
pbar.update(len(rpm_data))
pbar.finish()
rpm_fd.close()
juicer.utils.Log.log_notice("Seeked total data: %s" % total_seeked)
# finalize upload
rpm_id = upload.import_upload(nvrea=nvrea, rpm_name=pkg_name)
juicer.utils.Log.log_debug("RPM upload complete. New 'packageid': %s" % rpm_id)
# clean up working dir
upload.clean_upload()
# Run callbacks?
if callback:
try:
juicer.utils.Log.log_debug("Calling upload callack: %s" % str(callback))
callback(pkg_name)
except Exception:
juicer.utils.Log.log_error("Exception raised in callback: %s", str(callback))
pass
return rpm_id |
def _make_2d_array(self, data):
"""
Convert a 1D array of mesh values to a masked 2D mesh array
given the 1D mesh indices ``mesh_idx``.
Parameters
----------
data : 1D `~numpy.ndarray`
A 1D array of mesh values.
Returns
-------
result : 2D `~numpy.ma.MaskedArray`
A 2D masked array. Pixels not defined in ``mesh_idx`` are
masked.
"""
if data.shape != self.mesh_idx.shape:
raise ValueError('data and mesh_idx must have the same shape')
if np.ma.is_masked(data):
raise ValueError('data must not be a masked array')
data2d = np.zeros(self._mesh_shape).astype(data.dtype)
data2d[self.mesh_yidx, self.mesh_xidx] = data
if len(self.mesh_idx) == self.nboxes:
# no meshes were masked
return data2d
else:
# some meshes were masked
mask2d = np.ones(data2d.shape).astype(np.bool)
mask2d[self.mesh_yidx, self.mesh_xidx] = False
return np.ma.masked_array(data2d, mask=mask2d) | Convert a 1D array of mesh values to a masked 2D mesh array
given the 1D mesh indices ``mesh_idx``.
Parameters
----------
data : 1D `~numpy.ndarray`
A 1D array of mesh values.
Returns
-------
result : 2D `~numpy.ma.MaskedArray`
A 2D masked array. Pixels not defined in ``mesh_idx`` are
masked. | Below is the instruction that describes the task:
### Input:
Convert a 1D array of mesh values to a masked 2D mesh array
given the 1D mesh indices ``mesh_idx``.
Parameters
----------
data : 1D `~numpy.ndarray`
A 1D array of mesh values.
Returns
-------
result : 2D `~numpy.ma.MaskedArray`
A 2D masked array. Pixels not defined in ``mesh_idx`` are
masked.
### Response:
def _make_2d_array(self, data):
"""
Convert a 1D array of mesh values to a masked 2D mesh array
given the 1D mesh indices ``mesh_idx``.
Parameters
----------
data : 1D `~numpy.ndarray`
A 1D array of mesh values.
Returns
-------
result : 2D `~numpy.ma.MaskedArray`
A 2D masked array. Pixels not defined in ``mesh_idx`` are
masked.
"""
if data.shape != self.mesh_idx.shape:
raise ValueError('data and mesh_idx must have the same shape')
if np.ma.is_masked(data):
raise ValueError('data must not be a masked array')
data2d = np.zeros(self._mesh_shape).astype(data.dtype)
data2d[self.mesh_yidx, self.mesh_xidx] = data
if len(self.mesh_idx) == self.nboxes:
# no meshes were masked
return data2d
else:
# some meshes were masked
mask2d = np.ones(data2d.shape).astype(np.bool)
mask2d[self.mesh_yidx, self.mesh_xidx] = False
return np.ma.masked_array(data2d, mask=mask2d) |
def clear_cache(self):
"""
Clear the TTS cache, removing all cache files from disk.
.. versionadded:: 1.6.0
"""
if self.use_cache:
self.log(u"Requested to clear TTS cache")
self.cache.clear() | Clear the TTS cache, removing all cache files from disk.
.. versionadded:: 1.6.0 | Below is the instruction that describes the task:
### Input:
Clear the TTS cache, removing all cache files from disk.
.. versionadded:: 1.6.0
### Response:
def clear_cache(self):
"""
Clear the TTS cache, removing all cache files from disk.
.. versionadded:: 1.6.0
"""
if self.use_cache:
self.log(u"Requested to clear TTS cache")
self.cache.clear() |
def render_form_field(parser, token):
"""
Usage is {% render_form_field form.field_name optional_help_text optional_css_classes %}
- optional_help_text and optional_css_classes are strings
- if optional_help_text is not given, then it is taken from form field object
"""
try:
help_text = None
css_classes = None
token_split = token.split_contents()
if len(token_split) == 4:
tag_name, form_field, help_text, css_classes = token.split_contents()
elif len(token_split) == 3:
tag_name, form_field, help_text = token.split_contents()
else:
tag_name, form_field = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"Unable to parse arguments for {0}".format(repr(token.contents.split()[0])))
return FormFieldNode(form_field, help_text=help_text, css_classes=css_classes) | Usage is {% render_form_field form.field_name optional_help_text optional_css_classes %}
- optional_help_text and optional_css_classes are strings
- if optional_help_text is not given, then it is taken from form field object | Below is the instruction that describes the task:
### Input:
Usage is {% render_form_field form.field_name optional_help_text optional_css_classes %}
- optional_help_text and optional_css_classes are strings
- if optional_help_text is not given, then it is taken from form field object
### Response:
def render_form_field(parser, token):
"""
Usage is {% render_form_field form.field_name optional_help_text optional_css_classes %}
- optional_help_text and optional_css_classes are strings
- if optional_help_text is not given, then it is taken from form field object
"""
try:
help_text = None
css_classes = None
token_split = token.split_contents()
if len(token_split) == 4:
tag_name, form_field, help_text, css_classes = token.split_contents()
elif len(token_split) == 3:
tag_name, form_field, help_text = token.split_contents()
else:
tag_name, form_field = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"Unable to parse arguments for {0}".format(repr(token.contents.split()[0])))
return FormFieldNode(form_field, help_text=help_text, css_classes=css_classes) |
def set_(key, value, profile=None, ttl=None, directory=False, **kwargs):
'''
.. versionadded:: 2014.7.0
Set a key in etcd by direct path. Optionally, create a directory
or set a TTL on the key. Returns None on failure.
CLI Example:
.. code-block:: bash
salt myminion etcd.set /path/to/key value
salt myminion etcd.set /path/to/key value profile=my_etcd_config
salt myminion etcd.set /path/to/key value host=127.0.0.1 port=2379
salt myminion etcd.set /path/to/dir '' directory=True
salt myminion etcd.set /path/to/key value ttl=5
'''
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
return client.set(key, value, ttl=ttl, directory=directory) | .. versionadded:: 2014.7.0
Set a key in etcd by direct path. Optionally, create a directory
or set a TTL on the key. Returns None on failure.
CLI Example:
.. code-block:: bash
salt myminion etcd.set /path/to/key value
salt myminion etcd.set /path/to/key value profile=my_etcd_config
salt myminion etcd.set /path/to/key value host=127.0.0.1 port=2379
salt myminion etcd.set /path/to/dir '' directory=True
salt myminion etcd.set /path/to/key value ttl=5 | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2014.7.0
Set a key in etcd by direct path. Optionally, create a directory
or set a TTL on the key. Returns None on failure.
CLI Example:
.. code-block:: bash
salt myminion etcd.set /path/to/key value
salt myminion etcd.set /path/to/key value profile=my_etcd_config
salt myminion etcd.set /path/to/key value host=127.0.0.1 port=2379
salt myminion etcd.set /path/to/dir '' directory=True
salt myminion etcd.set /path/to/key value ttl=5
### Response:
def set_(key, value, profile=None, ttl=None, directory=False, **kwargs):
'''
.. versionadded:: 2014.7.0
Set a key in etcd by direct path. Optionally, create a directory
or set a TTL on the key. Returns None on failure.
CLI Example:
.. code-block:: bash
salt myminion etcd.set /path/to/key value
salt myminion etcd.set /path/to/key value profile=my_etcd_config
salt myminion etcd.set /path/to/key value host=127.0.0.1 port=2379
salt myminion etcd.set /path/to/dir '' directory=True
salt myminion etcd.set /path/to/key value ttl=5
'''
client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
return client.set(key, value, ttl=ttl, directory=directory) |
def unsuppress(self, email):
"""Unsuppresses an email address by removing it from the the client's
suppression list"""
params = {"email": email}
response = self._put(self.uri_for("unsuppress"),
body=" ", params=params) | Unsuppresses an email address by removing it from the the client's
suppression list | Below is the instruction that describes the task:
### Input:
Unsuppresses an email address by removing it from the the client's
suppression list
### Response:
def unsuppress(self, email):
"""Unsuppresses an email address by removing it from the the client's
suppression list"""
params = {"email": email}
response = self._put(self.uri_for("unsuppress"),
body=" ", params=params) |
def _write_submit_script(self, script_string, script_filename):
'''
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
'''
try:
with open(script_filename, 'w') as f:
f.write(script_string)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (ep_error.SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ep_error.ScriptPathError(script_filename, e))
return True | Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out | Below is the instruction that describes the task:
### Input:
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
### Response:
def _write_submit_script(self, script_string, script_filename):
'''
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
'''
try:
with open(script_filename, 'w') as f:
f.write(script_string)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (ep_error.SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ep_error.ScriptPathError(script_filename, e))
return True |
def det_refpoint(self, angle):
"""Return the detector reference point position at ``angle``.
For an angle ``phi``, the detector position is given by ::
det_ref(phi) = translation +
rot_matrix(phi) * (det_rad * src_to_det_init) +
(offset_along_axis + pitch * phi) * axis
where ``src_to_det_init`` is the initial unit vector pointing
from source to detector.
Parameters
----------
angle : float or `array-like`
Angle(s) in radians describing the counter-clockwise
rotation of the detector.
Returns
-------
refpt : `numpy.ndarray`
Vector(s) pointing from the origin to the detector reference
point. If ``angle`` is a single parameter, the returned array
has shape ``(3,)``, otherwise ``angle.shape + (3,)``.
See Also
--------
src_position
Examples
--------
With default arguments, the detector starts at ``det_rad * e_y``
and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at
90 degrees:
>>> apart = odl.uniform_partition(0, 4 * np.pi, 10)
>>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
>>> geom = ConeFlatGeometry(
... apart, dpart, src_radius=5, det_radius=10, pitch=2)
>>> geom.det_refpoint(0)
array([ 0., 10., 0.])
>>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5])
True
The method is vectorized, i.e., it can be called with multiple
angles at once (or an n-dimensional array of angles):
>>> points = geom.det_refpoint([0, np.pi / 2])
>>> np.allclose(points[0], [0, 10, 0])
True
>>> np.allclose(points[1], [-10, 0, 0.5])
True
>>> geom.det_refpoint(np.zeros((4, 5))).shape
(4, 5, 3)
"""
squeeze_out = (np.shape(angle) == ())
angle = np.array(angle, dtype=float, copy=False, ndmin=1)
rot_matrix = self.rotation_matrix(angle)
extra_dims = angle.ndim
# Initial vector from center of rotation to detector.
# It can be computed this way since source and detector are at
# maximum distance, i.e. the connecting line passes the center.
center_to_det_init = self.det_radius * self.src_to_det_init
# `circle_component` has shape (a, ndim)
circle_component = rot_matrix.dot(center_to_det_init)
# Increment along the rotation axis according to pitch and
# offset_along_axis
# `shift_along_axis` has shape angles.shape
shift_along_axis = (self.offset_along_axis
+ self.pitch * angle / (2 * np.pi))
# Create outer product of `shift_along_axis` and `axis`, resulting
# in shape (a, ndim)
pitch_component = np.multiply.outer(shift_along_axis, self.axis)
# Broadcast translation along extra dimensions
transl_slc = (None,) * extra_dims + (slice(None),)
refpt = (self.translation[transl_slc]
+ circle_component
+ pitch_component)
if squeeze_out:
refpt = refpt.squeeze()
return refpt | Return the detector reference point position at ``angle``.
For an angle ``phi``, the detector position is given by ::
det_ref(phi) = translation +
rot_matrix(phi) * (det_rad * src_to_det_init) +
(offset_along_axis + pitch * phi) * axis
where ``src_to_det_init`` is the initial unit vector pointing
from source to detector.
Parameters
----------
angle : float or `array-like`
Angle(s) in radians describing the counter-clockwise
rotation of the detector.
Returns
-------
refpt : `numpy.ndarray`
Vector(s) pointing from the origin to the detector reference
point. If ``angle`` is a single parameter, the returned array
has shape ``(3,)``, otherwise ``angle.shape + (3,)``.
See Also
--------
src_position
Examples
--------
With default arguments, the detector starts at ``det_rad * e_y``
and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at
90 degrees:
>>> apart = odl.uniform_partition(0, 4 * np.pi, 10)
>>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
>>> geom = ConeFlatGeometry(
... apart, dpart, src_radius=5, det_radius=10, pitch=2)
>>> geom.det_refpoint(0)
array([ 0., 10., 0.])
>>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5])
True
The method is vectorized, i.e., it can be called with multiple
angles at once (or an n-dimensional array of angles):
>>> points = geom.det_refpoint([0, np.pi / 2])
>>> np.allclose(points[0], [0, 10, 0])
True
>>> np.allclose(points[1], [-10, 0, 0.5])
True
>>> geom.det_refpoint(np.zeros((4, 5))).shape
(4, 5, 3) | Below is the the instruction that describes the task:
### Input:
Return the detector reference point position at ``angle``.
For an angle ``phi``, the detector position is given by ::
det_ref(phi) = translation +
rot_matrix(phi) * (det_rad * src_to_det_init) +
(offset_along_axis + pitch * phi) * axis
where ``src_to_det_init`` is the initial unit vector pointing
from source to detector.
Parameters
----------
angle : float or `array-like`
Angle(s) in radians describing the counter-clockwise
rotation of the detector.
Returns
-------
refpt : `numpy.ndarray`
Vector(s) pointing from the origin to the detector reference
point. If ``angle`` is a single parameter, the returned array
has shape ``(3,)``, otherwise ``angle.shape + (3,)``.
See Also
--------
src_position
Examples
--------
With default arguments, the detector starts at ``det_rad * e_y``
and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at
90 degrees:
>>> apart = odl.uniform_partition(0, 4 * np.pi, 10)
>>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
>>> geom = ConeFlatGeometry(
... apart, dpart, src_radius=5, det_radius=10, pitch=2)
>>> geom.det_refpoint(0)
array([ 0., 10., 0.])
>>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5])
True
The method is vectorized, i.e., it can be called with multiple
angles at once (or an n-dimensional array of angles):
>>> points = geom.det_refpoint([0, np.pi / 2])
>>> np.allclose(points[0], [0, 10, 0])
True
>>> np.allclose(points[1], [-10, 0, 0.5])
True
>>> geom.det_refpoint(np.zeros((4, 5))).shape
(4, 5, 3)
### Response:
def det_refpoint(self, angle):
    """Return the detector reference point position at ``angle``.

    For an angle ``phi``, the detector position is given by ::

        det_ref(phi) = translation +
                       rot_matrix(phi) * (det_rad * src_to_det_init) +
                       (offset_along_axis + pitch * phi) * axis

    where ``src_to_det_init`` is the initial unit vector pointing
    from source to detector.

    Parameters
    ----------
    angle : float or `array-like`
        Angle(s) in radians describing the counter-clockwise
        rotation of the detector.

    Returns
    -------
    refpt : `numpy.ndarray`
        Vector(s) pointing from the origin to the detector reference
        point. If ``angle`` is a single parameter, the returned array
        has shape ``(3,)``, otherwise ``angle.shape + (3,)``.

    See Also
    --------
    src_position

    Examples
    --------
    With default arguments, the detector starts at ``det_rad * e_y``
    and rotates to ``det_rad * (-e_x) + pitch/4 * e_z`` at
    90 degrees:

    >>> apart = odl.uniform_partition(0, 4 * np.pi, 10)
    >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
    >>> geom = ConeFlatGeometry(
    ...     apart, dpart, src_radius=5, det_radius=10, pitch=2)
    >>> geom.det_refpoint(0)
    array([  0.,  10.,   0.])
    >>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5])
    True

    The method is vectorized, i.e., it can be called with multiple
    angles at once (or an n-dimensional array of angles):

    >>> points = geom.det_refpoint([0, np.pi / 2])
    >>> np.allclose(points[0], [0, 10, 0])
    True
    >>> np.allclose(points[1], [-10, 0, 0.5])
    True
    >>> geom.det_refpoint(np.zeros((4, 5))).shape
    (4, 5, 3)
    """
    squeeze_out = (np.shape(angle) == ())
    # NOTE: `np.array(angle, dtype=float, copy=False, ndmin=1)` raises
    # ValueError under NumPy >= 2.0 whenever a copy is actually needed
    # (e.g. int or list input); `asarray` + `atleast_1d` expresses the
    # same "avoid a copy if possible" intent portably.
    angle = np.atleast_1d(np.asarray(angle, dtype=float))
    rot_matrix = self.rotation_matrix(angle)
    extra_dims = angle.ndim

    # Initial vector from center of rotation to detector.
    # It can be computed this way since source and detector are at
    # maximum distance, i.e. the connecting line passes the center.
    center_to_det_init = self.det_radius * self.src_to_det_init
    # `circle_component` has shape (a, ndim)
    circle_component = rot_matrix.dot(center_to_det_init)

    # Increment along the rotation axis according to pitch and
    # offset_along_axis.
    # `shift_along_axis` has shape angles.shape
    shift_along_axis = (self.offset_along_axis
                        + self.pitch * angle / (2 * np.pi))
    # Create outer product of `shift_along_axis` and `axis`, resulting
    # in shape (a, ndim)
    pitch_component = np.multiply.outer(shift_along_axis, self.axis)

    # Broadcast translation along extra dimensions
    transl_slc = (None,) * extra_dims + (slice(None),)
    refpt = (self.translation[transl_slc]
             + circle_component
             + pitch_component)
    if squeeze_out:
        refpt = refpt.squeeze()

    return refpt
def get_connected_sites(self, n):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = [(u, v, d) for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d) for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d in out_edges + in_edges:
weight = d.get('weight', None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site,
jimage=(0, 0, 0),
index=u,
weight=weight,
dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site,
jimage=(0, 0, 0),
index=v,
weight=weight,
dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites | Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first | Below is the the instruction that describes the task:
### Input:
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
### Response:
def get_connected_sites(self, n):
    """
    Returns a named tuple of neighbors of site n:
    periodic_site, jimage, index, weight.
    Index is the index of the corresponding site
    in the original structure, weight can be
    None if not defined.

    :param n: index of Site in Molecule
    :return: list of ConnectedSite tuples,
        sorted by closest first
    """

    connected_sites = set()

    # Bonds may be stored in either direction in the directed graph,
    # so gather both outgoing and incoming edges of site n.
    edges = list(self.graph.out_edges(n, data=True))
    edges += list(self.graph.in_edges(n, data=True))

    for u, v, d in edges:

        weight = d.get('weight', None)

        # The neighbor is whichever endpoint is not n itself
        # (out-edges have u == n, in-edges have v == n); the distance
        # is always measured between site n and that neighbor.
        other = u if v == n else v
        site = self.molecule[other]
        dist = self.molecule[n].distance(site)

        connected_sites.add(ConnectedSite(site=site,
                                          jimage=(0, 0, 0),
                                          index=other,
                                          weight=weight,
                                          dist=dist))

    # return list sorted by closest sites first
    return sorted(connected_sites, key=lambda cs: cs.dist)
def convert_pdf_to_txt(pdf, startpage=None):
"""Convert a pdf file to text and return the text.
This method requires pdftotext to be installed.
Parameters
----------
pdf : str
path to pdf file
startpage : int, optional
the first page we try to convert
Returns
-------
str
the converted text
"""
if startpage is not None:
startpageargs = ['-f', str(startpage)]
else:
startpageargs = []
stdout = subprocess.Popen(["pdftotext", "-q"] + startpageargs + [pdf, "-"],
stdout=subprocess.PIPE).communicate()[0]
# python2 and 3
if not isinstance(stdout, str):
stdout = stdout.decode()
return stdout | Convert a pdf file to text and return the text.
This method requires pdftotext to be installed.
Parameters
----------
pdf : str
path to pdf file
startpage : int, optional
the first page we try to convert
Returns
-------
str
the converted text | Below is the the instruction that describes the task:
### Input:
Convert a pdf file to text and return the text.
This method requires pdftotext to be installed.
Parameters
----------
pdf : str
path to pdf file
startpage : int, optional
the first page we try to convert
Returns
-------
str
the converted text
### Response:
def convert_pdf_to_txt(pdf, startpage=None):
    """Convert a pdf file to text and return the text.

    This method requires pdftotext to be installed.

    Parameters
    ----------
    pdf : str
        path to pdf file
    startpage : int, optional
        the first page we try to convert

    Returns
    -------
    str
        the converted text
    """
    # Build the pdftotext command line; "-" sends the text to stdout.
    command = ["pdftotext", "-q"]
    if startpage is not None:
        command += ["-f", str(startpage)]
    command += [pdf, "-"]

    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    output = process.communicate()[0]

    # pdftotext emits bytes; normalize to str on both python2 and 3.
    if not isinstance(output, str):
        output = output.decode()
    return output
def update(self, reseed):
"""
Update that snowflake!
:param reseed: Whether we are in the normal reseed cycle or not.
"""
self._screen.print_at(" ", self._x, self._y)
cell = None
for _ in range(self._rate):
self._y += 1
cell = self._screen.get_from(self._x, self._y)
if cell is None or cell[0] != 32:
break
if ((cell is not None and cell[0] in [ord(x) for x in self._snow_chars + " "]) and
(self._y < self._screen.start_line + self._screen.height)):
self._screen.print_at(self._char,
self._x,
self._y)
else:
if self._y > self._screen.start_line + self._screen.height:
self._y = self._screen.start_line + self._screen.height
drift_index = -1
if cell:
drift_index = self._drift_chars.find(chr(cell[0]))
if 0 <= drift_index < len(self._drift_chars) - 1:
drift_char = self._drift_chars[drift_index + 1]
self._screen.print_at(drift_char, self._x, self._y)
else:
self._screen.print_at(",", self._x, self._y - 1)
if reseed:
self._reseed() | Update that snowflake!
:param reseed: Whether we are in the normal reseed cycle or not. | Below is the the instruction that describes the task:
### Input:
Update that snowflake!
:param reseed: Whether we are in the normal reseed cycle or not.
### Response:
def update(self, reseed):
    """
    Update that snowflake!

    :param reseed: Whether we are in the normal reseed cycle or not.
    """
    # Erase the flake at its current position before moving it.
    self._screen.print_at(" ", self._x, self._y)
    cell = None
    # Fall up to self._rate rows, stopping early at the first cell that
    # is not a blank space (32 is ord(" ")).
    for _ in range(self._rate):
        self._y += 1
        cell = self._screen.get_from(self._x, self._y)
        if cell is None or cell[0] != 32:
            break
    # Landed on a snow character (or blank) and still within the visible
    # screen area: just draw the falling flake at the new position.
    if ((cell is not None and cell[0] in [ord(x) for x in self._snow_chars + " "]) and
            (self._y < self._screen.start_line + self._screen.height)):
        self._screen.print_at(self._char,
                              self._x,
                              self._y)
    else:
        # Clamp the position to the bottom edge of the screen.
        if self._y > self._screen.start_line + self._screen.height:
            self._y = self._screen.start_line + self._screen.height
        drift_index = -1
        if cell:
            drift_index = self._drift_chars.find(chr(cell[0]))
        if 0 <= drift_index < len(self._drift_chars) - 1:
            # Landed on an existing drift: deepen it to the next stage.
            drift_char = self._drift_chars[drift_index + 1]
            self._screen.print_at(drift_char, self._x, self._y)
        else:
            # Landed on something else (or a fully-deep drift): start a
            # new drift one row above the obstacle.
            self._screen.print_at(",", self._x, self._y - 1)
        # The flake has settled; optionally respawn it at the top.
        if reseed:
            self._reseed()
def gateway_client_js():
"""
Template tag which provides a `script` tag for each javascript item
required by the payment gateway
"""
javascripts = GATEWAY.client_js()
if isinstance(javascripts, (tuple, list)):
tags = []
for js in javascripts:
tags.append('<script type="text/javascript" src="{}"></script>'.format(js))
return tags
else:
raise TypeError(
'function client_js of {} must return a list or tuple'.format(GATEWAY.__name__)) | Template tag which provides a `script` tag for each javascript item
required by the payment gateway | Below is the the instruction that describes the task:
### Input:
Template tag which provides a `script` tag for each javascript item
required by the payment gateway
### Response:
def gateway_client_js():
    """
    Template tag which provides a `script` tag for each javascript item
    required by the payment gateway
    """
    javascripts = GATEWAY.client_js()
    # Guard clause: the gateway contract requires a list/tuple of URLs.
    if not isinstance(javascripts, (tuple, list)):
        raise TypeError(
            'function client_js of {} must return a list or tuple'.format(GATEWAY.__name__))
    template = '<script type="text/javascript" src="{}"></script>'
    return [template.format(js) for js in javascripts]
def _download_movielens(dest_path):
"""
Download the dataset.
"""
url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
req = requests.get(url, stream=True)
with open(dest_path, 'wb') as fd:
for chunk in req.iter_content():
fd.write(chunk) | Download the dataset. | Below is the the instruction that describes the task:
### Input:
Download the dataset.
### Response:
def _download_movielens(dest_path):
    """
    Download the MovieLens 100k dataset archive to ``dest_path``.

    :param dest_path: local file path the zip archive is written to
    :raises requests.HTTPError: if the server responds with an error status
    """
    url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
    # Stream the download so the whole archive is never held in memory;
    # the `with` block releases the connection when done.
    with requests.get(url, stream=True) as req:
        # Fail loudly instead of silently saving an HTML error page.
        req.raise_for_status()
        with open(dest_path, 'wb') as fd:
            # iter_content() without chunk_size yields 1 byte at a time,
            # which is pathologically slow; use 64 KiB chunks instead.
            for chunk in req.iter_content(chunk_size=64 * 1024):
                fd.write(chunk)
def _print_windows(objects, sep, end, file, flush):
"""The windows implementation of print_()"""
h = winapi.INVALID_HANDLE_VALUE
try:
fileno = file.fileno()
except (EnvironmentError, AttributeError):
pass
else:
if fileno == 1:
h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE)
elif fileno == 2:
h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE)
encoding = _encoding
parts = []
for obj in objects:
if isinstance(obj, bytes):
obj = obj.decode(encoding, "replace")
if not isinstance(obj, text_type):
obj = text_type(obj)
parts.append(obj)
if isinstance(sep, bytes):
sep = sep.decode(encoding, "replace")
if not isinstance(sep, text_type):
raise TypeError
if isinstance(end, bytes):
end = end.decode(encoding, "replace")
if not isinstance(end, text_type):
raise TypeError
if end == u"\n":
end = os.linesep
text = sep.join(parts) + end
assert isinstance(text, text_type)
is_console = True
if h == winapi.INVALID_HANDLE_VALUE:
is_console = False
else:
# get the default value
info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)):
is_console = False
if is_console:
# make sure we flush before we apply any console attributes
file.flush()
# try to force a utf-8 code page, use the output CP if that fails
cp = winapi.GetConsoleOutputCP()
try:
encoding = "utf-8"
if winapi.SetConsoleOutputCP(65001) == 0:
encoding = None
for is_ansi, part in ansi_split(text):
if is_ansi:
ansi_state.apply(h, part)
else:
if encoding is not None:
data = part.encode(encoding, _surrogatepass)
else:
data = _encode_codepage(cp, part)
os.write(fileno, data)
finally:
# reset the code page to what we had before
winapi.SetConsoleOutputCP(cp)
else:
# try writing bytes first, so in case of Python 2 StringIO we get
# the same type on all platforms
try:
file.write(text.encode("utf-8", _surrogatepass))
except (TypeError, ValueError):
file.write(text)
if flush:
file.flush() | The windows implementation of print_() | Below is the the instruction that describes the task:
### Input:
The windows implementation of print_()
### Response:
def _print_windows(objects, sep, end, file, flush):
    """The windows implementation of print_()"""

    h = winapi.INVALID_HANDLE_VALUE
    try:
        fileno = file.fileno()
    except (EnvironmentError, AttributeError):
        pass
    else:
        # Only fetch a console handle for the real stdout/stderr file
        # descriptors; any other file is treated as a plain stream below.
        if fileno == 1:
            h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE)
        elif fileno == 2:
            h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE)

    encoding = _encoding

    # Normalize every object (and sep/end) to text, decoding bytes with
    # the configured encoding and falling back to str() conversion.
    parts = []
    for obj in objects:
        if isinstance(obj, bytes):
            obj = obj.decode(encoding, "replace")
        if not isinstance(obj, text_type):
            obj = text_type(obj)
        parts.append(obj)

    if isinstance(sep, bytes):
        sep = sep.decode(encoding, "replace")
    if not isinstance(sep, text_type):
        raise TypeError

    if isinstance(end, bytes):
        end = end.decode(encoding, "replace")
    if not isinstance(end, text_type):
        raise TypeError

    # Use the platform line separator for the default newline ending.
    if end == u"\n":
        end = os.linesep

    text = sep.join(parts) + end
    assert isinstance(text, text_type)

    is_console = True
    if h == winapi.INVALID_HANDLE_VALUE:
        is_console = False
    else:
        # get the default value
        info = winapi.CONSOLE_SCREEN_BUFFER_INFO()
        # GetConsoleScreenBufferInfo fails when the handle refers to a
        # pipe or redirected file rather than an actual console.
        if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)):
            is_console = False

    if is_console:
        # make sure we flush before we apply any console attributes
        file.flush()

        # try to force a utf-8 code page, use the output CP if that fails
        cp = winapi.GetConsoleOutputCP()
        try:
            encoding = "utf-8"
            if winapi.SetConsoleOutputCP(65001) == 0:
                encoding = None

            # ANSI escape sequences are translated into console attribute
            # changes; everything else is written as raw bytes.
            for is_ansi, part in ansi_split(text):
                if is_ansi:
                    ansi_state.apply(h, part)
                else:
                    if encoding is not None:
                        data = part.encode(encoding, _surrogatepass)
                    else:
                        data = _encode_codepage(cp, part)
                    os.write(fileno, data)
        finally:
            # reset the code page to what we had before
            winapi.SetConsoleOutputCP(cp)
    else:
        # try writing bytes first, so in case of Python 2 StringIO we get
        # the same type on all platforms
        try:
            file.write(text.encode("utf-8", _surrogatepass))
        except (TypeError, ValueError):
            file.write(text)

        if flush:
            file.flush()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.