| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q6800 | SeqRepo._get_unique_seqid | train | def _get_unique_seqid(self, alias, namespace):
"""given alias and namespace, return seq_id if exactly one distinct
sequence id is found, raise KeyError if there's no match, or
raise ValueError if there's more than one match.
"""
recs = self.aliases.find_aliases(alias=alias, namespace=namespace)
seq_ids = set(r["seq_id"] for r in recs)
if len(seq_ids) == 0:
| python | {
"resource": ""
} |
q6801 | SeqAliasDB.find_aliases | train | def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None):
"""returns iterator over alias annotation records that match criteria
The arguments, all optional, restrict the records that are
returned. Without arguments, all aliases are returned.
If arguments contain %, the `like` comparison operator is
used. Otherwise arguments must match exactly.
"""
clauses = []
params = []
def eq_or_like(s):
return "like" if "%" in s else "="
if translate_ncbi_namespace is None:
translate_ncbi_namespace = self.translate_ncbi_namespace
if alias is not None:
clauses += ["alias {} ?".format(eq_or_like(alias))]
params += [alias]
if namespace is not None:
# Switch to using RefSeq for RefSeq accessions
# issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups
# issue #31: later breaking change, translate database
if namespace == "RefSeq":
namespace = "NCBI"
clauses += ["namespace {} ?".format(eq_or_like(namespace))]
| python | {
"resource": ""
} |
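A short usage sketch of the `%`-to-`like` behavior described in the docstring. `db` stands in for a hypothetical, already-opened `SeqAliasDB` instance; the record keys follow the `seq_id` field used by `_get_unique_seqid` above.
```python
# db is a hypothetical, already-opened SeqAliasDB instance.
# Exact match on namespace and alias:
recs = list(db.find_aliases(namespace="NCBI", alias="NC_000019.10"))

# A "%" in an argument switches the comparison to SQL LIKE, so this
# matches every NCBI alias starting with "NC_":
recs = list(db.find_aliases(namespace="NCBI", alias="NC_%"))

# Without arguments, all alias records are returned:
for rec in db.find_aliases():
    print(rec["namespace"], rec["alias"], rec["seq_id"])
```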
q6802 | SeqAliasDB.store_alias | train | def store_alias(self, seq_id, namespace, alias):
"""associate a namespaced alias with a sequence
Alias association with sequences is idempotent: duplicate
associations are discarded silently.
"""
if not self._writeable:
raise RuntimeError("Cannot write -- opened read-only")
log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id)
try:
c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace,
alias))
| python | {
"resource": ""
} |
q6803 | add_assembly_names | train | def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
| python | {
"resource": ""
} |
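To make the alias construction concrete, here is a sketch of how the three documented aliases could be derived from the sample record, using `ncbi_alias_map` and `sr.aliases.store_alias` as they appear in this file; treat it as illustrative rather than the function's actual (truncated) tail.
```python
assy_name = "GRCh38"
record = {                      # the sample record from the docstring
    "aliases": ["chr19"],
    "genbank_ac": "CM000681.2",
    "name": "19",
    "refseq_ac": "NC_000019.10",
}

seq_id = ncbi_alias_map[record["refseq_ac"]]            # refseq accession -> seq_id
aliases = [("genbank", record["genbank_ac"])]           # genbank:CM000681.2
aliases += [(assy_name, record["name"])]                # GRCh38:19
aliases += [(assy_name, a) for a in record["aliases"]]  # GRCh38:chr19
for namespace, alias in aliases:
    sr.aliases.store_alias(seq_id, namespace, alias)
```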
q6804 | snapshot | train | def snapshot(opts):
"""snapshot a seqrepo data directory by hardlinking sequence files,
copying sqlite databases, and removing write permissions from directories
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
dst_dir = opts.destination_name
if not dst_dir.startswith("/"):
# interpret dst_dir as relative to parent dir of seqrepo_dir
dst_dir = os.path.join(opts.root_directory, dst_dir)
src_dir = os.path.realpath(seqrepo_dir)
dst_dir = os.path.realpath(dst_dir)
if commonpath([src_dir, dst_dir]).startswith(src_dir):
raise RuntimeError("Cannot nest seqrepo directories " "({} is within {})".format(dst_dir, src_dir))
if os.path.exists(dst_dir):
raise IOError(dst_dir + ": File exists")
tmp_dir = tempfile.mkdtemp(prefix=dst_dir + ".")
_logger.debug("src_dir = " + src_dir)
_logger.debug("dst_dir = " + dst_dir)
_logger.debug("tmp_dir = " + tmp_dir)
# TODO: cleanup of tmpdir on failure
makedirs(tmp_dir, exist_ok=True)
wd = os.getcwd()
os.chdir(src_dir)
# make destination directories (walk is top-down)
for rp in (os.path.join(dirpath, dirname) for dirpath, dirnames, _ in os.walk(".") for dirname in dirnames):
dp = os.path.join(tmp_dir, rp)
os.mkdir(dp)
# hard link sequence files
for rp in (os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(".") for filename in filenames
if ".bgz" in filename):
| python | {
"resource": ""
} |
q6805 | StorageBackend.set_sqlite_pragmas | train | def set_sqlite_pragmas(self):
"""
Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine.
It currently sets:
- journal_mode to WAL
:return: None
"""
def _pragmas_on_connect(dbapi_con, | python | {
"resource": ""
} |
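The body is truncated right at the nested handler; below is a minimal sketch of how such a hook is typically wired up with SQLAlchemy's `connect` event, assuming `self.engine` is a SQLAlchemy engine. The real iceqube body may differ.
```python
from sqlalchemy import event

def set_sqlite_pragmas_sketch(self):
    def _pragmas_on_connect(dbapi_con, con_record):
        # Runs once for each new DBAPI connection in the pool.
        dbapi_con.execute("PRAGMA journal_mode = WAL;")

    event.listen(self.engine, "connect", _pragmas_on_connect)
```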
q6806 | StorageBackend.schedule_job | train | def schedule_job(self, j):
"""
Add the job given by j to the job queue.
Note: Does not actually run the job.
"""
job_id = uuid.uuid4().hex
j.job_id = job_id
session = self.sessionmaker()
orm_job = ORMJob(
id=job_id,
state=j.state,
app=self.app,
namespace=self.namespace,
obj=j)
| python | {
"resource": ""
} |
q6807 | StorageBackend.mark_job_as_canceling | train | def mark_job_as_canceling(self, job_id):
"""
Mark the job as requested for canceling. Does not actually try to cancel a running job. | python | {
"resource": ""
} |
q6808 | BaseWorkerBackend.handle_incoming_message | train | def handle_incoming_message(self, msg):
"""
Start or cancel a job, based on the msg.
If msg.type == MessageType.START_JOB, then start the job given by msg.job.
If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id. | python | {
"resource": ""
} |
q6809 | WorkerBackend.schedule_job | train | def schedule_job(self, job):
"""
Schedule a job to run on the type of workers spawned by self.start_workers.
:param job: the job to schedule for running.
:return:
"""
l = _reraise_with_traceback(job.get_lambda_to_execute())
future = self.workers.submit(l, update_progress_func=self.update_progress, cancel_job_func=self._check_for_cancel)
# assign the futures to a dict, mapping them to a job
self.job_future_mapping[future] = job
| python | {
"resource": ""
} |
q6810 | WorkerBackend._check_for_cancel | train | def _check_for_cancel(self, job_id, current_stage=""):
"""
Check if a job has been requested to be cancelled. When called, the calling function can
optionally give the stage it is currently in, so the user has information on where the job
was before it was cancelled.
| python | {
"resource": ""
} |
q6811 | Scheduler.request_job_cancel | train | def request_job_cancel(self, job_id):
"""
Send a message to the workers to cancel the job with job_id. We then mark the job in the storage
as being canceled.
:param job_id: the job to cancel
:return: None
""" | python | {
"resource": ""
} |
q6812 | Scheduler.handle_worker_messages | train | def handle_worker_messages(self, timeout):
"""
Read messages that are placed in self.incoming_mailbox,
and then update the job states corresponding to each message.
Args:
timeout: How long to wait for an incoming message, if the mailbox is empty right now.
| python | {
"resource": ""
} |
q6813 | Job.get_lambda_to_execute | train | def get_lambda_to_execute(self):
"""
return a function that executes the function assigned to this job.
If job.track_progress is None (the default), the returned function accepts no argument
and simply needs to be called. If job.track_progress is True, an update_progress function
is passed in that can be used by the function to provide feedback progress back to the
job scheduling system.
:return: a function that executes the original function assigned to this job.
"""
def y(update_progress_func, cancel_job_func):
"""
Call the function stored in self.func, passing in update_progress_func
or cancel_job_func depending on whether self.track_progress or self.cancellable is defined,
respectively.
:param update_progress_func: The callback for when the job updates its progress.
:param cancel_job_func: The function that the function has to call occasionally to see
if the user wants to cancel the currently running job.
:return: Any
"""
| python | {
"resource": ""
} |
q6814 | Job.percentage_progress | train | def percentage_progress(self):
"""
Returns a float between 0 and 1, representing the current job's progress in its task.
If total_progress is not given or 0, just return self.progress.
:return: float corresponding to the total percentage progress of the job.
"""
| python | {
"resource": ""
} |
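A minimal sketch of the computation the docstring describes; `progress` and `total_progress` are the Job attributes referenced above.
```python
def percentage_progress_sketch(job):
    # Guard against a missing or zero total_progress, per the docstring.
    if job.total_progress:
        return float(job.progress) / job.total_progress
    return job.progress
```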
q6815 | Client.schedule | train | def schedule(self, func, *args, **kwargs):
"""
Schedules a function func for execution.
One special parameter is track_progress. If passed in and not None, the func will be passed a
keyword parameter called update_progress:
def update_progress(progress, total_progress, stage=""):
The running function can call the update_progress function to notify interested parties of the function's
current progress.
Another special parameter is the "cancellable" keyword parameter. When passed in and not None, a special
"check_for_cancel" parameter is passed in. When called, it raises an error when the user has requested a job
| python | {
"resource": ""
} |
q6816 | Client.wait | train | def wait(self, job_id, timeout=None):
"""
Wait until the job given by job_id has a new update.
:param job_id: the id of the job to wait for. | python | {
"resource": ""
} |
q6817 | Client.wait_for_completion | train | def wait_for_completion(self, job_id, timeout=None):
"""
Wait for the job given by job_id to change to COMPLETED or CANCELED. Raises an
iceqube.exceptions.TimeoutError if timeout is exceeded before each job change.
| python | {
"resource": ""
} |
q6818 | invalidate_cache_after_error | train | def invalidate_cache_after_error(f):
"""
catch any exception and invalidate internal cache with list of nodes
"""
@wraps(f)
def wrapper(self, *args, **kwds):
try:
return f(self, *args, **kwds)
| python | {
"resource": ""
} |
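A sketch of the full decorator pattern: run the wrapped method, and on any exception reset the cached node list before re-raising. `clear_cluster_nodes_cache` is an assumed name for whatever method resets the internal cache.
```python
from functools import wraps

def invalidate_cache_after_error_sketch(f):
    @wraps(f)
    def wrapper(self, *args, **kwds):
        try:
            return f(self, *args, **kwds)
        except Exception:
            self.clear_cluster_nodes_cache()  # assumed cache-reset method
            raise
    return wrapper
```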
q6819 | ElastiCache.update_params | train | def update_params(self, params):
"""
update connection params to maximize performance
"""
if not params.get('BINARY', True):
raise Warning('To increase performance please use ElastiCache'
' in binary mode')
else:
params['BINARY'] = True # patch params, set binary mode
if 'OPTIONS' not in | python | {
"resource": ""
} |
q6820 | ElastiCache.get_cluster_nodes | train | def get_cluster_nodes(self):
"""
return list with all nodes in cluster
"""
if not hasattr(self, '_cluster_nodes_cache'):
server, port = self._servers[0].split(':')
try:
self._cluster_nodes_cache = (
get_cluster_info(server, port,
self._ignore_cluster_errors)['nodes'])
| python | {
"resource": ""
} |
q6821 | restore_placeholders | train | def restore_placeholders(msgid, translation):
"""Restore placeholders in the translated message."""
placeholders = re.findall(r'(\s*)(%(?:\(\w+\))?[sd])(\s*)', msgid)
return re.sub(
r'(\s*)(__[\w]+?__)(\s*)',
| python | {
"resource": ""
} |
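A small illustration of the round trip this helper supports. The masked-token format `__name__` is inferred from the regex above; the masking step itself lives elsewhere in this codebase, so treat the inputs as hypothetical.
```python
# msgid contains real gettext placeholders; the machine translation came
# back with the masked tokens (inferred format) still in place.
msgid = "Hello %(name)s, today is %(day)s"
translation = "Bonjour __name__, nous sommes __day__"

restore_placeholders(msgid, translation)
# expected result: "Bonjour %(name)s, nous sommes %(day)s"
```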
q6822 | Command.translate_file | train | def translate_file(self, root, file_name, target_language):
"""
convenience method for translating a pot file
:param root: the absolute path of folder where the file is present
:param file_name: name of the file to be translated (it should be a pot file)
:param target_language: language in which the file needs to be translated
"""
logger.info('filling up translations for locale `{}`'.format(target_language))
po = polib.pofile(os.path.join(root, file_name))
strings = self.get_strings_to_translate(po)
# translate the strings,
# all the translated strings | python | {
"resource": ""
} |
q6823 | Command.get_strings_to_translate | train | def get_strings_to_translate(self, po):
"""Return list of string to translate from po file.
:param po: POFile object to translate
:type po: polib.POFile
:return: list of string to translate
:rtype: collections.Iterable[six.text_type]
"""
strings = []
for index, entry in enumerate(po):
if not self.need_translate(entry):
continue
| python | {
"resource": ""
} |
q6824 | Command.update_translations | train | def update_translations(self, entries, translated_strings):
"""Update translations in entries.
The order and number of translations should match to get_strings_to_translate() result.
:param entries: list of entries to translate
:type entries: collections.Iterable[polib.POEntry] | polib.POFile
:param translated_strings: list of translations
:type translated_strings: collections.Iterable[six.text_type]
"""
translations = iter(translated_strings)
for entry in entries:
if not self.need_translate(entry):
continue
if entry.msgid_plural:
# fill the first plural form with the entry.msgid translation
translation = next(translations)
translation = fix_translation(entry.msgid, translation)
entry.msgstr_plural[0] = translation
# fill the rest of plural forms with the entry.msgid_plural translation
translation = next(translations)
| python | {
"resource": ""
} |
q6825 | load_ply | train | def load_ply(fileobj):
"""Same as load_ply, but takes a file-like object"""
def nextline():
"""Read next line, skip comments"""
while True:
line = fileobj.readline()
assert line != '' # eof
if not line.startswith('comment'):
return line.strip()
assert nextline() == 'ply'
assert nextline() == 'format ascii 1.0'
line = nextline()
assert line.startswith('element vertex')
nverts = int(line.split()[2])
# print 'nverts : ', nverts
assert nextline() == 'property float x'
assert nextline() == 'property float y'
assert nextline() == 'property float z'
line = nextline()
assert line.startswith('element face')
nfaces = int(line.split()[2])
# print 'nfaces : ', nfaces
assert nextline() == 'property list uchar int vertex_indices'
line = nextline()
has_texcoords = line == 'property list uchar float texcoord'
if has_texcoords:
assert nextline() == 'end_header'
else:
assert line == 'end_header'
# Verts
verts = np.zeros((nverts, 3))
for i in range(nverts):
vals = nextline().split()
verts[i, :] = [float(v) for v in vals[:3]]
# Faces
faces = []
| python | {
"resource": ""
} |
q6826 | read_ssh_config | train | def read_ssh_config(path):
"""
Read ssh config file and return parsed SshConfig
"""
with open(path, "r") as fh_:
| python | {
"resource": ""
} |
q6827 | _remap_key | train | def _remap_key(key):
""" Change key into correct casing if we know the parameter """
if key in KNOWN_PARAMS:
return key
if key.lower() | python | {
"resource": ""
} |
q6828 | SshConfig.parse | train | def parse(self, lines):
"""Parse lines from ssh config file"""
cur_entry = None
for line in lines:
kv_ = _key_value(line)
if len(kv_) > 1:
key, value = kv_
if key.lower() == "host":
cur_entry = value
self.hosts_.add(value)
| python | {
"resource": ""
} |
q6829 | SshConfig.host | train | def host(self, host):
"""
Return the configuration of a specific host as a dictionary.
Dictionary always contains lowercase versions of the attribute names.
Parameters
----------
host : the host to return values for.
Returns
-------
dict of key value pairs, excluding "Host", empty map if host is not found.
"""
if host in self.hosts_:
vals = defaultdict(list)
for k, value in [(x.key.lower(), x.value) for x in self.lines_
| python | {
"resource": ""
} |
q6830 | SshConfig.set | train | def set(self, host, **kwargs):
"""
Set configuration values for an existing host.
Overwrites values for existing settings, or adds new settings.
Parameters
----------
host : the Host to modify.
**kwargs : The new configuration parameters
"""
self.__check_host_args(host, kwargs)
def update_line(key, value):
"""Produce new config line"""
return " %s %s" % (key, value)
for key, values in kwargs.items():
if type(values) not in [list, tuple]: # pylint: disable=unidiomatic-typecheck
values = [values]
lower_key = key.lower()
update_idx = [idx for idx, x in enumerate(self.lines_)
if x.host == host and x.key.lower() == lower_key]
extra_remove = []
for idx in update_idx:
if values: # values available, update the line
value = values.pop()
self.lines_[idx].line = update_line(self.lines_[idx].key, value)
| python | {
"resource": ""
} |
q6831 | SshConfig.unset | train | def unset(self, host, *args):
"""
Removes settings for a host.
Parameters
----------
host : the host to remove settings from.
*args : list of settings to remove.
"""
self.__check_host_args(host, args)
remove_idx = [idx for idx, x in | python | {
"resource": ""
} |
q6832 | SshConfig.rename | train | def rename(self, old_host, new_host):
"""
Renames a host configuration.
Parameters
----------
old_host : the host to rename.
new_host : the new host value
"""
if new_host in self.hosts_:
raise ValueError("Host %s: already exists." % new_host)
for line in self.lines_: # update lines
if line.host == old_host:
line.host = new_host
if | python | {
"resource": ""
} |
q6833 | SshConfig.add | train | def add(self, host, **kwargs):
"""
Add another host to the SSH configuration.
Parameters
----------
host: The Host entry to add.
**kwargs: The parameters for the host (without "Host" parameter itself)
"""
if host in self.hosts_:
raise ValueError("Host %s: exists (use update)." % host)
| python | {
"resource": ""
} |
q6834 | SshConfig.remove | train | def remove(self, host):
"""
Removes a host from the SSH configuration.
Parameters
----------
host : The host to remove
"""
if host not in self.hosts_:
raise ValueError("Host %s: not found." % host)
self.hosts_.remove(host)
# remove lines, including comments inside the host lines
| python | {
"resource": ""
} |
q6835 | SshConfig.write | train | def write(self, path):
"""
Writes ssh config file
Parameters
| python | {
"resource": ""
} |
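A hedged end-to-end sketch of the sshconf API shown in the rows above (`read_ssh_config`, `host`, `set`, `rename`, `write`); the paths and host name are made up.
```python
import os

conf = read_ssh_config(os.path.expanduser("~/.ssh/config"))
print(conf.host("myserver"))        # e.g. {"hostname": "10.0.0.5", "user": "deploy"}
conf.set("myserver", Port=2222)     # overwrite or add a setting
conf.rename("myserver", "oldserver")
conf.write(os.path.expanduser("~/.ssh/config.new"))
```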
q6836 | orthogonal_vector | train | def orthogonal_vector(v):
"""Return an arbitrary vector that is orthogonal to v"""
if v[1] != 0 or v[2] != 0:
| python | {
"resource": ""
} |
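One common construction consistent with the visible branch: cross v with a fixed axis that cannot be parallel to it. This is a sketch of the idea, not necessarily the truncated body.
```python
import numpy as np

def orthogonal_vector_sketch(v):
    if v[1] != 0 or v[2] != 0:
        other = np.array([1, 0, 0])   # v is not parallel to the x axis
    else:
        other = np.array([0, 1, 0])   # v lies along x, so use y instead
    return np.cross(v, other)

v = np.array([0.0, 3.0, 4.0])
assert np.dot(orthogonal_vector_sketch(v), v) == 0.0
```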
q6837 | show_plane | train | def show_plane(orig, n, scale=1.0, **kwargs):
"""
Show the plane with the given origin and normal. scale give its size
"""
b1 = orthogonal_vector(n)
b1 /= la.norm(b1)
b2 = np.cross(b1, n)
b2 /= la.norm(b2)
verts = [orig + scale*(-b1 - b2),
| python | {
"resource": ""
} |
q6838 | triangle_intersects_plane | train | def triangle_intersects_plane(mesh, tid, plane):
"""
Returns true if the given triangle is cut by the plane. This will return
false if a single vertex of the triangle lies on the plane
"""
dists | python | {
"resource": ""
} |
q6839 | compute_triangle_plane_intersections | train | def compute_triangle_plane_intersections(mesh, tid, plane, dist_tol=1e-8):
"""
Compute the intersection between a triangle and a plane
Returns a list of intersections in the form
(INTERSECT_EDGE, <intersection point>, <edge>) for edges intersection
(INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices
This returns between 0 and 2 intersections:
- 0 : the plane does not intersect the triangle
- 1 : one of the triangle's vertices lies on the plane (so it just
"touches" the plane without really intersecting)
- 2 : the plane slices the triangle in two parts (either vertex-edge,
vertex-vertex or edge-edge)
"""
# TODO: Use a distance cache
dists = {vid: point_to_plane_dist(mesh.verts[vid], plane)
for vid in mesh.tris[tid]}
# TODO: Use an edge intersection cache (we currently compute each edge
# intersection twice : once for each tri)
# This is to avoid registering the same vertex intersection twice
# from two different edges
vert_intersect = {vid: False for vid in dists.keys()}
# Iterate | python | {
"resource": ""
} |
q6840 | _walk_polyline | train | def _walk_polyline(tid, intersect, T, mesh, plane, dist_tol):
"""
Given an intersection, walk through the mesh triangles, computing
intersection with the cut plane for each visited triangle and adding
those intersection to a polyline.
"""
T = set(T)
p = []
# Loop until we have explored all the triangles for the current
# polyline
while True:
p.append(intersect[1])
tid, intersections, T = get_next_triangle(mesh, T, plane,
| python | {
"resource": ""
} |
q6841 | cross_section | train | def cross_section(verts, tris, plane_orig, plane_normal, **kwargs):
"""
Compute the planar cross section of a mesh. This returns a set of
polylines.
Args:
verts: Nx3 array of the vertices position
tris: Nx3 array of the faces, containing vertex indices
plane_orig: 3-vector indicating the plane origin
plane_normal: 3-vector indicating the plane normal
Returns: | python | {
"resource": ""
} |
q6842 | merge_close_vertices | train | def merge_close_vertices(verts, faces, close_epsilon=1e-5):
"""
Will merge vertices that are closer than close_epsilon.
Warning, this has an O(n^2) memory usage because we compute the full
vert-to-vert distance matrix. If you have a large mesh, might want
to use some kind of spatial search structure like an octree or some fancy
hashing scheme
Returns: new_verts, new_faces
"""
# Pairwise distance between verts
if USE_SCIPY:
D = spdist.cdist(verts, verts)
else:
D = np.sqrt(np.abs(pdist_squareformed_numpy(verts)))
# Compute a mapping from old to new : for each input vert, store the index
| python | {
"resource": ""
} |
q6843 | signed_to_float | train | def signed_to_float(hex: str) -> float:
"""Convert signed hexadecimal to floating value."""
if int(hex, 16) & 0x8000:
| python | {
"resource": ""
} |
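The body above is cut off; a plausible completion, consistent with RFLink's convention of sign-bit-plus-magnitude values scaled by ten (used for temperatures), might look like the sketch below. It is an illustration, not necessarily the library's exact code.
```python
def signed_to_float_sketch(hex: str) -> float:
    """Convert a 16-bit sign-and-magnitude hex string to a float in tenths."""
    if int(hex, 16) & 0x8000:            # high bit set -> negative value
        return -(int(hex, 16) & 0x7FFF) / 10
    return int(hex, 16) / 10

assert signed_to_float_sketch("00DC") == 22.0   # 0x00DC = 220 -> 22.0
assert signed_to_float_sketch("80DC") == -22.0  # same magnitude, sign bit set
```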
q6844 | encode_packet | train | def encode_packet(packet: dict) -> str:
"""Construct packet string from packet dictionary.
>>> encode_packet({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'10;newkaku;000001;01;on;'
"""
if packet['protocol'] == 'rfdebug':
return '10;RFDEBUG=' + packet['command'] + ';'
| python | {
"resource": ""
} |
q6845 | serialize_packet_id | train | def serialize_packet_id(packet: dict) -> str:
"""Serialize packet identifiers into one reversable string.
>>> serialize_packet_id({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'newkaku_000001_01'
>>> serialize_packet_id({
... 'protocol': 'ikea koppla',
... 'id': '000080',
... 'switch': '0',
... 'command': 'on',
... })
'ikeakoppla_000080_0'
>>> # unserializeable protocol name without explicit entry
>>> # in translation table should be properly serialized
>>> serialize_packet_id({
... 'protocol': 'alecto v4',
... 'id': '000080',
... | python | {
"resource": ""
} |
q6846 | packet_events | train | def packet_events(packet: dict) -> Generator:
"""Return list of all events in the packet.
>>> x = list(packet_events({
... 'protocol': 'alecto v1',
... 'id': 'ec02',
... 'temperature': 1.0,
... 'temperature_unit': '°C',
... 'humidity': 10,
... 'humidity_unit': '%',
... }))
>>> assert {
... 'id': 'alectov1_ec02_temp',
... 'sensor': 'temperature',
... 'value': 1.0,
... 'unit': '°C',
... } in x
>>> assert {
... 'id': 'alectov1_ec02_hum',
... 'sensor': 'humidity',
... 'value': 10,
... 'unit': '%',
... } in x
>>> y = list(packet_events({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... }))
>>> assert {'id': 'newkaku_000001_01', 'command': 'on'} in y
"""
field_abbrev = {v: k for k, v in PACKET_FIELDS.items()}
packet_id = serialize_packet_id(packet)
events = {f: v for f, v in packet.items() if f in field_abbrev}
if 'command' in events or 'version' in events:
# switch events only have one event in each packet
yield dict(id=packet_id, **events)
else:
if packet_id == 'debug':
yield {
'id': 'raw',
| python | {
"resource": ""
} |
q6847 | RFLinkProxy.forward_packet | train | def forward_packet(self, writer, packet, raw_packet):
"""Forward packet from client to RFLink."""
peer = writer.get_extra_info('peername')
log.debug(' %s:%s: forwarding data: %s', peer[0], peer[1], packet)
if 'command' in packet:
packet_id = serialize_packet_id(packet)
command = packet['command']
ack = yield from self.protocol.send_command_ack(
packet_id, command)
if ack:
| python | {
"resource": ""
} |
q6848 | RFLinkProxy.client_connected_callback | train | def client_connected_callback(self, reader, writer):
"""Handle connected client."""
peer = writer.get_extra_info('peername')
clients.append((reader, writer, peer))
log.info("Incoming connection from: %s:%s", peer[0], peer[1])
try:
while True:
data = yield from reader.readline()
if not data:
break
try:
line = data.decode().strip()
except UnicodeDecodeError:
line = '\x00'
# Workaround for domoticz issue #2816
if line[-1] != DELIM:
line = line + DELIM
if valid_packet(line):
| python | {
"resource": ""
} |
q6849 | RFLinkProxy.raw_callback | train | def raw_callback(self, raw_packet):
"""Send data to all connected clients."""
if ';PONG;' not in raw_packet:
log.info('forwarding packet %s to clients', raw_packet)
else:
log.debug('forwarding packet %s to clients', raw_packet)
| python | {
"resource": ""
} |
q6850 | RFLinkProxy.reconnect | train | def reconnect(self, exc=None):
"""Schedule reconnect after connection has been unexpectedly lost."""
# Reset protocol binding before starting reconnect
self.protocol = None
if not self.closing:
| python | {
"resource": ""
} |
q6851 | create_rflink_connection | train | def create_rflink_connection(port=None, host=None, baud=57600, protocol=RflinkProtocol,
packet_callback=None, event_callback=None,
disconnect_callback=None, ignore=None, loop=None):
"""Create Rflink manager class, returns transport coroutine."""
# use default protocol if not specified
| python | {
"resource": ""
} |
q6852 | ProtocolBase.data_received | train | def data_received(self, data):
"""Add incoming data to buffer."""
data = data.decode()
| python | {
"resource": ""
} |
q6853 | ProtocolBase.send_raw_packet | train | def send_raw_packet(self, packet: str):
"""Encode and put packet string onto write buffer."""
data = packet + '\r\n'
| python | {
"resource": ""
} |
q6854 | ProtocolBase.log_all | train | def log_all(self, file):
"""Log all data received from RFLink to file."""
global rflink_log
if file is None:
rflink_log = None
else: | python | {
"resource": ""
} |
q6855 | PacketHandling.handle_packet | train | def handle_packet(self, packet):
"""Process incoming packet dict and optionally call callback."""
if self.packet_callback:
# forward to callback
| python | {
"resource": ""
} |
q6856 | PacketHandling.send_command | train | def send_command(self, device_id, action):
"""Send device command to rflink gateway."""
command = | python | {
"resource": ""
} |
q6857 | CommandSerialization.send_command_ack | train | def send_command_ack(self, device_id, action):
"""Send command, wait for gateway to repond with acknowledgment."""
# serialize commands
yield from self._ready_to_send.acquire()
acknowledgement = None
try:
self._command_ack.clear()
self.send_command(device_id, action)
log.debug('waiting for acknowledgement')
try:
yield from asyncio.wait_for(self._command_ack.wait(),
TIMEOUT.seconds, loop=self.loop)
| python | {
"resource": ""
} |
q6858 | EventHandling._handle_packet | train | def _handle_packet(self, packet):
"""Event specific packet handling logic.
Break packet into events and fires configured event callback or
nicely prints events for console.
"""
events = packet_events(packet)
| python | {
"resource": ""
} |
q6859 | EventHandling.ignore_event | train | def ignore_event(self, event_id):
"""Verify event id against list of events to ignore.
>>> e = EventHandling(ignore=[
... 'test1_00',
... 'test2_*',
... ])
>>> e.ignore_event('test1_00')
| python | {
"resource": ""
} |
q6860 | _initial_population_gsa | train | def _initial_population_gsa(population_size, solution_size, lower_bounds,
upper_bounds):
"""Create a random initial population of floating point values.
Args:
population_size: an integer representing the number of solutions in the population.
problem_size: the number of values in each solution.
lower_bounds: a list, each value is a lower bound for the corresponding
part of the solution.
upper_bounds: a list, each value is a upper bound for the corresponding
part of the | python | {
"resource": ""
} |
q6861 | _new_population_gsa | train | def _new_population_gsa(population, fitnesses, velocities, lower_bounds,
upper_bounds, grav_initial, grav_reduction_rate,
iteration, max_iterations):
"""Generate a new population as given by GSA algorithm.
In GSA paper, grav_initial is G_i
"""
# Update the gravitational constant, and the best and worst of the population
# Calculate the mass and acceleration for each solution
# Update the velocity and position of each solution
population_size = len(population)
solution_size = len(population[0])
# In GSA paper, grav is G
grav = _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
max_iterations)
masses = _get_masses(fitnesses)
# Create bundled solution with position and mass for the K best calculation
# Also store index to later check if two solutions are the same
# Sorted by solution fitness (mass)
solutions = [{
'pos': pos,
'mass': mass,
'index': i
} for i, (pos, mass) in enumerate(zip(population, masses))]
solutions.sort(key=lambda x: x['mass'], reverse=True)
# Get the force on each solution
# Only the best K solutions apply force
# K linearly decreases to 1
num_best = int(population_size - (population_size - 1) *
(iteration / float(max_iterations)))
forces = []
for i in range(population_size):
force_vectors = []
for j in range(num_best):
# If it is not the same solution
| python | {
"resource": ""
} |
q6862 | _next_grav_gsa | train | def _next_grav_gsa(grav_initial, grav_reduction_rate, iteration,
max_iterations):
"""Calculate G as given by GSA algorithm.
In GSA paper, grav is G
| python | {
"resource": ""
} |
q6863 | _get_masses | train | def _get_masses(fitnesses):
"""Convert fitnesses into masses, as given by GSA algorithm."""
# Obtain constants
best_fitness = max(fitnesses)
worst_fitness = min(fitnesses)
fitness_range = best_fitness - worst_fitness
# Calculate raw masses for each solution
| python | {
"resource": ""
} |
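For reference, the mass normalization from the GSA paper that this function describes: raw mass m_i = (fit_i - worst) / (best - worst), then normalize so the masses sum to 1. The handling of the all-equal-fitness edge case below is an assumption.
```python
def get_masses_sketch(fitnesses):
    best, worst = max(fitnesses), min(fitnesses)
    if best == worst:                        # degenerate case: assumed handling
        return [1.0 / len(fitnesses)] * len(fitnesses)
    raw = [(f - worst) / (best - worst) for f in fitnesses]
    total = sum(raw)
    return [m / total for m in raw]

print(get_masses_sketch([1.0, 2.0, 3.0]))    # [0.0, 0.333..., 0.666...]
```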
q6864 | _gsa_force | train | def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
"""Gives the force of solution j on solution i.
Variable name in GSA paper given in ()
args:
grav: The gravitational constant. (G)
mass_i: The mass of solution i (derived from fitness). (M_i)
mass_j: The mass of solution j (derived from fitness). (M_j)
position_i: | python | {
"resource": ""
} |
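The force expression from the GSA paper that this function's docstring references can be sketched as follows; `epsilon` stands for the small constant the paper adds to avoid division by zero, and the exact constant used by this library is an assumption.
```python
import numpy as np

def gsa_force_sketch(grav, mass_i, mass_j, position_i, position_j,
                     epsilon=1e-10):
    """F_ij = G * (M_i * M_j / (R_ij + eps)) * (x_j - x_i), per the GSA paper."""
    position_i = np.asarray(position_i, dtype=float)
    position_j = np.asarray(position_j, dtype=float)
    distance = np.linalg.norm(position_j - position_i)  # Euclidean R_ij
    return grav * (mass_i * mass_j / (distance + epsilon)) * (
        position_j - position_i)
```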
q6865 | _gsa_total_force | train | def _gsa_total_force(force_vectors, vector_length):
"""Return a randomly weighted sum of the force vectors.
args:
force_vectors: A list of force vectors on solution i.
returns:
numpy.array; The total force on solution i.
"""
if len(force_vectors) == 0:
return [0.0] * vector_length
# The GSA algorithm specifies that the total force in each dimension
# is a random sum of the individual forces in that dimension.
| python | {
"resource": ""
} |
q6866 | _gsa_update_velocity | train | def _gsa_update_velocity(velocity, acceleration):
"""Stochastically update velocity given acceleration.
In GSA paper, velocity is v_i, acceleration is a_i
"""
# The GSA algorithm specifies that the new velocity for each dimension
# is a sum of a random fraction of its current velocity in that dimension,
# and its acceleration in that dimension
# For this reason we sum the | python | {
"resource": ""
} |
q6867 | _new_population_genalg | train | def _new_population_genalg(population,
fitnesses,
mutation_chance=0.02,
crossover_chance=0.7,
selection_function=gaoperators.tournament_selection,
crossover_function=gaoperators.one_point_crossover):
"""Perform all genetic algorithm operations on a population, and return a new population.
population must have an even number of chromosomes.
Args:
population: A list of binary lists, ex. [[0,1,1,0], [1,0,1,0]]
fitness: A list of fitnesses that correspond with chromosomes in the population,
| python | {
"resource": ""
} |
q6868 | _crossover | train | def _crossover(population, crossover_chance, crossover_operator):
"""Perform crossover on a population, return the new crossed-over population."""
new_population = []
for i in range(0, len(population), 2): # For every other index
# Take parents from every set of 2 in the population
# Wrap index if out of range
try:
parents = (population[i], population[i + 1])
except IndexError:
parents = (population[i], population[0])
# If crossover takes | python | {
"resource": ""
} |
q6869 | random_real_solution | train | def random_real_solution(solution_size, lower_bounds, upper_bounds):
"""Make a list of random real numbers between lower and upper bounds."""
return [
| python | {
"resource": ""
} |
q6870 | make_population | train | def make_population(population_size, solution_generator, *args, **kwargs):
"""Make a population with the supplied generator."""
return [
| python | {
"resource": ""
} |
q6871 | tournament_selection | train | def tournament_selection(population,
fitnesses,
num_competitors=2,
diversity_weight=0.0):
"""Create a list of parents with tournament selection.
Args:
population: A list of solutions.
fitnesses: A list of fitness values corresponding to solutions in population.
num_competitors: Number of solutions to compare every round.
Best solution among competitors is selected.
diversity_weight: Weight of diversity metric.
Determines how frequently diversity is used to select tournament winners.
Note that fitness is given a weight of 1.0.
diversity_weight == 1.0 gives equal weight to diversity and fitness.
"""
# Optimization if diversity factor is disabled
if diversity_weight <= 0.0:
fitness_pop = zip(fitnesses,
population) # Zip for easy fitness comparison
# Get num_competitors random chromosomes, then add best to result,
# by taking max fitness and getting chromosome from tuple.
# Repeat until full.
return [
max(random.sample(fitness_pop, num_competitors))[1]
for _ in range(len(population))
]
else:
indices = range(len(population))
# Select tournament winners by either max fitness or diversity.
# The metric to check is randomly selected, weighted by diversity_weight.
# diversity_metric is calculated between the given solution,
# and the list of all currently selected solutions.
selected_solutions | python | {
"resource": ""
} |
q6872 | stochastic_selection | train | def stochastic_selection(population, fitnesses):
"""Create a list of parents with stochastic universal sampling."""
pop_size = len(population)
probabilities = _fitnesses_to_probabilities(fitnesses)
# Create selection list (for stochastic universal sampling)
selection_list = []
selection_spacing = 1.0 / pop_size
selection_start = random.uniform(0.0, selection_spacing)
for i in range(pop_size):
selection_list.append(selection_start + selection_spacing * i)
# Select intermediate population according to selection list | python | {
"resource": ""
} |
q6873 | roulette_selection | train | def roulette_selection(population, fitnesses):
"""Create a list of parents with roulette selection."""
probabilities = _fitnesses_to_probabilities(fitnesses)
intermediate_population = []
for _ in range(len(population)):
# Choose a random individual
selection = random.uniform(0.0, 1.0)
# Iterate over probabilities list
| python | {
"resource": ""
} |
q6874 | _diversity_metric | train | def _diversity_metric(solution, population):
"""Return diversity value for solution compared to given population.
Metric is sum of distance between solution and each solution in population,
normalized to [0.0, 1.0].
"""
# Edge case for empty population
# If there are no other solutions, the given solution has maximum diversity
if population == []:
return 1.0
| python | {
"resource": ""
} |
q6875 | _manhattan_distance | train | def _manhattan_distance(vec_a, vec_b):
"""Return manhattan distance between two lists of numbers."""
if len(vec_a) != len(vec_b):
raise ValueError('len(vec_a) | python | {
"resource": ""
} |
q6876 | _fitnesses_to_probabilities | train | def _fitnesses_to_probabilities(fitnesses):
"""Return a list of probabilities proportional to fitnesses."""
# Do not allow negative fitness values
min_fitness = min(fitnesses)
if min_fitness < 0.0:
# Make smallest fitness value 0
fitnesses = map(lambda f: f - min_fitness, fitnesses)
fitness_sum = sum(fitnesses)
# Generate probabilities
# Creates a list of increasing values.
# The greater the gap between two values, the greater the probability.
# Ex. [0.1, 0.23, 0.56, 1.0]
prob_sum = 0.0
probabilities = []
for fitness in fitnesses:
if fitness < 0:
| python | {
"resource": ""
} |
q6877 | one_point_crossover | train | def one_point_crossover(parents):
"""Perform one point crossover on two parent chromosomes.
Select a random position in the chromosome.
Take genes to the left from one parent and the rest from the other parent.
Ex. p1 = xxxxx, p2 = yyyyy, position = 2 (starting at 0), child = xxyyy
"""
# The point that the chromosomes will be crossed at (see Ex. above)
| python | {
"resource": ""
} |
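A sketch matching the docstring's example: cut both parents at the same random point and swap tails, producing two children.
```python
import random

def one_point_crossover_sketch(parents):
    point = random.randint(1, len(parents[0]) - 1)  # cut strictly inside
    return [
        parents[0][:point] + parents[1][point:],
        parents[1][:point] + parents[0][point:],
    ]

print(one_point_crossover_sketch(['xxxxx', 'yyyyy']))  # e.g. ['xxyyy', 'yyxxx']
```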
q6878 | uniform_crossover | train | def uniform_crossover(parents):
"""Perform uniform crossover on two parent chromosomes.
Randomly take genes from one parent or the other.
Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
"""
chromosome_length = len(parents[0])
children = [[], []]
for i in range(chromosome_length):
selected_parent = random.randint(0, 1)
# Take | python | {
"resource": ""
} |
q6879 | random_flip_mutate | train | def random_flip_mutate(population, mutation_chance):
"""Mutate every chromosome in a population, list is modified in place.
Mutation occurs by randomly flipping bits (genes).
"""
for chromosome in | python | {
"resource": ""
} |
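A sketch of the in-place bit-flip mutation described above; each gene flips independently with probability mutation_chance.
```python
import random

def random_flip_mutate_sketch(population, mutation_chance):
    for chromosome in population:
        for i in range(len(chromosome)):
            if random.uniform(0.0, 1.0) <= mutation_chance:
                chromosome[i] = 1 - chromosome[i]   # flip the bit in place

pop = [[0, 1, 1, 0], [1, 0, 1, 0]]
random_flip_mutate_sketch(pop, mutation_chance=0.5)
print(pop)   # same lists, some genes flipped
```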
q6880 | _duplicates | train | def _duplicates(list_):
"""Return dict mapping item -> indices."""
item_indices = {}
for i, item in enumerate(list_):
| python | {
"resource": ""
} |
q6881 | _parse_parameter_locks | train | def _parse_parameter_locks(optimizer, meta_parameters, parameter_locks):
"""Synchronize meta_parameters and locked_values.
The union of these two sets will have all necessary parameters.
locked_values will have the parameters specified in parameter_locks.
"""
# WARNING: meta_parameters is modified inline
locked_values = {}
if parameter_locks:
| python | {
"resource": ""
} |
q6882 | _get_hyperparameter_solution_size | train | def _get_hyperparameter_solution_size(meta_parameters):
"""Determine size of binary encoding of parameters.
Also adds binary size information for each parameter.
"""
# WARNING: meta_parameters is modified inline
solution_size = 0
for _, parameters in meta_parameters.iteritems():
if parameters['type'] == 'discrete':
# Binary encoding of discrete values -> log_2 N
num_values = len(parameters['values'])
binary_size = helpers.binary_size(num_values)
elif parameters['type'] == 'int':
# Use enough bits to cover range from min | python | {
"resource": ""
} |
q6883 | _make_hyperparameter_decode_func | train | def _make_hyperparameter_decode_func(locked_values, meta_parameters):
"""Create a function that converts the binary solution to parameters."""
# Locked parameters are also returned by decode function, but are not
# based on solution
def decode(solution):
"""Convert solution into dict of hyperparameters."""
# Start with our stationary (locked) parameters
hyperparameters = copy.deepcopy(locked_values)
# Obtain moving hyperparameters from binary solution
index = 0
for name, parameters in meta_parameters.iteritems():
# Obtain binary for this hyperparameter
binary_size = parameters['binary_size']
binary = solution[index:index + binary_size]
index += binary_size # Just index to start of next hyperparameter
# Decode binary
if parameters['type'] == 'discrete':
i = helpers.binary_to_int(
binary, upper_bound=len(parameters['values']) - 1)
value = parameters['values'][i]
elif parameters['type'] == 'int':
| python | {
"resource": ""
} |
q6884 | _meta_fitness_func | train | def _meta_fitness_func(parameters,
_optimizer,
_problems,
_master_fitness_dict,
_runs=20):
"""Test a metaheuristic with parameters encoded in solution.
Our goal is to minimize number of evaluation runs until a solution is found,
while maximizing chance of finding solution to the underlying problem
NOTE: while meta optimization requires a 'known' solution, this solution
can be an estimate to provide the meta optimizer with a sense of progress.
"""
# Create the optimizer with parameters encoded in solution
optimizer = copy.deepcopy(_optimizer)
optimizer._set_hyperparameters(parameters)
optimizer.logging = False
# Preload fitness dictionary from master, and disable clearing dict
| python | {
"resource": ""
} |
q6885 | Problem.copy | train | def copy(self,
fitness_function=None,
decode_function=None,
fitness_args=None,
decode_args=None,
fitness_kwargs=None,
decode_kwargs=None):
"""Return a copy of this problem.
Optionally replace this problems arguments with those passed in.
"""
if fitness_function is None:
fitness_function = self._fitness_function
if decode_function is None:
decode_function = self._decode_function
if fitness_args is None:
fitness_args = self._fitness_args
if decode_args is None:
decode_args = | python | {
"resource": ""
} |
q6886 | Problem.get_fitness | train | def get_fitness(self, solution):
"""Return fitness for the given solution."""
return self._fitness_function(solution, | python | {
"resource": ""
} |
q6887 | Problem.decode_solution | train | def decode_solution(self, encoded_solution):
"""Return solution from an encoded representation."""
return | python | {
"resource": ""
} |
q6888 | Optimizer.optimize | train | def optimize(self, problem, max_iterations=100, max_seconds=float('inf'),
cache_encoded=True, cache_solution=False, clear_cache=True,
logging_func=_print_fitnesses,
n_processes=0):
"""Find the optimal inputs for a given fitness function.
Args:
problem: An instance of Problem. The problem to solve.
max_iterations: The number of iterations to optimize before stopping.
max_seconds: Maximum number of seconds to optimize for, before stopping.
Note that this condition is only checked once per iteration,
meaning optimization can take more than max_seconds,
especially if fitnesses take a long time to calculate.
cache_encoded: bool; Whether or not to cache fitness of encoded strings.
Encoded strings are produced directly by the optimizer.
If an encoded string is found in cache, it will not be decoded.
cache_solution: bool; Whether or not to cache fitness of decoded solutions.
Decoded solution is provided by problems decode function.
If problem does not provide a hash solution function,
Various naive hashing methods will be attempted, including:
tuple, tuple(sorted(dict.items)), str.
clear_cache: bool; Whether or not to reset cache after optimization.
Disable if you want to run optimize multiple times on the same problem.
logging_func: func/None; Function taking:
iteration, population, solutions, fitnesses, best_solution, best_fitness
Called after every iteration.
Use for custom logging, or set to None to disable logging.
Note that best_solution and best_fitness are the best of all iterations so far.
n_processes: int; Number of processes to use for multiprocessing.
If <= 0, do not use multiprocessing.
Returns:
object; The best solution, after decoding.
"""
if not isinstance(problem, Problem):
raise TypeError('problem must be an instance of Problem class')
# Prepare pool for multiprocessing
if n_processes > 0:
try:
pool = multiprocessing.Pool(processes=n_processes)
except NameError:
raise ImportError(
'pickle, dill, or multiprocessing library is not available.'
)
else:
pool = None
# Set first, incase optimizer uses _max_iterations in initialization
self.__max_iterations = max_iterations
# Initialize algorithm
self._reset()
best_solution = {'solution': None, 'fitness': None}
population = self.initial_population()
try:
# Begin optimization loop
start = time.clock()
for self.iteration in itertools.count(1): # Infinite sequence of iterations
# Evaluate potential solutions
solutions, fitnesses, finished = self._get_fitnesses(
problem,
population,
cache_encoded=cache_encoded,
cache_solution=cache_solution,
pool=pool)
# If the best fitness from this iteration is better than
# the global best
best_index, best_fitness = max(
enumerate(fitnesses), key=operator.itemgetter(1))
if best_fitness > best_solution['fitness']:
# Store the new best | python | {
"resource": ""
} |
q6889 | Optimizer._reset_bookkeeping | train | def _reset_bookkeeping(self):
"""Reset bookkeeping parameters to initial values.
Call before beginning optimization.
"""
self.iteration = 0
self.fitness_runs = 0
| python | {
"resource": ""
} |
q6890 | Optimizer._get_fitnesses | train | def _get_fitnesses(self,
problem,
population,
cache_encoded=True,
cache_solution=False,
pool=None):
"""Get the fitness for every solution in a population.
Args:
problem: Problem; The problem that defines fitness.
population: list; List of potential solutions.
pool: None/multiprocessing.Pool; Pool of processes for parallel
decoding and evaluation.
"""
fitnesses = [None] * len(population)
#############################
# Decoding
#############################
if cache_encoded:
try:
encoded_keys = map(self._get_encoded_key, population)
# Get all fitnesses from encoded_solution cache
to_decode_indices = []
for i, encoded_key in enumerate(encoded_keys):
try:
fitnesses[i] = self.__encoded_cache[encoded_key]
# Note that this fitness will never be better than the current best
# because we have already evaluated it,
# Therefore, we do not need to worry about decoding the solution
except KeyError: # Cache miss
to_decode_indices.append(i)
except UnhashableError: # Cannot hash encoded solution
encoded_keys = None
to_decode_indices = range(len(population))
else:
encoded_keys = None
to_decode_indices = range(len(population))
# Decode all that need to be decoded, and combine back into list the same length
# as population
if encoded_keys is None:
to_decode_keys = None
else:
to_decode_keys = [encoded_keys[i] for i in to_decode_indices]
solutions = [None] * len(population)
for i, solution in zip(to_decode_indices,
self._pmap(
problem.decode_solution,
[population[i] for i in to_decode_indices],
to_decode_keys,
pool)):
solutions[i] = solution
#############################
# Evaluating
#############################
if cache_solution:
try:
# Try to make solutions hashable
# Use user provided hash function if available
if problem.hash_solution:
hash_solution_func = problem.hash_solution
else:
# Otherwise, default to built in "smart" hash function
hash_solution_func = self._get_solution_key
solution_keys = [
hash_solution_func(solution)
# None corresponds to encoded_solutions found in cache
if solution is not None else None for solution in solutions
]
# Get all fitnesses from solution cache
to_eval_indices = []
for i, solution_key in enumerate(solution_keys):
if solution_key is not None: # Otherwise, fitness already found in encoded cache
try:
fitnesses[i] = self.__solution_cache[solution_key]
except KeyError: # Cache miss
to_eval_indices.append(i)
except UnhashableError: # Cannot hash solution
solution_keys = None
to_eval_indices = to_decode_indices[:]
else: | python | {
"resource": ""
} |
q6891 | Optimizer._pmap | train | def _pmap(self, func, items, keys, pool, bookkeeping_dict=None):
"""Efficiently map func over all items.
Calls func only once for duplicate items.
Item duplicates are detected by corresponding keys,
unless keys is None.
Serial if pool is None, but still skips duplicates.
"""
if keys is not None: # Otherwise, cannot hash items
# Remove duplicates first (use keys)
# Create mapping (dict) of key to list of indices
key_indices = _duplicates(keys).values()
else: # Cannot hash items
# Assume no duplicates
key_indices = [[i] for i in range(len(items))]
# Use only the first of duplicate indices in decoding
if pool is not None:
# Parallel map
results = pool.map(
functools.partial(_unpickle_run, pickle.dumps(func)),
[items[i[0]] for i in key_indices])
else:
results = map(func, [items[i[0]] for i in key_indices])
# | python | {
"resource": ""
} |
q6892 | Optimizer._set_hyperparameters | train | def _set_hyperparameters(self, parameters):
"""Set internal optimization parameters."""
for name, value in parameters.iteritems():
try:
| python | {
"resource": ""
} |
q6893 | Optimizer._get_hyperparameters | train | def _get_hyperparameters(self):
"""Get internal optimization parameters."""
hyperparameters = {}
for key in self._hyperparameters:
| python | {
"resource": ""
} |
q6894 | Optimizer.optimize_hyperparameters | train | def optimize_hyperparameters(self,
problems,
parameter_locks=None,
smoothing=20,
max_iterations=100,
_meta_optimizer=None,
_low_memory=True):
"""Optimize hyperparameters for a given problem.
Args:
parameter_locks: a list of strings, each corresponding to a hyperparameter
that should not be optimized.
problems: Either a single problem, or a list of problem instances,
allowing optimization based on multiple similar problems.
smoothing: int; number of runs to average over for each set of hyperparameters.
max_iterations: The number of iterations to optimize before stopping.
_low_memory: disable performance enhancements to save memory
(they use a lot of memory otherwise).
"""
if smoothing <= 0:
raise ValueError('smoothing must be > 0')
# problems supports either one or many problem instances
if isinstance(problems, collections.Iterable):
for problem in problems:
if not isinstance(problem, Problem):
raise TypeError(
'problem must be Problem instance or list of Problem instances'
)
elif isinstance(problems, Problem):
problems = [problems]
else:
raise TypeError(
'problem must be Problem instance or list of Problem instances'
)
# Copy to avoid permanent modification
meta_parameters = copy.deepcopy(self._hyperparameters)
# First, handle parameter locks, since it will modify our
# meta_parameters dict
locked_values = _parse_parameter_locks(self, meta_parameters,
parameter_locks)
# We need to know the size of our chromosome,
# based on the hyperparameters to optimize
solution_size = _get_hyperparameter_solution_size(meta_parameters)
# We also need to create a decode function to transform the binary solution
# into parameters for the metaheuristic
decode = _make_hyperparameter_decode_func(locked_values,
| python | {
"resource": ""
} |
q6895 | compare | train | def compare(optimizers, problems, runs=20, all_kwargs={}):
"""Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
"""
if not (isinstance(optimizers, collections.Iterable)
or isinstance(problems, collections.Iterable)):
raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
if not isinstance(optimizers, collections.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
if not isinstance(problems, collections.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
elif not isinstance(all_kwargs, collections.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
| python | {
"resource": ""
} |
q6896 | benchmark | train | def benchmark(optimizer, problem, runs=20, **kwargs):
"""Run an optimizer through a problem multiple times.
Args:
optimizer: Optimizer; The optimizer to benchmark.
problem: Problem; The problem to benchmark on.
runs: int > 0; Number of times that optimize is called on problem.
Returns:
dict; A dictionary of various statistics.
"""
stats = {'runs': []}
# Disable logging, to avoid spamming the user
# TODO: Maybe we shouldn't disable by default?
kwargs = copy.copy(kwargs)
kwargs['logging_func'] = None
# Determine effectiveness of metaheuristic over many runs
# The stochastic nature of metaheuristics make this necessary
# for an accurate evaluation
for _ in range(runs):
optimizer.optimize(problem, **kwargs)
# Convert bool to number for mean and standard deviation calculations
| python | {
"resource": ""
} |
q6897 | aggregate | train | def aggregate(all_stats):
"""Combine stats for multiple optimizers to obtain one mean and sd.
Useful for combining stats for the same optimizer class and multiple problems.
Args:
all_stats: dict; output from compare.
"""
aggregate_stats = {'means': [], 'standard_deviations': []}
for optimizer_key in all_stats:
# runs is the mean, for add_mean_sd function
mean_stats = copy.deepcopy(all_stats[optimizer_key]['mean'])
mean_stats['name'] = optimizer_key
| python | {
"resource": ""
} |
q6898 | _mean_of_runs | train | def _mean_of_runs(stats, key='runs'):
"""Obtain the mean of stats.
Args:
stats: dict; A set of stats, structured as above.
key: str; Optional key to determine where list of runs is found in stats
"""
num_runs = len(stats[key])
first = stats[key][0]
mean = {}
for stat_key in first:
| python | {
"resource": ""
} |
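A sketch of the per-key mean the function computes: average each stat across all runs stored under stats[key]. The real implementation may special-case non-numeric stats, which this sketch ignores.
```python
def mean_of_runs_sketch(stats, key='runs'):
    num_runs = len(stats[key])
    return {
        stat_key: sum(run[stat_key] for run in stats[key]) / float(num_runs)
        for stat_key in stats[key][0]
    }

stats = {'runs': [{'fitness': 1.0, 'evaluations': 10},
                  {'fitness': 3.0, 'evaluations': 20}]}
print(mean_of_runs_sketch(stats))   # {'fitness': 2.0, 'evaluations': 15.0}
```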
q6899 | _sd_of_runs | train | def _sd_of_runs(stats, mean, key='runs'):
"""Obtain the standard deviation of stats.
Args:
stats: dict; A set of stats, structured as above.
mean: dict; Mean for each key in stats.
key: str; Optional key to determine where list of runs is found in stats
"""
num_runs = len(stats[key])
first = stats[key][0]
standard_deviation = {}
for stat_key in first: | python | {
"resource": ""
} |