positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def random_quat(rand=None): """Return uniform random unit quaternion. rand: array like or None Three independent random variables that are uniformly distributed between 0 and 1. >>> q = random_quat() >>> np.allclose(1.0, vector_norm(q)) True >>> q = random_quat(np.random.random(3)) >>> q.shape (4,) """ if rand is None: rand = np.random.rand(3) else: assert len(rand) == 3 r1 = np.sqrt(1.0 - rand[0]) r2 = np.sqrt(rand[0]) pi2 = math.pi * 2.0 t1 = pi2 * rand[1] t2 = pi2 * rand[2] return np.array( (np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2, np.cos(t2) * r2), dtype=np.float32, )
Return uniform random unit quaternion. rand: array like or None Three independent random variables that are uniformly distributed between 0 and 1. >>> q = random_quat() >>> np.allclose(1.0, vector_norm(q)) True >>> q = random_quat(np.random.random(3)) >>> q.shape (4,)
def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. 
To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid)
Get the returns for the command line interface via the event system
def prompt_and_select_link(self): """ Prompt the user to select a link from a list to open. Return the link that was selected, or ``None`` if no link was selected. """ data = self.get_selected_item() url_full = data.get('url_full') permalink = data.get('permalink') if url_full and url_full != permalink: # The item is a link-only submission that won't contain text link = url_full else: html = data.get('html') if html: extracted_links = self.content.extract_links(html) if not extracted_links: # Only one selection to choose from, so just pick it link = permalink else: # Let the user decide which link to open links = [] if permalink: links += [{'text': 'Permalink', 'href': permalink}] links += extracted_links link = self.term.prompt_user_to_select_link(links) else: # Some items like hidden comments don't have any HTML to parse link = permalink return link
Prompt the user to select a link from a list to open. Return the link that was selected, or ``None`` if no link was selected.
def check_response(response): """ Checks that a response is successful, raising the appropriate Exceptions otherwise. """ status_code = response.status_code if 100 < status_code < 299: return True elif status_code == 401 or status_code == 403: message = get_response_data(response) raise AuthError('Access Token Error, Received ' + str(status_code) + ' from Outlook REST Endpoint with the message: {}'.format(message)) elif status_code == 400: message = get_response_data(response) raise RequestError('The request made to the Outlook API was invalid. Received the following message: {}'. format(message)) else: message = get_response_data(response) raise APIError('Encountered an unknown error from the Outlook API: {}'.format(message))
Checks that a response is successful, raising the appropriate Exceptions otherwise.
def get_z_variable(nc): ''' Returns the name of the variable that defines the Z axis or height/depth :param netCDF4.Dataset nc: netCDF dataset ''' axis_z = nc.get_variables_by_attributes(axis='Z') if axis_z: return axis_z[0].name valid_standard_names = ('depth', 'height', 'altitude') z = nc.get_variables_by_attributes(standard_name=lambda x: x in valid_standard_names) if z: return z[0].name return
Returns the name of the variable that defines the Z axis or height/depth :param netCDF4.Dataset nc: netCDF dataset
def setGroups(self, groups, kerningGroupConversionRenameMaps=None): """ Copy the groups into our font. """ skipping = [] for name, members in groups.items(): checked = [] for m in members: if m in self.font: checked.append(m) else: skipping.append(m) if checked: self.font.groups[name] = checked if skipping: if self.verbose and self.logger: self.logger.info("\tNote: some glyphnames were removed from groups: %s (unavailable in the font)", ", ".join(skipping)) if kerningGroupConversionRenameMaps: # in case the sources were UFO2, # and defcon upconverted them to UFO3 # and now we have to down convert them again, # we don't want the UFO3 public prefixes in the group names self.font.kerningGroupConversionRenameMaps = kerningGroupConversionRenameMaps
Copy the groups into our font.
def check_ups_output_current(the_session, the_helper, the_snmp_value): """ OID .1.3.6.1.2.1.33.1.4.4.1.3.1 MIB excerpt The present output current. """ a_current = calc_output_current_from_snmpvalue(the_snmp_value) the_helper.add_metric( label=the_helper.options.type, value=a_current, uom='A') the_helper.set_summary("Output Current is {} A".format(a_current))
OID .1.3.6.1.2.1.33.1.4.4.1.3.1 MIB excerpt The present output current.
def get_dataset(self, dataset_id, ds_info): """Read a GRIB message into an xarray DataArray.""" msg = self._get_message(ds_info) ds_info = self.get_metadata(msg, ds_info) fill = msg['missingValue'] data = msg.values.astype(np.float32) if msg.valid_key('jScansPositively') and msg['jScansPositively'] == 1: data = data[::-1] if isinstance(data, np.ma.MaskedArray): data = data.filled(np.nan) data = da.from_array(data, chunks=CHUNK_SIZE) else: data[data == fill] = np.nan data = da.from_array(data, chunks=CHUNK_SIZE) return xr.DataArray(data, attrs=ds_info, dims=('y', 'x'))
Read a GRIB message into an xarray DataArray.
def schedule_next_requests(self): """Schedules a request if available""" # TODO: While there is capacity, schedule a batch of redis requests. for req in self.next_requests(): self.crawler.engine.crawl(req, spider=self)
Schedules a request if available
def drain(self): """ Let the write buffer of the underlying transport a chance to be flushed. """ data = self._stream.getvalue() if len(data): yield from self._protocol.send(data) self._stream = io.BytesIO(b'')
Let the write buffer of the underlying transport a chance to be flushed.
def delete(self): """Deletes a selection if any else deletes the cursor cell Refreshes grid after deletion """ if self.grid.IsSelection(): # Delete selection self.grid.actions.delete_selection() else: # Delete cell at cursor cursor = self.grid.actions.cursor self.grid.actions.delete_cell(cursor) # Update grid self.grid.ForceRefresh()
Deletes a selection if any else deletes the cursor cell Refreshes grid after deletion
def ofp_instruction_from_str(ofproto, action_str): """ Parse an ovs-ofctl style action string and return a list of jsondict representations of OFPInstructionActions, which can then be passed to ofproto_parser.ofp_instruction_from_jsondict. Please note that this is for making transition from ovs-ofctl easier. Please consider using OFPAction constructors when writing new codes. This function takes the following arguments. =========== ================================================= Argument Description =========== ================================================= ofproto An ofproto module. action_str An action string. =========== ================================================= """ action_re = re.compile(r"([a-z_]+)(\([^)]*\)|[^a-z_,()][^,()]*)*") result = [] while len(action_str): m = action_re.match(action_str) if not m: raise ryu.exception.OFPInvalidActionString(action_str=action_str) action_name = m.group(1) this_action = m.group(0) paren_level = this_action.count('(') - this_action.count(')') assert paren_level >= 0 try: # Parens can be nested. Look for as many ')'s as '('s. if paren_level > 0: this_action, rest = _tokenize_paren_block(action_str, m.end(0)) else: rest = action_str[m.end(0):] if len(rest): assert rest[0] == ',' rest = rest[1:] except Exception: raise ryu.exception.OFPInvalidActionString(action_str=action_str) if action_name == 'drop': assert this_action == 'drop' assert len(result) == 0 and rest == '' return [] converter = getattr(OfctlActionConverter, action_name, None) if converter is None or not callable(converter): raise ryu.exception.OFPInvalidActionString(action_str=action_name) result.append(converter(ofproto, this_action)) action_str = rest return result
Parse an ovs-ofctl style action string and return a list of jsondict representations of OFPInstructionActions, which can then be passed to ofproto_parser.ofp_instruction_from_jsondict. Please note that this is for making transition from ovs-ofctl easier. Please consider using OFPAction constructors when writing new codes. This function takes the following arguments. =========== ================================================= Argument Description =========== ================================================= ofproto An ofproto module. action_str An action string. =========== =================================================
def _get_closest_week(self, metric_date): """ Gets the closest monday to the date provided. """ #find the offset to the closest monday days_after_monday = metric_date.isoweekday() - 1 return metric_date - datetime.timedelta(days=days_after_monday)
Gets the closest monday to the date provided.
def AddNodeTags(r, node, tags, dry_run=False): """ Adds tags to a node. @type node: str @param node: node to add tags to @type tags: list of str @param tags: tags to add to the node @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id """ query = { "tag": tags, "dry-run": dry_run, } return r.request("put", "/2/nodes/%s/tags" % node, query=query, content=tags)
Adds tags to a node. @type node: str @param node: node to add tags to @type tags: list of str @param tags: tags to add to the node @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id
def _exit_door(self, _input): """This function is passed to each SelectableObject as a callback The SelectableObjects have to call it once there are ready""" self.results.append(_input) if self._ended: return self._ended = True self._release_all()
This function is passed to each SelectableObject as a callback The SelectableObjects have to call it once there are ready
def parse(self, filePath, skipLines=0, separator = ',', stringSeparator = '"', lineSeparator = '\n') : """Loads a CSV file""" self.filename = filePath f = open(filePath) if lineSeparator == '\n' : lines = f.readlines() else : lines = f.read().split(lineSeparator) f.flush() f.close() lines = lines[skipLines:] self.lines = [] self.comments = [] for l in lines : # print l if len(l) != 0 and l[0] != "#" : self.lines.append(l) elif l[0] == "#" : self.comments.append(l) self.separator = separator self.lineSeparator = lineSeparator self.stringSeparator = stringSeparator self.legend = collections.OrderedDict() i = 0 for c in self.lines[0].lower().replace(stringSeparator, '').split(separator) : legendElement = c.strip() if legendElement not in self.legend : self.legend[legendElement] = i i+=1 self.strLegend = self.lines[0].replace('\r', '\n').replace('\n', '') self.lines = self.lines[1:]
Loads a CSV file
def first_image(self): """Ready-only attribute that provides the value of the first non-none image that's not the thumbnail override field. """ # loop through image fields and grab the first non-none one for model_field in self._meta.fields: if isinstance(model_field, ImageField): if model_field.name is not 'thumbnail_override': field_value = getattr(self, model_field.name) if field_value.id is not None: return field_value # no non-none images, return None return None
Ready-only attribute that provides the value of the first non-none image that's not the thumbnail override field.
def reset_to_default(self): """Reset to default values of the shortcuts making a confirmation.""" reset = QMessageBox.warning(self, _("Shortcuts reset"), _("Do you want to reset " "to default values?"), QMessageBox.Yes | QMessageBox.No) if reset == QMessageBox.No: return reset_shortcuts() self.main.apply_shortcuts() self.table.load_shortcuts() self.load_from_conf() self.set_modified(False)
Reset to default values of the shortcuts making a confirmation.
def should_send(self, request): """Returns whether or not the request should be sent to the modules, based on the filters.""" if self.filters.get('whitelist', None): return request.tree.type in self.filters['whitelist'] elif self.filters.get('blacklist', None): return request.tree.type not in self.filters['blacklist'] else: return True
Returns whether or not the request should be sent to the modules, based on the filters.
def storage_volume_attachments(self): """ Gets the StorageVolumeAttachments API client. Returns: StorageVolumeAttachments: """ if not self.__storage_volume_attachments: self.__storage_volume_attachments = StorageVolumeAttachments(self.__connection) return self.__storage_volume_attachments
Gets the StorageVolumeAttachments API client. Returns: StorageVolumeAttachments:
def file_follow_durable( path, min_dump_interval=10, xattr_name='user.collectd.logtail.pos', xattr_update=True, **follow_kwz ): '''Records log position into xattrs after reading line every min_dump_interval seconds. Checksum of the last line at the position is also recorded (so line itself don't have to fit into xattr) to make sure file wasn't truncated between last xattr dump and re-open.''' from xattr import xattr from io import open from hashlib import sha1 from time import time import struct # Try to restore position src = open(path, mode='rb') src_xattr = xattr(src) try: if not xattr_name: raise KeyError pos = src_xattr[xattr_name] except KeyError: pos = None if pos: data_len = struct.calcsize('=I') (pos,), chksum = struct.unpack('=I', pos[:data_len]), pos[data_len:] (data_len,), chksum = struct.unpack('=I', chksum[:data_len]), chksum[data_len:] try: src.seek(pos - data_len) if sha1(src.read(data_len)).digest() != chksum: raise IOError('Last log line doesnt match checksum') except (OSError, IOError) as err: collectd.info('Failed to restore log position: {}'.format(err)) src.seek(0) tailer = file_follow(src, yield_file=True, **follow_kwz) # ...and keep it updated pos_dump_ts_get = lambda ts=None: (ts or time()) + min_dump_interval pos_dump_ts = pos_dump_ts_get() while True: line, src_chk = next(tailer) if not line: pos_dump_ts = 0 # force-write xattr ts = time() if ts > pos_dump_ts: if src is not src_chk: src, src_xattr = src_chk, xattr(src_chk) pos_new = src.tell() if pos != pos_new: pos = pos_new if xattr_update: src_xattr[xattr_name] =\ struct.pack('=I', pos)\ + struct.pack('=I', len(line))\ + sha1(line).digest() pos_dump_ts = pos_dump_ts_get(ts) if (yield line.decode('utf-8', 'replace')): tailer.send(StopIteration) break
Records log position into xattrs after reading line every min_dump_interval seconds. Checksum of the last line at the position is also recorded (so line itself don't have to fit into xattr) to make sure file wasn't truncated between last xattr dump and re-open.
def _tls_auth_decrypt(self, s): """ Provided with the record header and AEAD-ciphered data, return the sliced and clear tuple (TLSInnerPlaintext, tag). Note that we still return the slicing of the original input in case of decryption failure. Also, if the integrity check fails, a warning will be issued, but we still return the sliced (unauthenticated) plaintext. """ rcs = self.tls_session.rcs read_seq_num = struct.pack("!Q", rcs.seq_num) rcs.seq_num += 1 try: return rcs.cipher.auth_decrypt(b"", s, read_seq_num) except CipherError as e: return e.args except AEADTagError as e: pkt_info = self.firstlayer().summary() log_runtime.info("TLS: record integrity check failed [%s]", pkt_info) # noqa: E501 return e.args
Provided with the record header and AEAD-ciphered data, return the sliced and clear tuple (TLSInnerPlaintext, tag). Note that we still return the slicing of the original input in case of decryption failure. Also, if the integrity check fails, a warning will be issued, but we still return the sliced (unauthenticated) plaintext.
def gte(min_value): """ Validates that a field value is greater than or equal to the value given to this validator. """ def validate(value): if value < min_value: return e("{} is not greater than or equal to {}", value, min_value) return validate
Validates that a field value is greater than or equal to the value given to this validator.
def nested_key_indices(nested_dict): """ Give an ordering to the outer and inner keys used in a dictionary that maps to dictionaries. """ outer_keys, inner_keys = collect_nested_keys(nested_dict) outer_key_indices = {k: i for (i, k) in enumerate(outer_keys)} inner_key_indices = {k: i for (i, k) in enumerate(inner_keys)} return outer_key_indices, inner_key_indices
Give an ordering to the outer and inner keys used in a dictionary that maps to dictionaries.
def read_PIA0_A_control(self, cpu_cycles, op_address, address): """ read from 0xff01 -> PIA 0 A side control register """ value = 0xb3 log.error( "%04x| read $%04x (PIA 0 A side Control reg.) send $%02x (%s) back.\t|%s", op_address, address, value, byte2bit_string(value), self.cfg.mem_info.get_shortest(op_address) ) return value
read from 0xff01 -> PIA 0 A side control register
def driver_name(self): """ Returns the name of the driver that provides this tacho motor device. """ (self._driver_name, value) = self.get_cached_attr_string(self._driver_name, 'driver_name') return value
Returns the name of the driver that provides this tacho motor device.
def renameMenu( self ): """ Prompts the user to supply a new name for the menu. """ item = self.uiMenuTREE.currentItem() name, accepted = QInputDialog.getText( self, 'Rename Menu', 'Name:', QLineEdit.Normal, item.text(0)) if ( accepted ): item.setText(0, name)
Prompts the user to supply a new name for the menu.
def get_response(self): """Generate the response block of this request. Careful: it only sets the fields which can be set from the request """ res = IODWriteRes() for field in ["seqNum", "ARUUID", "API", "slotNumber", "subslotNumber", "index"]: res.setfieldval(field, self.getfieldval(field)) return res
Generate the response block of this request. Careful: it only sets the fields which can be set from the request
def handel_default(self) -> None: """ 处理设置到body上的数据默认 headers """ raw_body = self._body body = cast(Optional[bytes], None) default_type = 2 charset = self._charset or self._default_charset if raw_body is None: pass elif isinstance(raw_body, bytes): # body为bytes default_type = 2 body = raw_body elif isinstance(raw_body, str): # body 为字符串 default_type = 2 body = encode_str(raw_body, charset) elif isinstance(raw_body, (list, dict)): # body 为json default_type = 3 body = encode_str(json.dumps(raw_body, ensure_ascii=False), charset) elif isinstance(raw_body, RawIOBase): # body 为文件 default_type = 1 body = raw_body.read() raw_body.close() if "Content-Length" not in self._headers and \ "Transfer-Encoding" not in self._headers \ or self._headers["Transfer-Encoding"] != "chunked": if self.length is None: if body is not None: self.length = len(body) else: self.length = 0 # 设置默认 Content-Length self.set("Content-Length", str(self.length)) # print(body[0], body[1]) if body is not None and body.startswith(encode_str("<", charset)): default_type = 4 if "Content-Type" not in self._headers.keys(): type_str = self.type if type_str is None: temp = DEFAULT_TYPE.get(default_type) if temp is not None: if default_type != 1: temp += "; charset=%s" % charset type_str = temp if type_str is not None: # 设置默认 Content-Type self.set("Content-Type", type_str) self._body = body
处理设置到body上的数据默认 headers
def begin(self, transaction=None, headers=None, **keyword_headers): """ Begin a transaction. :param str transaction: the identifier for the transaction (optional - if not specified a unique transaction id will be generated) :param dict headers: a map of any additional headers the broker requires :param keyword_headers: any additional headers the broker requires :return: the transaction id :rtype: str """ headers = utils.merge_headers([headers, keyword_headers]) if not transaction: transaction = utils.get_uuid() headers[HDR_TRANSACTION] = transaction self.send_frame(CMD_BEGIN, headers) return transaction
Begin a transaction. :param str transaction: the identifier for the transaction (optional - if not specified a unique transaction id will be generated) :param dict headers: a map of any additional headers the broker requires :param keyword_headers: any additional headers the broker requires :return: the transaction id :rtype: str
def _parse_timestamp(timestamp): """ Parse a given timestamp value, raising ValueError if None or Flasey """ if timestamp: try: return aniso8601.parse_datetime(timestamp) except AttributeError: # raised by aniso8601 if raw_timestamp is not valid string # in ISO8601 format try: return datetime.utcfromtimestamp(timestamp) except: # relax the timestamp a bit in case it was sent in millis return datetime.utcfromtimestamp(timestamp/1000) raise ValueError('Invalid timestamp value! Cannot parse from either ISO8601 string or UTC timestamp.')
Parse a given timestamp value, raising ValueError if None or Flasey
def _get_input_readers(self, state): """Get input readers. Args: state: a MapreduceState model. Returns: A tuple: (a list of input readers, a model._HugeTaskPayload entity). The payload entity contains the json serialized input readers. (None, None) when input reader inplitting returned no data to process. """ serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY % state.key().id_or_name()) serialized_input_readers = model._HugeTaskPayload.get_by_key_name( serialized_input_readers_key, parent=state) # Initialize input readers. input_reader_class = state.mapreduce_spec.mapper.input_reader_class() split_param = state.mapreduce_spec.mapper if issubclass(input_reader_class, map_job.InputReader): split_param = map_job.JobConfig._to_map_job_config( state.mapreduce_spec, os.environ.get("HTTP_X_APPENGINE_QUEUENAME")) if serialized_input_readers is None: readers = input_reader_class.split_input(split_param) else: readers = [input_reader_class.from_json_str(_json) for _json in json.loads(zlib.decompress( serialized_input_readers.payload))] if not readers: return None, None # Update state and spec with actual shard count. state.mapreduce_spec.mapper.shard_count = len(readers) state.active_shards = len(readers) # Prepare to save serialized input readers. if serialized_input_readers is None: # Use mr_state as parent so it can be easily cleaned up later. serialized_input_readers = model._HugeTaskPayload( key_name=serialized_input_readers_key, parent=state) readers_json_str = [i.to_json_str() for i in readers] serialized_input_readers.payload = zlib.compress(json.dumps( readers_json_str)) return readers, serialized_input_readers
Get input readers. Args: state: a MapreduceState model. Returns: A tuple: (a list of input readers, a model._HugeTaskPayload entity). The payload entity contains the json serialized input readers. (None, None) when input reader inplitting returned no data to process.
def toggleCollapseAfter( self ): """ Collapses the splitter after this handle. """ if ( self.isCollapsed() ): self.uncollapse() else: self.collapse( XSplitterHandle.CollapseDirection.After )
Collapses the splitter after this handle.
def next(self): """Next point in iteration """ x, y = next(self.scan) xr = self.sx * x yr = self.sy * y return xr, yr
Next point in iteration
def send_video_note(self, chat_id, data, duration=None, length=None, reply_to_message_id=None, reply_markup=None, disable_notification=None, timeout=None): """ Use this method to send video files, Telegram clients support mp4 videos. :param chat_id: Integer : Unique identifier for the message recipient — User or GroupChat id :param data: InputFile or String : Video note to send. You can either pass a file_id as String to resend a video that is already on the Telegram server :param duration: Integer : Duration of sent video in seconds :param length: Integer : Video width and height, Can't be None and should be in range of (0, 640) :param reply_to_message_id: :param reply_markup: :return: """ return types.Message.de_json( apihelper.send_video_note(self.token, chat_id, data, duration, length, reply_to_message_id, reply_markup, disable_notification, timeout))
Use this method to send video files, Telegram clients support mp4 videos. :param chat_id: Integer : Unique identifier for the message recipient — User or GroupChat id :param data: InputFile or String : Video note to send. You can either pass a file_id as String to resend a video that is already on the Telegram server :param duration: Integer : Duration of sent video in seconds :param length: Integer : Video width and height, Can't be None and should be in range of (0, 640) :param reply_to_message_id: :param reply_markup: :return:
def analyze(output_dir, dataset, cloud=False, project_id=None): """Blocking version of analyze_async. See documentation of analyze_async.""" job = analyze_async( output_dir=output_dir, dataset=dataset, cloud=cloud, project_id=project_id) job.wait() print('Analyze: ' + str(job.state))
Blocking version of analyze_async. See documentation of analyze_async.
def corr_flat_dir(a1, a2): ''' Returns the correlation coefficient between two flattened adjacency matrices. Similarity metric for weighted matrices. Parameters ---------- A1 : NxN np.ndarray directed matrix 1 A2 : NxN np.ndarray directed matrix 2 Returns ------- r : float Correlation coefficient describing edgewise similarity of a1 and a2 ''' n = len(a1) if len(a2) != n: raise BCTParamError("Cannot calculate flattened correlation on " "matrices of different size") ix = np.logical_not(np.eye(n)) return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1]
Returns the correlation coefficient between two flattened adjacency matrices. Similarity metric for weighted matrices. Parameters ---------- A1 : NxN np.ndarray directed matrix 1 A2 : NxN np.ndarray directed matrix 2 Returns ------- r : float Correlation coefficient describing edgewise similarity of a1 and a2
def _do_smart_punctuation(self, text): """Fancifies 'single quotes', "double quotes", and apostrophes. Converts --, ---, and ... into en dashes, em dashes, and ellipses. Inspiration is: <http://daringfireball.net/projects/smartypants/> See "test/tm-cases/smarty_pants.text" for a full discussion of the support here and <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a discussion of some diversion from the original SmartyPants. """ if "'" in text: # guard for perf text = self._do_smart_contractions(text) text = self._opening_single_quote_re.sub("&#8216;", text) text = self._closing_single_quote_re.sub("&#8217;", text) if '"' in text: # guard for perf text = self._opening_double_quote_re.sub("&#8220;", text) text = self._closing_double_quote_re.sub("&#8221;", text) text = text.replace("---", "&#8212;") text = text.replace("--", "&#8211;") text = text.replace("...", "&#8230;") text = text.replace(" . . . ", "&#8230;") text = text.replace(". . .", "&#8230;") return text
Fancifies 'single quotes', "double quotes", and apostrophes. Converts --, ---, and ... into en dashes, em dashes, and ellipses. Inspiration is: <http://daringfireball.net/projects/smartypants/> See "test/tm-cases/smarty_pants.text" for a full discussion of the support here and <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a discussion of some diversion from the original SmartyPants.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_label') and self.document_label is not None: _dict['document_label'] = self.document_label if hasattr(self, 'location') and self.location is not None: _dict['location'] = self.location._to_dict() if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'types') and self.types is not None: _dict['types'] = [x._to_dict() for x in self.types] if hasattr(self, 'categories') and self.categories is not None: _dict['categories'] = [x._to_dict() for x in self.categories] if hasattr(self, 'attributes') and self.attributes is not None: _dict['attributes'] = [x._to_dict() for x in self.attributes] return _dict
Return a json dictionary representing this model.
def _get_model_table(self, part): """ Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement)) """ rows = self.parser.find(part).find_children('tr').list_results() table = [] for row in rows: table.append(self._get_model_row(self.parser.find( row ).find_children('td,th').list_results())) return self._get_valid_model_table(table)
Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
def _get_degree(num_nodes): """Get the degree of the current surface. Args: num_nodes (int): The number of control points for a B |eacute| zier surface. Returns: int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2` equals ``num_nodes``. Raises: ValueError: If ``num_nodes`` isn't a triangular number. """ # 8 * num_nodes = 4(d + 1)(d + 2) # = 4d^2 + 12d + 8 # = (2d + 3)^2 - 1 d_float = 0.5 * (np.sqrt(8.0 * num_nodes + 1.0) - 3.0) d_int = int(np.round(d_float)) if (d_int + 1) * (d_int + 2) == 2 * num_nodes: return d_int else: raise ValueError(num_nodes, "not a triangular number")
Get the degree of the current surface. Args: num_nodes (int): The number of control points for a B |eacute| zier surface. Returns: int: The degree :math:`d` such that :math:`(d + 1)(d + 2)/2` equals ``num_nodes``. Raises: ValueError: If ``num_nodes`` isn't a triangular number.
def get_bond_length(sp1, sp2, bond_order=1): """ Get the bond length between two species. Args: sp1 (Specie): First specie. sp2 (Specie): Second specie. bond_order: For species with different possible bond orders, this allows one to obtain the bond length for a particular bond order. For example, to get the C=C bond length instead of the C-C bond length, this should be set to 2. Defaults to 1. Returns: Bond length in Angstrom. If no data is available, the sum of the atomic radius is used. """ sp1 = Element(sp1) if isinstance(sp1, str) else sp1 sp2 = Element(sp2) if isinstance(sp2, str) else sp2 try: all_lengths = obtain_all_bond_lengths(sp1, sp2) return all_lengths[bond_order] # The ValueError is raised in `obtain_all_bond_lengths` where no bond # data for both elements is found. The KeyError is raised in # `__getitem__` method of `dict` builtin class where although bond data # for both elements is found, the data for specified bond order does # not exist. In both cases, sum of atomic radius is returned. except (ValueError, KeyError): warnings.warn("No order %d bond lengths between %s and %s found in " "database. Returning sum of atomic radius." % (bond_order, sp1, sp2)) return sp1.atomic_radius + sp2.atomic_radius
Get the bond length between two species. Args: sp1 (Specie): First specie. sp2 (Specie): Second specie. bond_order: For species with different possible bond orders, this allows one to obtain the bond length for a particular bond order. For example, to get the C=C bond length instead of the C-C bond length, this should be set to 2. Defaults to 1. Returns: Bond length in Angstrom. If no data is available, the sum of the atomic radius is used.
def _populate(self, soup): """ Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object. """ tables = soup.select('table[rules=all]') if not tables: return trs = tables[0].select('tr')[1:] if len(trs[0]) == 5: # M1 self._populate_small_table(trs) else: # M2 self._populate_large_table(trs)
Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object.
def stacks_2_eqns(self,stacks): """returns equation strings from stacks""" if stacks: return list(map(lambda p: self.stack_2_eqn(p), stacks)) else: return []
returns equation strings from stacks
def space_toolbar(settings_items, empty_space): """ formats the toolbar """ counter = 0 for part in settings_items: counter += len(part) if len(settings_items) == 1: spacing = '' else: spacing = empty_space[ :int(math.floor((len(empty_space) - counter) / (len(settings_items) - 1)))] settings = spacing.join(settings_items) empty_space = empty_space[len(NOTIFICATIONS) + len(settings) + 1:] return settings, empty_space
formats the toolbar
def destroys(self): """ Returns which single registers (including f, flag) this instruction changes. Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r LD a, X => Destroys a LD a, a => Destroys nothing INC a => Destroys a, f POP af => Destroys a, f, sp PUSH af => Destroys sp ret => Destroys SP """ if self.asm in arch.zx48k.backend.ASMS: return ALL_REGS res = set([]) i = self.inst o = self.opers if i in {'push', 'ret', 'call', 'rst', 'reti', 'retn'}: return ['sp'] if i == 'pop': res.update('sp', single_registers(o[:1])) elif i in {'ldi', 'ldir', 'ldd', 'lddr'}: res.update('a', 'b', 'c', 'd', 'e', 'f') elif i in {'otir', 'otdr', 'oti', 'otd', 'inir', 'indr', 'ini', 'ind'}: res.update('h', 'l', 'b') elif i in {'cpir', 'cpi', 'cpdr', 'cpd'}: res.update('h', 'l', 'b', 'c', 'f') elif i in ('ld', 'in'): res.update(single_registers(o[:1])) elif i in ('inc', 'dec'): res.update('f', single_registers(o[:1])) elif i == 'exx': res.update('b', 'c', 'd', 'e', 'h', 'l') elif i == 'ex': res.update(single_registers(o[0])) res.update(single_registers(o[1])) elif i in {'ccf', 'scf', 'bit', 'cp'}: res.add('f') elif i in {'or', 'and', 'xor', 'add', 'adc', 'sub', 'sbc'}: if len(o) > 1: res.update(single_registers(o[0])) else: res.add('a') res.add('f') elif i in {'neg', 'cpl', 'daa', 'rra', 'rla', 'rrca', 'rlca', 'rrd', 'rld'}: res.update('a', 'f') elif i == 'djnz': res.update('b', 'f') elif i in {'rr', 'rl', 'rrc', 'rlc', 'srl', 'sra', 'sll', 'sla'}: res.update(single_registers(o[0])) res.add('f') elif i in ('set', 'res'): res.update(single_registers(o[1])) return list(res)
Returns which single registers (including f, flag) this instruction changes. Registers are: a, b, c, d, e, i, h, l, ixh, ixl, iyh, iyl, r LD a, X => Destroys a LD a, a => Destroys nothing INC a => Destroys a, f POP af => Destroys a, f, sp PUSH af => Destroys sp ret => Destroys SP
def recover_all_handler(self): """ Relink the file handler association you just removed. """ for handler in self._handler_cache: self.logger.addHandler(handler) self._handler_cache = list()
Relink the file handler association you just removed.
def data(self, name, subject_ids=None, visit_ids=None, session_ids=None, **kwargs): """ Returns the Fileset(s) or Field(s) associated with the provided spec name(s), generating derived filesets as required. Multiple names in a list can be provided, to allow their workflows to be combined into a single workflow. Parameters ---------- name : str | List[str] The name of the FilesetSpec|FieldSpec to retried the filesets for subject_id : str | None The subject ID of the data to return. If provided (including None values) the data will be return as a single item instead of a collection visit_id : str | None The visit ID of the data to return. If provided (including None values) the data will be return as a single item instead of a c ollection subject_ids : list[str] The subject IDs to include in the returned collection visit_ids : list[str] The visit IDs to include in the returned collection session_ids : list[str] The session IDs (i.e. 2-tuples of the form (<subject-id>, <visit-id>) to include in the returned collection Returns ------- data : BaseItem | BaseCollection | list[BaseItem | BaseCollection] If 'subject_id' or 'visit_id' is provided then the data returned is a single Fileset or Field. Otherwise a collection of Filesets or Fields are returned. If muliple spec names are provided then a list of items or collections corresponding to each spec name. 
""" if isinstance(name, basestring): single_name = True names = [name] else: names = name single_name = False single_item = 'subject_id' in kwargs or 'visit_id' in kwargs filter_items = (subject_ids, visit_ids, session_ids) != (None, None, None) specs = [self.spec(n) for n in names] if single_item: if filter_items: raise ArcanaUsageError( "Cannot provide 'subject_id' and/or 'visit_id' in " "combination with 'subject_ids', 'visit_ids' or " "'session_ids'") subject_id = kwargs.pop('subject_id', None) visit_id = kwargs.pop('visit_id', None) iterators = set(chain(self.FREQUENCIES[s.frequency] for s in specs)) if subject_id is not None and visit_id is not None: session_ids = [(subject_id, visit_id)] elif subject_id is not None: if self.VISIT_ID in iterators: raise ArcanaUsageError( "Non-None values for visit IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) subject_ids = [subject_id] elif visit_id is not None: if self.SUBJECT_ID in iterators: raise ArcanaUsageError( "Non-None values for subject IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) visit_ids = [visit_id] elif iterators: raise ArcanaUsageError( "Non-None values for subject and/or visit IDs need to be " "provided to select a single item for each of '{}'" .format("', '".join(names))) # Work out which pipelines need to be run pipeline_getters = defaultdict(set) for spec in specs: if spec.derived or spec.derivable: # Filter out Study inputs # Add name of spec to set of required outputs pipeline_getters[spec.pipeline_getter].add(spec.name) # Run required pipelines if pipeline_getters: kwargs = copy(kwargs) kwargs.update({'subject_ids': subject_ids, 'visit_ids': visit_ids, 'session_ids': session_ids}) pipelines, required_outputs = zip(*( (self.pipeline(k), v) for k, v in pipeline_getters.items())) kwargs['required_outputs'] = required_outputs self.processor.run(*pipelines, **kwargs) # Find and return Item/Collection 
corresponding to requested spec # names all_data = [] for name in names: spec = self.bound_spec(name) data = spec.collection if single_item: data = data.item(subject_id=subject_id, visit_id=visit_id) elif filter_items and spec.frequency != 'per_study': if subject_ids is None: subject_ids = [] if visit_ids is None: visit_ids = [] if session_ids is None: session_ids = [] if spec.frequency == 'per_session': data = [d for d in data if (d.subject_id in subject_ids or d.visit_id in visit_ids or d.session_id in session_ids)] elif spec.frequency == 'per_subject': data = [d for d in data if (d.subject_id in subject_ids or d.subject_id in [s[0] for s in session_ids])] elif spec.frequency == 'per_visit': data = [d for d in data if (d.visit_id in visit_ids or d.visit_id in [s[1] for s in session_ids])] if not data: raise ArcanaUsageError( "No matching data found (subject_ids={}, visit_ids={} " ", session_ids={})" .format(subject_ids, visit_ids, session_ids)) data = spec.CollectionClass(spec.name, data) if single_name: return data else: all_data.append(data) return all_data
Returns the Fileset(s) or Field(s) associated with the provided spec name(s), generating derived filesets as required. Multiple names in a list can be provided, to allow their workflows to be combined into a single workflow. Parameters ---------- name : str | List[str] The name of the FilesetSpec|FieldSpec to retried the filesets for subject_id : str | None The subject ID of the data to return. If provided (including None values) the data will be return as a single item instead of a collection visit_id : str | None The visit ID of the data to return. If provided (including None values) the data will be return as a single item instead of a c ollection subject_ids : list[str] The subject IDs to include in the returned collection visit_ids : list[str] The visit IDs to include in the returned collection session_ids : list[str] The session IDs (i.e. 2-tuples of the form (<subject-id>, <visit-id>) to include in the returned collection Returns ------- data : BaseItem | BaseCollection | list[BaseItem | BaseCollection] If 'subject_id' or 'visit_id' is provided then the data returned is a single Fileset or Field. Otherwise a collection of Filesets or Fields are returned. If muliple spec names are provided then a list of items or collections corresponding to each spec name.
def pub_date(soup): """ Return the publishing date in struct format pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp Default date_type is pub """ pub_date = first(raw_parser.pub_date(soup, date_type="pub")) if pub_date is None: pub_date = first(raw_parser.pub_date(soup, date_type="publication")) if pub_date is None: return None (day, month, year) = ymd(pub_date) return date_struct(year, month, day)
Return the publishing date in struct format pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp Default date_type is pub
def generate_by_hash(hashcode): """Generates an PIL image avatar based on the given hash String. Acts as the main accessor to pagan.""" img = Image.new(IMAGE_MODE, IMAGE_SIZE, BACKGROUND_COLOR) if len(hashcode) < 32: print ("hashcode must have lenght >= 32, %s" % hashcode) raise FalseHashError allowed = "0123456789abcdef" hashcheck = [c in allowed for c in hashcode] if False in hashcheck: print ("hashcode has not allowed structure %s" % hashcode) raise FalseHashError pixelmap = setup_pixelmap(hashcode) draw_image(pixelmap, img) return img
Generates an PIL image avatar based on the given hash String. Acts as the main accessor to pagan.
def run_info(template): """ Print information about a specific template. """ template.project_name = 'TowelStuff' # fake project name, always the same name = template_name_from_class_name(template.__class__.__name__) term = TerminalView() term.print_info("Content of template {} with an example project " \ "named 'TowelStuff':".format(term.text_in_color(name, TERM_GREEN))) dir_name = None for file_info in sorted(template.files(), key=lambda dir: dir[0]): directory = file_name = template_name = '' if file_info[0]: directory = file_info[0] if file_info[1]: file_name = file_info[1] if file_info[2]: template_name = '\t\t - ' + file_info[2] if (directory != dir_name): term.print_info('\n\t' + term.text_in_color(directory + '/', TERM_PINK)) dir_name = directory term.print_info('\t\t' + term.text_in_color(file_name, TERM_YELLOW) + template_name) # print substitutions try: subs = template.substitutes().keys() if len(subs) > 0: subs.sort() term.print_info("\nSubstitutions of this template are: ") max_len = 0 for key in subs: if max_len < len(key): max_len = len(key) for key in subs: term.print_info(u"\t{0:{1}} -> {2}". format(key, max_len, template.substitutes()[key])) except AttributeError: pass
Print information about a specific template.
def get_custom_implementations(self): """Retrieve a list of cutom implementations. Yields: (str, str, ImplementationProperty) tuples: The name of the attribute an implementation lives at, the name of the related transition, and the related implementation. """ for trname in self.custom_implems: attr = self.transitions_at[trname] implem = self.implementations[trname] yield (trname, attr, implem)
Retrieve a list of cutom implementations. Yields: (str, str, ImplementationProperty) tuples: The name of the attribute an implementation lives at, the name of the related transition, and the related implementation.
def from_file(cls, f): """ Constructs a :class:`~mwxml.iteration.dump.Dump` from a `file` pointer. :Parameters: f : `file` A plain text file pointer containing XML to process """ element = ElementIterator.from_file(f) assert element.tag == "mediawiki" return cls.from_element(element)
Constructs a :class:`~mwxml.iteration.dump.Dump` from a `file` pointer. :Parameters: f : `file` A plain text file pointer containing XML to process
def _check_avail(cmd): ''' Check to see if the given command can be run ''' if isinstance(cmd, list): cmd = ' '.join([six.text_type(x) if not isinstance(x, six.string_types) else x for x in cmd]) bret = True wret = False if __salt__['config.get']('cmd_blacklist_glob'): blist = __salt__['config.get']('cmd_blacklist_glob', []) for comp in blist: if fnmatch.fnmatch(cmd, comp): # BAD! you are blacklisted bret = False if __salt__['config.get']('cmd_whitelist_glob', []): blist = __salt__['config.get']('cmd_whitelist_glob', []) for comp in blist: if fnmatch.fnmatch(cmd, comp): # GOOD! You are whitelisted wret = True break else: # If no whitelist set then alls good! wret = True return bret and wret
Check to see if the given command can be run
def wr_py_nts(fout_py, nts, docstring=None, varname="nts"): """Save namedtuples into a Python module.""" if nts: with open(fout_py, 'w') as prt: prt.write('"""{DOCSTRING}"""\n\n'.format(DOCSTRING=docstring)) prt.write("# Created: {DATE}\n".format(DATE=str(datetime.date.today()))) prt_nts(prt, nts, varname) sys.stdout.write(" {N:7,} items WROTE: {PY}\n".format(N=len(nts), PY=fout_py))
Save namedtuples into a Python module.
def main(): """ How to control a DMX light through an Anyma USB controller """ # Channel value list for channels 1-512 cv = [0 for v in range(0, 512)] # Create an instance of the DMX controller and open it print("Opening DMX controller...") dev = pyudmx.uDMXDevice() # This will automagically find a single Anyma-type USB DMX controller dev.open() # For informational purpose, display what we know about the DMX controller print(dev.Device) # Send messages to the light changing it to red, then green, then blue # This is the "hard way" to do it, but illustrates how it's done print("Setting to red...") cv[0] = 255 # red cv[6] = 128 # dimmer to half value sent = dev.send_multi_value(1, cv) print("Set to red") sleep(3.0) print("Setting to green...") cv[0] = 0 # red cv[1] = 255 # green cv[6] = 128 # dimmer to half value sent = dev.send_multi_value(1, cv) print("Set to green") sleep(3.0) print("Setting to blue...") cv[0] = 0 # red cv[1] = 0 # green cv[2] = 255 # blue cv[6] = 128 # dimmer to half value sent = dev.send_multi_value(1, cv) print("Set to blue") sleep(3.0) # Here's an easier way to do it print("And, again the easier way") send_rgb(dev, 255, 0, 0, 128) sleep(3.0) send_rgb(dev, 0, 255, 0, 128) sleep(3.0) send_rgb(dev, 0, 0, 255, 128) sleep(3.0) print("Reset all channels and close..") # Turns the light off cv = [0 for v in range(0, 512)] dev.send_multi_value(1, cv) dev.close()
How to control a DMX light through an Anyma USB controller
def reverse_ip_whois(self, query=None, ip=None, country=None, server=None, include_total_count=False, page=1, **kwargs): """Pass in an IP address or a list of free text query terms.""" if (ip and query) or not (ip or query): raise ValueError('Query or IP Address (but not both) must be defined') return self._results('reverse-ip-whois', '/v1/reverse-ip-whois', query=query, ip=ip, country=country, server=server, include_total_count=include_total_count, page=page, items_path=('records', ), **kwargs)
Pass in an IP address or a list of free text query terms.
def get_mac(self): ''' Obtain the device's mac address. ''' ifreq = struct.pack('16sH14s', self.name, AF_UNIX, b'\x00'*14) res = fcntl.ioctl(sockfd, SIOCGIFHWADDR, ifreq) address = struct.unpack('16sH14s', res)[2] mac = struct.unpack('6B8x', address) return ":".join(['%02X' % i for i in mac])
Obtain the device's mac address.
def generate_base(path: str) -> str: """ Convert path, which can be a URL or a file path into a base URI :param path: file location or url :return: file location or url sans actual name """ if ':' in path: parts = urlparse(path) parts_dict = parts._asdict() parts_dict['path'] = os.path.split(parts.path)[0] if '/' in parts.path else '' return urlunparse(ParseResult(**parts_dict)) + '/' else: return (os.path.split(path)[0] if '/' in path else '') + '/'
Convert path, which can be a URL or a file path into a base URI :param path: file location or url :return: file location or url sans actual name
def _conversion_function(pt_wrapper, dtype=None, name=None, as_ref=False): """Allows PrettyTensors and Loss to work as a tensor.""" # Ignore as_ref to not create backward compatibility issues. _ = name, as_ref t = pt_wrapper.tensor if dtype and not dtype.is_compatible_with(t.dtype): raise ValueError( 'Tensor conversion requested dtype %s for Tensor with dtype %s: %r' % (dtype, t.dtype, t)) return t
Allows PrettyTensors and Loss to work as a tensor.
def _text2bool(val): """ Converts strings to True/False depending on the 'truth' expressed by the string. If the string can't be converted, the original value will be returned. See '__true_strings' and '__false_strings' for values considered 'true' or 'false respectively. This is usable as 'converter' for SCons' Variables. """ lval = val.lower() if lval in __true_strings: return True if lval in __false_strings: return False raise ValueError("Invalid value for boolean option: %s" % val)
Converts strings to True/False depending on the 'truth' expressed by the string. If the string can't be converted, the original value will be returned. See '__true_strings' and '__false_strings' for values considered 'true' or 'false respectively. This is usable as 'converter' for SCons' Variables.
def gaussian_filter(data, sigma): """ Drop-in replacement for scipy.ndimage.gaussian_filter. (note: results are only approximately equal to the output of gaussian_filter) """ if np.isscalar(sigma): sigma = (sigma,) * data.ndim baseline = data.mean() filtered = data - baseline for ax in range(data.ndim): s = float(sigma[ax]) if s == 0: continue # generate 1D gaussian kernel ksize = int(s * 6) x = np.arange(-ksize, ksize) kernel = np.exp(-x**2 / (2*s**2)) kshape = [1, ] * data.ndim kshape[ax] = len(kernel) kernel = kernel.reshape(kshape) # convolve as product of FFTs shape = data.shape[ax] + ksize scale = 1.0 / (abs(s) * (2*np.pi)**0.5) filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * np.fft.rfft(kernel, shape, axis=ax), axis=ax) # clip off extra data sl = [slice(None)] * data.ndim sl[ax] = slice(filtered.shape[ax]-data.shape[ax], None, None) filtered = filtered[sl] return filtered + baseline
Drop-in replacement for scipy.ndimage.gaussian_filter. (note: results are only approximately equal to the output of gaussian_filter)
def get_tag_cloud(context, steps=6, min_count=None, template='zinnia/tags/tag_cloud.html'): """ Return a cloud of published tags. """ tags = Tag.objects.usage_for_queryset( Entry.published.all(), counts=True, min_count=min_count) return {'template': template, 'tags': calculate_cloud(tags, steps), 'context_tag': context.get('tag')}
Return a cloud of published tags.
def mass_3d(self, r, rho0, gamma): """ mass enclosed a 3d sphere or radius r :param r: :param a: :param s: :return: """ mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3) return mass_3d
mass enclosed a 3d sphere or radius r :param r: :param a: :param s: :return:
def find_n50(self): """ Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig """ for sample in self.metadata: # Initialise the N50 attribute in case there is no assembly, and the attribute is not created in the loop sample[self.analysistype].n50 = '-' # Initialise a variable to store a running total of contig lengths currentlength = 0 for contig_length in sample[self.analysistype].contig_lengths: # Increment the current length with the length of the current contig currentlength += contig_length # If the current length is now greater than the total genome / 2, the current contig length is the N50 if currentlength >= sample[self.analysistype].genome_length * 0.5: # Populate the dictionary, and break the loop sample[self.analysistype].n50 = contig_length break
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig
def handle_exception(self, exception): """ Handle a unspecified exception and return the correct method that should be used for handling it. If the exception has the `can_redirect` property set to False, it is rendered to the browser. Otherwise, it will be redirected to the location provided in the `RedirectUri` object that is associated with the request. """ can_redirect = getattr(exception, "can_redirect", True) redirect_uri = getattr(self, "redirect_uri", None) if can_redirect and redirect_uri: return self.redirect_exception(exception) else: return self.render_exception(exception)
Handle a unspecified exception and return the correct method that should be used for handling it. If the exception has the `can_redirect` property set to False, it is rendered to the browser. Otherwise, it will be redirected to the location provided in the `RedirectUri` object that is associated with the request.
def write_branch_data(self, file): """ Writes branch data to an Excel spreadsheet. """ branch_sheet = self.book.add_sheet("Branches") for i, branch in enumerate(self.case.branches): for j, attr in enumerate(BRANCH_ATTRS): branch_sheet.write(i, j, getattr(branch, attr))
Writes branch data to an Excel spreadsheet.
def _collate_results(self, field=None): """For a list of objects associated with a classification result, return the results as a DataFrame and dict of taxa info. Parameters ---------- field : {'readcount_w_children', 'readcount', 'abundance'} Which field to use for the abundance/count of a particular taxon in a sample. - 'readcount_w_children': total reads of this taxon and all its descendants - 'readcount': total reads of this taxon - 'abundance': genome size-normalized relative abundances, from shotgun sequencing Returns ------- None, but stores a result in self._cached. """ import pandas as pd field = field if field else self._kwargs["field"] if field not in ("auto", "abundance", "readcount", "readcount_w_children"): raise OneCodexException("Specified field ({}) not valid.".format(field)) # we'll fill these dicts that eventually turn into DataFrames df = {"classification_id": [c.id for c in self._classifications]} tax_info = {"tax_id": [], "name": [], "rank": [], "parent_tax_id": []} if field == "auto": field = "readcount_w_children" self._cached["field"] = field for c_idx, c in enumerate(self._classifications): # pulling results from mainline is the slowest part of the function result = c.results()["table"] # d contains info about a taxon in result, including name, id, counts, rank, etc. for d in result: d_tax_id = d["tax_id"] if d_tax_id not in tax_info["tax_id"]: for k in ("tax_id", "name", "rank", "parent_tax_id"): tax_info[k].append(d[k]) # first time we've seen this taxon, so make a vector for it df[d_tax_id] = [0] * len(self._classifications) df[d_tax_id][c_idx] = d[field] # format as a Pandas DataFrame df = pd.DataFrame(df).set_index("classification_id").fillna(0) df.columns.name = "tax_id" tax_info = pd.DataFrame(tax_info).set_index("tax_id") self._cached["results"] = df self._cached["taxonomy"] = tax_info
For a list of objects associated with a classification result, return the results as a DataFrame and dict of taxa info. Parameters ---------- field : {'readcount_w_children', 'readcount', 'abundance'} Which field to use for the abundance/count of a particular taxon in a sample. - 'readcount_w_children': total reads of this taxon and all its descendants - 'readcount': total reads of this taxon - 'abundance': genome size-normalized relative abundances, from shotgun sequencing Returns ------- None, but stores a result in self._cached.
def addcomment(self, creditmemo_increment_id, comment, email=True, include_in_email=False): """ Add new comment to credit memo :param creditmemo_increment_id: Credit memo increment ID :return: bool """ return bool( self.call( 'sales_order_creditmemo.addComment', [creditmemo_increment_id, comment, email, include_in_email] ) )
Add new comment to credit memo :param creditmemo_increment_id: Credit memo increment ID :return: bool
def load_corpus(self, path, config): '''Load a dialogue corpus; eventually, support pickles and potentially other formats''' # use the default dataset if no path is provided # TODO -- change this to use a pre-saved dataset if path == '': path = self.default_path_to_corpus self.data = Corpus(path=path, config=self.data_config)
Load a dialogue corpus; eventually, support pickles and potentially other formats
def predict(self, X): """Rank samples according to survival times Lower ranks indicate shorter survival, higher ranks longer survival. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted ranks. """ kernel_mat = self._get_kernel(X, self.fit_X_) val = numpy.dot(kernel_mat, self.coef_) if hasattr(self, "intercept_"): val += self.intercept_ # Order by increasing survival time if objective is pure ranking if self.rank_ratio == 1: val *= -1 else: # model was fitted on log(time), transform to original scale val = numpy.exp(val) return val
Rank samples according to survival times Lower ranks indicate shorter survival, higher ranks longer survival. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted ranks.
def _get_reference(self): """ Sets up necessary reference for robots, grippers, and objects. """ super()._get_reference() # indices for joints in qpos, qvel self.robot_joints = list(self.mujoco_robot.joints) self._ref_joint_pos_indexes = [ self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints ] self._ref_joint_vel_indexes = [ self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints ] if self.use_indicator_object: ind_qpos = self.sim.model.get_joint_qpos_addr("pos_indicator") self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos ind_qvel = self.sim.model.get_joint_qvel_addr("pos_indicator") self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel self.indicator_id = self.sim.model.body_name2id("pos_indicator") # indices for grippers in qpos, qvel if self.has_gripper: self.gripper_joints = list(self.gripper.joints) self._ref_gripper_joint_pos_indexes = [ self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints ] self._ref_gripper_joint_vel_indexes = [ self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints ] # indices for joint pos actuation, joint vel actuation, gripper actuation self._ref_joint_pos_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("pos") ] self._ref_joint_vel_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("vel") ] if self.has_gripper: self._ref_joint_gripper_actuator_indexes = [ self.sim.model.actuator_name2id(actuator) for actuator in self.sim.model.actuator_names if actuator.startswith("gripper") ] # IDs of sites for gripper visualization self.eef_site_id = self.sim.model.site_name2id("grip_site") self.eef_cylinder_id = self.sim.model.site_name2id("grip_site_cylinder")
Sets up necessary reference for robots, grippers, and objects.
def attach_image(field, nested_fields, page, record_keeper=None): ''' Returns a function that attaches an image to page if it exists Currenlty assumes that images have already been imported and info has been stored in record_keeper ''' if (field in nested_fields) and nested_fields[field]: foreign_image_id = nested_fields[field]["id"] # Handle the following # record keeper may not exist # record keeper may not have image ref if record_keeper: try: local_image_id = record_keeper.get_local_image( foreign_image_id) local_image = Image.objects.get(id=local_image_id) setattr(page, field, local_image) except ObjectDoesNotExist: raise ObjectDoesNotExist( ("executing attach_image: local image referenced" "in record_keeper does not actually exist."), None) except Exception: raise else: raise Exception( ("Attempted to attach image without record_keeper. " "This functionality is not yet implemented"))
Returns a function that attaches an image to page if it exists Currenlty assumes that images have already been imported and info has been stored in record_keeper
def plot_frequency_recency_matrix( model, T=1, max_frequency=None, max_recency=None, title=None, xlabel="Customer's Historical Frequency", ylabel="Customer's Recency", **kwargs ): """ Plot recency frequecy matrix as heatmap. Plot a figure of expected transactions in T next units of time by a customer's frequency and recency. Parameters ---------- model: lifetimes model A fitted lifetimes model. T: fload, optional Next units of time to make predictions for max_frequency: int, optional The maximum frequency to plot. Default is max observed frequency. max_recency: int, optional The maximum recency to plot. This also determines the age of the customer. Default to max observed age. title: str, optional Figure title xlabel: str, optional Figure xlabel ylabel: str, optional Figure ylabel kwargs Passed into the matplotlib.imshow command. Returns ------- axes: matplotlib.AxesSubplot """ from matplotlib import pyplot as plt if max_frequency is None: max_frequency = int(model.data["frequency"].max()) if max_recency is None: max_recency = int(model.data["T"].max()) Z = np.zeros((max_recency + 1, max_frequency + 1)) for i, recency in enumerate(np.arange(max_recency + 1)): for j, frequency in enumerate(np.arange(max_frequency + 1)): Z[i, j] = model.conditional_expected_number_of_purchases_up_to_time(T, frequency, recency, max_recency) interpolation = kwargs.pop("interpolation", "none") ax = plt.subplot(111) pcm = ax.imshow(Z, interpolation=interpolation, **kwargs) plt.xlabel(xlabel) plt.ylabel(ylabel) if title is None: title = ( "Expected Number of Future Purchases for {} Unit{} of Time,".format(T, "s"[T == 1 :]) + "\nby Frequency and Recency of a Customer" ) plt.title(title) # turn matrix into square forceAspect(ax) # plot colorbar beside matrix plt.colorbar(pcm, ax=ax) return ax
Plot recency frequecy matrix as heatmap. Plot a figure of expected transactions in T next units of time by a customer's frequency and recency. Parameters ---------- model: lifetimes model A fitted lifetimes model. T: fload, optional Next units of time to make predictions for max_frequency: int, optional The maximum frequency to plot. Default is max observed frequency. max_recency: int, optional The maximum recency to plot. This also determines the age of the customer. Default to max observed age. title: str, optional Figure title xlabel: str, optional Figure xlabel ylabel: str, optional Figure ylabel kwargs Passed into the matplotlib.imshow command. Returns ------- axes: matplotlib.AxesSubplot
def add_qtl_to_map(qtlfile, mapfile, outputfile='map_with_qtls.csv'): """ This function adds to a genetic map for each marker the number of significant QTLs found. :arg qtlfile, the output from MapQTL transformed to a csv file via 'parse_mapqtl_file' which contains the closest markers. :arg mapfile, the genetic map with all the markers. :kwarg outputfile, the name of the output file in which the map will be written. """ qtl_list = read_input_file(qtlfile, ',') map_list = read_input_file(mapfile, ',') map_list[0].append('# QTLs') markers = [] markers.append(map_list[0]) qtl_cnt = 0 for marker in map_list[1:]: markers.append(add_qtl_to_marker(marker, qtl_list[1:])) qtl_cnt = qtl_cnt + int(markers[-1][-1]) LOG.info('- %s markers processed in %s' % (len(markers), mapfile)) LOG.info('- %s QTLs located in the map: %s' % (qtl_cnt, outputfile)) write_matrix(outputfile, markers)
This function adds to a genetic map for each marker the number of significant QTLs found. :arg qtlfile, the output from MapQTL transformed to a csv file via 'parse_mapqtl_file' which contains the closest markers. :arg mapfile, the genetic map with all the markers. :kwarg outputfile, the name of the output file in which the map will be written.
def download(self, bundle_uuid, replica, version="", download_dir="", metadata_files=('*',), data_files=('*',), num_retries=10, min_delay_seconds=0.25): """ Download a bundle and save it to the local filesystem as a directory. :param str bundle_uuid: The uuid of the bundle to download :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp] :param str version: The version to download, else if not specified, download the latest. The version is a timestamp of bundle creation in RFC3339 :param str dest_name: The destination file path for the download :param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded. :param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The file will be downloaded only if a data file matches any of the patterns in `data_files` will it be downloaded. :param int num_retries: The initial quota of download failures to accept before exiting due to failures. The number of retries increase and decrease as file chucks succeed and fail. :param float min_delay_seconds: The minimum number of seconds to wait in between retries. Download a bundle and save it to the local filesystem as a directory. By default, all data and metadata files are downloaded. To disable the downloading of data files, use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically). Likewise for metadata files. If a retryable exception occurs, we wait a bit and retry again. 
The delay increases each time we fail and decreases each time we successfully read a block. We set a quota for the number of failures that goes up with every successful block read and down with each failure. """ errors = 0 with concurrent.futures.ThreadPoolExecutor(self.threads) as executor: futures_to_dss_file = {executor.submit(task): dss_file for dss_file, task in self._download_tasks(bundle_uuid, replica, version, download_dir, metadata_files, data_files, num_retries, min_delay_seconds)} for future in concurrent.futures.as_completed(futures_to_dss_file): dss_file = futures_to_dss_file[future] try: future.result() except Exception as e: errors += 1 logger.warning('Failed to download file %s version %s from replica %s', dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e) if errors: raise RuntimeError('{} file(s) failed to download'.format(errors))
Download a bundle and save it to the local filesystem as a directory. :param str bundle_uuid: The uuid of the bundle to download :param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp] :param str version: The version to download, else if not specified, download the latest. The version is a timestamp of bundle creation in RFC3339 :param str dest_name: The destination file path for the download :param iterable metadata_files: one or more shell patterns against which all metadata files in the bundle will be matched case-sensitively. A file is considered a metadata file if the `indexed` property in the manifest is set. If and only if a metadata file matches any of the patterns in `metadata_files` will it be downloaded. :param iterable data_files: one or more shell patterns against which all data files in the bundle will be matched case-sensitively. A file is considered a data file if the `indexed` property in the manifest is not set. The file will be downloaded only if a data file matches any of the patterns in `data_files` will it be downloaded. :param int num_retries: The initial quota of download failures to accept before exiting due to failures. The number of retries increase and decrease as file chucks succeed and fail. :param float min_delay_seconds: The minimum number of seconds to wait in between retries. Download a bundle and save it to the local filesystem as a directory. By default, all data and metadata files are downloaded. To disable the downloading of data files, use `--data-files ''` if using the CLI (or `data_files=()` if invoking `download` programmatically). Likewise for metadata files. If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and decreases each time we successfully read a block. 
We set a quota for the number of failures that goes up with every successful block read and down with each failure.
def check_y(y, link, dist, min_samples=1, verbose=True): """ tool to ensure that the targets: - are in the domain of the link function - are numerical - have at least min_samples - is finite Parameters ---------- y : array-like link : Link object dist : Distribution object min_samples : int, default: 1 verbose : bool, default: True whether to print warnings Returns ------- y : array containing validated y-data """ y = np.ravel(y) y = check_array(y, force_2d=False, min_samples=min_samples, ndim=1, name='y data', verbose=verbose) with warnings.catch_warnings(): warnings.simplefilter("ignore") if np.any(np.isnan(link.link(y, dist))): raise ValueError('y data is not in domain of {} link function. ' \ 'Expected domain: {}, but found {}' \ .format(link, get_link_domain(link, dist), [float('%.2f'%np.min(y)), float('%.2f'%np.max(y))])) return y
tool to ensure that the targets: - are in the domain of the link function - are numerical - have at least min_samples - is finite Parameters ---------- y : array-like link : Link object dist : Distribution object min_samples : int, default: 1 verbose : bool, default: True whether to print warnings Returns ------- y : array containing validated y-data
def facade(factory): """Declare a method as a facade factory.""" wrapper = FacadeDescriptor(factory.__name__, factory) return update_wrapper(wrapper, factory)
Declare a method as a facade factory.
def question_image_filepath(instance, filename): """ Function DocString """ return '/'.join(['images', str(instance.question_level), str(instance.question_level_id), binascii.b2a_hex(os.urandom(15)), filename])
Function DocString
def __repair_unconnected_nodes(self): """ Adds a (``dominance_relation``) edge from the sentence root node to all previously unconnected nodes (token nodes, that either represent a punctuation mark or are part of a headline 'sentence' that has no full syntax structure annotation). """ unconnected_node_ids = get_unconnected_nodes(self) if dg.istoken(self, self.root): # This sentence has no hierarchical structure, i.e. the root # node is also a terminal / token node. # We will add a virtual root node to compensate for this. self.root = self.ns+':VROOT' self.add_node(self.root, layers={'tiger', 'tiger:syntax', 'tiger:sentence', 'tiger:sentence:root'}) for unconnected_node_id in unconnected_node_ids: self.add_edge(self.root, unconnected_node_id, layers={self.ns, self.ns+':sentence', self.ns+':unconnected'}, edge_type=EdgeTypes.dominance_relation)
Adds a (``dominance_relation``) edge from the sentence root node to all previously unconnected nodes (token nodes, that either represent a punctuation mark or are part of a headline 'sentence' that has no full syntax structure annotation).
def from_raw_message(cls, rawmessage): """Create a message from a raw byte stream.""" if (rawmessage[5] & MESSAGE_FLAG_EXTENDED_0X10) == MESSAGE_FLAG_EXTENDED_0X10: if len(rawmessage) >= ExtendedSend.receivedSize: msg = ExtendedSend.from_raw_message(rawmessage) else: msg = None else: msg = StandardSend(rawmessage[2:5], {'cmd1': rawmessage[6], 'cmd2': rawmessage[7]}, flags=rawmessage[5], acknak=rawmessage[8:9]) return msg
Create a message from a raw byte stream.
def is_distributed(partition_column, lower_bound, upper_bound): """ Check if is possible distribute a query given that args Args: partition_column: column used to share the data between the workers lower_bound: the minimum value to be requested from the partition_column upper_bound: the maximum value to be requested from the partition_column Returns: True for distributed or False if not """ if ( (partition_column is not None) and (lower_bound is not None) and (upper_bound is not None) ): if upper_bound > lower_bound: return True else: raise InvalidArguments("upper_bound must be greater than lower_bound.") elif (partition_column is None) and (lower_bound is None) and (upper_bound is None): return False else: raise InvalidArguments( "Invalid combination of partition_column, lower_bound, upper_bound." "All these arguments should be passed (distributed) or none of them (standard pandas)." )
Check if is possible distribute a query given that args Args: partition_column: column used to share the data between the workers lower_bound: the minimum value to be requested from the partition_column upper_bound: the maximum value to be requested from the partition_column Returns: True for distributed or False if not
def vcf_writer(parser, keep, extract, args): """Writes the data in VCF format.""" # The output output = sys.stdout if args.output == "-" else open(args.output, "w") try: # Getting the samples samples = np.array(parser.get_samples(), dtype=str) k = _get_sample_select(samples=samples, keep=keep) # Writing the VCF header output.write(_VCF_HEADER.format( date=datetime.today().strftime("%Y%m%d"), version=__version__, samples="\t".join(samples[k]), )) # The data generator generator = _get_generator(parser=parser, extract=extract, keep=k, check_maf=args.maf) # The number of markers extracted nb_extracted = 0 for data in generator: # Keeping only the required genotypes genotypes = data.genotypes # Computing the alternative allele frequency af = np.nanmean(genotypes) / 2 print(data.variant.chrom, data.variant.pos, data.variant.name, data.reference, data.coded, ".", "PASS", "AF={}".format(af), "GT:DS", sep="\t", end="", file=output) for geno in genotypes: if np.isnan(geno): output.write("\t./.:.") else: rounded_geno = int(round(geno, 0)) output.write("\t{}:{}".format( _VCF_GT_MAP[rounded_geno], geno, )) output.write("\n") nb_extracted += 1 if nb_extracted == 0: logger.warning("No markers matched the extract list") finally: output.close()
Writes the data in VCF format.
def network_sampling(n, filename, directory=None, snowball=False, user=None): """ Selects a few users and exports a CSV of indicators for them. TODO: Returns the network/graph between the selected users. Parameters ---------- n : int Number of users to select. filename : string File to export to. directory: string Directory to select users from if using the default random selection. snowball: starts from a specified user, iterates over neighbors, and does a BFS until n neighbors are reached """ if snowball: if user is None: raise ValueError("Must specify a starting user from whom to initiate the snowball") else: users, agenda = [user], [user] while len(agenda) > 0: parent = agenda.pop() dealphebetized_network = sorted(parent.network.items(), key=lambda k: random.random()) for neighbor in dealphebetized_network: if neighbor[1] not in users and neighbor[1] is not None and len(users) < n: users.append(neighbor[1]) if neighbor[1].network: agenda.push(neighbor[1]) else: files = [x for x in os.listdir(directory) if os.path.isfile(os.path.join(directory, x))] shuffled_files = sorted(files, key=lambda k: random.random()) user_names = shuffled_files[:n] users = [bc.read_csv(u[:-4], directory) for u in user_names] if len(users) < n: raise ValueError("Specified more users than records that exist, only {} records available".format(len(users))) bc.to_csv([bc.utils.all(u) for u in users], filename)
Selects a few users and exports a CSV of indicators for them. TODO: Returns the network/graph between the selected users. Parameters ---------- n : int Number of users to select. filename : string File to export to. directory: string Directory to select users from if using the default random selection. snowball: starts from a specified user, iterates over neighbors, and does a BFS until n neighbors are reached
def _deleteCompletedMeasurement(self, measurementId): """ Deletes the named measurement from the completed measurement store if it exists. :param measurementId: :return: String: error messages Integer: count of measurements deleted """ message, count, deleted = self.deleteFrom(measurementId, self.completeMeasurements) if count is 0: message, count, deleted = self.deleteFrom(measurementId, self.failedMeasurements) return message, count, deleted
Deletes the named measurement from the completed measurement store if it exists. :param measurementId: :return: String: error messages Integer: count of measurements deleted
def _publish_metrics(self, prev_keys, key, data, publishfn=None): """Recursively publish keys""" if key not in data: return value = data[key] keys = prev_keys + [key] if not publishfn: publishfn = self.publish if isinstance(value, dict): for new_key in value: self._publish_metrics(keys, new_key, value) elif isinstance(value, int) or isinstance(value, float): publishfn('.'.join(keys), value) elif isinstance(value, long): publishfn('.'.join(keys), float(value))
Recursively publish keys
def all_groupings(partition): """Return all possible groupings of states for a particular coarse graining (partition) of a network. Args: partition (tuple[tuple]): A partition of micro-elements into macro elements. Yields: tuple[tuple[tuple]]: A grouping of micro-states into macro states of system. TODO: document exactly how to interpret the grouping. """ if not all(partition): raise ValueError('Each part of the partition must have at least one ' 'element.') micro_groupings = [_partitions_list(len(part) + 1) if len(part) > 1 else [[[0], [1]]] for part in partition] for grouping in itertools.product(*micro_groupings): if all(len(element) < 3 for element in grouping): yield tuple(tuple(tuple(tuple(state) for state in states) for states in grouping))
Return all possible groupings of states for a particular coarse graining (partition) of a network. Args: partition (tuple[tuple]): A partition of micro-elements into macro elements. Yields: tuple[tuple[tuple]]: A grouping of micro-states into macro states of system. TODO: document exactly how to interpret the grouping.
def to_dict(self, experiment): """Create a Json-like object for an experiment. Extends the basic object with subject, image group, and (optional) functional data identifiers. Parameters ---------- experiment : ExperimentHandle Returns ------- Json Object Json-like object, i.e., dictionary. """ # Get the basic Json object from the super class json_obj = super(DefaultExperimentManager, self).to_dict(experiment) # Add associated object references json_obj['subject'] = experiment.subject_id json_obj['images'] = experiment.image_group_id if not experiment.fmri_data_id is None: json_obj['fmri'] = experiment.fmri_data_id return json_obj
Create a Json-like object for an experiment. Extends the basic object with subject, image group, and (optional) functional data identifiers. Parameters ---------- experiment : ExperimentHandle Returns ------- Json Object Json-like object, i.e., dictionary.
def safe_dump(data, stream=None, **kwds): """implementation of safe dumper using Ordered Dict Yaml Dumper""" return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds)
implementation of safe dumper using Ordered Dict Yaml Dumper
def set_logger(self, **fields): """Change the name of the logger that log.* should call Args: **fields: Extra fields to be logged. Logger name will be: ".".join([<module_name>, <cls_name>] + fields_sorted_on_key) """ names = [self.__module__, self.__class__.__name__] for field, value in sorted(fields.items()): names.append(value) # names should be something like this for one field: # ["malcolm.modules.scanning.controllers.runnablecontroller", # "RunnableController", "BL45P-ML-SCAN-01"] self.log = logging.getLogger(".".join(names)) if fields: self.log.addFilter(FieldFilter(fields)) return self.log
Change the name of the logger that log.* should call Args: **fields: Extra fields to be logged. Logger name will be: ".".join([<module_name>, <cls_name>] + fields_sorted_on_key)
def set_callbacks(self, worker_start_callback: callable, worker_end_callback: callable, are_async: bool = False): """ :param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread. """ # We are setting self.worker_start_callback and self.worker_end_callback # to lambdas instead of saving them in private vars and moving the lambda logic # to a member function for, among other reasons, making callback updates atomic, # ie. once a callback has been posted, it will be executed as it was in that # moment, any call to set_callbacks will only affect callbacks posted since they # were updated, but not to any pending callback. # If callback is async, execute the start callback in the calling thread scheduler = self.immediate if are_async else self.background self.worker_start_callback = lambda worker: scheduler(Work( lambda: worker_start_callback(worker), "worker_start_callback:" + worker.name )) # As the end callback is called *just* before the thread dies, # there is no problem running it on the thread self.worker_end_callback = lambda worker: self.immediate(Work( lambda: worker_end_callback(worker), "worker_end_callback:" + worker.name ))
:param are_async: True if the callbacks execute asynchronously, posting any heavy work to another thread.
def get_physical_port(self): """Returns the link aggregation object or the ethernet port object.""" obj = None if self.is_link_aggregation(): obj = UnityLinkAggregation.get(self._cli, self.get_id()) else: obj = UnityEthernetPort.get(self._cli, self.get_id()) return obj
Returns the link aggregation object or the ethernet port object.
def get_asset_notification_session(self, asset_receiver, proxy): """Gets the notification session for notifications pertaining to asset changes. arg: asset_receiver (osid.repository.AssetReceiver): the notification callback arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetNotificationSession) - an AssetNotificationSession raise: NullArgument - asset_receiver is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_notification() is false compliance: optional - This method must be implemented if supports_asset_notification() is true. """ if asset_receiver is None: raise NullArgument() if not self.supports_asset_notification(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.AssetNotificationSession(asset_receiver, proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
Gets the notification session for notifications pertaining to asset changes. arg: asset_receiver (osid.repository.AssetReceiver): the notification callback arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetNotificationSession) - an AssetNotificationSession raise: NullArgument - asset_receiver is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_notification() is false compliance: optional - This method must be implemented if supports_asset_notification() is true.
def add_resource( self, base_rule, base_view, alternate_view=None, alternate_rule=None, id_rule=None, app=None, ): """Add route or routes for a resource. :param str base_rule: The URL rule for the resource. This will be prefixed by the API prefix. :param base_view: Class-based view for the resource. :param alternate_view: If specified, an alternate class-based view for the resource. Usually, this will be a detail view, when the base view is a list view. :param alternate_rule: If specified, the URL rule for the alternate view. This will be prefixed by the API prefix. This is mutually exclusive with id_rule, and must not be specified if alternate_view is not specified. :type alternate_rule: str or None :param id_rule: If specified, a suffix to append to base_rule to get the alternate view URL rule. If alternate_view is specified, and alternate_rule is not, then this defaults to '<id>'. This is mutually exclusive with alternate_rule, and must not be specified if alternate_view is not specified. :type id_rule: str or None :param app: If specified, the application to which to add the route(s). Otherwise, this will be the bound application, if present. """ if alternate_view: if not alternate_rule: id_rule = id_rule or DEFAULT_ID_RULE alternate_rule = posixpath.join(base_rule, id_rule) else: assert id_rule is None else: assert alternate_rule is None assert id_rule is None app = self._get_app(app) endpoint = self._get_endpoint(base_view, alternate_view) # Store the view rules for reference. Doesn't support multiple routes # mapped to same view. 
views = app.extensions['resty'].views base_rule_full = '{}{}'.format(self.prefix, base_rule) base_view_func = base_view.as_view(endpoint) if not alternate_view: app.add_url_rule(base_rule_full, view_func=base_view_func) views[base_view] = Resource(base_view, base_rule_full) return alternate_rule_full = '{}{}'.format(self.prefix, alternate_rule) alternate_view_func = alternate_view.as_view(endpoint) @functools.wraps(base_view_func) def view_func(*args, **kwargs): if flask.request.url_rule.rule == base_rule_full: return base_view_func(*args, **kwargs) else: return alternate_view_func(*args, **kwargs) app.add_url_rule( base_rule_full, view_func=view_func, endpoint=endpoint, methods=base_view.methods, ) app.add_url_rule( alternate_rule_full, view_func=view_func, endpoint=endpoint, methods=alternate_view.methods, ) views[base_view] = Resource(base_view, base_rule_full) views[alternate_view] = Resource(alternate_view, alternate_rule_full)
Add route or routes for a resource. :param str base_rule: The URL rule for the resource. This will be prefixed by the API prefix. :param base_view: Class-based view for the resource. :param alternate_view: If specified, an alternate class-based view for the resource. Usually, this will be a detail view, when the base view is a list view. :param alternate_rule: If specified, the URL rule for the alternate view. This will be prefixed by the API prefix. This is mutually exclusive with id_rule, and must not be specified if alternate_view is not specified. :type alternate_rule: str or None :param id_rule: If specified, a suffix to append to base_rule to get the alternate view URL rule. If alternate_view is specified, and alternate_rule is not, then this defaults to '<id>'. This is mutually exclusive with alternate_rule, and must not be specified if alternate_view is not specified. :type id_rule: str or None :param app: If specified, the application to which to add the route(s). Otherwise, this will be the bound application, if present.
def _isVerbExpansible( verbObj, clauseTokens, clauseID ): ''' Kontrollib, kas tavaline verb on laiendatav etteantud osalauses: *) verbi kontekstis (osalauses) on veel teisi verbe; *) verb kuulub etteantud osalausesse; *) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi); *) tegemist pole maks|mas|mast|mata-verbiga; *) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas; Tagastab True, kui k6ik tingimused t2idetud; ''' global _verbInfNonExpansible # Leiame, kas fraas kuulub antud osalausesse ning on laiendatav if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and \ re.match('^(verb)$', verbObj[PATTERN][-1], re.I): # Leiame viimasele s6nale vastava token'i lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]] if not lastToken: raise Exception(' Last token not found for '+str(verbObj)+' in '+str( getJsonAsTextString(clauseTokens) )) lastToken = lastToken[0] # Leiame, ega tegu pole maks/mas/mast/mata verbidega (neid esialgu ei laienda edasi) # NB! Tegelikult peaks v2hemalt -mas verbe saama siiski laiendada: # Ma ei_0 käinud_0 teda palumas_0 ümber otsustada_0 . # Aga kuidas seda teha v6imalikult v2heste vigadega, vajab edasist uurimist ... if not _verbInfNonExpansible.matches(lastToken): # Kontrollime, et fraasi l6pus poleks ja/ning/ega/v6i fraasi: # kui on, siis esialgu targu seda fraasi laiendama ei hakka: if len(verbObj[PATTERN]) >=3 and verbObj[PATTERN][-2] == '&': return False return True # # TODO: siin tuleks ilmselt keelata ka 'saama + Verb_tud' konstruktsioonide laiendused, # kuna need kipuvad olema pigem vigased (kuigi haruldased); Nt. # # ringi hääletades sai_0 rongidega jänest sõita_0 ja vagunisaatjatest neidudega öösiti napsu võetud_0 . # return False
Kontrollib, kas tavaline verb on laiendatav etteantud osalauses: *) verbi kontekstis (osalauses) on veel teisi verbe; *) verb kuulub etteantud osalausesse; *) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi); *) tegemist pole maks|mas|mast|mata-verbiga; *) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas; Tagastab True, kui k6ik tingimused t2idetud;
def _createConfig(self): """ Creates a config tree item (CTI) hierarchy containing default children. """ rootItem = MainGroupCti('debug inspector') if DEBUGGING: # Some test config items. import numpy as np from argos.config.untypedcti import UntypedCti from argos.config.stringcti import StringCti from argos.config.intcti import IntCti from argos.config.floatcti import FloatCti, SnFloatCti from argos.config.boolcti import BoolCti, BoolGroupCti from argos.config.choicecti import ChoiceCti from argos.config.qtctis import PenCti grpItem = GroupCti("group") rootItem.insertChild(grpItem) lcItem = UntypedCti('line color', 123) grpItem.insertChild(lcItem) disabledItem = rootItem.insertChild(StringCti('disabled', "Can't touch me")) disabledItem.enabled=False grpItem.insertChild(IntCti('line-1 color', 7, minValue = -5, stepSize=2, prefix="@", suffix="%", specialValueText="I'm special")) rootItem.insertChild(StringCti('letter', 'aa', maxLength = 1)) grpItem.insertChild(FloatCti('width', 2, minValue =5, stepSize=0.45, decimals=3, prefix="@", suffix="%", specialValueText="so very special")) grpItem.insertChild(SnFloatCti('scientific', defaultData=-np.inf)) gridItem = rootItem.insertChild(BoolGroupCti('grid', True)) gridItem.insertChild(BoolCti('X-Axis', True)) gridItem.insertChild(BoolCti('Y-Axis', False)) rootItem.insertChild(ChoiceCti('hobbit', 2, editable=True, configValues=['Frodo', 'Sam', 'Pippin', 'Merry'])) myPen = QtGui.QPen(QtGui.QColor('#1C8857')) myPen.setWidth(2) myPen.setStyle(Qt.DashDotDotLine) rootItem.insertChild(PenCti('line', False, resetTo=myPen)) return rootItem
Creates a config tree item (CTI) hierarchy containing default children.
def generate_tensor_filename(self, field_name, file_num, compressed=True): """ Generate a filename for a tensor. """ file_ext = TENSOR_EXT if compressed: file_ext = COMPRESSED_TENSOR_EXT filename = os.path.join(self.filename, 'tensors', '%s_%05d%s' %(field_name, file_num, file_ext)) return filename
Generate a filename for a tensor.
def estimate_chi2mixture(self, lrt): """ estimates the parameters of a mixture of a chi-squared random variable of degree 0 and a scaled chi-squared random variable of degree d (1-mixture)*chi2(0) + (mixture)*scale*chi2(dof), where scale is the scaling parameter for the scales chi-square distribution dof are the degrees of freedom of the second component mixture is the probability of beeing in the first component input: lrt [Ntests] vector of test statistics """ """ step 1: estimate the probability of being in component one """ self.mixture = 1-(lrt<=self.tol).mean() n_false = SP.sum(lrt>self.tol) """ step 2: only use the largest qmax fraction of test statistics to estimate the remaining parameters """ n_fitting = SP.ceil(self.qmax * n_false) lrt_sorted = -SP.sort(-lrt)[:n_fitting] q = SP.linspace(0, 1,n_false)[1:n_fitting+1] log_q = SP.log10(q) """ step 3: fitting scale and dof by minimizing the squared error of the log10 p-values with their theorietical values [uniform distribution] """ MSE_opt = SP.inf MSE = SP.zeros((self.n_intervals,self.n_intervals)) for i,scale in enumerate(SP.linspace(self.scale_min,self.scale_max,self.n_intervals)): for j,dof in enumerate(SP.linspace(self.dof_min,self.dof_max,self.n_intervals)): p = STATS.chi2.sf(lrt_sorted/scale,dof) log_p = SP.log10(p) MSE[i,j] = SP.mean((log_q - log_p)**2) if MSE[i,j] < MSE_opt: MSE_opt = MSE[i,j] self.scale = scale self.dof = dof
estimates the parameters of a mixture of a chi-squared random variable of degree 0 and a scaled chi-squared random variable of degree d (1-mixture)*chi2(0) + (mixture)*scale*chi2(dof), where scale is the scaling parameter for the scaled chi-squared distribution, dof are the degrees of freedom of the second component, and mixture is the probability of being in the second component input: lrt [Ntests] vector of test statistics
def begin(self):
    """ Kicks off the Bitcoin protocol handshake.

        Sends our Version message to the peer; once the peer's Version is
        received we answer with a VerAck and begin downloading.
    """
    log.debug("handshake (version %s)" % PROTOCOL_VERSION)

    # Advertise no services: this node cannot serve blocks to peers.
    our_version = Version()
    our_version.services = 0

    log.debug("send Version")
    self.send_message(our_version)
This method will implement the handshake of the Bitcoin protocol. It will send the Version message, and block until it receives a VerAck. Once we receive the version, we'll send the verack, and begin downloading.
def load_block_type(self, block_type):
    """ Resolves `block_type` to the matching :class:`.XBlock` subclass.

        Delegates to ``XBlock.load_class`` using this runtime's configured
        default class and plugin-selection function.
    """
    fallback = self.default_class
    chooser = self.select
    return XBlock.load_class(block_type, fallback, chooser)
Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`.