def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
    """
    Aggregates the elements of this RDD in a multi-level tree pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeAggregate(0, add, add)
    -5
    >>> rdd.treeAggregate(0, add, add, 1)
    -5
    >>> rdd.treeAggregate(0, add, add, 2)
    -5
    >>> rdd.treeAggregate(0, add, add, 5)
    -5
    >>> rdd.treeAggregate(0, add, add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

    if self.getNumPartitions() == 0:
        return zeroValue

    def aggregatePartition(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc

    partiallyAggregated = self.mapPartitions(aggregatePartition)
    numPartitions = partiallyAggregated.getNumPartitions()
    scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
    # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
    # aggregation.
    while numPartitions > scale + numPartitions / scale:
        numPartitions /= scale
        curNumPartitions = int(numPartitions)

        def mapPartition(i, iterator):
            for obj in iterator:
                yield (i % curNumPartitions, obj)

        partiallyAggregated = partiallyAggregated \
            .mapPartitionsWithIndex(mapPartition) \
            .reduceByKey(combOp, curNumPartitions) \
            .values()

    return partiallyAggregated.reduce(combOp)
def spkopa(filename):
    """
    Open an existing SPK file for subsequent write.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkopa_c.html

    :param filename: The name of an existing SPK file.
    :type filename: str
    :return: A handle attached to the SPK file opened to append.
    :rtype: int
    """
    filename = stypes.stringToCharP(filename)
    handle = ctypes.c_int()
    libspice.spkopa_c(filename, ctypes.byref(handle))
    return handle.value
def __execute_scale(self, surface, size_to_scale_from):
    """Execute the scaling operation"""
    x = size_to_scale_from[0] * self.__scale[0]
    y = size_to_scale_from[1] * self.__scale[1]
    scaled_value = (int(x), int(y))
##    #Find out what scaling technique we should use.
##    if self.image.get_bitsize() >= 24:
##        #We have sufficient bit depth to run smooth scale
##        self.image = pygame.transform.smoothscale(self.image, scaled_value)
##    else:
##        #Surface doesn't support smooth scale, revert to regular scale
    self.image = pygame.transform.scale(self.image, scaled_value)
    self.__resize_surface_extents()
def run_tasks(cls):
    """Internal task-runner class method, called by :py:func:`sisy.consumers.run_heartbeat`"""
    now = timezone.now()
    tasks = cls.objects.filter(enabled=True)
    for task in tasks:
        if task.next_run == HAS_NOT_RUN:
            task.calc_next_run()
        if task.next_run < now:
            if task.start_running < now:
                if task.end_running > now:
                    task.run_asap()
                else:
                    task.enabled = False
                    task.save()
                    Channel(KILL_TASK_CHANNEL).send({'id': task.pk})
def transform_streams_for_comparison(outputs):
    """Makes failure output for streams better by having key be the stream name"""
    new_outputs = []
    for output in outputs:
        if output.output_type == 'stream':
            # Transform output
            new_outputs.append({
                'output_type': 'stream',
                output.name: output.text,
            })
        else:
            new_outputs.append(output)
    return new_outputs
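For illustration only, here is a standalone sketch of how this transform behaves, using a namedtuple as a hypothetical stand-in for the real notebook output objects (the actual objects come from the calling test framework, not from this stub):

from collections import namedtuple

# Hypothetical stand-in for a notebook stream output object
Output = namedtuple('Output', ['output_type', 'name', 'text'])

outs = [Output('stream', 'stdout', 'hello\n')]
print(transform_streams_for_comparison(outs))
# -> [{'output_type': 'stream', 'stdout': 'hello\n'}]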
def return_secondary_learner(self):
    """Returns secondary learner using its origin and the given hyperparameters

    Returns:
        est (estimator): Estimator object
    """
    estimator = self.base_learner_origin.return_estimator()
    estimator = estimator.set_params(**self.secondary_learner_hyperparameters)
    return estimator
def ltrimboth(l, proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores).  Assumes list is sorted by magnitude.
    Slices off LESS if proportion results in a non-integer slice index
    (i.e., conservatively slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    lowercut = int(proportiontocut * len(l))
    uppercut = len(l) - lowercut
    return l[lowercut:uppercut]
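A quick usage sketch, assuming ltrimboth above is in scope; with proportiontocut=0.1 on ten sorted scores, one item is dropped from each end:

scores = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(ltrimboth(scores, 0.1))  # -> [2, 3, 4, 5, 6, 7, 8, 9]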
def get_host_cache(service_instance=None):
    '''
    Returns the host cache configuration on the proxy host.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.get_host_cache
    '''
    # Default to getting all disks if no filtering is done
    ret_dict = {}
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    hci = salt.utils.vmware.get_host_cache(host_ref)
    if not hci:
        log.debug('Host cache not configured on host \'%s\'', hostname)
        ret_dict['enabled'] = False
        return ret_dict

    # TODO Support multiple host cache info objects (on multiple datastores)
    return {'enabled': True,
            'datastore': {'name': hci.key.name},
            'swap_size': '{}MiB'.format(hci.swapSize)}
def get_html_text_editor(name, id=None, content='', textual_content=None,
                         width='300px', height='200px',
                         enabled=True, file_upload_url=None,
                         toolbar_set="Basic",
                         custom_configurations_path='/js/ckeditor/invenio-ckeditor-config.js',
                         ln=None):
    """
    Returns a wysiwyg editor (CKEditor) to embed in html pages.

    Fall back to a simple textarea when the library is not installed,
    or when the user's browser is not compatible with the editor, or
    when 'enable' is False, or when javascript is not enabled.

    NOTE that the output also contains a hidden field named
    'editor_type' that contains the kind of editor used, 'textarea' or
    'ckeditor'.

    Based on 'editor_type' you might want to take different actions,
    like replace CRLF with <br/> when editor_type equals to
    'textarea', but not when editor_type equals to 'ckeditor'.

    @param name: *str* the name attribute of the returned editor

    @param id: *str* the id attribute of the returned editor (when
        applicable)

    @param content: *str* the default content of the editor.

    @param textual_content: *str* a content formatted for the case where
        the wysiwyg editor is not available for the user. When not
        specified, use value of 'content'.

    @param width: *str* width of the editor in an html compatible unit:
        Eg: '400px', '50%'.

    @param height: *str* height of the editor in an html compatible unit:
        Eg: '400px', '50%'.

    @param enabled: *bool* if the wysiwyg editor is returned (True) or if a
        simple textarea is returned (False)

    @param file_upload_url: *str* the URL used to upload new files via the
        editor upload panel. You have to implement the handler for your own
        use. The URL handler will get form variables 'File' as POST for the
        uploaded file, and 'Type' as GET for the type of file ('file',
        'image', 'flash', 'media').
        When value is not given, the file upload is disabled.

    @param toolbar_set: *str* the name of the toolbar layout to use. CKeditor
        comes by default with 'Basic' and 'Default'. To define other sets,
        customize the config file in
        /opt/cds-invenio/var/www/ckeditor/invenio-ckconfig.js

    @param custom_configurations_path: *str* value for the CKeditor config
        variable 'CustomConfigurationsPath', which allows to specify the
        path of a file that contains a custom configuration for the editor.
        The path is relative to /opt/invenio/var/www/

    @return: the HTML markup of the editor
    """
    ln = default_ln(ln)
    if textual_content is None:
        textual_content = content

    editor = ''

    if enabled and ckeditor_available:
        # Prepare upload path settings
        file_upload_script = ''
        if file_upload_url is not None:
            file_upload_script = ''',
            filebrowserLinkUploadUrl: '%(file_upload_url)s',
            filebrowserImageUploadUrl: '%(file_upload_url)s?type=Image',
            filebrowserFlashUploadUrl: '%(file_upload_url)s?type=Flash'
            ''' % {'file_upload_url': file_upload_url}

        # Prepare code to instantiate an editor
        editor += '''
        <script type="text/javascript" language="javascript">//<![CDATA[
        /* Load the script only once, or else multiple instance of the editor on the same page will not work */
        var INVENIO_CKEDITOR_ALREADY_LOADED
        if (INVENIO_CKEDITOR_ALREADY_LOADED != 1) {
            document.write('<script type="text/javascript" src="%(CFG_SITE_URL)s/vendors/ckeditor/ckeditor.js"><\/script>');
            INVENIO_CKEDITOR_ALREADY_LOADED = 1;
        }
        //]]></script>
        <input type="hidden" name="editor_type" id="%(id)seditortype" value="textarea" />
        <textarea rows="100" cols="80" id="%(id)s" name="%(name)s" style="width:%(width)s;height:%(height)s">%(textual_content)s</textarea>
        <textarea rows="100" cols="80" id="%(id)shtmlvalue" name="%(name)shtmlvalue" style="display:none;width:%(width)s;height:%(height)s">%(html_content)s</textarea>
        <script type="text/javascript">//<![CDATA[
        var CKEDITOR_BASEPATH = '/ckeditor/';
        CKEDITOR.replace( '%(name)s',
                          {customConfig: '%(custom_configurations_path)s',
                           toolbar: '%(toolbar)s',
                           width: '%(width)s',
                           height: '%(height)s',
                           language: '%(ln)s'
                           %(file_upload_script)s
                          });
        CKEDITOR.on('instanceReady',
                    function( evt ) {
                        /* If CKeditor was correctly loaded, display the nice HTML representation */
                        var oEditor = evt.editor;
                        editor_id = oEditor.id
                        editor_name = oEditor.name
                        var html_editor = document.getElementById(editor_name + 'htmlvalue');
                        oEditor.setData(html_editor.value);
                        var editor_type_field = document.getElementById(editor_name + 'editortype');
                        editor_type_field.value = 'ckeditor';
                        var writer = oEditor.dataProcessor.writer;
                        writer.indentationChars = ''; /*Do not indent source code with tabs*/
                        oEditor.resetDirty();
                        /* Workaround: http://dev.ckeditor.com/ticket/3674 */
                        evt.editor.on( 'contentDom', function( ev ) {
                            ev.removeListener();
                            evt.editor.resetDirty();
                        } );
                        /* End workaround */
                    })
        //]]></script>
        ''' % \
            {'textual_content': cgi.escape(textual_content),
             'html_content': content,
             'width': width,
             'height': height,
             'name': name,
             'id': id or name,
             'custom_configurations_path': custom_configurations_path,
             'toolbar': toolbar_set,
             'file_upload_script': file_upload_script,
             'CFG_SITE_URL': cfg['CFG_SITE_URL'],
             'ln': ln}
    else:
        # CKeditor is not installed
        textarea = '<textarea rows="100" cols="80" %(id)s name="%(name)s" style="width:%(width)s;height:%(height)s">%(content)s</textarea>' \
                   % {'content': cgi.escape(textual_content),
                      'width': width,
                      'height': height,
                      'name': name,
                      'id': id and ('id="%s"' % id) or ''}
        editor += textarea
        editor += '<input type="hidden" name="editor_type" value="textarea" />'

    return editor
def switch_toggle(context, ain):
    """Toggle an actor's power state"""
    context.obj.login()
    actor = context.obj.get_actor_by_ain(ain)
    if actor:
        if actor.get_state():
            actor.switch_off()
            click.echo("State for {} is now OFF".format(ain))
        else:
            actor.switch_on()
            click.echo("State for {} is now ON".format(ain))
    else:
        click.echo("Actor not found: {}".format(ain))
def _remove_event_source(awsclient, evt_source, lambda_arn):
    """
    Given an event_source dictionary, create the object and remove the event source.
    """
    event_source_obj = _get_event_source_obj(awsclient, evt_source)

    if event_source_obj.exists(lambda_arn):
        event_source_obj.remove(lambda_arn)
def systemd(
    state, host, name,
    running=True, restarted=False, reloaded=False,
    command=None, enabled=None, daemon_reload=False,
):
    '''
    Manage the state of systemd managed services.

    + name: name of the service to manage
    + running: whether the service should be running
    + restarted: whether the service should be restarted
    + reloaded: whether the service should be reloaded
    + command: custom command to pass like: ``/etc/rc.d/<name> <command>``
    + enabled: whether this service should be enabled/disabled on boot
    + daemon_reload: reload the systemd daemon to read updated unit files
    '''
    if daemon_reload:
        yield 'systemctl daemon-reload'

    yield _handle_service_control(
        name, host.fact.systemd_status,
        'systemctl {1} {0}.service',
        running, restarted, reloaded, command,
    )

    if isinstance(enabled, bool):
        is_enabled = host.fact.systemd_enabled.get(name, False)

        # Isn't enabled and want enabled?
        if not is_enabled and enabled is True:
            yield 'systemctl enable {0}.service'.format(name)

        # Is enabled and want disabled?
        elif is_enabled and enabled is False:
            yield 'systemctl disable {0}.service'.format(name)
def validateDocumentFinal(self, ctxt):
    """Does the final step for the document validation once all the
       incremental validation steps have been completed. Basically
       it does the following checks described by the XML Rec: check
       all the IDREF/IDREFS attributes definition for validity """
    if ctxt is None:
        ctxt__o = None
    else:
        ctxt__o = ctxt._o
    ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
    return ret
def get_serializer(self, instance=None, data=None, many=False, partial=False):
    """
    Return the serializer instance that should be used for validating and
    deserializing input, and for serializing output.
    """
    serializers = {
        'node': NodeRequestListSerializer,
        'vote': VoteRequestListSerializer,
        'comment': CommentRequestListSerializer,
        'rate': RatingRequestListSerializer,
    }
    context = self.get_serializer_context()
    service_code = context['request'].query_params.get('service_code', 'node')
    if service_code not in serializers.keys():
        serializer_class = self.get_serializer_class()
    else:
        serializer_class = serializers[service_code]
    return serializer_class(instance, many=many, partial=partial, context=context)
def raise_(type_, value=None, traceback=None):  # pylint: disable=W0613
    """
    Does the same as ordinary ``raise`` with arguments do in Python 2.
    But works in Python 3 (>= 3.3) also!

    Please check out the README on https://github.com/9seconds/pep3134
    to get an idea about possible pitfalls. But the short story is:
    please be pretty careful with tracebacks. If it is possible,
    use sys.exc_info instead. But in most cases it will work as you expect.
    """
    if type_.__traceback__ is not traceback:
        raise type_.with_traceback(traceback)
    raise type_
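A minimal usage sketch, assuming raise_ above is in scope; it re-raises a caught exception with its original traceback preserved:

import sys

try:
    1 / 0
except ZeroDivisionError:
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        raise_(exc_value, None, exc_tb)  # re-raise, keeping the traceback
    except ZeroDivisionError as err:
        print(type(err).__name__)  # -> ZeroDivisionError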
def FDMT(data, f_min, f_max, maxDT, dataType):
    """ This function implements the FDMT algorithm.

    Input: Input visibility array (nints, nbl, nchan, npol)
        f_min, f_max are the base-band begin and end frequencies.
            The frequencies should be entered in MHz.
        maxDT - the maximal delay (in time bins) of the maximal dispersion.
            Appears in the paper as N_{\Delta}. A typical input is maxDT = N_f.
        dataType - a valid numpy dtype. recommended: either int32, or int64.
    Output: The dispersion measure transform of the Input matrix.
        The output dimensions are [Input.shape[1], maxDT]

    For details, see algorithm 1 in Zackay & Ofek (2014)
    """
    nint, nbl, nchan, npol = data.shape
    niters = int(np.log2(nchan))
    assert nchan in 2**np.arange(30) and nint in 2**np.arange(30), "Input dimensions must be a power of 2"

    logger.info('Input data dimensions: {0}'.format(data.shape))
    data = FDMT_initialization(data, f_min, f_max, maxDT, dataType)
    logger.info('Iterating {0} times to calculate to maxDT of {1}'.format(niters, maxDT))

    for i_t in range(1, niters + 1):
        data = FDMT_iteration(data, maxDT, nchan, f_min, f_max, i_t, dataType)

    [nint, dT, nbl, nchan, npol] = data.shape
    assert nchan == 1, 'Channel axis should have length 1 after all FDMT iterations.'

    # put dT axis first and remove chan axis
    return np.rollaxis(data[:, :, :, 0, :], 1)
def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
    "Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest."
    split_params = []
    for l in layer_groups:
        l1,l2 = [],[]
        for c in l.children():
            if isinstance(c, no_wd_types): l2 += list(trainable_params(c))
            elif isinstance(c, bias_types):
                bias = c.bias if hasattr(c, 'bias') else None
                l1 += [p for p in trainable_params(c) if not (p is bias)]
                if bias is not None: l2.append(bias)
            else: l1 += list(trainable_params(c))
        #Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order
        #for the optimizer load of state_dict
        l1,l2 = uniqueify(l1),uniqueify(l2)
        split_params += [l1, l2]
    return split_params
async def ensure_process(self):
    """
    Start the process
    """
    # We don't want multiple requests trying to start the process at the same time
    # FIXME: Make sure this times out properly?
    # Invariant here should be: when lock isn't being held, either 'proc' is in state &
    # running, or not.
    with (await self.state['proc_lock']):
        if 'proc' not in self.state:
            # FIXME: Prevent races here
            # FIXME: Handle graceful exits of spawned processes here
            cmd = self.get_cmd()

            server_env = os.environ.copy()
            # Set up extra environment variables for process
            server_env.update(self.get_env())

            timeout = self.get_timeout()

            proc = SupervisedProcess(self.name, *cmd, env=server_env,
                                     ready_func=self._http_ready_func,
                                     ready_timeout=timeout, log=self.log)
            self.state['proc'] = proc

            try:
                await proc.start()
                is_ready = await proc.ready()
                if not is_ready:
                    await proc.kill()
                    raise web.HTTPError(500, 'could not start {} in time'.format(self.name))
            except:
                # Make sure we remove proc from state in any error condition
                del self.state['proc']
                raise
def load_auth_from_file(filename):
    """Initializes the auth settings for accessing MyAnimeList through its
    official API from a given filename.

    :param filename The name of the file containing your MyAnimeList
        credentials

    REQUIREMENTS: The file must...
        ...contain the username for your MAL account.
        ...contain the password for your MAL account.
        ...have both your username and password
           separated by newline(s) or space(s).
    :return A tuple containing your credentials.
    """
    with open(filename) as auth_file:
        lines = auth_file.read().splitlines()
    lines = [line.strip() for line in lines if len(line) != 0]
    if len(lines) == 2:
        credentials = (lines[0], lines[1])
    elif len(lines) == 1:
        user_pass = lines[0].split()
        credentials = (user_pass[0], user_pass[1])
    elif len(lines) == 0 or len(lines) > 2:
        raise ValueError(constants.INVALID_AUTH_FILE)

    if helpers.verif_auth(credentials, header):
        return credentials
    else:
        raise ValueError(constants.INVALID_CREDENTIALS)
def setFixedHeight(self, height):
    """
    Sets the fixed height for this item to the inputted height amount.

    :param      height | <int>
    """
    super(XViewPanelItem, self).setFixedHeight(height)
    self._dragLabel.setFixedHeight(height)
    self._titleLabel.setFixedHeight(height)
    self._searchButton.setFixedHeight(height)
    self._closeButton.setFixedHeight(height)
def rpc_call(self, request, method=None, params=None, **kwargs):
    """ Call an RPC method.

    return object: a result
    """
    args = []
    kwargs = dict()
    if isinstance(params, dict):
        kwargs.update(params)
    else:
        args = list(as_tuple(params))

    method_key = "{0}.{1}".format(self.scheme_name, method)
    if method_key not in self.methods:
        raise AssertionError("Unknown method: {0}".format(method))
    method = self.methods[method_key]

    if hasattr(method, 'request'):
        args.insert(0, request)

    return method(*args, **kwargs)
def play(self, call_params):
    """REST 'Play something on a call' helper
    """
    path = '/' + self.api_version + '/Play/'
    method = 'POST'
    return self.request(path, method, call_params)
def sub_article_folders(self):
    """
    Returns all valid ArticleFolder sitting inside of
    :attr:`ArticleFolder.dir_path`.
    """
    l = list()
    for p in Path.sort_by_fname(
        Path(self.dir_path).select_dir(recursive=False)
    ):
        af = ArticleFolder(dir_path=p.abspath)
        try:
            if af.title is not None:
                l.append(af)
        except:
            pass
    return l
def importance(self, attribute, examples):
    """
    AIMA implies that importance should be information gain.
    Since AIMA only defines it for binary features this implementation
    was based on the wikipedia article:
    http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
    """
    gain_counter = OnlineInformationGain(attribute, self.target)
    for example in examples:
        gain_counter.add(example)
    return gain_counter.get_gain()
def DEFAULT_RENAMER(L, Names=None):
    """ Renames overlapping column names of numpy ndarrays with
    structured dtypes

    Rename the columns by using a simple convention:

    * If `L` is a list, it will append the number in the list to the key
      associated with the array.

    * If `L` is a dictionary, the algorithm will append the string
      representation of the key associated with an array to the
      overlapping columns from that array.

    Default renamer function used by :func:`tabular.spreadsheet.join`

    **Parameters**

        **L** :  list or dictionary

            Numpy recarrays with columns to be renamed.

    **Returns**

        **D** : dictionary of dictionaries

            Dictionary mapping each input numpy recarray to a
            dictionary mapping each original column name to its new
            name following the convention above.

    """
    if isinstance(L, dict):
        Names = L.keys()
        LL = L.values()
    else:
        if Names is None:
            Names = range(len(L))
        else:
            assert len(Names) == len(L)
        LL = L
    commons = Commons([l.dtype.names for l in LL])
    D = {}
    for (i, l) in zip(Names, LL):
        d = {}
        for c in commons:
            if c in l.dtype.names:
                d[c] = c + '_' + str(i)
        if d:
            D[i] = d
    return D
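The helpers Commons and unique are tabular internals; the self-contained sketch below only illustrates the renaming convention (suffix each shared column with its array's index), not the library's actual code:

from collections import Counter
import numpy as np

a = np.rec.fromrecords([(1, 2.0)], names=['id', 'x'])
b = np.rec.fromrecords([(3, 4.0)], names=['id', 'y'])

def rename_commons(arrays):
    # columns that appear in more than one array
    counts = Counter(name for arr in arrays for name in arr.dtype.names)
    commons = {name for name, count in counts.items() if count > 1}
    return {i: {c: '%s_%s' % (c, i) for c in commons if c in arr.dtype.names}
            for i, arr in enumerate(arrays)}

print(rename_commons([a, b]))  # -> {0: {'id': 'id_0'}, 1: {'id': 'id_1'}}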
def request_issuance(self, csr):
    """
    Request a certificate.

    Authorizations should have already been completed for all of the names
    requested in the CSR.

    Note that unlike `acme.client.Client.request_issuance`, the certificate
    resource will have the body data as raw bytes.

    ..  seealso:: `txacme.util.csr_for_names`

    ..  todo:: Delayed issuance is not currently supported, the server must
        issue the requested certificate immediately.

    :param csr: A certificate request message: normally
        `txacme.messages.CertificateRequest` or
        `acme.messages.CertificateRequest`.

    :rtype: Deferred[`acme.messages.CertificateResource`]
    :return: The issued certificate.
    """
    action = LOG_ACME_REQUEST_CERTIFICATE()
    with action.context():
        return (
            DeferredContext(
                self._client.post(
                    self.directory[csr], csr,
                    content_type=DER_CONTENT_TYPE,
                    headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
            .addCallback(self._expect_response, http.CREATED)
            .addCallback(self._parse_certificate)
            .addActionFinish())
def build(self, tag, **kwargs):
    """
    Identical to :meth:`dockermap.client.base.DockerClientWrapper.build` with additional logging.
    """
    self.push_log("Building image '{0}'.".format(tag))
    set_raise_on_error(kwargs)
    try:
        return super(DockerFabricClient, self).build(tag, **kwargs)
    except DockerStatusError as e:
        error(e.message)
def print_math(math_expression_lst, name="math.html", out='html', formatter=lambda x: x):
    """
    Converts LaTeX math expressions into an html layout.
    Creates a html file in the directory where print_math is called by default.
    Displays math to jupyter notebook if "notebook" argument is specified.

    Args:
        math_expression_lst (list): A list of LaTeX math (string) to be rendered by KaTeX
        out (string): {"html"|"notebook"}: HTML by default. Specifies output medium.
        formatter (function): function that cleans up the string for KaTeX.

    Returns:
        A HTML file in the directory where this function is called, or
        displays HTML output in a notebook.
    """
    try:
        shutil.rmtree('viz')
    except:
        pass
    pth = get_cur_path() + print_math_template_path
    shutil.copytree(pth, 'viz')
    # clean_str = formatter(math_expression_lst)
    html_loc = None
    if out == "html":
        html_loc = pth + "standalone_index.html"
    if out == "notebook":
        from IPython.display import display, HTML
        html_loc = pth + "notebook_index.html"

    html = open(html_loc).read()
    html = html.replace("__MATH_LIST__", json.dumps(math_expression_lst))

    if out == "notebook":
        display(HTML(html))
    elif out == "html":
        with open(name, "w+") as out_f:
            out_f.write(html)
def generateXY(self, **kwargs):
    """ Generate source catalog from input image using DAOFIND-style algorithm
    """
    #x,y,flux,sharp,round = idlphot.find(array,self.pars['hmin'],self.pars['fwhm'],
    #                     roundlim=self.pars['roundlim'], sharplim=self.pars['sharplim'])
    print("  #  Source finding for '{}', EXT={} started at: {}"
          .format(self.fnamenoext, self.wcs.extname, util._ptime()[0]))
    if self.pars['computesig']:
        # compute sigma for this image
        sigma = self._compute_sigma()
    else:
        sigma = self.pars['skysigma']
    skymode = sigma**2
    log.info('   Finding sources using sky sigma = %f' % sigma)
    if self.pars['threshold'] in [None, "INDEF", "", " "]:
        hmin = skymode
    else:
        hmin = sigma * self.pars['threshold']

    if 'mask' in kwargs and kwargs['mask'] is not None:
        dqmask = np.asarray(kwargs['mask'], dtype=bool)
    else:
        dqmask = None
    # get the mask for source finding:
    mask = self._combine_exclude_mask(dqmask)

    x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind(
        self.source,
        hmin,
        self.pars['conv_width'],
        skymode,
        sharplim=[self.pars['sharplo'], self.pars['sharphi']],
        roundlim=[self.pars['roundlo'], self.pars['roundhi']],
        peakmin=self.pars['peakmin'],
        peakmax=self.pars['peakmax'],
        fluxmin=self.pars['fluxmin'],
        fluxmax=self.pars['fluxmax'],
        nsigma=self.pars['nsigma'],
        ratio=self.pars['ratio'],
        theta=self.pars['theta'],
        mask=mask,
        use_sharp_round=self.use_sharp_round,
        nbright=self.nbright
    )

    if len(x) == 0:
        if not self.pars['computesig']:
            sigma = self._compute_sigma()
            hmin = sigma * self.pars['threshold']
            log.info('No sources found with original thresholds. Trying automatic settings.')
            x, y, flux, src_id, sharp, round1, round2 = tweakutils.ndfind(
                self.source,
                hmin,
                self.pars['conv_width'],
                skymode,
                sharplim=[self.pars['sharplo'], self.pars['sharphi']],
                roundlim=[self.pars['roundlo'], self.pars['roundhi']],
                peakmin=self.pars['peakmin'],
                peakmax=self.pars['peakmax'],
                fluxmin=self.pars['fluxmin'],
                fluxmax=self.pars['fluxmax'],
                nsigma=self.pars['nsigma'],
                ratio=self.pars['ratio'],
                theta=self.pars['theta'],
                mask=mask,
                use_sharp_round=self.use_sharp_round,
                nbright=self.nbright
            )
    if len(x) == 0:
        xypostypes = 3*[float] + [int] + (3 if self.use_sharp_round else 0)*[float]
        self.xypos = [np.empty(0, dtype=i) for i in xypostypes]
        warnstr = textutil.textbox('WARNING: \n' +
                                   'No valid sources found with the current parameter values!')
        for line in warnstr.split('\n'):
            log.warning(line)
        print(warnstr)
    else:
        # convert the positions from numpy 0-based to FITS 1-based
        if self.use_sharp_round:
            self.xypos = [x+1, y+1, flux, src_id+self.start_id, sharp, round1, round2]
        else:
            self.xypos = [x+1, y+1, flux, src_id+self.start_id]

    log.info('###Source finding finished at: %s' % (util._ptime()[0]))

    self.in_units = 'pixels'  # Not strictly necessary, but documents units when determined
    self.sharp = sharp
    self.round1 = round1
    self.round2 = round2
    self.numcols = 7 if self.use_sharp_round else 4
    self.num_objects = len(x)
    self._apply_flux_limits = False
def to_line_string(self, closed=True):
    """
    Convert this polygon's `exterior` to a ``LineString`` instance.

    Parameters
    ----------
    closed : bool, optional
        Whether to close the line string, i.e. to add the first point of
        the `exterior` also as the last point at the end of the line string.
        This has no effect if the polygon has a single point or zero points.

    Returns
    -------
    imgaug.augmentables.lines.LineString
        Exterior of the polygon as a line string.
    """
    from imgaug.augmentables.lines import LineString
    if not closed or len(self.exterior) <= 1:
        return LineString(self.exterior, label=self.label)
    return LineString(
        np.concatenate([self.exterior, self.exterior[0:1, :]], axis=0),
        label=self.label)
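The closing step is just an np.concatenate of the first exterior point onto the end; a standalone illustration of that one step (imgaug itself is not needed for it):

import numpy as np

exterior = np.array([[0.0, 0.0], [4.0, 0.0], [4.0, 4.0]])
closed = np.concatenate([exterior, exterior[0:1, :]], axis=0)
print(closed[-1])  # -> [0. 0.] (the first point, repeated at the end)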
def open(safe_file):
    """Return a SentinelDataSet object."""
    if os.path.isdir(safe_file) or os.path.isfile(safe_file):
        return SentinelDataSet(safe_file)
    else:
        raise IOError("file not found: %s" % safe_file)
def quaternion_from_euler(angles, order='yzy'):
    """Generate a quaternion from a set of Euler angles.

    Args:
        angles (array_like): Array of Euler angles.
        order (str): Order of Euler rotations. 'yzy' is default.

    Returns:
        Quaternion: Quaternion representation of Euler rotation.
    """
    angles = np.asarray(angles, dtype=float)
    quat = quaternion_from_axis_rotation(angles[0], order[0]) \
        * (quaternion_from_axis_rotation(angles[1], order[1])
           * quaternion_from_axis_rotation(angles[2], order[2]))
    quat.normalize(inplace=True)
    return quat
def _extract_from_url(self, url):
    """Try to extract from the article URL - simple but might work as a fallback"""
    # Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py
    m = re.search(re_pub_date, url)
    if m:
        return self.parse_date_str(m.group(0))
    return None
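re_pub_date is defined elsewhere in the module (the Newspaper3k pattern referenced above); the standalone sketch below uses a simpler, hypothetical pattern just to show the fallback idea of pulling a date out of the URL path:

import re

url = "https://example.com/news/2021/06/15/some-story.html"
m = re.search(r"\d{4}[/-]\d{2}[/-]\d{2}", url)
print(m.group(0) if m else None)  # -> 2021/06/15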
def checkout(self, ref, cb=None):
    """Checkout a bundle from the remote. Returns a file-like object"""
    if self.is_api:
        return self._checkout_api(ref, cb=cb)
    else:
        return self._checkout_fs(ref, cb=cb)
def fromrandom(shape=(10, 50, 50), npartitions=1, seed=42, engine=None):
    """
    Generate random image data.

    Parameters
    ----------
    shape : tuple, optional, default=(10, 50, 50)
        Dimensions of images.

    npartitions : int, optional, default=1
        Number of partitions.

    seed : int, optional, default=42
        Random seed.
    """
    seed = hash(seed)

    def generate(v):
        random.seed(seed + v)
        return random.randn(*shape[1:])

    return fromlist(range(shape[0]), accessor=generate, npartitions=npartitions, engine=engine)
def draw(self, scr):
    'Draw entire screen onto the `scr` curses object.'
    numHeaderRows = 1
    scr.erase()  # clear screen before every re-draw

    vd().refresh()

    if not self.columns:
        return

    color_current_row = CursesAttr(colors.color_current_row, 5)
    disp_column_sep = options.disp_column_sep

    rowattrs = {}  # [rowidx] -> attr
    colattrs = {}  # [colidx] -> attr
    isNull = isNullFunc()

    self.rowLayout = {}
    self.calcColLayout()
    vcolidx = 0
    rows = list(self.rows[self.topRowIndex:self.topRowIndex+self.nVisibleRows])
    for vcolidx, colinfo in sorted(self.visibleColLayout.items()):
        x, colwidth = colinfo
        col = self.visibleCols[vcolidx]

        if x < self.vd.windowWidth:  # only draw inside window
            headerRow = 0
            self.drawColHeader(scr, headerRow, vcolidx)

            y = headerRow + numHeaderRows
            for rowidx in range(0, min(len(rows), self.nVisibleRows)):
                dispRowIdx = self.topRowIndex + rowidx
                if dispRowIdx >= self.nRows:
                    break

                self.rowLayout[dispRowIdx] = y

                row = rows[rowidx]
                cellval = col.getCell(row, colwidth-1)

                try:
                    if isNull(cellval.value):
                        cellval.note = options.disp_note_none
                        cellval.notecolor = 'color_note_type'
                except TypeError:
                    pass

                attr = self.colorize(col, row, cellval)

                # sepattr is the attr between cell/columns
                rowattr = rowattrs.get(rowidx)
                if rowattr is None:
                    rowattr = rowattrs[rowidx] = self.colorize(None, row)
                sepattr = rowattr

                # must apply current row here, because this colorization requires cursorRowIndex
                if dispRowIdx == self.cursorRowIndex:
                    attr = attr.update_attr(color_current_row)
                    sepattr = sepattr.update_attr(color_current_row)

                note = getattr(cellval, 'note', None)
                if note:
                    noteattr = attr.update_attr(colors.get_color(cellval.notecolor), 10)
                    clipdraw(scr, y, x+colwidth-len(note), note, noteattr.attr, len(note))

                clipdraw(scr, y, x, disp_column_fill+cellval.display, attr.attr, colwidth-(1 if note else 0))
                vd.onMouse(scr, y, x, 1, colwidth, BUTTON3_RELEASED='edit-cell')

                sepchars = disp_column_sep
                if (self.keyCols and col is self.keyCols[-1]) or vcolidx == self.rightVisibleColIndex:
                    sepchars = options.disp_keycol_sep

                if x+colwidth+len(sepchars) <= self.vd.windowWidth:
                    scr.addstr(y, x+colwidth, sepchars, sepattr.attr)

                y += 1

    if vcolidx+1 < self.nVisibleCols:
        scr.addstr(headerRow, self.vd.windowWidth-2, options.disp_more_right, colors.color_column_sep)

    catchapply(self.checkCursor)
def hashify_files(files: list) -> dict:
    """Return mapping from file path to file hash."""
    return {filepath.replace('\\', '/'): hash_tree(filepath)
            for filepath in listify(files)}
def all_referenced_targets(self, result):
    """Returns all targets referenced by this subvariant,
    either directly or indirectly, and either as sources,
    or as dependency properties. Targets referred to with a
    dependency property are returned as properties, not targets."""
    if __debug__:
        from .property import Property
        assert is_iterable_typed(result, (VirtualTarget, Property))
    # Find directly referenced targets.
    deps = self.build_properties().dependency()
    all_targets = self.sources_ + deps

    # Find other subvariants.
    r = []
    for e in all_targets:
        if not e in result:
            result.add(e)
            if isinstance(e, property.Property):
                t = e.value
            else:
                t = e

            # FIXME: how can this be?
            cs = t.creating_subvariant()
            if cs:
                r.append(cs)
    r = unique(r)
    for s in r:
        if s != self:
            s.all_referenced_targets(result)
def makevAndvPfuncs(self, policyFunc):
    '''
    Constructs the marginal value function for this period.

    Parameters
    ----------
    policyFunc : function
        Consumption and medical care function for this period, defined over
        market resources, permanent income level, and the medical need shock.

    Returns
    -------
    vFunc : function
        Value function for this period, defined over market resources and
        permanent income.
    vPfunc : function
        Marginal value (of market resources) function for this period,
        defined over market resources and permanent income.
    '''
    # Get state dimension sizes
    mCount = self.aXtraGrid.size
    pCount = self.pLvlGrid.size
    MedCount = self.MedShkVals.size

    # Make temporary grids to evaluate the consumption function
    temp_grid = np.tile(np.reshape(self.aXtraGrid, (mCount, 1, 1)), (1, pCount, MedCount))
    aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid), (1, pCount, 1)),
                       (mCount, 1, MedCount))
    pGrid = np.tile(np.reshape(self.pLvlGrid, (1, pCount, 1)), (mCount, 1, MedCount))
    mGrid = temp_grid*pGrid + aMinGrid
    if self.pLvlGrid[0] == 0:
        mGrid[:, 0, :] = np.tile(np.reshape(self.aXtraGrid, (mCount, 1)), (1, MedCount))
    MedShkGrid = np.tile(np.reshape(self.MedShkVals, (1, 1, MedCount)), (mCount, pCount, 1))
    probsGrid = np.tile(np.reshape(self.MedShkPrbs, (1, 1, MedCount)), (mCount, pCount, 1))

    # Get optimal consumption (and medical care) for each state
    cGrid, MedGrid = policyFunc(mGrid, pGrid, MedShkGrid)

    # Calculate expected value by "integrating" across medical shocks
    if self.vFuncBool:
        MedGrid = np.maximum(MedGrid, 1e-100)  # interpolation error sometimes makes Med < 0 (barely)
        aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid)  # interpolation error sometimes makes tiny violations
        vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid, pGrid)
        vNow = np.sum(vGrid*probsGrid, axis=2)

    # Calculate expected marginal value by "integrating" across medical shocks
    vPgrid = self.uP(cGrid)
    vPnow = np.sum(vPgrid*probsGrid, axis=2)

    # Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0)
    mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid), (1, pCount)), mGrid[:, :, 0]))
    vPnvrsNow = np.concatenate((np.zeros((1, pCount)), self.uPinv(vPnow)))
    if self.vFuncBool:
        vNvrsNow = np.concatenate((np.zeros((1, pCount)), self.uinv(vNow)), axis=0)
        vNvrsPnow = vPnow*self.uinvP(vNow)
        vNvrsPnow = np.concatenate((np.zeros((1, pCount)), vNvrsPnow), axis=0)

    # Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl
    vPnvrsFunc_by_pLvl = []
    vNvrsFunc_by_pLvl = []
    for j in range(pCount):  # Make a pseudo inverse marginal value function for each pLvl
        pLvl = self.pLvlGrid[j]
        m_temp = mGrid_small[:, j] - self.mLvlMinNow(pLvl)
        vPnvrs_temp = vPnvrsNow[:, j]
        vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp, vPnvrs_temp))
        if self.vFuncBool:
            vNvrs_temp = vNvrsNow[:, j]
            vNvrsP_temp = vNvrsPnow[:, j]
            vNvrsFunc_by_pLvl.append(CubicInterp(m_temp, vNvrs_temp, vNvrsP_temp))
    vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl, self.pLvlGrid)
    vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase, self.mLvlMinNow)  # adjust for the lower bound of mLvl
    if self.vFuncBool:
        vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl, self.pLvlGrid)
        vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase, self.mLvlMinNow)  # adjust for the lower bound of mLvl

    # "Re-curve" the (marginal) value function
    vPfunc = MargValueFunc2D(vPnvrsFunc, self.CRRA)
    if self.vFuncBool:
        vFunc = ValueFunc2D(vNvrsFunc, self.CRRA)
    else:
        vFunc = NullFunc()

    return vFunc, vPfunc
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, fft=False):
    """Compute the (cross) statistical inefficiency of (two) timeseries.

    Parameters
    ----------
    A_n : np.ndarray, float
        A_n[n] is nth value of timeseries A.  Length is deduced from vector.
    B_n : np.ndarray, float, optional, default=None
        B_n[n] is nth value of timeseries B.  Length is deduced from vector.
        If supplied, the cross-correlation of timeseries A and B will be
        estimated instead of the autocorrelation of timeseries A.
    fast : bool, optional, default=False
        If True, will use faster (but less accurate) method to estimate
        correlation time, described in Ref. [1] (default: False).
        This is ignored when B_n=None and fft=True.
    mintime : int, optional, default=3
        minimum amount of correlation function to compute (default: 3)
        The algorithm terminates after computing the correlation time out to
        mintime when the correlation function first goes negative.  Note that
        this time may need to be increased if there is a strong initial
        negative peak in the correlation function.
    fft : bool, optional, default=False
        If fft=True and B_n=None, then use the fft based approach, as
        implemented in statisticalInefficiency_fft().

    Returns
    -------
    g : np.ndarray,
        g is the estimated statistical inefficiency (equal to 1 + 2 tau,
        where tau is the correlation time).  We enforce g >= 1.0.

    Notes
    -----
    The same timeseries can be used for both A_n and B_n to get the
    autocorrelation statistical inefficiency.  The fast method described in
    Ref [1] is used to compute g.

    References
    ----------
    [1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill.
    Use of the weighted histogram analysis method for the analysis of
    simulated and parallel tempering simulations.  JCTC 3(1):26-41, 2007.

    Examples
    --------
    Compute statistical inefficiency of timeseries data with known
    correlation time.

    >>> from pymbar.testsystems import correlated_timeseries_example
    >>> A_n = correlated_timeseries_example(N=100000, tau=5.0)
    >>> g = statisticalInefficiency(A_n, fast=True)
    """
    # Create numpy copies of input arguments.
    A_n = np.array(A_n)

    if fft and B_n is None:
        return statisticalInefficiency_fft(A_n, mintime=mintime)

    if B_n is not None:
        B_n = np.array(B_n)
    else:
        B_n = np.array(A_n)

    # Get the length of the timeseries.
    N = A_n.size

    # Be sure A_n and B_n have the same dimensions.
    if A_n.shape != B_n.shape:
        raise ParameterError('A_n and B_n must have same dimensions.')

    # Initialize statistical inefficiency estimate with uncorrelated value.
    g = 1.0

    # Compute mean of each timeseries.
    mu_A = A_n.mean()
    mu_B = B_n.mean()

    # Make temporary copies of fluctuation from mean.
    dA_n = A_n.astype(np.float64) - mu_A
    dB_n = B_n.astype(np.float64) - mu_B

    # Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
    sigma2_AB = (dA_n * dB_n).mean()  # standard estimator to ensure C(0) = 1

    # Trap the case where this covariance is zero, and we cannot proceed.
    if sigma2_AB == 0:
        raise ParameterError('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency')

    # Accumulate the integrated correlation time by computing the normalized correlation time at
    # increasing values of t.  Stop accumulating if the correlation function goes negative, since
    # this is unlikely to occur unless the correlation function has decayed to the point where it
    # is dominated by noise and indistinguishable from zero.
    t = 1
    increment = 1
    while t < N - 1:
        # compute normalized fluctuation correlation function at time t
        C = np.sum(dA_n[0:(N - t)] * dB_n[t:N] + dB_n[0:(N - t)] * dA_n[t:N]) / (2.0 * float(N - t) * sigma2_AB)

        # Terminate if the correlation function has crossed zero and we've computed the correlation
        # function at least out to 'mintime'.
        if (C <= 0.0) and (t > mintime):
            break

        # Accumulate contribution to the statistical inefficiency.
        g += 2.0 * C * (1.0 - float(t) / float(N)) * float(increment)

        # Increment t and the amount by which we increment t.
        t += increment

        # Increase the interval if "fast mode" is on.
        if fast:
            increment += 1

    # g must be at least unity
    if g < 1.0:
        g = 1.0

    # Return the computed statistical inefficiency.
    return g
def _get_ctypes(self):
    """
    Returns all related objects for this model.
    """
    ctypes = []
    for related_object in self.model._meta.get_all_related_objects():
        model = getattr(related_object, 'related_model', related_object.model)
        ctypes.append(ContentType.objects.get_for_model(model).pk)
        if model.__subclasses__():
            for child in model.__subclasses__():
                ctypes.append(ContentType.objects.get_for_model(child).pk)
    return ctypes
def VarintReader(buf, pos=0):
    """A 64 bit decoder from google.protobuf.internal.decoder."""
    result = 0
    shift = 0
    while 1:
        b = buf[pos]
        result |= (ORD_MAP_AND_0X7F[b] << shift)
        pos += 1
        if not ORD_MAP_AND_0X80[b]:
            return (result, pos)
        shift += 7
        if shift >= 64:
            raise rdfvalue.DecodeError("Too many bytes when decoding varint.")
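ORD_MAP_AND_0X7F and ORD_MAP_AND_0X80 are precomputed lookup tables for the masks b & 0x7F and b & 0x80. Below is a self-contained round-trip sketch of the same varint scheme using the masks directly (an illustration, not the library's actual code):

def encode_varint(value):
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)  # set the continuation bit
        else:
            out.append(byte)
            return bytes(out)

def decode_varint(buf, pos=0):
    result, shift = 0, 0
    while True:
        b = buf[pos]
        result |= (b & 0x7F) << shift
        pos += 1
        if not (b & 0x80):  # continuation bit clear: done
            return result, pos
        shift += 7
        if shift >= 64:
            raise ValueError("Too many bytes when decoding varint.")

assert decode_varint(encode_varint(300)) == (300, 2)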
def read(self, fp):
    "Reads a dictionary from an input stream."
    base_size = struct.unpack(str("=I"), fp.read(4))[0]
    self._units.fromfile(fp, base_size)
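A hedged round-trip sketch of the size-prefixed layout this reader expects: a little-endian unsigned int count ("=I") followed by that many fixed-size units. Here the units are modeled with array('I'); the real element type is whatever _units uses:

import io
import struct
from array import array

units = array('I', [10, 20, 30])
stream = io.BytesIO(struct.pack("=I", len(units)) + units.tobytes())

base_size = struct.unpack("=I", stream.read(4))[0]  # unit count header
restored = array('I')
restored.frombytes(stream.read(base_size * restored.itemsize))
assert list(restored) == [10, 20, 30]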
def pancake_sort(arr):
    """
    Pancake_sort
    Sorting a given array
    mutation of selection sort

    reference: https://www.geeksforgeeks.org/pancake-sorting/

    Overall time complexity : O(N^2)
    """
    len_arr = len(arr)
    if len_arr <= 1:
        return arr
    for cur in range(len(arr), 1, -1):
        # Finding index of maximum number in arr
        index_max = arr.index(max(arr[0:cur]))
        if index_max + 1 != cur:
            # Needs moving
            if index_max != 0:
                # reverse from 0 to index_max
                arr[:index_max+1] = reversed(arr[:index_max+1])
            # Reverse list
            arr[:cur] = reversed(arr[:cur])
    return arr
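Usage, assuming pancake_sort above is in scope (note that it sorts in place and returns the same list):

print(pancake_sort([3, 1, 5, 2, 4]))  # -> [1, 2, 3, 4, 5]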
def iterate_pubmed_identifiers(graph) -> Iterable[str]:
    """Iterate over all PubMed identifiers in a graph.

    :param pybel.BELGraph graph: A BEL graph
    :return: An iterator over the PubMed identifiers in the graph
    """
    return (
        data[CITATION][CITATION_REFERENCE].strip()
        for _, _, data in graph.edges(data=True)
        if has_pubmed(data)
    )
def runcode(code):
    """Run the given code (a list of lines) line by line, printing each line,
    and return the variable 'ans'."""
    for line in code:
        print('# ' + line)
        exec(line, globals())
    print('# return ans')
    return ans
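Usage sketch, assuming runcode above is in scope; the snippet must assign 'ans', which exec writes into the defining module's globals:

result = runcode(["x = 2", "ans = x ** 10"])
print(result)  # -> 1024, after each executed line is echoed with a '# ' prefix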
def generic_pst(par_names=["par1"], obs_names=["obs1"], addreg=False):
    """generate a generic pst instance.  This can be used to later fill in
    the Pst parts programmatically.

    Parameters
    ----------
    par_names : (list)
        parameter names to setup
    obs_names : (list)
        observation names to setup

    Returns
    -------
    new_pst : pyemu.Pst

    """
    if not isinstance(par_names, list):
        par_names = list(par_names)
    if not isinstance(obs_names, list):
        obs_names = list(obs_names)
    new_pst = pyemu.Pst("pest.pst", load=False)
    pargp_data = populate_dataframe(["pargp"], new_pst.pargp_fieldnames,
                                    new_pst.pargp_defaults, new_pst.pargp_dtype)
    new_pst.parameter_groups = pargp_data

    par_data = populate_dataframe(par_names, new_pst.par_fieldnames,
                                  new_pst.par_defaults, new_pst.par_dtype)
    par_data.loc[:, "parnme"] = par_names
    par_data.index = par_names
    par_data.sort_index(inplace=True)
    new_pst.parameter_data = par_data

    obs_data = populate_dataframe(obs_names, new_pst.obs_fieldnames,
                                  new_pst.obs_defaults, new_pst.obs_dtype)
    obs_data.loc[:, "obsnme"] = obs_names
    obs_data.index = obs_names
    obs_data.sort_index(inplace=True)
    new_pst.observation_data = obs_data

    new_pst.template_files = ["file.tpl"]
    new_pst.input_files = ["file.in"]
    new_pst.instruction_files = ["file.ins"]
    new_pst.output_files = ["file.out"]
    new_pst.model_command = ["model.bat"]
    new_pst.prior_information = new_pst.null_prior
    #new_pst.other_lines = ["* singular value decomposition\n", "1\n",
    #                       "{0:d} {1:15.6E}\n".format(new_pst.npar_adj, 1.0E-6),
    #                       "1 1 1\n"]
    if addreg:
        new_pst.zero_order_tikhonov()
    return new_pst
def main(argv=None):
    """Main command line interface."""
    if argv is None:
        argv = sys.argv[1:]

    cli = CommandLineTool()
    return cli.run(argv)
def get_record(self, path=None, no_pdf=False, test=False, refextract_callback=None): """Convert a record to MARCXML format. :param path: path to a record. :type path: string :param test: flag to determine if it is a test call. :type test: bool :param refextract_callback: callback to be used to extract unstructured references. It should return a marcxml formated string of the reference. :type refextract_callback: callable :returns: marcxml formated string. """ xml_doc = self.get_article(path) rec = create_record() title = self.get_title(xml_doc) if title: record_add_field(rec, '245', subfields=[('a', title)]) (journal, dummy, volume, issue, first_page, last_page, year, start_date, doi) = self.get_publication_information(xml_doc, path) if not journal: journal = self.get_article_journal(xml_doc) if start_date: record_add_field(rec, '260', subfields=[('c', start_date), ('t', 'published')]) else: record_add_field( rec, '260', subfields=[('c', time.strftime('%Y-%m-%d'))]) if doi: record_add_field(rec, '024', ind1='7', subfields=[('a', doi), ('2', 'DOI')]) license, license_url = self.get_license(xml_doc) if license and license_url: record_add_field(rec, '540', subfields=[('a', license), ('u', license_url)]) elif license_url: record_add_field(rec, '540', subfields=[('u', license_url)]) self.logger.info("Creating record: %s %s" % (path, doi)) authors = self.get_authors(xml_doc) first_author = True for author in authors: author_name = (author['surname'], author.get( 'given_name') or author.get('initials')) subfields = [('a', '%s, %s' % author_name)] if 'orcid' in author: subfields.append(('j', author['orcid'])) if 'affiliation' in author: for aff in author["affiliation"]: subfields.append(('v', aff)) if self.extract_nations: add_nations_field(subfields) if author.get('email'): subfields.append(('m', author['email'])) if first_author: record_add_field(rec, '100', subfields=subfields) first_author = False else: record_add_field(rec, '700', subfields=subfields) abstract = self.get_abstract(xml_doc) if abstract: record_add_field(rec, '520', subfields=[('a', abstract), ('9', 'Elsevier')]) record_copyright = self.get_copyright(xml_doc) if record_copyright: record_add_field(rec, '542', subfields=[('f', record_copyright)]) keywords = self.get_keywords(xml_doc) if self.CONSYN: for tag in xml_doc.getElementsByTagName('ce:collaboration'): collaboration = get_value_in_tag(tag, 'ce:text') if collaboration: record_add_field(rec, '710', subfields=[('g', collaboration)]) # We add subjects also as author keywords subjects = xml_doc.getElementsByTagName('dct:subject') for subject in subjects: for listitem in subject.getElementsByTagName('rdf:li'): keyword = xml_to_text(listitem) if keyword not in keywords: keywords.append(keyword) for keyword in keywords: record_add_field(rec, '653', ind1='1', subfields=[('a', keyword), ('9', 'author')]) journal, dummy = fix_journal_name(journal.strip(), self.journal_mappings) subfields = [] doctype = self.get_doctype(xml_doc) try: page_count = int(last_page) - int(first_page) + 1 record_add_field(rec, '300', subfields=[('a', str(page_count))]) except ValueError: # do nothing pass if doctype == 'err': subfields.append(('m', 'Erratum')) elif doctype == 'add': subfields.append(('m', 'Addendum')) elif doctype == 'pub': subfields.append(('m', 'Publisher Note')) elif doctype == 'rev': record_add_field(rec, '980', subfields=[('a', 'Review')]) if journal: subfields.append(('p', journal)) if first_page and last_page: subfields.append(('c', '%s-%s' % (first_page, last_page))) elif first_page: 
                subfields.append(('c', first_page))
            if volume:
                subfields.append(('v', volume))
            if year:
                subfields.append(('y', year))
            record_add_field(rec, '773', subfields=subfields)
            if not test:
                if license:
                    url = 'http://www.sciencedirect.com/science/article/pii/'\
                          + path.split('/')[-1][:-4]
                    record_add_field(rec, '856', ind1='4',
                                     subfields=[('u', url),
                                                ('y', 'Elsevier server')])
                    record_add_field(rec, 'FFT',
                                     subfields=[('a', path),
                                                ('t', 'INSPIRE-PUBLIC'),
                                                ('d', 'Fulltext')])
                else:
                    record_add_field(rec, 'FFT',
                                     subfields=[('a', path),
                                                ('t', 'Elsevier'),
                                                ('o', 'HIDDEN')])
                record_add_field(rec, '980', subfields=[('a', 'HEP')])
                record_add_field(rec, '980', subfields=[('a', 'Citeable')])
                record_add_field(rec, '980', subfields=[('a', 'Published')])
                self._add_references(xml_doc, rec, refextract_callback)
        else:
            licence = 'http://creativecommons.org/licenses/by/3.0/'
            record_add_field(rec, '540', subfields=[('a', 'CC-BY-3.0'),
                                                    ('u', licence)])
            if keywords:
                for keyword in keywords:
                    record_add_field(
                        rec, '653', ind1='1',
                        subfields=[('a', keyword), ('9', 'author')])
            pages = ''
            if first_page and last_page:
                pages = '{0}-{1}'.format(first_page, last_page)
            elif first_page:
                pages = first_page
            subfields = filter(lambda x: x[1] and x[1] != '-',
                               [('p', journal),
                                ('v', volume),
                                ('n', issue),
                                ('c', pages),
                                ('y', year)])
            record_add_field(rec, '773', subfields=subfields)
            if not no_pdf:
                from invenio.search_engine import perform_request_search
                # The original query had a stray trailing quote
                # ('980:DELETED"'); the quotes now balance correctly.
                query = '0247_a:"%s" AND NOT 980:"DELETED"' % (doi,)
                prev_version = perform_request_search(p=query)
                old_pdf = False
                if prev_version:
                    from invenio.bibdocfile import BibRecDocs
                    prev_rec = BibRecDocs(prev_version[0])
                    try:
                        pdf_path = prev_rec.get_bibdoc('main')
                        pdf_path = pdf_path.get_file(
                            ".pdf;pdfa", exact_docformat=True)
                        pdf_path = pdf_path.fullpath
                        old_pdf = True
                        record_add_field(rec, 'FFT',
                                         subfields=[('a', pdf_path),
                                                    ('n', 'main'),
                                                    ('f', '.pdf;pdfa')])
                        message = ('Leaving previously delivered PDF/A for: '
                                   + doi)
                        self.logger.info(message)
                    except Exception:
                        pass
                try:
                    if exists(join(path, 'main_a-2b.pdf')):
                        pdf_path = join(path, 'main_a-2b.pdf')
                        record_add_field(rec, 'FFT',
                                         subfields=[('a', pdf_path),
                                                    ('n', 'main'),
                                                    ('f', '.pdf;pdfa')])
                        self.logger.debug('Adding PDF/A to record: %s'
                                          % (doi,))
                    elif exists(join(path, 'main.pdf')):
                        pdf_path = join(path, 'main.pdf')
                        record_add_field(rec, 'FFT',
                                         subfields=[('a', pdf_path)])
                    else:
                        if not old_pdf:
                            message = "Record " + doi
                            message += " doesn't contain PDF file."
                            self.logger.warning(message)
                            raise MissingFFTError(message)
                except MissingFFTError:
                    message = "Elsevier paper: %s is missing PDF." % (doi,)
                    register_exception(alert_admin=True, prefix=message)
                version = self.get_elsevier_version(find_package_name(path))
                record_add_field(rec, '583', subfields=[('l', version)])
                xml_path = join(path, 'main.xml')
                record_add_field(rec, 'FFT', subfields=[('a', xml_path)])
                record_add_field(rec, '980', subfields=[('a', 'SCOAP3'),
                                                        ('b', 'Elsevier')])
        try:
            return record_xml_output(rec)
        except UnicodeDecodeError:
            message = "Found a bad char in the file for the article " + doi
            sys.stderr.write(message)
            return ""
Convert a record to MARCXML format. :param path: path to a record. :type path: string :param test: flag to determine if it is a test call. :type test: bool :param refextract_callback: callback to be used to extract unstructured references. It should return a marcxml formatted string of the reference. :type refextract_callback: callable :returns: marcxml formatted string.
def separate(self): '''Return contiguous parts of collection as separate collections. Return as list of :py:class:`~clique.collection.Collection` instances. ''' collections = [] start = None end = None for index in self.indexes: if start is None: start = index end = start continue if index != (end + 1): collections.append( Collection(self.head, self.tail, self.padding, indexes=set(range(start, end + 1))) ) start = index end = index if start is None: collections.append( Collection(self.head, self.tail, self.padding) ) else: collections.append( Collection(self.head, self.tail, self.padding, indexes=range(start, end + 1)) ) return collections
Return contiguous parts of collection as separate collections. Return as list of :py:class:`~clique.collection.Collection` instances.
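A minimal usage sketch for the method above, assuming clique's Collection constructor takes (head, tail, padding, indexes) as the code itself suggests; the file pattern is a made-up example:

from clique.collection import Collection

# Indexes 1-3 and 7-8 form two contiguous runs.
frames = Collection('file.', '.jpg', 4, indexes={1, 2, 3, 7, 8})
parts = frames.separate()
assert len(parts) == 2  # one collection per contiguous run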
def cover(ctx, html=False): '''Run tests suite with coverage''' params = '--cov-report term --cov-report html' if html else '' with ctx.cd(ROOT): ctx.run('pytest --cov flask_fs {0}'.format(params), pty=True)
Run tests suite with coverage
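The task above is an Invoke task, so it would typically be run from a shell at the project root; the exact CLI name depends on how the project wires up its tasks:

# Assuming the standard `inv`/`invoke` CLI:
#   inv cover            # terminal coverage report only
#   inv cover --html     # also writes an HTML report via --cov-report html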
def get_fernet(): """ Deferred load of Fernet key. This function could fail either because Cryptography is not installed or because the Fernet key is invalid. :return: Fernet object :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet """ global _fernet log = LoggingMixin().log if _fernet: return _fernet try: from cryptography.fernet import Fernet, MultiFernet, InvalidToken global InvalidFernetToken InvalidFernetToken = InvalidToken except BuiltinImportError: log.warning( "cryptography not found - values will not be stored encrypted." ) _fernet = NullFernet() return _fernet try: fernet_key = configuration.conf.get('core', 'FERNET_KEY') if not fernet_key: log.warning( "empty cryptography key - values will not be stored encrypted." ) _fernet = NullFernet() else: _fernet = MultiFernet([ Fernet(fernet_part.encode('utf-8')) for fernet_part in fernet_key.split(',') ]) _fernet.is_encrypted = True except (ValueError, TypeError) as ve: raise AirflowException("Could not create Fernet object: {}".format(ve)) return _fernet
Deferred load of Fernet key. This function could fail either because Cryptography is not installed or because the Fernet key is invalid. :return: Fernet object :raises: airflow.exceptions.AirflowException if there's a problem trying to load Fernet
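A hedged sketch of how the returned object is used; it assumes an Airflow installation with a valid FERNET_KEY configured (otherwise the NullFernet fallback passes data through unencrypted):

fernet = get_fernet()
token = fernet.encrypt(b'database password')   # bytes in, token bytes out
plaintext = fernet.decrypt(token)
assert plaintext == b'database password'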
def draw_linecollection(data, obj):
    """Returns Pgfplots code for a line collection (a bundle of paths with
    per-path color, style and width).
    """
    content = []
    edgecolors = obj.get_edgecolors()
    linestyles = obj.get_linestyles()
    linewidths = obj.get_linewidths()
    paths = obj.get_paths()

    for i, path in enumerate(paths):
        color = edgecolors[i] if i < len(edgecolors) else edgecolors[0]
        style = linestyles[i] if i < len(linestyles) else linestyles[0]
        width = linewidths[i] if i < len(linewidths) else linewidths[0]

        data, options = mypath.get_draw_options(data, obj, color, None, style, width)

        # TODO what about masks?
        data, cont, _, _ = mypath.draw_path(
            data, path, draw_options=options, simplify=False
        )
        content.append(cont + "\n")

    return data, content
Returns Pgfplots code for a line collection (a bundle of paths with per-path color, style and width).
async def start(self): """Start serving access to devices over bluetooth.""" self._command_task.start() try: await self._cleanup_old_connections() except Exception: await self.stop() raise #FIXME: This is a temporary hack, get the actual device we are serving. iotile_id = next(iter(self.adapter.devices)) self.device = self.adapter.devices[iotile_id] self._logger.info("Serving device 0x%04X over BLED112", iotile_id) await self._update_advertisement() self.setup_client(self.CLIENT_ID, scan=False, broadcast=True)
Start serving access to devices over bluetooth.
def satisfies(self, other): # type: (Term) -> bool """ Returns whether this term satisfies another. """ return ( self.dependency.name == other.dependency.name and self.relation(other) == SetRelation.SUBSET )
Returns whether this term satisfies another.
def configure_mongodb(self):
    """ Configure MongoDB """
    self._display_info("Trying default configuration")

    host = "localhost"
    database_name = "INGInious"

    should_ask = True
    if self.try_mongodb_opts(host, database_name):
        should_ask = self._ask_boolean(
            "Successfully connected to MongoDB. Do you want to edit the configuration anyway?", False)
    else:
        self._display_info("Cannot guess configuration for MongoDB.")

    while should_ask:
        self._display_question(
            "Please enter the MongoDB host. If you need to enter a password, here is the syntax:")
        self._display_question("mongodb://USERNAME:PASSWORD@HOST:PORT/AUTHENTICATION_DATABASE")
        host = self._ask_with_default("MongoDB host", host)
        database_name = self._ask_with_default("Database name", database_name)
        if not self.try_mongodb_opts(host, database_name):
            if self._ask_boolean("Cannot connect to MongoDB. Would you like to continue anyway?", False):
                break
        else:
            self._display_info("Successfully connected to MongoDB")
            break

    return {"mongo_opt": {"host": host, "database": database_name}}
Configure MongoDB
def dist(self): """Return the `Distribution` selected for Zinc based on execution strategy. :rtype: pants.java.distribution.distribution.Distribution """ underlying_dist = self.underlying_dist if self._execution_strategy != NailgunTaskBase.HERMETIC: # symlink .pants.d/.jdk -> /some/java/home/ jdk_home_symlink = os.path.relpath( os.path.join(self._zinc_factory.get_options().pants_workdir, '.jdk'), get_buildroot()) # Since this code can be run in multi-threading mode due to multiple # zinc workers, we need to make sure the file operations below is atomic. with self._lock: # Create the symlink if it does not exist if not os.path.exists(jdk_home_symlink): os.symlink(underlying_dist.home, jdk_home_symlink) # Recreate if the symlink exists but does not match `underlying_dist.home`. elif os.readlink(jdk_home_symlink) != underlying_dist.home: os.remove(jdk_home_symlink) os.symlink(underlying_dist.home, jdk_home_symlink) return Distribution(home_path=jdk_home_symlink) else: return underlying_dist
Return the `Distribution` selected for Zinc based on execution strategy. :rtype: pants.java.distribution.distribution.Distribution
def imgmin(self):
    """
    Lowest value of input image.
    """
    if not hasattr(self, '_imgmin'):
        imgmin = _np.min(self.images[0])
        for img in self.images:
            imin = _np.min(img)
            if imin < imgmin:  # track the smallest per-image minimum
                imgmin = imin
        self._imgmin = imgmin
    # Return the cached minimum; the original returned _np.min(self.image),
    # which ignored the value computed above.
    return self._imgmin
Lowest value of input image.
def get_path(self): """Gets the path to the focused statistics. Each step is a hash of statistics object. """ path = deque() __, node = self.get_focus() while not node.is_root(): stats = node.get_value() path.appendleft(hash(stats)) node = node.get_parent() return path
Gets the path to the focused statistics. Each step is a hash of statistics object.
def SetupDisplayDevice(self, type, state, percentage, energy, energy_full, energy_rate, time_to_empty, time_to_full, is_present, icon_name, warning_level): '''Convenience method to configure DisplayDevice properties This calls Set() for all properties that the DisplayDevice is defined to have, and is shorter if you have to completely set it up instead of changing just one or two properties. This is only available when mocking the 1.0 API. ''' if not self.api1: raise dbus.exceptions.DBusException( 'SetupDisplayDevice() can only be used with the 1.0 API', name=MOCK_IFACE + '.APIVersion') display_props = mockobject.objects[self.p_display_dev] display_props.Set(DEVICE_IFACE, 'Type', dbus.UInt32(type)) display_props.Set(DEVICE_IFACE, 'State', dbus.UInt32(state)) display_props.Set(DEVICE_IFACE, 'Percentage', percentage) display_props.Set(DEVICE_IFACE, 'Energy', energy) display_props.Set(DEVICE_IFACE, 'EnergyFull', energy_full) display_props.Set(DEVICE_IFACE, 'EnergyRate', energy_rate) display_props.Set(DEVICE_IFACE, 'TimeToEmpty', dbus.Int64(time_to_empty)) display_props.Set(DEVICE_IFACE, 'TimeToFull', dbus.Int64(time_to_full)) display_props.Set(DEVICE_IFACE, 'IsPresent', is_present) display_props.Set(DEVICE_IFACE, 'IconName', icon_name) display_props.Set(DEVICE_IFACE, 'WarningLevel', dbus.UInt32(warning_level))
Convenience method to configure DisplayDevice properties This calls Set() for all properties that the DisplayDevice is defined to have, and is shorter if you have to completely set it up instead of changing just one or two properties. This is only available when mocking the 1.0 API.
def save_user(self, idvalue, options=None): """ save user by a given id http://getstarted.sailthru.com/api/user """ options = options or {} data = options.copy() data['id'] = idvalue return self.api_post('user', data)
save user by a given id http://getstarted.sailthru.com/api/user
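A minimal call sketch; `client` stands in for an initialised Sailthru API client exposing this method, and the 'vars' payload is an illustrative assumption about what the user endpoint accepts:

response = client.save_user('user@example.com',
                            options={'vars': {'plan': 'pro', 'beta': True}})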
def configure_threecolor_image(self):
    """
    configures the three color image according to the requested parameters
    :return: nothing, just updates self.image
    """
    order = {'red': 0, 'green': 1, 'blue': 2}
    self.image = np.zeros((self.shape[0], self.shape[1], 3))
    for color, var in self.multicolorvars.items():
        channel = var.get()  # determine which channel should be plotted as this color
        self.image[:, :, order[color]] = self.data[channel]

        # scale the image by the power
        self.image[:, :, order[color]] = np.power(self.image[:, :, order[color]],
                                                  self.multicolorpower[color].get())

        # adjust the percentile thresholds, clamping only this channel
        # (the original indexed the full array with np.where on a 2-D mask,
        # which clipped every channel at those pixels)
        channel_data = self.image[:, :, order[color]]
        lower = np.nanpercentile(channel_data, self.multicolormin[color].get())
        upper = np.nanpercentile(channel_data, self.multicolormax[color].get())
        channel_data[channel_data < lower] = lower
        channel_data[channel_data > upper] = upper

    # image values must be between (0,1) so scale image
    for color, index in order.items():
        self.image[:, :, index] /= np.nanmax(self.image[:, :, index])
configures the three color image according to the requested parameters :return: nothing, just updates self.image
def add(self, data, conn_type, squash=True): """ Combine this tree and the data represented by data using the connector conn_type. The combine is done by squashing the node other away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of data regardless if the node other got squashed or not. If `squash` is False the data is prepared and added as a child to this tree without further logic. Args: conn_type (str, optional ["AND", "OR"]): connection method """ if data in self.children: return data if not squash: self.children.append(data) return data if self.connector == conn_type: # We can reuse self.children to append or squash the node other. if (isinstance(data, QBase) and not data.negated and (data.connector == conn_type or len(data) == 1)): # We can squash the other node's children directly into this # node. We are just doing (AB)(CD) == (ABCD) here, with the # addition that if the length of the other node is 1 the # connector doesn't matter. However, for the len(self) == 1 # case we don't want to do the squashing, as it would alter # self.connector. self.children.extend(data.children) return self else: # We could use perhaps additional logic here to see if some # children could be used for pushdown here. self.children.append(data) return data else: obj = self._new_instance(self.children, self.connector, self.negated) self.connector = conn_type self.children = [obj, data] return data
Combine this tree and the data represented by `data` using the connector conn_type. The combine is done by squashing the node `data` away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of `data`, regardless of whether or not it got squashed. If `squash` is False the data is prepared and added as a child to this tree without further logic. Args: conn_type (str, optional ["AND", "OR"]): connection method
def get_all_dhcp_options(self, dhcp_options_ids=None): """ Retrieve information about your DhcpOptions. :type dhcp_options_ids: list :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's :rtype: list :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions` """ params = {} if dhcp_options_ids: self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId') return self.get_list('DescribeDhcpOptions', params, [('item', DhcpOptions)])
Retrieve information about your DhcpOptions. :type dhcp_options_ids: list :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's :rtype: list :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
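Sketch of both call styles against boto's VPC connection; the region and the DhcpOptions ID are placeholders:

import boto.vpc

conn = boto.vpc.connect_to_region('us-east-1')
all_options = conn.get_all_dhcp_options()
one_option = conn.get_all_dhcp_options(dhcp_options_ids=['dopt-12345678'])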
def adopt(self, grab): """ Copy the state of another `Grab` instance. Use case: create backup of current state to the cloned instance and then restore the state from it. """ self.load_config(grab.config) self.doc = grab.doc.copy(new_grab=self) for key in self.clonable_attributes: setattr(self, key, getattr(grab, key)) self.cookies = deepcopy(grab.cookies)
Copy the state of another `Grab` instance. Use case: create backup of current state to the cloned instance and then restore the state from it.
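Sketch of the backup/restore use case described in the docstring; the URL is a placeholder:

from grab import Grab

g = Grab()
g.go('http://example.com/')

backup = Grab()
backup.adopt(g)  # backup now carries g's config, cookies and document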
def dropdb(self, name): ''' Deletes an **entire database** (i.e. a table), losing all data. ''' if self.readonly: raise s_exc.IsReadOnly() while True: try: if not self.dbexists(name): return db = self.initdb(name) self.dirty = True self.xact.drop(db.db, delete=True) self.forcecommit() return except lmdb.MapFullError: self._handle_mapfull()
Deletes an **entire database** (i.e. a table), losing all data.
def assign(self, node): """ Translate an assign node into SQLQuery. :param node: a treebrd node :return: a SQLQuery object for the tree rooted at node """ child_object = self.translate(node.child) child_object.prefix = 'CREATE TEMPORARY TABLE {name}({attributes}) AS '\ .format(name=node.name, attributes=', '.join(node.attributes.names)) return child_object
Translate an assign node into SQLQuery. :param node: a treebrd node :return: a SQLQuery object for the tree rooted at node
def refresh(self): """ Updates this drive with data from the server :return: Success / Failure :rtype: bool """ if self.object_id is None: url = self.build_url(self._endpoints.get('default_drive')) else: url = self.build_url( self._endpoints.get('get_drive').format(id=self.object_id)) response = self.con.get(url) if not response: return False drive = response.json() self._update_data({self._cloud_data_key: drive}) return True
Updates this drive with data from the server :return: Success / Failure :rtype: bool
def do_EOF(self, args): """Exit on system end of file character""" if _debug: ConsoleCmd._debug("do_EOF %r", args) return self.do_exit(args)
Exit on system end of file character
def is_same_as(self, other_databox, headers=True, columns=True, header_order=True, column_order=True, ckeys=True): """ Tests that the important (i.e. savable) information in this databox is the same as that of the other_databox. Parameters ---------- other_databox Databox with which to compare. headers=True Make sure all header elements match. columns=True Make sure every element of every column matches. header_order=True Whether the order of the header elements must match. column_order=True Whether the order of the columns must match. This is only a sensible concern if ckeys=True. ckeys=True Whether the actual ckeys matter, or just the ordered columns of data. Note the == symbol runs this function with everything True. """ d = other_databox if not hasattr(other_databox, '_is_spinmob_databox'): return False # Proceed by testing things one at a time, returning false if one fails if headers: # Same number of elements if not len(self.hkeys) == len(d.hkeys): return False # Elements if header_order and not self.hkeys == d.hkeys: return False # Each value for k in self.hkeys: # Make sure the key exists if not k in d.hkeys: return False # Make sure it's the same. if not self.h(k) == d.h(k): return False if columns: # Same number of columns if not len(self.ckeys) == len(d.ckeys): return False # If we're checking columns by ckeys if ckeys: # Columns if column_order and not self.ckeys == d.ckeys: return False # Each value of each array for k in self.ckeys: # Make sure the key exists if not k in d.ckeys: return False # Check the values if not (_n.array(self[k]) == _n.array(d[k])).all(): return False # Otherwise we're ignoring ckeys else: for n in range(len(self.ckeys)): if not (_n.array(self[n]) == _n.array(d[n])).all(): return False # Passes all tests return True
Tests that the important (i.e. savable) information in this databox is the same as that of the other_databox. Parameters ---------- other_databox Databox with which to compare. headers=True Make sure all header elements match. columns=True Make sure every element of every column matches. header_order=True Whether the order of the header elements must match. column_order=True Whether the order of the columns must match. This is only a sensible concern if ckeys=True. ckeys=True Whether the actual ckeys matter, or just the ordered columns of data. Note the == symbol runs this function with everything True.
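A short sketch with two spinmob databoxes; the file names are placeholders and the loading helper is assumed to be spinmob's data.load:

import spinmob as sm

d1 = sm.data.load('run1.dat')
d2 = sm.data.load('run2.dat')

# Compare data while ignoring the order of header entries.
if d1.is_same_as(d2, header_order=False):
    print('same headers and columns (headers possibly reordered)')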
def is_allowed(func):
    """Check the user's password; if it is correct, run the decorated function.

    :returns: decorated function
    """
    @wraps(func)
    def _is_allowed(user, *args, **kwargs):
        password = kwargs.pop('password', None)
        if user.check_password(password):
            return func(user, *args, **kwargs)
        else:
            raise NotAllowedError()

    # add password parameter to function signature
    sig = inspect.signature(func)
    parms = list(sig.parameters.values())
    parms.append(inspect.Parameter('password',
                                   inspect.Parameter.KEYWORD_ONLY,
                                   default=None))
    _is_allowed.__signature__ = sig.replace(parameters=parms)
    return _is_allowed
Check the user's password; if it is correct, run the decorated function. :returns: decorated function
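How the decorator reads at a call site; `user` is assumed to be an object with a check_password() method:

@is_allowed
def change_email(user, new_email):
    user.email = new_email

# `password` must be passed by keyword; the wrapper pops it before the call.
change_email(user, 'new@example.com', password='s3cret')
# raises NotAllowedError when the password does not check out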
def song(self): """ :class:`Song` object of next song to play """ song = self._connection.request( 'autoplayGetSong', {'weightModifierRange': [-9, 9], 'seedArtists': dict([(artist, 'p') for artist in self._artists]), 'tagID': self._radio, 'recentArtists': self._recent_artists, 'songQueueID': self._connection.session.queue, 'secondaryArtistWeightModifier': 0.75, 'country': self._connection.session.country, 'seedArtistWeightRange': [110, 130], 'songIDsAlreadySeen': self._songs_already_seen, 'maxDuration': 1500, 'minDuration': 60, 'frowns': []}, self._connection.header('autoplayGetSong', 'jsqueue'))[1] return Song( song['SongID'], song['SongName'], song['ArtistID'], song['ArtistName'], song['AlbumID'], song['AlbumName'], song['CoverArtUrl'], None, song['EstimateDuration'], None, self._connection)
:class:`Song` object of next song to play
def _predict(self, features): """Predict matches and non-matches. Parameters ---------- features : numpy.ndarray The data to predict the class of. Returns ------- numpy.ndarray The predicted classes. """ from sklearn.exceptions import NotFittedError try: prediction = self.kernel.predict_classes(features)[:, 0] except NotFittedError: raise NotFittedError( "{} is not fitted yet. Call 'fit' with appropriate " "arguments before using this method.".format( type(self).__name__ ) ) return prediction
Predict matches and non-matches. Parameters ---------- features : numpy.ndarray The data to predict the class of. Returns ------- numpy.ndarray The predicted classes.
def to_categorical(y, nb_classes, num_classes=None):
    """
    Converts a class vector (integers) to binary class matrix.
    This is adapted from the Keras function with the same name.
    :param y: class vector to be converted into a matrix
              (integers from 0 to nb_classes).
    :param nb_classes: total number of classes.
    :param num_classes: deprecated alias for nb_classes
    :return: A binary matrix representation of the input.
    """
    if num_classes is not None:
        if nb_classes is not None:
            raise ValueError("Should not specify both nb_classes and its deprecated "
                             "alias, num_classes")
        warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
                      " `num_classes` may be removed on or after 2019-04-23.")
        nb_classes = num_classes
        del num_classes
    y = np.array(y, dtype='int').ravel()
    n = y.shape[0]
    categorical = np.zeros((n, nb_classes))
    categorical[np.arange(n), y] = 1
    return categorical
Converts a class vector (integers) to binary class matrix. This is adapted from the Keras function with the same name. :param y: class vector to be converted into a matrix (integers from 0 to nb_classes). :param nb_classes: total number of classes. :param num_classes: deprecated alias for nb_classes :return: A binary matrix representation of the input.
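Worked example of the one-hot encoding produced by the function above:

to_categorical([0, 2, 1], 3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])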
def register_pubkey(self): """ XXX Check that the pubkey received is in the group. """ p = pkcs_os2ip(self.dh_p) g = pkcs_os2ip(self.dh_g) pn = dh.DHParameterNumbers(p, g) y = pkcs_os2ip(self.dh_Ys) public_numbers = dh.DHPublicNumbers(y, pn) s = self.tls_session s.server_kx_pubkey = public_numbers.public_key(default_backend()) if not s.client_kx_ffdh_params: s.client_kx_ffdh_params = pn.parameters(default_backend())
XXX Check that the pubkey received is in the group.
def url_read_text(url, verbose=True): r""" Directly reads text data from url """ data = url_read(url, verbose) text = data.decode('utf8') return text
Directly reads text data from url
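One-line sketch; the URL is a placeholder and the call relies on the module's own url_read helper shown in the code:

text = url_read_text('https://www.example.com/robots.txt')
print(text.splitlines()[0])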
def manage_service_check_result_brok(self, b): # pylint: disable=too-many-branches """A service check result brok has just arrived ...""" host_name = b.data.get('host_name', None) service_description = b.data.get('service_description', None) if not host_name or not service_description: return service_id = host_name+"/"+service_description logger.debug("service check result: %s", service_id) # If host and service initial status broks have not been received, ignore ... if not self.ignore_unknown and host_name not in self.hosts_cache: logger.warning("received service check result for an unknown host: %s", service_id) return if service_id not in self.services_cache and not self.ignore_unknown: logger.warning("received service check result for an unknown service: %s", service_id) return # Decode received metrics metrics = self.get_metrics_from_perfdata(service_description, b.data['perf_data']) if not metrics: logger.debug("no metrics to send ...") return # If checks latency is ignored if self.ignore_latency_limit >= b.data['latency'] > 0: check_time = int(b.data['last_chk']) - int(b.data['latency']) else: check_time = int(b.data['last_chk']) # Custom hosts variables hname = sanitize_name(host_name) if host_name in self.hosts_cache: if self.hosts_cache[host_name].get('_GRAPHITE_GROUP', None): hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_GROUP'), hname)) if self.hosts_cache[host_name].get('_GRAPHITE_PRE', None): hname = ".".join((self.hosts_cache[host_name].get('_GRAPHITE_PRE'), hname)) # Custom services variables desc = sanitize_name(service_description) if service_id in self.services_cache: if self.services_cache[service_id].get('_GRAPHITE_POST', None): desc = ".".join((desc, self.services_cache[service_id].get('_GRAPHITE_POST', None))) # Graphite data source if self.graphite_data_source: path = '.'.join((hname, self.graphite_data_source, desc)) else: path = '.'.join((hname, desc)) # Realm as a prefix if self.realms_prefix and self.hosts_cache[host_name].get('realm_name', None): path = '.'.join((self.hosts_cache[host_name].get('realm_name'), path)) realm_name = None if host_name in self.hosts_cache: realm_name = self.hosts_cache[host_name].get('realm_name', None) # Send metrics self.send_to_tsdb(realm_name, host_name, service_description, metrics, check_time, path)
A service check result brok has just arrived ...
def _get_qe(self, key, obj): """Instantiate a query engine, or retrieve a cached one. """ if key in self._cached: return self._cached[key] qe = create_query_engine(obj, self._class) self._cached[key] = qe return qe
Instantiate a query engine, or retrieve a cached one.
def get_assessment_part_ids_by_banks(self, bank_ids): """Gets the list of ``AssessmentPart Ids`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.id.IdList) - list of assessment part ``Ids`` raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resource_ids_by_bins id_list = [] for assessment_part in self.get_assessment_parts_by_banks(bank_ids): id_list.append(assessment_part.get_id()) return IdList(id_list)
Gets the list of ``AssessmentPart Ids`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.id.IdList) - list of assessment part ``Ids`` raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def public_key_sec(self): """Return the public key as sec, or None in case of failure.""" if self.is_coinbase(): return None opcodes = ScriptTools.opcode_list(self.script) if len(opcodes) == 2 and opcodes[0].startswith("[30"): # the second opcode is probably the public key as sec sec = h2b(opcodes[1][1:-1]) return sec return None
Return the public key as sec, or None in case of failure.
def read(self, filename=None): """Read and parse index file *filename*.""" self._init_filename(filename) data = odict() with open(self.real_filename) as ndx: current_section = None for line in ndx: line = line.strip() if len(line) == 0: continue m = self.SECTION.match(line) if m: current_section = m.group('name') data[current_section] = [] # can fail if name not legal python key continue if current_section is not None: data[current_section].extend(map(int, line.split())) super(NDX,self).update(odict([(name, self._transform(atomnumbers)) for name, atomnumbers in data.items()]))
Read and parse index file *filename*.
def draw(self): """Do not call directly.""" if self.hidden: return False if self.background_color is not None: render.fillrect(self.surface, self.background_color, rect=pygame.Rect((0, 0), self.frame.size)) for child in self.children: if not child.hidden: child.draw() topleft = child.frame.topleft if child.shadowed: shadow_size = theme.current.shadow_size shadow_topleft = (topleft[0] - shadow_size // 2, topleft[1] - shadow_size // 2) self.surface.blit(child.shadow_image, shadow_topleft) self.surface.blit(child.surface, topleft) if child.border_color and child.border_widths is not None: if (type(child.border_widths) is int and child.border_widths > 0): pygame.draw.rect(self.surface, child.border_color, child.frame, child.border_widths) else: tw, lw, bw, rw = child.get_border_widths() tl = (child.frame.left, child.frame.top) tr = (child.frame.right - 1, child.frame.top) bl = (child.frame.left, child.frame.bottom - 1) br = (child.frame.right - 1, child.frame.bottom - 1) if tw > 0: pygame.draw.line(self.surface, child.border_color, tl, tr, tw) if lw > 0: pygame.draw.line(self.surface, child.border_color, tl, bl, lw) if bw > 0: pygame.draw.line(self.surface, child.border_color, bl, br, bw) if rw > 0: pygame.draw.line(self.surface, child.border_color, tr, br, rw) return True
Do not call directly.
def set(self, column, value, useMethod=True, **context): """ Sets the value for this record at the inputted column name. If the columnName provided doesn't exist within the schema, then the ColumnNotFound error will be raised. :param columnName | <str> value | <variant> :return <bool> changed """ col = self.schema().column(column, raise_=False) if col is None: # allow setting of collections as well collector = self.schema().collector(column) if collector: my_context = self.context() for k, v in my_context.raw_values.items(): if k not in orb.Context.QueryFields: context.setdefault(k, v) sub_context = orb.Context(**context) method = collector.settermethod() if method and useMethod: return method(self, value, context=sub_context) else: records = self.get(collector.name(), context=sub_context) records.update(value, useMethod=useMethod, context=sub_context) # remove any preloaded values from the collector self.__preload.pop(collector.name(), None) return records else: raise errors.ColumnNotFound(schema=self.schema(), column=column) elif col.testFlag(col.Flags.ReadOnly): raise errors.ColumnReadOnly(schema=self.schema(), column=column) context = self.context(**context) if useMethod: method = col.settermethod() if method: keywords = list(funcutil.extract_keywords(method)) if 'locale' in keywords: return method(self, value, locale=context.locale) else: return method(self, value) if self.isRecord() and self.__delayed: self.__delayed = False self.read() with WriteLocker(self.__dataLock): orig, curr = self.__values.get(col.name(), (None, None)) value = col.store(value, context) # update the context based on the locale value if col.testFlag(col.Flags.I18n) and isinstance(curr, dict) and isinstance(value, dict): new_value = curr.copy() new_value.update(value) value = new_value try: change = curr != value except TypeError: change = True if change: self.__values[col.name()] = (orig, value) # broadcast the change event if change: if col.testFlag(col.Flags.I18n) and context.locale != 'all': old_value = curr.get(context.locale) if isinstance(curr, dict) else curr new_value = value.get(context.locale) if isinstance(value, dict) else value else: old_value = curr new_value = value event = orb.events.ChangeEvent(record=self, column=col, old=old_value, value=new_value) if self.processEvent(event): self.onChange(event) if event.preventDefault: with WriteLocker(self.__dataLock): orig, _ = self.__values.get(col.name(), (None, None)) self.__values[col.name()] = (orig, curr) return False else: return change else: return False
Sets the value for this record at the inputted column name. If the columnName provided doesn't exist within the schema, then the ColumnNotFound error will be raised. :param columnName | <str> value | <variant> :return <bool> changed
def dump_xearth_markers(markers, name='identifier'): """Generate an Xearth compatible marker file. ``dump_xearth_markers()`` writes a simple Xearth_ marker file from a dictionary of :class:`trigpoints.Trigpoint` objects. It expects a dictionary in one of the following formats. For support of :class:`Trigpoint` that is:: {500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"), 501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"), 505392: Trigpoint(51.910886, -0.186462, 136.0, "Sish Lane")} And generates output of the form:: 52.066035 -0.281449 "500936" # Broom Farm, alt 37m 52.010585 -0.173443 "501097" # Bygrave, alt 97m 51.910886 -0.186462 "205392" # Sish Lane, alt 136m Or similar to the following if the ``name`` parameter is set to ``name``:: 52.066035 -0.281449 "Broom Farm" # 500936 alt 37m 52.010585 -0.173443 "Bygrave" # 501097 alt 97m 51.910886 -0.186462 "Sish Lane" # 205392 alt 136m Point objects should be provided in the following format:: {"Broom Farm": Point(52.066035, -0.281449), "Bygrave": Point(52.010585, -0.173443), "Sish Lane": Point(51.910886, -0.186462)} And generates output of the form:: 52.066035 -0.281449 "Broom Farm" 52.010585 -0.173443 "Bygrave" 51.910886 -0.186462 "Sish Lane" Note: xplanet_ also supports xearth marker files, and as such can use the output from this function. See also: upoints.xearth.Xearths.import_locations Args: markers (dict): Dictionary of identifier keys, with :class:`Trigpoint` values name (str): Value to use as Xearth display string Returns: list: List of strings representing an Xearth marker file Raises: ValueError: Unsupported value for ``name`` .. _xearth: http://hewgill.com/xearth/original/ .. _xplanet: http://xplanet.sourceforge.net/ """ output = [] for identifier, point in markers.items(): line = ['%f %f ' % (point.latitude, point.longitude), ] if hasattr(point, 'name') and point.name: if name == 'identifier': line.append('"%s" # %s' % (identifier, point.name)) elif name == 'name': line.append('"%s" # %s' % (point.name, identifier)) elif name == 'comment': line.append('"%s" # %s' % (identifier, point.comment)) else: raise ValueError('Unknown name type %r' % name) if hasattr(point, 'altitude') and point.altitude: line.append(', alt %im' % point.altitude) else: line.append('"%s"' % identifier) output.append(''.join(line)) # Return the list sorted on the marker name return sorted(output, key=lambda x: x.split()[2])
Generate an Xearth compatible marker file.

``dump_xearth_markers()`` writes a simple Xearth_ marker file from
a dictionary of :class:`trigpoints.Trigpoint` objects.

It expects a dictionary in one of the following formats. For support of
:class:`Trigpoint` that is::

    {500936: Trigpoint(52.066035, -0.281449, 37.0, "Broom Farm"),
     501097: Trigpoint(52.010585, -0.173443, 97.0, "Bygrave"),
     505392: Trigpoint(51.910886, -0.186462, 136.0, "Sish Lane")}

And generates output of the form::

    52.066035 -0.281449 "500936" # Broom Farm, alt 37m
    52.010585 -0.173443 "501097" # Bygrave, alt 97m
    51.910886 -0.186462 "505392" # Sish Lane, alt 136m

Or similar to the following if the ``name`` parameter is set to ``name``::

    52.066035 -0.281449 "Broom Farm" # 500936 alt 37m
    52.010585 -0.173443 "Bygrave" # 501097 alt 97m
    51.910886 -0.186462 "Sish Lane" # 505392 alt 136m

Point objects should be provided in the following format::

    {"Broom Farm": Point(52.066035, -0.281449),
     "Bygrave": Point(52.010585, -0.173443),
     "Sish Lane": Point(51.910886, -0.186462)}

And generates output of the form::

    52.066035 -0.281449 "Broom Farm"
    52.010585 -0.173443 "Bygrave"
    51.910886 -0.186462 "Sish Lane"

Note:
    xplanet_ also supports xearth marker files, and as such can use the
    output from this function.

See also:
    upoints.xearth.Xearths.import_locations

Args:
    markers (dict): Dictionary of identifier keys, with :class:`Trigpoint`
        values
    name (str): Value to use as Xearth display string

Returns:
    list: List of strings representing an Xearth marker file

Raises:
    ValueError: Unsupported value for ``name``

.. _xearth: http://hewgill.com/xearth/original/
.. _xplanet: http://xplanet.sourceforge.net/
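Sketch reusing the docstring's own data; Trigpoint is assumed importable from the package's trigpoints module:

markers = {
    500936: Trigpoint(52.066035, -0.281449, 37.0, 'Broom Farm'),
    501097: Trigpoint(52.010585, -0.173443, 97.0, 'Bygrave'),
}
for line in dump_xearth_markers(markers, name='name'):
    print(line)
# 52.066035 -0.281449 "Broom Farm" # 500936, alt 37m
# 52.010585 -0.173443 "Bygrave" # 501097, alt 97m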
def git_checkout(repo_dir, ref, branch=None): """Do a git checkout of `ref` in `repo_dir`. If branch is specified it should be the name of the new branch. """ command = ['git', 'checkout', '--force'] if branch: command.extend(['-B', '{}'.format(branch)]) command.append(ref) return execute_git_command(command, repo_dir=repo_dir)
Do a git checkout of `ref` in `repo_dir`. If branch is specified it should be the name of the new branch.
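Hypothetical invocation; the repo path, ref and branch name are placeholders:

git_checkout('/srv/checkouts/myrepo', 'v1.2.0', branch='release-1.2')
# effectively runs: git checkout --force -B release-1.2 v1.2.0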
def setup(self, settings): ''' Setup redis and tldextract ''' self.extract = tldextract.TLDExtract() self.redis_conn = redis.Redis(host=settings['REDIS_HOST'], port=settings['REDIS_PORT'], db=settings.get('REDIS_DB')) try: self.redis_conn.info() self.logger.debug("Connected to Redis in ZookeeperHandler") except ConnectionError: self.logger.error("Failed to connect to Redis in ZookeeperHandler") # plugin is essential to functionality sys.exit(1)
Setup redis and tldextract
def result(self):
    """Formats the result."""
    self.__result.sort(cmp=self.__cmp, key=self.__key, reverse=self.__reverse)
    return self.__result
Formats the result.
def make_opfields(cls):
    """
    Calculate the virtualchain-required opfields dict.
    """
    # construct fields
    opfields = {}
    for opname in SERIALIZE_FIELDS.keys():
        opcode = NAME_OPCODES[opname]
        opfields[opcode] = SERIALIZE_FIELDS[opname]

    return opfields
Calculate the virtualchain-required opfields dict.
def allowed_values(self): """A tuple containing the allowed values for this Slot. The Python equivalent of the CLIPS slot-allowed-values function. """ data = clips.data.DataObject(self._env) lib.EnvSlotAllowedValues( self._env, self._cls, self._name, data.byref) return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the allowed values for this Slot. The Python equivalent of the CLIPS slot-allowed-values function.
def get_next(self, label): """Get the next section with the given label""" while self._get_current_label() != label: self._skip_section() return self._read_section()
Get the next section with the given label
def to_array(self): """ Serializes this Chat to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Chat, self).to_array() array['id'] = int(self.id) # type int array['type'] = u(self.type) # py2: type unicode, py3: type str if self.title is not None: array['title'] = u(self.title) # py2: type unicode, py3: type str if self.username is not None: array['username'] = u(self.username) # py2: type unicode, py3: type str if self.first_name is not None: array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str if self.last_name is not None: array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str if self.all_members_are_administrators is not None: array['all_members_are_administrators'] = bool(self.all_members_are_administrators) # type bool if self.photo is not None: array['photo'] = self.photo.to_array() # type ChatPhoto if self.description is not None: array['description'] = u(self.description) # py2: type unicode, py3: type str if self.invite_link is not None: array['invite_link'] = u(self.invite_link) # py2: type unicode, py3: type str if self.pinned_message is not None: array['pinned_message'] = self.pinned_message.to_array() # type Message if self.sticker_set_name is not None: array['sticker_set_name'] = u(self.sticker_set_name) # py2: type unicode, py3: type str if self.can_set_sticker_set is not None: array['can_set_sticker_set'] = bool(self.can_set_sticker_set) # type bool return array
Serializes this Chat to a dictionary. :return: dictionary representation of this object. :rtype: dict
def create_hosted_zone(self, name, caller_reference=None, comment=None): """ Creates and returns a new hosted zone. Once a hosted zone is created, its details can't be changed. :param str name: The name of the hosted zone to create. :keyword str caller_reference: A unique string that identifies the request and that allows failed create_hosted_zone requests to be retried without the risk of executing the operation twice. If no value is given, we'll generate a Type 4 UUID for you. :keyword str comment: An optional comment to attach to the zone. :rtype: tuple :returns: A tuple in the form of ``(hosted_zone, change_info)``. The ``hosted_zone`` variable contains a :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instance matching the newly created zone, and ``change_info`` is a dict with some details about the API request. """ body = xml_generators.create_hosted_zone_writer( connection=self, name=name, caller_reference=caller_reference, comment=comment ) root = self._send_request( path='hostedzone', data=body, method='POST', ) return xml_parsers.created_hosted_zone_parser( root=root, connection=self )
Creates and returns a new hosted zone. Once a hosted zone is created, its details can't be changed. :param str name: The name of the hosted zone to create. :keyword str caller_reference: A unique string that identifies the request and that allows failed create_hosted_zone requests to be retried without the risk of executing the operation twice. If no value is given, we'll generate a Type 4 UUID for you. :keyword str comment: An optional comment to attach to the zone. :rtype: tuple :returns: A tuple in the form of ``(hosted_zone, change_info)``. The ``hosted_zone`` variable contains a :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instance matching the newly created zone, and ``change_info`` is a dict with some details about the API request.
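Sketch against the route53 package's connection object; the credentials are placeholders:

import route53

conn = route53.connect(aws_access_key_id='AKIA...',
                       aws_secret_access_key='...')
zone, change_info = conn.create_hosted_zone('example.com.',
                                            comment='demo zone')
print(zone.id)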
def i18n_install(lc=None): """ Install internationalization support for the clients using the specified locale. If there is no support for the locale, the default locale will be used. As last resort, a null translator will be installed. :param lc: locale to install. If None, the system default locale will be used. """ log.debug('i18n_install( {lc} ) called.'.format(lc=lc)) if lc is None: lc = i18n_system_locale() if lc is None: log.debug('i18n_install(): installing NullTranslations') translator = gettext.NullTranslations() else: child_locales = i18n_support_locale(lc) # Call i18n_support_locale to log the supported locales log.debug('i18n_install(): installing gettext.translation(domain={domain}, localedir={localedir}, ' 'languages={languages}, fallback={fallback})'.format(domain=project.PROJECT_TITLE.lower(), localedir=i18n_get_path(), languages=child_locales, fallback=True)) translator = gettext.translation( domain=project.PROJECT_TITLE.lower(), localedir=str(i18n_get_path()), languages=child_locales, fallback=True) translator.install(names=['ngettext'])
Install internationalization support for the clients using the specified locale. If there is no support for the locale, the default locale will be used. As last resort, a null translator will be installed. :param lc: locale to install. If None, the system default locale will be used.
def from_long(self, number): """Make PCI address from long. in number of type int """ if not isinstance(number, baseinteger): raise TypeError("number can only be an instance of type baseinteger") self._call("fromLong", in_p=[number])
Make PCI address from long. in number of type int
def buckets_insert(self, bucket, project_id=None): """Issues a request to create a new bucket. Args: bucket: the name of the bucket. project_id: the project to use when inserting the bucket. Returns: A parsed bucket information dictionary. Raises: Exception if there is an error performing the operation. """ args = {'project': project_id if project_id else self._project_id} data = {'name': bucket} url = Api._ENDPOINT + (Api._BUCKET_PATH % '') return datalab.utils.Http.request(url, args=args, data=data, credentials=self._credentials)
Issues a request to create a new bucket. Args: bucket: the name of the bucket. project_id: the project to use when inserting the bucket. Returns: A parsed bucket information dictionary. Raises: Exception if there is an error performing the operation.
def get_route(self, file_id): ''' a method to retrieve route information for file on telegram api :param file_id: string with id of file in a message send to bot :return: dictionary of response details with route details in [json][result] ''' title = '%s.get_route' % self.__class__.__name__ # validate inputs input_fields = { 'file_id': file_id, } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct key word arguments request_kwargs = { 'url': '%s/getFile' % self.api_endpoint, 'data': { 'file_id': file_id } } # send request response_details = self._post_request(**request_kwargs) return response_details
a method to retrieve route information for file on telegram api :param file_id: string with id of file in a message send to bot :return: dictionary of response details with route details in [json][result]
def _equivalent_node_iterator_helper(self, node: BaseEntity, visited: Set[BaseEntity]) -> Iterable[BaseEntity]:
    """Iterate over nodes and their data that are equal to the given node, starting with the original."""
    # Annotated as Iterable[BaseEntity] (typing.Iterable) since this is a
    # generator; the original annotation claimed a single BaseEntity.
    for v in self[node]:
        if v in visited:
            continue

        if self._has_no_equivalent_edge(node, v):
            continue

        visited.add(v)
        yield v

        yield from self._equivalent_node_iterator_helper(v, visited)
Iterate over nodes and their data that are equal to the given node, starting with the original.
def monte_carlo_vol(self, ndraws=10000, rstate=None, return_overlap=True): """Using `ndraws` Monte Carlo draws, estimate the volume of the *union* of ellipsoids. If `return_overlap=True`, also returns the estimated fractional overlap with the unit cube.""" if rstate is None: rstate = np.random # Estimate volume using Monte Carlo integration. samples = [self.sample(rstate=rstate, return_q=True) for i in range(ndraws)] qsum = sum([q for (x, idx, q) in samples]) vol = 1. * ndraws / qsum * self.vol_tot if return_overlap: # Estimate the fractional amount of overlap with the # unit cube using the same set of samples. qin = sum([q * unitcheck(x) for (x, idx, q) in samples]) overlap = 1. * qin / qsum return vol, overlap else: return vol
Using `ndraws` Monte Carlo draws, estimate the volume of the *union* of ellipsoids. If `return_overlap=True`, also returns the estimated fractional overlap with the unit cube.
def read_wave(path):
    """Reads a .wav file.

    Takes the path, and returns (PCM audio data, sample rate, duration).
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1
        sample_width = wf.getsampwidth()
        assert sample_width == 2
        sample_rate = wf.getframerate()
        assert sample_rate in (8000, 16000, 32000)
        frames = wf.getnframes()
        pcm_data = wf.readframes(frames)
        duration = frames / float(sample_rate)  # true division on Python 2 as well
        return pcm_data, sample_rate, duration
Reads a .wav file. Takes the path, and returns (PCM audio data, sample rate, duration).
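Usage sketch; the .wav path is a placeholder and must satisfy the asserts (mono, 16-bit, 8/16/32 kHz):

pcm_data, sample_rate, duration = read_wave('speech.wav')
print('%.2f s of audio at %d Hz' % (duration, sample_rate))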
def bytes_available(device): """ Determines the number of bytes available for reading from an AlarmDecoder device :param device: the AlarmDecoder device :type device: :py:class:`~alarmdecoder.devices.Device` :returns: int """ bytes_avail = 0 if isinstance(device, alarmdecoder.devices.SerialDevice): if hasattr(device._device, "in_waiting"): bytes_avail = device._device.in_waiting else: bytes_avail = device._device.inWaiting() elif isinstance(device, alarmdecoder.devices.SocketDevice): bytes_avail = 4096 return bytes_avail
Determines the number of bytes available for reading from an AlarmDecoder device :param device: the AlarmDecoder device :type device: :py:class:`~alarmdecoder.devices.Device` :returns: int
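Sketch with an AlarmDecoder serial device; the port and baud rate are assumptions:

import alarmdecoder.devices

device = alarmdecoder.devices.SerialDevice(interface='/dev/ttyUSB0')
device.open(baudrate=115200)

n = bytes_available(device)  # serial: bytes waiting; socket: fixed 4096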