code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def post(self, request, *args, **kwargs):
    """Validate the incoming event payload and hand it off for processing.

    Returns a 400 response when validation fails or the event is not
    accepted, and a 200 response otherwise.
    """
    serializer = EventSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(
            {"accepted": False, "reason": serializer.errors}, status=400
        )

    payload = serializer.validated_data
    # Translate the wire-level event name into the internal one; unknown
    # names map to None and are left for process_event to reject.
    type_map = {
        "ack": "ack",
        "nack": "nack",
        "delivery_report": "delivery_succeeded",
    }
    internal_type = type_map.get(payload["event_type"])

    accepted, reason = process_event(
        payload["user_message_id"],
        internal_type,
        payload["nack_reason"],
        payload["timestamp"],
    )
    status = 200 if accepted else 400
    return Response({"accepted": accepted, "reason": reason}, status=status)
Checks for expected event types before continuing
def catalogFactory(name, **kwargs):
    """Factory for various catalogs.

    Looks up ``name`` among the catalog classes defined in this module and
    instantiates it with ``**kwargs``.

    :param name: class name of the catalog to instantiate
    :param kwargs: forwarded to the catalog constructor
    :raises Exception: if ``name`` does not match any catalog class
    :return: the instantiated catalog object
    """
    fn = lambda member: inspect.isclass(member) and member.__module__ == __name__
    catalogs = odict(inspect.getmembers(sys.modules[__name__], fn))
    if name not in catalogs:
        # BUG FIX: the original message referenced the undefined name
        # `kernels`, raising a NameError instead of the intended error.
        # Report the catalogs that actually exist.
        msg = "%s not found in catalogs:\n %s" % (name, list(catalogs.keys()))
        logger.error(msg)
        raise Exception("Unrecognized catalog: %s" % name)
    return catalogs[name](**kwargs)
Factory for various catalogs.
def _Login():
    """Login to retrieve bearer token and set default account and location aliases.

    Reads the V2 credentials from ``clc.v2`` and stores the resulting bearer
    token, account alias and location alias on the ``clc`` module.

    :raises clc.APIV2NotEnabled: when username/password are not configured
    :raises Exception: on invalid credentials or any other API error
    """
    if not clc.v2.V2_API_USERNAME or not clc.v2.V2_API_PASSWD:
        clc.v1.output.Status('ERROR', 3, 'V2 API username and password not provided')
        raise(clc.APIV2NotEnabled)

    session = clc._REQUESTS_SESSION
    session.headers['content-type'] = "application/json"
    r = session.request(
        "POST",
        "%s/v2/%s" % (clc.defaults.ENDPOINT_URL_V2, "authentication/login"),
        json={"username": clc.v2.V2_API_USERNAME, "password": clc.v2.V2_API_PASSWD},
        verify=API._ResourcePath('clc/cacert.pem'))

    if r.status_code == 200:
        # Parse the response body once instead of re-parsing it per field.
        body = r.json()
        clc._LOGIN_TOKEN_V2 = body['bearerToken']
        clc.ALIAS = body['accountAlias']
        clc.LOCATION = body['locationAlias']
    elif r.status_code == 400:
        raise(Exception("Invalid V2 API login. %s" % (r.json()['message'])))
    else:
        raise(Exception("Error logging into V2 API. Response code %s. message %s"
                        % (r.status_code, r.json()['message'])))
Login to retrieve bearer token and set default account and location aliases.
def cycles(cls, **kwargs):
    """
    Classmethod for convenience in returning both the sunrise and sunset
    based on a location and date. Always calculates the sunrise and sunset
    on the given date, no matter the time passed into the function in the
    datetime object.

    Parameters:
        loc = Location4D (object)
        OR
        point = Shapely point (object)
        time = datetime in UTC (object)
        OR
        lat = latitude (float)
        lon = longitude (float)
        time = datetime in UTC (object)

    Returns:
        { 'sunrise': datetime in UTC, 'sunset': datetime in UTC }

    Sources:
        http://williams.best.vwh.net/sunrise_sunset_example.htm
    """
    if "loc" not in kwargs:
        if "point" not in kwargs:
            if "lat" not in kwargs or "lon" not in kwargs:
                raise ValueError("You must supply some form of lat/lon coordinates")
            else:
                lat = kwargs.get("lat")
                lon = kwargs.get("lon")
        else:
            lat = kwargs.get("point").y
            lon = kwargs.get("point").x
        if "time" not in kwargs:
            raise ValueError("You must supply a datetime object")
        else:
            time = kwargs.get("time")
    else:
        lat = kwargs.get("loc").latitude
        lon = kwargs.get("loc").longitude
        time = kwargs.get("loc").time

    # Convert time to UTC. Save passed in timezone to return later.
    if time.tzinfo is None:
        time = time.replace(tzinfo=pytz.utc)
        original_zone = pytz.utc
    else:
        original_zone = time.tzinfo

    local_jd = time.timetuple().tm_yday
    utc_jd = time.astimezone(pytz.utc).timetuple().tm_yday

    # We ALWAYS want to return the sunrise/sunset for the day that was passed
    # in (with timezone accounted for), regardless of what the UTC day is.
    # Modify the UTC julian day here if need be.
    # BUG FIX: the original used the Python 2-only builtin cmp(), which was
    # removed in Python 3; use explicit comparisons instead.
    if utc_jd > local_jd:
        utc_jd -= 1
    elif utc_jd < local_jd:
        utc_jd += 1

    time = time.replace(hour=0, minute=0, second=0, microsecond=0)

    rising_h, rising_m = cls._calc(jd=utc_jd, lat=lat, lon=lon, stage=cls.RISING)
    setting_h, setting_m = cls._calc(jd=utc_jd, lat=lat, lon=lon, stage=cls.SETTING)

    # _calc returns UTC hours and minutes, so assume time is in UTC for a few lines...
    rising = time.replace(tzinfo=pytz.utc) + timedelta(hours=rising_h, minutes=rising_m)
    setting = time.replace(tzinfo=pytz.utc) + timedelta(hours=setting_h, minutes=setting_m)

    # LOOK: We may be adding 24 hours to the setting time. Why?
    if setting < rising:
        setting = setting + timedelta(hours=24)

    rising = rising.astimezone(original_zone)
    setting = setting.astimezone(original_zone)

    return { cls.RISING : rising, cls.SETTING : setting}
Classmethod for convenience in returning both the sunrise and sunset based on a location and date. Always calculates the sunrise and sunset on the given date, no matter the time passed into the function in the datetime object. Parameters: loc = Location4D (object) OR point = Shapely point (object) time = datetime in UTC (object) OR lat = latitude (float) lon = longitude (float) time = datetime in UTC (object) Returns: { 'sunrise': datetime in UTC, 'sunset': datetime in UTC } Sources: http://williams.best.vwh.net/sunrise_sunset_example.htm
def min(self):
    """
    Returns the minimum value of the domain.

    Finite minima are coerced to ``int``; an infinite minimum is
    returned unchanged.

    :rtype: `float` or `np.inf`
    """
    lower = self._min
    if np.isinf(lower):
        return lower
    return int(lower)
Returns the minimum value of the domain. :rtype: `float` or `np.inf`
async def _execute(
    self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes
) -> None:
    """Executes this request with the given output transforms.

    Drives the full handler lifecycle: method check, path-argument
    decoding, XSRF validation, ``prepare()``, the HTTP-verb method itself,
    and ``finish()``, routing any exception to the handler's exception
    hook.
    """
    self._transforms = transforms
    try:
        # Reject HTTP verbs this handler does not implement.
        if self.request.method not in self.SUPPORTED_METHODS:
            raise HTTPError(405)
        # Decode positional and named path captures from the URL route.
        self.path_args = [self.decode_argument(arg) for arg in args]
        self.path_kwargs = dict(
            (k, self.decode_argument(v, name=k)) for (k, v) in kwargs.items()
        )
        # If XSRF cookies are turned on, reject form submissions without
        # the proper cookie
        if self.request.method not in (
            "GET",
            "HEAD",
            "OPTIONS",
        ) and self.application.settings.get("xsrf_cookies"):
            self.check_xsrf_cookie()

        # prepare() may be sync or async; await only when it returned an
        # awaitable.
        result = self.prepare()
        if result is not None:
            result = await result
        if self._prepared_future is not None:
            # Tell the Application we've finished with prepare()
            # and are ready for the body to arrive.
            future_set_result_unless_cancelled(self._prepared_future, None)
        if self._finished:
            return

        if _has_stream_request_body(self.__class__):
            # In streaming mode request.body is a Future that signals
            # the body has been completely received. The Future has no
            # result; the data has been passed to self.data_received
            # instead.
            try:
                await self.request._body_future
            except iostream.StreamClosedError:
                return

        # Dispatch to the verb-named method (get/post/...), awaiting it
        # when it is a coroutine.
        method = getattr(self, self.request.method.lower())
        result = method(*self.path_args, **self.path_kwargs)
        if result is not None:
            result = await result
        if self._auto_finish and not self._finished:
            self.finish()
    except Exception as e:
        try:
            self._handle_request_exception(e)
        except Exception:
            app_log.error("Exception in exception handler", exc_info=True)
    finally:
        # Unset result to avoid circular references
        result = None
        if self._prepared_future is not None and not self._prepared_future.done():
            # In case we failed before setting _prepared_future, do it
            # now (to unblock the HTTP server). Note that this is not
            # in a finally block to avoid GC issues prior to Python 3.4.
            self._prepared_future.set_result(None)
Executes this request with the given output transforms.
def len(self,resolution=1.0,units=None,conversion_function=convert_time, end_at_end=True):
    """
    Calculates the length of the Label Dimension from its minimum,
    maximum and whether it is discrete.

    `resolution`: step size used to divide the [min, max] range
    `units`: output units; when given, `resolution` is converted from
        these into the dimension's own units
    `conversion_function`: callable performing the unit conversion
    `end_at_end`: additional switch for continuous behaviour
    """
    if units is not None:
        resolution = conversion_function(resolution,from_units=units,to_units=self.units)
    else:
        units = self.units
    # No lower bound: measure from 0 up to the maximum.
    if self.min is None:
        return int(self.max / resolution)
    # No upper bound: the dimension has no measurable extent.
    if self.max is None:
        return 0
    # Continuous (united) case: intervals only, excluding the final point.
    if units != '1' and end_at_end:
        return int(np.ceil((self.max - self.min) / resolution))
    # Discrete/unitless case: count both endpoints (+1).
    # NOTE(review): presumed interpretation of the +1 — confirm with callers.
    return int(np.ceil((self.max - self.min) / resolution) + 1)
Calculates the length of the Label Dimension from its minimum, maximum and whether it is discrete. `resolution`: `units`: output units `conversion_function`: `end_at_end`: additional switch for continuous behaviour
def upsert(self, name, value=None, seq=None):
    """Insert a name/value pair into the main context, or update it.

    If no entry with ``name`` exists, a new entry is appended and True
    is returned.  Otherwise the *first* matching entry has its value
    replaced (its sublist and sequence id are preserved) and False is
    returned.

    .. versionadded:: 0.1.1

    :param name: The key name of the name/value pair.
    :param value: The key value of the name/value pair. If not passed,
        then the value is assumed to be None.
    :param seq: Optional sequence id used when a new entry is created.
    :returns: True if the name/value was newly inserted, False if an
        existing entry was updated instead.
    """
    for index, entry in enumerate(self.data):
        if entry[TNAME] != name:
            continue
        # Keep the existing sublist and sequence id; only swap the value.
        self.data[index] = (name, value, entry[TLIST], entry[TSEQ])
        return False
    self.data.append((name, value, [], lib._seq(self, seq)))
    return True
Add one name/value entry to the main context of the rolne, but only if an entry with that name does not already exist. If an entry with name exists, then the first entry found has its value changed. NOTE: the upsert only updates the FIRST entry with the name found. The method returns True if an insertion occurs, otherwise False. Example of use: >>> # setup an example rolne first >>> my_var = rolne() >>> my_var.upsert("item", "zing") True >>> my_var["item", "zing"].append("color", "blue") >>> print my_var %rolne: item = zing color = blue <BLANKLINE> >>> my_var.upsert("item", "zing") False >>> print my_var %rolne: item = zing color = blue <BLANKLINE> >>> my_var.upsert("item", "broom") False >>> print my_var %rolne: item = broom color = blue <BLANKLINE> .. versionadded:: 0.1.1 :param name: The key name of the name/value pair. :param value: The key value of the name/value pair. If not passed, then the value is assumed to be None. :returns: Returns True if the name/value was newly inserted. Otherwise, it returns False indicating that an update was done instead.
def run_forever(self):
    """
    This method is used to run the websocket app continuously.
    It will execute callbacks as defined and try to stay connected
    with the provided APIs.

    Nodes are taken round-robin from ``self.urls``; reconnection uses a
    linear backoff capped at 10 seconds, and gives up with
    ``NumRetriesReached`` once ``self.num_retries`` is exceeded
    (a negative ``num_retries`` means retry forever).
    """
    cnt = 0
    while True:
        cnt += 1
        # Rotate to the next configured node.
        self.url = next(self.urls)
        log.debug("Trying to connect to node %s" % self.url)
        try:
            # websocket.enableTrace(True)
            self.ws = websocket.WebSocketApp(
                self.url,
                on_message=self.on_message,
                # on_data=self.on_message,
                on_error=self.on_error,
                on_close=self.on_close,
                on_open=self.on_open,
            )
            # Blocks until the connection drops or errors out.
            self.ws.run_forever()
        except websocket.WebSocketException as exc:
            if self.num_retries >= 0 and cnt > self.num_retries:
                raise NumRetriesReached()
            # Linear backoff, capped at 10 seconds after 10 attempts.
            sleeptime = (cnt - 1) * 2 if cnt < 10 else 10
            if sleeptime:
                log.warning(
                    "Lost connection to node during wsconnect(): %s (%d/%d) "
                    % (self.url, cnt, self.num_retries)
                    + "Retrying in %d seconds" % sleeptime
                )
                time.sleep(sleeptime)
        except KeyboardInterrupt:
            # Stop the socket loop and propagate the interrupt.
            self.ws.keep_running = False
            raise
        except Exception as e:
            log.critical("{}\n\n{}".format(str(e), traceback.format_exc()))
This method is used to run the websocket app continuously. It will execute callbacks as defined and try to stay connected with the provided APIs
def __populate_repositories_of_interest(self, username):
    """Collect the repositories used as a basis for suggestions.

    Two pools are gathered:

    1. Repositories starred by the user him/herself.
    2. When ``deep_dive`` is enabled, repositories starred by the users
       followed by the user.

    :param username: Username for the user for whom repositories are
        being suggested for.
    """
    # Handle to the user to whom repositories need to be suggested.
    target_user = self.github.get_user(username)

    # Pool 1: the user's own stars.
    self.user_starred_repositories.extend(target_user.get_starred())

    if not self.deep_dive:
        return

    # Pool 2: stars of every account the user follows.
    for followee in target_user.get_following():
        self.user_following_starred_repositories.extend(followee.get_starred())
Method to populate repositories which will be used to suggest repositories for the user. For this purpose we use two kinds of repositories. 1. Repositories starred by user him/herself. 2. Repositories starred by the users followed by the user. :param username: Username for the user for whom repositories are being suggested for.
def get_indexes(self, default_indexes=None):
    """Return the list of indexes to act on, based on the ES_INDEXES setting.

    The doctype-specific entry is preferred, falling back to the
    ``'default'`` entry; a bare string is normalized to a one-element list.
    """
    mapping_type = self.type.get_mapping_type_name()
    configured = settings.ES_INDEXES.get(mapping_type)
    if not configured:
        configured = settings.ES_INDEXES['default']
    # The setting may be a single index name or a list of names.
    if isinstance(configured, six.string_types):
        configured = [configured]
    return super(S, self).get_indexes(default_indexes=configured)
Returns the list of indexes to act on based on ES_INDEXES setting
def create_dialog(self):
    """Create the dialog.

    Builds three panels — Info, Parameters and Options — for configuring
    slow-wave detection, wires their widgets into ``self.index`` /
    ``self.idx_*`` so other methods can read the chosen values, and lays
    everything out with the dialog's button box.
    """
    # --- Info panel: event name and channel/cycle/stage selectors -----
    box0 = QGroupBox('Info')
    self.name = FormStr()
    self.name.setText('sw')
    # Refresh the channel list whenever the channel group changes.
    self.idx_group.activated.connect(self.update_channels)
    form = QFormLayout(box0)
    form.addRow('Event name', self.name)
    form.addRow('Channel group', self.idx_group)
    form.addRow('Channel(s)', self.idx_chan)
    form.addRow('Cycle(s)', self.idx_cycle)
    form.addRow('Stage(s)', self.idx_stage)

    # --- Parameters panel: detection method and its numeric settings --
    box1 = QGroupBox('Parameters')
    mbox = QComboBox()
    method_list = SLOW_WAVE_METHODS
    for method in method_list:
        mbox.addItem(method)
    self.idx_method = mbox
    self.method = mbox.currentText()
    # Reload the per-method default values when the method changes.
    mbox.currentIndexChanged.connect(self.update_values)
    self.index['f1'] = FormFloat()
    self.index['f2'] = FormFloat()
    self.index['min_trough_dur'] = FormFloat()
    self.index['max_trough_dur'] = FormFloat()
    self.index['max_trough_amp'] = FormFloat()
    self.index['min_ptp'] = FormFloat()
    self.index['min_dur'] = FormFloat()
    self.index['max_dur'] = FormFloat()
    form = QFormLayout(box1)
    form.addRow('Method', mbox)
    form.addRow('Lowcut (Hz)', self.index['f1'])
    form.addRow('Highcut (Hz)', self.index['f2'])
    form.addRow('Min. trough duration (sec)', self.index['min_trough_dur'])
    form.addRow(' Max. trough duration (sec)', self.index['max_trough_dur'])
    form.addRow(' Max. trough amplitude (uV)', self.index['max_trough_amp'])
    form.addRow('Min. peak-to-peak amplitude (uV)', self.index['min_ptp'])
    form.addRow('Min. duration (sec)', self.index['min_dur'])
    form.addRow(' Max. duration (sec)', self.index['max_dur'])

    # --- Options panel: pre-processing and exclusion switches ---------
    box3 = QGroupBox('Options')
    self.index['detrend'] = FormBool('Detrend (linear)')
    self.index['invert'] = FormBool('Invert detection (down-then-up)')
    self.index['excl_epoch'] = FormBool('Exclude Poor signal epochs')
    self.index['excl_event'] = FormMenu(['none', 'channel-specific', 'from any channel'])
    self.index['min_seg_dur'] = FormFloat(5)
    # Defaults: exclude poor-signal epochs and detrend the signal.
    self.index['excl_epoch'].set_value(True)
    self.index['detrend'].set_value(True)
    form = QFormLayout(box3)
    form.addRow(self.index['excl_epoch'])
    form.addRow('Exclude Artefact events', self.index['excl_event'])
    form.addRow('Minimum subsegment duration', self.index['min_seg_dur'])
    form.addRow(self.index['detrend'])
    form.addRow(self.index['invert'])

    # --- Buttons and overall layout -----------------------------------
    self.bbox.clicked.connect(self.button_clicked)
    btnlayout = QHBoxLayout()
    btnlayout.addStretch(1)
    btnlayout.addWidget(self.bbox)
    vlayout = QVBoxLayout()
    vlayout.addWidget(box1)
    vlayout.addWidget(box3)
    vlayout.addStretch(1)
    vlayout.addLayout(btnlayout)
    hlayout = QHBoxLayout()
    hlayout.addWidget(box0)
    hlayout.addLayout(vlayout)
    # Populate the parameter fields with the current method's defaults.
    self.update_values()
    self.setLayout(hlayout)
Create the dialog.
def set_distribute_compositions(self, distribute_comps=None):
    """Sets the distribution rights.

    This sets distribute verbatim to ``true``.

    :param distribute_comps: right to distribute modifications
    :type distribute_comps: ``boolean``
    :raise: ``InvalidArgument`` -- ``distribute_comps`` is invalid
    :raise: ``NoAccess`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clauses: reject missing input and read-only metadata first.
    if distribute_comps is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['distribute_compositions'])
    if metadata.is_read_only():
        raise NoAccess()
    if not self._is_valid_input(distribute_comps, metadata, array=False):
        raise InvalidArgument()
    self._my_map['canDistributeCompositions'] = distribute_comps
Sets the distribution rights. This sets distribute verbatim to ``true``. :param distribute_comps: right to distribute modifications :type distribute_comps: ``boolean`` :raise: ``InvalidArgument`` -- ``distribute_comps`` is invalid :raise: ``NoAccess`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
def p_field_id(self, p):
    # NOTE: the docstring below is the PLY grammar production for this
    # rule -- ply.yacc reads it to build the parser, so it must not be
    # edited as prose.
    '''field_id : INTCONSTANT ':' | '''
    if len(p) == 3:
        if p[1] == 0:
            # Prevent users from ever using field ID 0. It's reserved for
            # internal use only.
            raise ThriftParserError(
                'Line %d: Field ID 0 is reserved for internal use.' % p.lineno(1)
            )
        p[0] = p[1]
    else:
        # Empty production: no explicit field id was given.
        p[0] = None
field_id : INTCONSTANT ':' |
def list(declared, undeclared):
    """List configured queues.

    With ``declared`` only queues that exist on the broker are shown;
    with ``undeclared`` only those that do not.  Names are printed in
    sorted order.
    """
    candidates = current_queues.queues.values()
    if declared:
        candidates = (q for q in candidates if q.exists)
    elif undeclared:
        candidates = (q for q in candidates if not q.exists)
    for routing_key in sorted(q.routing_key for q in candidates):
        click.secho(routing_key)
List configured queues.
def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
    """Removes a package from the device.

    Args:
      package_name: Package name of target package.
      keep_data: whether to keep the data and cache directories
      timeout_ms: Expected timeout for pushing and installing.

    Returns:
      The pm uninstall output.
    """
    parts = ['pm uninstall']
    if keep_data:
        parts.append('-k')
    # Quote the package name so shell metacharacters are passed through.
    parts.append('"%s"' % package_name)
    return self.Shell(' '.join(parts), timeout_ms=timeout_ms)
Removes a package from the device. Args: package_name: Package name of target package. keep_data: whether to keep the data and cache directories timeout_ms: Expected timeout for pushing and installing. Returns: The pm uninstall output.
def rpc_get_completions(self, filename, source, offset):
    """Get a list of completion candidates for the symbol at offset.

    Candidates are de-duplicated by name and returned sorted by symbol
    key.
    """
    raw = self._call_backend(
        "rpc_get_completions", [], filename, get_source(source), offset)
    # Uniquify by name (later duplicates win, as in dict construction).
    unique = {cand['name']: cand for cand in raw}
    return sorted(unique.values(), key=lambda cand: _pysymbol_key(cand["name"]))
Get a list of completion candidates for the symbol at offset.
def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any], logger: Logger) -> Dict[str, Any]:
    """ Implementation of AnyParser API.

    This parser delegates entirely to its underlying parsers, so the
    multifile-children branch of the AnyParser API is unreachable by
    design; reaching it indicates a programming error.
    """
    # Defensive guard: this code path must never be taken.
    raise Exception('This should never happen, since this parser relies on underlying parsers')
Implementation of AnyParser API
def _committors(sources, sinks, tprob):
    """Get the forward committors of the reaction sources -> sinks.

    Parameters
    ----------
    sources : array_like, int
        The set of unfolded/reactant states.
    sinks : array_like, int
        The set of folded/product states.
    tprob : np.ndarray
        Transition matrix

    Returns
    -------
    forward_committors : np.ndarray
        The forward committors for the reaction sources -> sinks

    References
    ----------
    .. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
        transition paths. J. Stat. Phys. 123, 503-523 (2006).
    .. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
        Transition path theory for Markov jump processes.
        Multiscale Model. Simul. 7, 1192-1219 (2009).
    .. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
        flux and folding pathways in network models of
        coarse-grained protein dynamics. J. Chem. Phys.
        130, 205102 (2009).
    .. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of
        folding pathways from short off-equilibrium simulations." PNAS
        106.45 (2009): 19011-19016.
    """
    n_states = np.shape(tprob)[0]
    source_idx = np.array(sources, dtype=int).reshape(-1)
    sink_idx = np.array(sinks, dtype=int).reshape(-1)

    # Linear system (I - T) q = T . 1_sinks, with the rows/columns of all
    # boundary states replaced by identity so that q == 0 on sources and
    # q == 1 on sinks.
    lhs = np.eye(n_states) - tprob
    for state in np.concatenate([source_idx, sink_idx]):
        lhs[state, :] = 0.0
        lhs[:, state] = 0.0
        lhs[state, state] = 1.0

    indicator = np.zeros(n_states)
    indicator[sink_idx] = 1.0
    rhs = np.dot(tprob, indicator)
    rhs[source_idx] = 0.0
    rhs[sink_idx] = 1.0

    return np.linalg.solve(lhs, rhs)
Get the forward committors of the reaction sources -> sinks. Parameters ---------- sources : array_like, int The set of unfolded/reactant states. sinks : array_like, int The set of folded/product states. tprob : np.ndarray Transition matrix Returns ------- forward_committors : np.ndarray The forward committors for the reaction sources -> sinks References ---------- .. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of transition paths. J. Stat. Phys. 123, 503-523 (2006). .. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E. Transition path theory for Markov jump processes. Multiscale Model. Simul. 7, 1192-1219 (2009). .. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive flux and folding pathways in network models of coarse-grained protein dynamics. J. Chem. Phys. 130, 205102 (2009). .. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations." PNAS 106.45 (2009): 19011-19016.
async def shuffle(self):
    """The shuffle command: randomly reorder the playback queue in place."""
    self.logger.debug("shuffle command")
    # Only act once the player is fully initialised.
    if self.state != 'ready':
        return

    self.statuslog.debug("Shuffling")
    random.shuffle(self.queue)
    self.update_queue()
    self.statuslog.debug("Shuffled")
The shuffle command
async def prepare_container(self, size, container, elem_type=None):
    """Prepares container for serialization.

    When reading, ensures the container holds at least ``size`` elements,
    generating placeholder elements of ``elem_type`` as needed.

    :param size: required number of elements
    :param container: target container (may be None)
    :param elem_type: element type used when generating placeholders
    :return: the prepared element list when reading, otherwise None
    """
    if self.writing:
        return
    if container is None:
        return gen_elem_array(size, elem_type)

    elems = get_elem(container)
    if elems is None:
        elems = []
    # Pad with generated elements up to the requested size.
    missing = max(0, size - len(elems))
    elems += gen_elem_array(missing, elem_type)
    set_elem(container, elems)
    return elems
Prepares container for serialization :param size: :param container: :return:
def select_peaks(data, events, limit):
    """Check whether event satisfies amplitude limit.

    Parameters
    ----------
    data : ndarray (dtype='float')
        vector with data
    events : ndarray (dtype='int')
        N x 2+ matrix with peak/trough in second position
    limit : float
        amplitude threshold, compared in absolute value

    Returns
    -------
    ndarray (dtype='int')
        the rows of ``events`` whose peak/trough amplitude reaches the
        limit
    """
    peak_amplitudes = abs(data[events[:, 1]])
    keep = peak_amplitudes >= abs(limit)
    return events[keep, :]
Check whether event satisfies amplitude limit. Parameters ---------- data : ndarray (dtype='float') vector with data events : ndarray (dtype='int') N x 2+ matrix with peak/trough in second position limit : float low and high limit for spindle duration Returns ------- ndarray (dtype='int') N x 2+ matrix with peak/trough in second position
def update_installed_files(self, installed_files): """ Track the files installed by a package so pip knows how to remove the package. This method is used by :func:`install_binary_dist()` (which collects the list of installed files for :func:`update_installed_files()`). :param installed_files: A list of absolute pathnames (strings) with the files that were just installed. """ # Find the *.egg-info directory where installed-files.txt should be created. pkg_info_files = [fn for fn in installed_files if fnmatch.fnmatch(fn, '*.egg-info/PKG-INFO')] # I'm not (yet) sure how reliable the above logic is, so for now # I'll err on the side of caution and only act when the results # seem to be reliable. if len(pkg_info_files) != 1: logger.warning("Not tracking installed files (couldn't reliably determine *.egg-info directory)") else: egg_info_directory = os.path.dirname(pkg_info_files[0]) installed_files_path = os.path.join(egg_info_directory, 'installed-files.txt') logger.debug("Tracking installed files in %s ..", installed_files_path) with open(installed_files_path, 'w') as handle: for pathname in installed_files: handle.write('%s\n' % os.path.relpath(pathname, egg_info_directory))
Track the files installed by a package so pip knows how to remove the package. This method is used by :func:`install_binary_dist()` (which collects the list of installed files for :func:`update_installed_files()`). :param installed_files: A list of absolute pathnames (strings) with the files that were just installed.
def debugDumpAttr(self, output, depth):
    """Dumps debug information for the attribute.

    Thin wrapper over the libxml2 C binding.

    :param output: file object the dump is written to
    :param depth: indentation depth for the dump
    """
    libxml2mod.xmlDebugDumpAttr(output, self._o, depth)
Dumps debug information for the attribute
def profile(self, tile=None):
    """Create a metadata dictionary for rasterio.

    Parameters
    ----------
    tile : ``BufferedTile``

    Returns
    -------
    metadata : dictionary
        output profile dictionary used for rasterio.
    """
    # Work on a copy so the stored base profile is never mutated.
    metadata = dict(self._profile)
    if tile is None:
        return metadata
    metadata.update(
        width=tile.width,
        height=tile.height,
        affine=tile.affine,
        driver="PNG",
        crs=tile.crs,
    )
    return metadata
Create a metadata dictionary for rasterio. Parameters ---------- tile : ``BufferedTile`` Returns ------- metadata : dictionary output profile dictionary used for rasterio.
def _render_templates(files, filetable, written_files, force, open_mode='w'):
    """Write template contents from filetable into files.

    Using filetable for the rendered templates, and the list of files,
    render all the templates into actual files on disk, forcing to
    overwrite the file as appropriate, and using the given open mode for
    the file.

    :param files: mapping of template path -> target path on disk
    :param filetable: iterable of (template path, rendered content) pairs
    :param written_files: list of paths already written; appended to in place
    :param force: overwrite existing files when True
    :param open_mode: mode used to open the target file (default 'w')
    """
    for tpl_path, content in filetable:
        target_path = files[tpl_path]
        needdir = os.path.dirname(target_path)
        assert needdir, "Target should have valid parent dir"
        # Create the parent directory, tolerating it already existing.
        try:
            os.makedirs(needdir)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        if os.path.isfile(target_path):
            if force:
                LOG.warning("Forcing overwrite of existing file %s.", target_path)
            elif target_path in written_files:
                # A prior stencil in this run owns the file; do not warn twice.
                LOG.warning("Previous stencil has already written file %s.", target_path)
            else:
                # Existing file not written by us and no --force: skip it.
                print("Skipping existing file %s" % target_path)
                LOG.info("Skipping existing file %s", target_path)
                continue
        with open(target_path, open_mode) as newfile:
            print("Writing rendered file %s" % target_path)
            LOG.info("Writing rendered file %s", target_path)
            newfile.write(content)
        written_files.append(target_path)
Write template contents from filetable into files. Using filetable for the rendered templates, and the list of files, render all the templates into actual files on disk, forcing to overwrite the file as appropriate, and using the given open mode for the file.
def make_logger(scraper):
    """Create two log handlers, one to output info-level output to the
    console, the other to store all logging in a JSON file which will
    later be used to generate reports.

    :param scraper: the scraper whose name and log path configure the logger
    :return: a TaskAdapter wrapping the scraper-named logger
    """
    # Configure the root logger so both handlers see every record.
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)

    # Silence noisy third-party request logging.
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    # JSON file handler: captures everything for later report generation.
    json_handler = logging.FileHandler(log_path(scraper))
    json_handler.setLevel(logging.DEBUG)
    json_formatter = jsonlogger.JsonFormatter(make_json_format())
    json_handler.setFormatter(json_formatter)
    logger.addHandler(json_handler)

    # Console handler: human-readable info-level output.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    fmt = '%(name)s [%(levelname)-8s]: %(message)s'
    formatter = logging.Formatter(fmt)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Return a scraper-specific adapter around the scraper-named logger.
    logger = logging.getLogger(scraper.name)
    logger = TaskAdapter(logger, scraper)
    return logger
Create two log handlers, one to output info-level output to the console, the other to store all logging in a JSON file which will later be used to generate reports.
def do_format(value, *args, **kwargs):
    """
    Apply python string formatting on an object:

    .. sourcecode:: jinja

        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    if args and kwargs:
        raise FilterArgumentError(
            "can't handle positional and keyword arguments at the same time"
        )
    # Keyword arguments take precedence; fall back to positionals.
    fmt_args = kwargs if kwargs else args
    return soft_unicode(value) % fmt_args
Apply python string formatting on an object: .. sourcecode:: jinja {{ "%s - %s"|format("Hello?", "Foo!") }} -> Hello? - Foo!
def checkInputParameter(method, parameters, validParameters, requiredParameters=None):
    """
    Helper function to check input by using before sending to the server.

    :param method: Name of the API
    :type method: str
    :param parameters: parameter names supplied by the caller
    :type parameters: iterable
    :param validParameters: Allow parameters for the API call
    :type validParameters: list
    :param requiredParameters: Required parameters for the API call (Default: None)
    :type requiredParameters: list
    :raises dbsClientException: when any validation rule is violated
    """
    # Reject any parameter the API does not support.
    for parameter in parameters:
        if parameter not in validParameters:
            raise dbsClientException("Invalid input",
                                     "API %s does not support parameter %s. Supported parameters are %s" \
                                     % (method, parameter, validParameters))

    if requiredParameters is not None:
        # 'multiple': at least one of the listed parameters must be present
        # ('detail' does not count towards this requirement).
        if 'multiple' in requiredParameters:
            match = False
            for requiredParameter in requiredParameters['multiple']:
                if requiredParameter!='detail' and requiredParameter in parameters:
                    match = True
                    break
            if not match:
                raise dbsClientException("Invalid input",
                                         "API %s does require one of the parameters %s" \
                                         % (method, requiredParameters['multiple']))

        # 'forced': every listed parameter must be present.
        if 'forced' in requiredParameters:
            for requiredParameter in requiredParameters['forced']:
                if requiredParameter not in parameters:
                    raise dbsClientException("Invalid input",
                                             "API %s does require the parameter %s. Forced required parameters are %s" \
                                             % (method, requiredParameter, requiredParameters['forced']))

        # 'standalone': exactly one of the listed parameters must be present.
        if 'standalone' in requiredParameters:
            overlap = []
            for requiredParameter in requiredParameters['standalone']:
                if requiredParameter in parameters:
                    overlap.append(requiredParameter)
            if len(overlap) != 1:
                raise dbsClientException("Invalid input",
                                         "API %s does requires only *one* of the parameters %s." \
                                         % (method, requiredParameters['standalone']))
Helper function to check input by using before sending to the server :param method: Name of the API :type method: str :param validParameters: Allow parameters for the API call :type validParameters: list :param requiredParameters: Required parameters for the API call (Default: None) :type requiredParameters: list
def construct_rest_of_world(self, excluded, name=None, fp=None, geom=True):
    """Construct rest-of-world geometry and optionally write to filepath ``fp``.

    Excludes faces in location list ``excluded``. ``excluded`` must be an
    iterable of location strings (not face ids).

    Returns the set of included face ids when ``geom`` is falsy; otherwise
    the union geometry, or ``fp`` when a filepath was given. Returns None
    (after warning) when GIS support is unavailable.
    """
    # Fail fast on unknown locations before doing any set arithmetic.
    for location in excluded:
        assert location in self.locations, "Can't find location {}".format(location)
    # Rest-of-world faces = all faces minus every excluded location's faces.
    included = self.all_faces.difference(
        set().union(*[set(self.data[loc]) for loc in excluded])
    )
    if not geom:
        return included
    elif not gis:
        # GIS libraries not installed; cannot build an actual geometry.
        warn(MISSING_GIS)
        return
    geom = _union(included)[1]
    if fp:
        self.write_geoms_to_file(fp, [geom], [name] if name else None)
        return fp
    else:
        return geom
Construct rest-of-world geometry and optionally write to filepath ``fp``. Excludes faces in location list ``excluded``. ``excluded`` must be an iterable of location strings (not face ids).
def gen3d_conformer(self):
    """
    A combined method to first generate 3D structures from 0D or 2D
    structures and then find the minimum energy conformer:

    1. Use OBBuilder to create a 3D structure using rules and ring templates
    2. Do 250 steps of a steepest descent geometry optimization with the
       MMFF94 forcefield
    3. Do 200 iterations of a Weighted Rotor conformational search
       (optimizing each conformer with 25 steps of a steepest descent)
    4. Do 250 steps of a conjugate gradient geometry optimization.

    Warning from openbabel docs:
    For many applications where 100s if not 1000s of molecules need to be
    processed, gen3d is rather SLOW. Sometimes this function can cause a
    segmentation fault.
    A future version of Open Babel will provide options for slow/medium/fast
    3D structure generation which will involve different compromises between
    speed and finding the global energy minimum.
    """
    # Delegate the whole pipeline to Open Babel's built-in "Gen3D" operation,
    # which mutates self._obmol in place.
    gen3d = ob.OBOp.FindType("Gen3D")
    gen3d.Do(self._obmol)
A combined method to first generate 3D structures from 0D or 2D structures and then find the minimum energy conformer: 1. Use OBBuilder to create a 3D structure using rules and ring templates 2. Do 250 steps of a steepest descent geometry optimization with the MMFF94 forcefield 3. Do 200 iterations of a Weighted Rotor conformational search (optimizing each conformer with 25 steps of a steepest descent) 4. Do 250 steps of a conjugate gradient geometry optimization. Warning from openbabel docs: For many applications where 100s if not 1000s of molecules need to be processed, gen3d is rather SLOW. Sometimes this function can cause a segmentation fault. A future version of Open Babel will provide options for slow/medium/fast 3D structure generation which will involve different compromises between speed and finding the global energy minimum.
def fig_to_svg(fig):
    """Render a matplotlib figure as an SVG string.

    Returns:
        str: the figure serialized as SVG markup
    """
    with io.StringIO() as svg_buffer:
        fig.savefig(svg_buffer, format='svg')
        return svg_buffer.getvalue()
Helper function to convert matplotlib figure to SVG string Returns: str: figure as SVG string
def restart(self):
    """
    Restart the console

    This is needed when we switch projects to update PYTHONPATH
    and the selected interpreter
    """
    self.master_clients = 0
    # Temporarily disable auto-creation so closing clients below does not
    # spawn replacements mid-loop
    self.create_new_client_if_empty = False
    for i in range(len(self.clients)):
        # Always operate on the *last* client: close_client() shrinks
        # self.clients on every iteration, so [-1] walks the whole list
        client = self.clients[-1]
        try:
            client.shutdown()
        except Exception as e:
            QMessageBox.warning(self, _('Warning'),
                _("It was not possible to restart the IPython console "
                  "when switching to this project. The error was<br><br>"
                  "<tt>{0}</tt>").format(e), QMessageBox.Ok)
        self.close_client(client=client, force=True)
    # Bring up one fresh client (without stealing focus) and re-enable
    # auto-creation for subsequent empty states
    self.create_new_client(give_focus=False)
    self.create_new_client_if_empty = True
Restart the console This is needed when we switch projects to update PYTHONPATH and the selected interpreter
def _handle_browse(self, relpath, params):
    """Handle requests to browse the filesystem under the build root.

    Serves a directory listing or file contents for ``relpath`` after
    verifying the resolved path stays inside the build root.

    :param relpath: path relative to the build root.
    :param params: request parameters forwarded to the serving helpers.
    :raises ValueError: if ``relpath`` escapes the build root (e.g. via ``..``).
    """
    abspath = os.path.normpath(os.path.join(self._root, relpath))
    # Bug fix: a plain startswith(self._root) check wrongly accepted sibling
    # paths such as '/root-other' for root '/root'.  Require an exact match
    # or a path-separator boundary after the root.
    root = self._root.rstrip(os.sep)
    if abspath != root and not abspath.startswith(root + os.sep):
        raise ValueError  # Prevent using .. to get files from anywhere other than root.
    if os.path.isdir(abspath):
        self._serve_dir(abspath, params)
    elif os.path.isfile(abspath):
        self._serve_file(abspath, params)
Handle requests to browse the filesystem under the build root.
def rebin(self, *factors, **kwargs):
    """Return a new histogram that is 'rebinned' (zoomed) by factors (tuple of floats) along each dimensions

    factors: tuple with zoom factors along each axis. e.g. 2 = double number of bins, 0.5 = halve them.
    order: Order for spline interpolation in scipy.ndimage.zoom. Defaults to linear interpolation (order=1).

    The only accepted keyword argument is 'order'!!! (python 2 is not nice)

    The normalization is set to the normalization of the current histogram

    The factors don't have to be integers or fractions: scipy.ndimage.zoom deals with the rebinning arcana.
    """
    if not HAVE_SCIPY:
        raise NotImplementedError("Rebinning requires scipy.ndimage")
    # Python-2-compatible keyword-only argument handling
    if any([x != 'order' for x in kwargs.keys()]):
        raise ValueError("Only 'order' keyword argument is accepted. Yeah, this is confusing.. blame python 2.")
    order = kwargs.get('order', 1)

    # Construct a new histogram
    mh = self.similar_blank_histogram()

    if not len(factors) == self.dimensions:
        raise ValueError("You must pass %d rebin factors to rebin a %d-dimensional histogram" % (
            self.dimensions, self.dimensions
        ))

    # Zoom the bin edges.
    # It's a bit tricky for non-uniform bins:
    # we first construct a linear interpolator to take
    # fraction along axis -> axis coordinate according to current binning.
    # Then we feed it the new desired binning fractions.
    for i, f in enumerate(factors):
        x = self.bin_edges[i]
        # NOTE(review): (len(x) - 1) * f + 1 is passed as the linspace point
        # count; for non-integer factors this relies on implicit float->int
        # handling — confirm against the numpy version in use
        mh.bin_edges[i] = np.interp(
            x=np.linspace(0, 1, (len(x) - 1) * f + 1),
            xp=np.linspace(0, 1, len(x)),
            fp=x)

    # Rebin the histogram using ndimage.zoom, then renormalize
    mh.histogram = zoom(self.histogram, factors, order=order)
    # Preserve the total count of the original histogram
    mh.histogram *= self.histogram.sum() / mh.histogram.sum()
    # mh.histogram /= np.product(factors)

    return mh
Return a new histogram that is 'rebinned' (zoomed) by factors (tuple of floats) along each dimensions factors: tuple with zoom factors along each axis. e.g. 2 = double number of bins, 0.5 = halve them. order: Order for spline interpolation in scipy.ndimage.zoom. Defaults to linear interpolation (order=1). The only accepted keyword argument is 'order'!!! (python 2 is not nice) The normalization is set to the normalization of the current histogram The factors don't have to be integers or fractions: scipy.ndimage.zoom deals with the rebinning arcana.
def _simple_dispatch(self, name, params):
    """Dispatch a call to a registered internal method, falling back to the
    generic dispatcher when the name is unknown.
    """
    # Sentinel lookup so that a registered-but-falsy entry is still honored
    _missing = object()
    func = self.funcs.get(name, _missing)
    if func is not _missing:
        # Internal method found: positional or keyword invocation
        if isinstance(params, (list, tuple)):
            return func(*params)
        return func(**params)

    # Unknown name: delegate to the generic dispatcher.  This happens outside
    # any try/except so errors raised by the method itself are not masked.
    return self._dispatch_method(name, params)
Dispatch method
def expand_labels(labels, subtopic=False):
    '''Expand a set of labels that define a connected component.

    ``labels`` must define a *positive* connected component: it is all
    of the edges that make up the *single* connected component in the
    :class:`LabelStore`. expand will ignore subtopic assignments, and
    annotator_id will be an arbitrary one selected from ``labels``.

    Note that this function only returns the expanded labels, which
    is guaranteed to be disjoint with the given ``labels``. This
    requirement implies that ``labels`` is held in memory to ensure
    that no duplicates are returned.

    If ``subtopic`` is ``True``, then it is assumed that ``labels``
    defines a ``subtopic`` connected component. In this case, subtopics
    are included in the expanded labels.

    :param labels: iterable of :class:`Label` for the connected component.
    :rtype: generator of expanded :class:`Label`s only
    '''
    labels = list(labels)
    assert all(lab.value == CorefValue.Positive for lab in labels)

    # Anything to expand?
    if len(labels) == 0:
        return

    annotator = labels[0].annotator_id
    # data_backed: normalized pairs that already have a real Label;
    # connected_component: all node identities seen in the component
    data_backed = set()
    connected_component = set()
    for label in labels:
        ident1, ident2 = idents_from_label(label, subtopic=subtopic)
        data_backed.add(normalize_pair(ident1, ident2))
        connected_component.add(ident1)
        connected_component.add(ident2)

    # We do not want to rebuild the Labels we already have,
    # because they have true annotator_id and subtopic
    # fields that we may want to preserve.
    for ident1, ident2 in combinations(connected_component, 2):
        if normalize_pair(ident1, ident2) not in data_backed:
            (cid1, subid1), (cid2, subid2) = ident1, ident2
            yield Label(cid1, cid2, annotator, CorefValue.Positive,
                        subtopic_id1=subid1, subtopic_id2=subid2)
Expand a set of labels that define a connected component. ``labels`` must define a *positive* connected component: it is all of the edges that make up the *single* connected component in the :class:`LabelStore`. expand will ignore subtopic assignments, and annotator_id will be an arbitrary one selected from ``labels``. Note that this function only returns the expanded labels, which is guaranteed to be disjoint with the given ``labels``. This requirement implies that ``labels`` is held in memory to ensure that no duplicates are returned. If ``subtopic`` is ``True``, then it is assumed that ``labels`` defines a ``subtopic`` connected component. In this case, subtopics are included in the expanded labels. :param labels: iterable of :class:`Label` for the connected component. :rtype: generator of expanded :class:`Label`s only
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    bulk_resume
        /foreman_tasks/api/tasks/bulk_resume
    bulk_search
        /foreman_tasks/api/tasks/bulk_search
    summary
        /foreman_tasks/api/tasks/summary

    Otherwise, call ``super``.
    """
    # These collection-level endpoints hang off the base path rather than
    # an individual entity's path
    if which in ('bulk_resume', 'bulk_search', 'summary'):
        return '{0}/{1}'.format(
            super(ForemanTask, self).path('base'),
            which
        )
    return super(ForemanTask, self).path(which)
Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: bulk_resume /foreman_tasks/api/tasks/bulk_resume bulk_search /foreman_tasks/api/tasks/bulk_search summary /foreman_tasks/api/tasks/summary Otherwise, call ``super``.
def concatenate_matrices(*matrices):
    """Return the product of a series of transformation matrices.

    With no arguments, the 4x4 identity is returned.

    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
    True
    """
    result = numpy.identity(4)
    for matrix in matrices:
        result = numpy.dot(result, matrix)
    return result
Return concatenation of series of transformation matrices. >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 >>> numpy.allclose(M, concatenate_matrices(M)) True >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) True
def _augment_text_w_syntactic_info(self, text, text_layer):
    '''Augments given Text object with the syntactic information
    from the *text_layer*. More specifically, adds information
    about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token
    in the Text object (stored as 's_label', 's_head' and 's_rel');

    (!) Note: this method is added to provide some initial
    consistency with MaltParser based syntactic parsing;
    If a better syntactic parsing interface is achieved in
    the future, this method will be deprecated ...

    :raises Exception: if any token lacks an attached syntactic analysis
    '''
    # j indexes the flat text_layer, walked in lockstep with the
    # sentence/token iteration over the Text object
    j = 0
    for sentence in text.divide(layer=WORDS, by=SENTENCES):
        for i in range(len(sentence)):
            estnltkToken = sentence[i]
            vislcg3Token = text_layer[j]
            parse_found = False
            if PARSER_OUT in vislcg3Token:
                if len(vislcg3Token[PARSER_OUT]) > 0:
                    # Only the first parse supplies label/head; all parses
                    # contribute to the joined dependency-relation string
                    firstParse = vislcg3Token[PARSER_OUT][0]
                    # Fetch information about the syntactic relation:
                    estnltkToken['s_label'] = str(i)
                    estnltkToken['s_head'] = str(firstParse[1])
                    # Fetch the name of the surface syntactic relation
                    deprels = '|'.join([p[0] for p in vislcg3Token[PARSER_OUT]])
                    estnltkToken['s_rel'] = deprels
                    parse_found = True
            if not parse_found:
                raise Exception("(!) Unable to retrieve syntactic analysis for the ", \
                                estnltkToken, ' from ', vislcg3Token)
            j += 1
Augments given Text object with the syntactic information from the *text_layer*. More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token in the Text object; (!) Note: this method is added to provide some initial consistency with MaltParser based syntactic parsing; If a better syntactic parsing interface is achieved in the future, this method will be deprecated ...
def auctionWS(symbols=None, on_data=None):
    '''Stream auction messages from the IEX DEEP websocket channel.

    https://iextrading.com/developer/docs/#auction

    :param symbols: symbol or list of symbols to subscribe to
    :param on_data: callback invoked for each received message
    '''
    # Normalize a single symbol string into a list
    symbols = _strToList(symbols)
    # Initial subscription payload for the 'auction' channel
    sendinit = ({'symbols': symbols, 'channels': ['auction']},)
    return _stream(_wsURL('deep'), sendinit, on_data)
https://iextrading.com/developer/docs/#auction
def read(address, length):
    """
    Prepares an i2c read transaction.

    :param address: Slave address.
    :type: address: int
    :param length: Number of bytes to read.
    :type: length: int
    :return: New :py:class:`i2c_msg` instance for read operation.
    :rtype: :py:class:`i2c_msg`
    """
    # Allocate a zero-filled ctypes buffer for the driver to fill
    arr = create_string_buffer(length)
    return i2c_msg(
        addr=address, flags=I2C_M_RD, len=length, buf=arr)
Prepares an i2c read transaction. :param address: Slave address. :type: address: int :param length: Number of bytes to read. :type: length: int :return: New :py:class:`i2c_msg` instance for read operation. :rtype: :py:class:`i2c_msg`
def insert(self, loc, column, value, allow_duplicates=False):
    """
    Insert column into DataFrame at specified location.

    Raises a ValueError if `column` is already contained in the DataFrame,
    unless `allow_duplicates` is set to True.

    Parameters
    ----------
    loc : int
        Insertion index. Must verify 0 <= loc <= len(columns)
    column : string, number, or hashable object
        label of the inserted column
    value : int, Series, or array-like
    allow_duplicates : bool, optional
    """
    # Make sure the frame has an index compatible with `value`
    self._ensure_valid_index(value)
    # Coerce `value` into an internal column representation (no broadcasting)
    value = self._sanitize_column(column, value, broadcast=False)
    self._data.insert(loc, column, value,
                      allow_duplicates=allow_duplicates)
Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional
def _set_binner(self):
    """
    Setup our binners.

    Cache these as we are an immutable object
    """
    # Lazily compute and memoize the (binner, grouper) pair
    if self.binner is None:
        self.binner, self.grouper = self._get_binner()
Setup our binners. Cache these as we are an immutable object
def display_data_item(self, data_item: DataItem, source_display_panel=None, source_data_item=None):
    """Display a new data item and give it keyboard focus.  Uses existing display if it is already displayed.

    Returns the :class:`DisplayPanel` hosting the item, or None when no
    result display panel is available.

    .. versionadded:: 1.0

    Status: Provisional
    Scriptable: Yes
    """
    # Reuse an existing panel if the data item is already shown somewhere
    for display_panel in self.__document_controller.workspace_controller.display_panels:
        if display_panel.data_item == data_item._data_item:
            display_panel.request_focus()
            return DisplayPanel(display_panel)
    # Otherwise display it in the next available result panel
    result_display_panel = self.__document_controller.next_result_display_panel()
    if result_display_panel:
        display_item = self.__document_controller.document_model.get_display_item_for_data_item(data_item._data_item)
        result_display_panel.set_display_panel_display_item(display_item)
        result_display_panel.request_focus()
        return DisplayPanel(result_display_panel)
    # No panel available to host the display
    return None
Display a new data item and gives it keyboard focus. Uses existing display if it is already displayed. .. versionadded:: 1.0 Status: Provisional Scriptable: Yes
def run_script(pycode):
    """Run the Python in `pycode`, and return a dict of the resulting globals.

    Leading newline and common indentation are stripped before execution so
    that triple-quoted inline snippets can be passed directly.
    """
    # Fix up the whitespace in pycode.
    # Bug fix: use startswith() so an empty string doesn't raise IndexError.
    if pycode.startswith("\n"):
        pycode = pycode[1:]
    # Bug fix: str.rstrip() returns a new string; the original call discarded
    # its result, leaving trailing whitespace in place.
    pycode = pycode.rstrip()
    pycode = textwrap.dedent(pycode)

    # execute it.
    globs = {}
    six.exec_(pycode, globs, globs)         # pylint: disable=W0122
    return globs
Run the Python in `pycode`, and return a dict of the resulting globals.
def _check_status_errors(proto, content, error_traps=None):
    """Raise HTTPErrors based on error statuses sent from the validator.

    Checks the common statuses shared across messages, then runs any
    route-specific error traps.
    """
    if content.status == proto.OK:
        return

    # Not every protobuf defines every status enum, so statuses missing from
    # `proto` are skipped (mirrors the original AttributeError handling).
    common_statuses = [
        ('INTERNAL_ERROR', errors.UnknownValidatorError),
        ('NOT_READY', errors.ValidatorNotReady),
        ('NO_ROOT', errors.HeadNotFound),
        ('INVALID_PAGING', errors.PagingInvalid),
        ('INVALID_SORT', errors.SortInvalid),
    ]
    for status_name, error_cls in common_statuses:
        status_value = getattr(proto, status_name, None)
        if status_value is not None and content.status == status_value:
            raise error_cls()

    # Check custom error traps from the particular route message
    if error_traps is not None:
        for trap in error_traps:
            trap.check(content.status)
Raises HTTPErrors based on error statuses sent from validator. Checks for common statuses and runs route specific error traps.
def registerAtomType(self, parameters):
    """Register a new atom type.

    ``parameters`` must contain 'name', 'class' and 'mass'; it may also
    contain 'element', 'def', 'overrides', 'desc' and 'doi'.

    :raises ValueError: if an atom type with the same name is already registered
    """
    name = parameters['name']
    if name in self._atomTypes:
        raise ValueError('Found multiple definitions for atom type: ' + name)
    atom_class = parameters['class']
    mass = _convertParameterToNumber(parameters['mass'])
    element = None
    if 'element' in parameters:
        element, custom = self._create_element(parameters['element'], mass)
        if custom:
            self.non_element_types[element.symbol] = element

    self._atomTypes[name] = self.__class__._AtomType(name, atom_class, mass, element)
    # Group atom types by class; '' is the catch-all class containing every type
    if atom_class in self._atomClasses:
        type_set = self._atomClasses[atom_class]
    else:
        type_set = set()
        self._atomClasses[atom_class] = type_set
    type_set.add(name)
    self._atomClasses[''].add(name)

    if 'def' in parameters:
        self.atomTypeDefinitions[name] = parameters['def']
    if 'overrides' in parameters:
        overrides = set(atype.strip() for atype in parameters['overrides'].split(","))
        if overrides:
            self.atomTypeOverrides[name] = overrides
    # Bug fix: the original checked "'des' in parameters" but then read
    # parameters['desc'], so a 'des' key raised KeyError and a 'desc' key was
    # silently ignored; both sides now use 'desc'.
    if 'desc' in parameters:
        self.atomTypeDesc[name] = parameters['desc']
    if 'doi' in parameters:
        dois = set(doi.strip() for doi in parameters['doi'].split(','))
        self.atomTypeRefs[name] = dois
Register a new atom type.
def _lazy_migration(self, patch=None, meta=None, toa=None):
    """
    Handle when a revision scheduling is turned on for a collection that was
    previously not schedulable. This method will create the first revision
    for each object before it is ever used in the context of scheduling.

    :param dict patch: The patch that should be used
    :param dict meta: Meta data for this action
    :param int toa: The time of action
    :return: A legacy revision for a document that previously had none
    :rtype: list
    """
    # If any revision already exists for this master document, nothing to do
    objects = yield self.revisions.find({"master_id": self.master_id}, limit=1)
    if len(objects) > 0:
        raise Return(objects)

    if not patch:
        # Seed the first revision from the current master document
        patch = yield self.collection.find_one_by_id(self.master_id)

    if not toa:
        # NOTE: `long` is Python-2-only; this module targets py2
        toa = long(time.mktime(datetime.datetime.now().timetuple()))

    meta["comment"] = "This document was migrated automatically."

    # Strip identifiers so the patch doesn't carry them
    if isinstance(patch, dict) and patch.get("id"):
        del patch["id"]

    if isinstance(patch, dict) and patch.get("_id"):
        del patch["_id"]

    #Here we separate patch and snapshot, and make sure that the snapshot looks like the master document
    snapshot = copy.deepcopy(patch)
    snapshot["id"] = self.master_id
    snapshot["published"] = self.settings.get("scheduler", {}).get("lazy_migrated_published_by_default", False)

    #If no objects are returned, this is some legacy object that needs a first revision
    #Create it here
    legacy_revision = {
        "toa": toa,
        "processed": True,
        "collection": self.collection_name,
        "master_id": self.master_id,
        "action": self.INSERT_ACTION,
        "patch": self.collection._dictionary_to_cursor(patch),
        "snapshot": snapshot,
        "meta": meta,
    }

    response = yield self.revisions.insert(legacy_revision)
    # NOTE(review): insert presumably returns the new id as a string on
    # success — confirm against the revisions store implementation
    if isinstance(response, str):
        raise Return([legacy_revision])

    raise Return(None)
Handle when revision scheduling is turned on for a collection that was previously not schedulable. This method will create the first revision for each object before it is ever used in the context of scheduling. :param dict patch: The patch that should be used :param dict meta: Meta data for this action :param int toa: The time of action :return: A legacy revision for a document that previously had none :rtype: list
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
    """
    Use Gauss-Hermite Quadrature to compute

       E_p(f) [ log p(y|f) ]
       d/dm E_p(f) [ log p(y|f) ]
       d/dv E_p(f) [ log p(y|f) ]

    where p(f) is a Gaussian with mean m and variance v. The shapes of Y, m
    and v should match.

    if no gh_points are passed, we construct them using default options
    """
    if gh_points is None:
        gh_x, gh_w = self._gh_points()
    else:
        gh_x, gh_w = gh_points

    shape = m.shape
    m, v, Y = m.flatten(), v.flatten(), Y.flatten()

    # make a grid of points: change of variables f = sqrt(2 v) x + m maps
    # the Hermite nodes onto samples of N(m, v)
    X = gh_x[None, :]*np.sqrt(2.*v[:, None]) + m[:, None]

    # evaluate the likelihood for the grid. First ax indexes the data (and mu, var) and the second indexes the grid.
    # broadcast needs to be handled carefully.
    logp = self.logpdf(X, Y[:, None], Y_metadata=Y_metadata)
    dlogp_dx = self.dlogpdf_df(X, Y[:, None], Y_metadata=Y_metadata)
    d2logp_dx2 = self.d2logpdf_df2(X, Y[:, None], Y_metadata=Y_metadata)

    # average over the grid to get derivatives of the Gaussian's parameters
    # division by pi comes from fact that for each quadrature we need to scale by 1/sqrt(pi)
    F = np.dot(logp, gh_w)/np.sqrt(np.pi)
    dF_dm = np.dot(dlogp_dx, gh_w)/np.sqrt(np.pi)
    dF_dv = np.dot(d2logp_dx2, gh_w)/np.sqrt(np.pi)
    dF_dv /= 2.

    # Bug fix: the original used a bare name `stop` here as a debugger
    # breadcrumb, which raised an uninformative NameError; raise explicitly.
    if np.any(np.isnan(dF_dv)) or np.any(np.isinf(dF_dv)):
        raise ValueError("variational_expectations: dF_dv contains NaN or inf")
    if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)):
        raise ValueError("variational_expectations: dF_dm contains NaN or inf")

    if self.size:
        dF_dtheta = self.dlogpdf_dtheta(X, Y[:, None], Y_metadata=Y_metadata)  # Ntheta x (orig size) x N_{quad_points}
        dF_dtheta = np.dot(dF_dtheta, gh_w)/np.sqrt(np.pi)
        dF_dtheta = dF_dtheta.reshape(self.size, shape[0], shape[1])
    else:
        dF_dtheta = None  # Not yet implemented
    return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta
Use Gauss-Hermite Quadrature to compute E_p(f) [ log p(y|f) ] d/dm E_p(f) [ log p(y|f) ] d/dv E_p(f) [ log p(y|f) ] where p(f) is a Gaussian with mean m and variance v. The shapes of Y, m and v should match. if no gh_points are passed, we construct them using defualt options
def CleanseRawStrings(raw_lines):
    """Removes C++11 raw strings from lines.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

    Args:
      raw_lines: list of raw lines.

    Returns:
      list of lines with C++11 raw strings replaced by empty strings.
    """
    # `delimiter` is the closing token of the raw string currently open
    # (e.g. ')foo"'); None means we are not inside a raw string.
    delimiter = None
    lines_without_raw_strings = []
    for line in raw_lines:
        if delimiter:
            # Inside a raw string, look for the end
            end = line.find(delimiter)
            if end >= 0:
                # Found the end of the string, match leading space for this
                # line and resume copying the original lines, and also insert
                # a "" on the last line.
                leading_space = Match(r'^(\s*)\S', line)
                line = leading_space.group(1) + '""' + line[end + len(delimiter):]
                delimiter = None
            else:
                # Haven't found the end yet, append a blank line.
                line = '""'

        # Look for beginning of a raw string, and replace them with
        # empty strings. This is done in a loop to handle multiple raw
        # strings on the same line.
        while delimiter is None:
            # Look for beginning of a raw string.
            # See 2.14.15 [lex.string] for syntax.
            #
            # Once we have matched a raw string, we check the prefix of the
            # line to make sure that the line is not part of a single line
            # comment. It's done this way because we remove raw strings
            # before removing comments as opposed to removing comments
            # before removing raw strings. This is because there are some
            # cpplint checks that requires the comments to be preserved, but
            # we don't want to check comments that are inside raw strings.
            matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
            if (matched and
                    not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
                              matched.group(1))):
                delimiter = ')' + matched.group(2) + '"'

                end = matched.group(3).find(delimiter)
                if end >= 0:
                    # Raw string ended on same line
                    line = (matched.group(1) + '""' +
                            matched.group(3)[end + len(delimiter):])
                    delimiter = None
                else:
                    # Start of a multi-line raw string
                    line = matched.group(1) + '""'
            else:
                break

        lines_without_raw_strings.append(line)

    # TODO(unknown): if delimiter is not None here, we might want to
    # emit a warning for unterminated string.
    return lines_without_raw_strings
Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings.
def set_redis(self, redis_url, redis_timeout=10):
    """Authenticate against the Redis server at the given URL.

    Args:
        redis_url (str): URL used to connect to the Redis server, e.g.
            redis://user:password@localhost:6379/2.
        redis_timeout (int): Default timeout in seconds; falls back to 10
            when not a positive integer.

    Raises:
        cartolafc.CartolaFCError: If the Redis server cannot be reached.
    """
    self._redis_url = redis_url
    # Guard against non-int or non-positive timeouts
    self._redis_timeout = redis_timeout if isinstance(redis_timeout, int) and redis_timeout > 0 else 10
    try:
        self._redis = redis.StrictRedis.from_url(url=redis_url)
        # Round-trip a PING to verify the connection is actually usable
        self._redis.ping()
    except (ConnectionError, TimeoutError):
        # NOTE(review): these look like they should be redis.exceptions.*
        # rather than the builtins — confirm which names this module imports
        raise CartolaFCError('Erro conectando ao servidor Redis.')
Realiza a autenticação no servidor Redis utilizando a URL informada. Args: redis_url (str): URL para conectar ao servidor Redis, exemplo: redis://user:password@localhost:6379/2. redis_timeout (int): O timeout padrão (em segundos). kwargs (dict): Raises: cartolafc.CartolaFCError: Se não for possível se conectar ao servidor Redis
def authenticated_userid(request):
    """Helper function that can be used in ``db_key`` to support `self`
    as a collection key.

    Returns the primary-key value of the authenticated user, or None when
    the request carries no user.
    """
    user = getattr(request, 'user', None)
    # Bug fix: the getattr default of None was never honored — the next line
    # crashed with AttributeError on requests without a user.
    if user is None:
        return None
    key = user.pk_field()
    return getattr(user, key)
Helper function that can be used in ``db_key`` to support `self` as a collection key.
def git(self, *arguments):
    """Invoke git with ``arguments`` in ``self.cwd`` and return
    ``(exit code, decoded stdout)``.
    """
    cmd = ['git'] + list(arguments)
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=self.cwd)
    stdout, _ = process.communicate()
    return process.returncode, stdout.decode('UTF-8')
Return (exit code, output) from git.
def create_arrow(rows, cols, radius=0.1, length=1.0, cone_radius=None, cone_length=None): """Create a 3D arrow using a cylinder plus cone Parameters ---------- rows : int Number of rows. cols : int Number of columns. radius : float Base cylinder radius. length : float Length of the arrow. cone_radius : float Radius of the cone base. If None, then this defaults to 2x the cylinder radius. cone_length : float Length of the cone. If None, then this defaults to 1/3 of the arrow length. Returns ------- arrow : MeshData Vertices and faces computed for a cone surface. """ # create the cylinder md_cyl = None if cone_radius is None: cone_radius = radius*2.0 if cone_length is None: con_L = length/3.0 cyl_L = length*2.0/3.0 else: cyl_L = max(0, length - cone_length) con_L = min(cone_length, length) if cyl_L != 0: md_cyl = create_cylinder(rows, cols, radius=[radius, radius], length=cyl_L) # create the cone md_con = create_cone(cols, radius=cone_radius, length=con_L) verts = md_con.get_vertices() nbr_verts_con = verts.size//3 faces = md_con.get_faces() if md_cyl is not None: trans = np.array([[0.0, 0.0, cyl_L]]) verts = np.vstack((verts+trans, md_cyl.get_vertices())) faces = np.vstack((faces, md_cyl.get_faces()+nbr_verts_con)) return MeshData(vertices=verts, faces=faces)
Create a 3D arrow using a cylinder plus cone Parameters ---------- rows : int Number of rows. cols : int Number of columns. radius : float Base cylinder radius. length : float Length of the arrow. cone_radius : float Radius of the cone base. If None, then this defaults to 2x the cylinder radius. cone_length : float Length of the cone. If None, then this defaults to 1/3 of the arrow length. Returns ------- arrow : MeshData Vertices and faces computed for a cone surface.
def get_version(module='spyder_notebook'):
    """Return the package version string parsed from ``<module>/_version.py``.

    Scans for the ``VERSION_INFO`` tuple assignment and joins its elements
    with dots.
    """
    version_path = os.path.join(HERE, module, '_version.py')
    with open(version_path, 'r') as fh:
        content = fh.read()
    for text_line in content.split('\n'):
        if text_line.startswith('VERSION_INFO'):
            version_tuple = ast.literal_eval(text_line.split('=')[-1].strip())
            version = '.'.join(map(str, version_tuple))
            break
    return version
Get version.
def remove(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
    '''
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any emerge commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).

    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html

    Remove packages via emerge --unmerge.

    name
        The name of the package to be deleted.

    slot
        Restrict the remove to a specific slot. Ignored if ``name`` is None.

    fromrepo
        Restrict the remove to a specific repository. Ignored if ``name`` is None.

    Multiple Package Options:

    pkgs
        Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are
        ignored if this argument is present. Must be passed as a python list.

    .. versionadded:: 0.16.0

    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package name> slot=4.4 fromrepo=gentoo
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)

    old = list_pkgs()
    if name and not pkgs and (slot is not None or fromrepo is not None) and len(pkg_params) == 1:
        # Bug fix: the slot/repo qualifiers must be accumulated onto the atom.
        # The original built throwaway `targets` lists and then unconditionally
        # overwrote them with the bare name, so slot/fromrepo were ignored.
        fullatom = name
        if slot is not None:
            fullatom = '{0}:{1}'.format(fullatom, slot)
        if fromrepo is not None:
            fullatom = '{0}::{1}'.format(fullatom, fromrepo)
        targets = [fullatom]
    else:
        # Only unmerge packages that are actually installed
        targets = [x for x in pkg_params if x in old]

    if not targets:
        return {}

    cmd = []
    if salt.utils.systemd.has_scope(__context__) \
            and __salt__['config.get']('systemd.scope', True):
        # Run emerge in a transient scope so a minion restart can't kill it
        cmd.extend(['systemd-run', '--scope'])
    cmd.extend(['emerge', '--ask', 'n', '--quiet', '--unmerge',
                '--quiet-unmerge-warn'])
    cmd.extend(targets)

    out = __salt__['cmd.run_all'](
        cmd,
        output_loglevel='trace',
        python_shell=False
    )

    if out['retcode'] != 0 and out['stderr']:
        errors = [out['stderr']]
    else:
        errors = []

    # Invalidate the cached package list and compute the changes
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if errors:
        raise CommandExecutionError(
            'Problem encountered removing package(s)',
            info={'errors': errors, 'changes': ret}
        )

    return ret
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing any emerge commands spawned by Salt when the ``salt-minion`` service is restarted. (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Remove packages via emerge --unmerge. name The name of the package to be deleted. slot Restrict the remove to a specific slot. Ignored if ``name`` is None. fromrepo Restrict the remove to a specific slot. Ignored if ``name`` is None. Multiple Package Options: pkgs Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are ignored if this argument is present. Must be passed as a python list. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package name> slot=4.4 fromrepo=gentoo salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
def register_writer(klass):
    """
    Add engine to the excel writer registry.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
    """
    # Only callables can act as writer engines.
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    _writers[klass.engine] = klass
Add engine to the excel writer registry. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter
def get_random_label(): """ Get a random label string to use when clustering jobs. """ return ''.join(random.choice(string.ascii_uppercase + string.digits) \ for _ in range(15))
Get a random label string to use when clustering jobs.
def _reset_bbox(self):
    """Recompute the viewer's bounding box after a pan or scale change.

    Intended for internal use only.
    """
    sx, sy = self.get_scale_xy()
    px, py = self.get_pan(coord='data')[:2]
    wd, ht = self.get_window_size()
    # Clamp each window dimension to at least 1 pixel; a zero-sized window
    # makes the scale calculation raise an exception (see github issue 431).
    wd = max(1, wd)
    ht = max(1, ht)
    self._calc_bg_dimensions(sx, sy, px, py, wd, ht)
This function should only be called internally. It resets the viewers bounding box based on changes to pan or scale.
def set_verify_depth(self, depth):
    """
    Set the maximum depth allowed for certificate chain verification on
    this Context object.

    :param depth: An integer specifying the verify depth
    :return: None
    """
    if isinstance(depth, integer_types):
        _lib.SSL_CTX_set_verify_depth(self._context, depth)
    else:
        raise TypeError("depth must be an integer")
Set the maximum depth for the certificate chain verification that shall be allowed for this Context object. :param depth: An integer specifying the verify depth :return: None
def apply_noise_model(prog, noise_model):
    """
    Translate a program into its 'noisy-fied' equivalent under a noise model.

    :param Program prog: A Quil Program object.
    :param NoiseModel noise_model: A NoiseModel, either generated from an ISA
        or from a simple decoherence model.
    :return: A new program translated to a noisy gateset and with noisy
        readout as described by the noise model.
    :rtype: Program
    """
    noisy_prog = _noise_model_program_header(noise_model)
    for instr in prog:
        replacement = instr
        if isinstance(instr, Gate):
            try:
                # Swap in the noisy variant of the gate when one is defined.
                _, noisy_name = get_noisy_gate(instr.name, tuple(instr.params))
                replacement = Gate(noisy_name, [], instr.qubits)
            except NoisyGateUndefined:
                pass
        noisy_prog += replacement
    return noisy_prog
Apply a noise model to a program and generated a 'noisy-fied' version of the program. :param Program prog: A Quil Program object. :param NoiseModel noise_model: A NoiseModel, either generated from an ISA or from a simple decoherence model. :return: A new program translated to a noisy gateset and with noisy readout as described by the noisemodel. :rtype: Program
def drawing_update(self):
    '''Append the latest clicked position to the line being drawn and redraw it.'''
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    # Nothing to do unless a drawing session is active.
    if self.draw_callback is None:
        return
    self.draw_line.append(self.click_position)
    # A polygon needs at least two vertices before it can be displayed.
    if len(self.draw_line) < 2:
        return
    polygon = mp_slipmap.SlipPolygon('drawing', self.draw_line,
                                     layer='Drawing',
                                     linewidth=2,
                                     colour=(128, 128, 255))
    self.mpstate.map.add_object(polygon)
update line drawing
def register_multi_flags_validator(flag_names,
                                   multi_flags_checker,
                                   message='Flags validation failed',
                                   flag_values=FLAGS):
  """Adds a constraint checked over several flags at once.

  The constraint is validated when flags are initially parsed, and after
  each change of the corresponding flag's value.

  Args:
    flag_names: [str], a list of the flag names to be checked.
    multi_flags_checker: callable, a function to validate the flag.
        input - dictionary, with keys() being flag_names, and value for each
            key being the value of the corresponding flag (string, boolean,
            etc).
        output - Boolean. Must return True if validator constraint is
            satisfied. If constraint is not satisfied, it should either
            return False or raise gflags.ValidationError.
    message: Error text to be shown to the user if checker returns False.
        If checker raises gflags.ValidationError, message from the raised
        error will be shown.
    flag_values: An optional FlagValues instance to validate against.

  Raises:
    AttributeError: If a flag is not registered as a valid flag name.
  """
  _add_validator(
      flag_values,
      gflags_validators.MultiFlagsValidator(
          flag_names, multi_flags_checker, message))
Adds a constraint to multiple flags. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_names: [str], a list of the flag names to be checked. multi_flags_checker: callable, a function to validate the flag. input - dictionary, with keys() being flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc). output - Boolean. Must return True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise gflags.ValidationError. message: Error text to be shown to the user if checker returns False. If checker raises gflags.ValidationError, message from the raised error will be shown. flag_values: An optional FlagValues instance to validate against. Raises: AttributeError: If a flag is not registered as a valid flag name.
def profile(*args, **kwargs):
    """Decorator factory that instruments an HTTP endpoint for profiling."""
    # Refuse to decorate anything before init_app() has configured profiling.
    if not _is_initialized():
        raise Exception(
            "before measuring anything, you need to call init_app()")

    def wrapper(f):
        return wrapHttpEndpoint(f)

    return wrapper
http endpoint decorator
def __intermediate_bridge(self, interface, i):
    """Convert a NetJSON bridge interface to the UCI intermediate structure."""
    is_bridge = interface['type'] == 'bridge'
    # Ensure type "bridge" is only given to one logical interface: the first
    # occurrence carries the members, later ones merely reference the bridge.
    if is_bridge and i < 2:
        members = ' '.join(interface.pop('bridge_members'))
        if members:
            # Put bridge members in the ifname attribute.
            interface['ifname'] = members
        else:
            # No members: this is an empty bridge.
            interface['bridge_empty'] = True
            del interface['ifname']
    elif is_bridge:
        # The bridge has already been defined; this is an extra reference.
        # OpenWrt adds a "br-" prefix to bridge interfaces, which we must
        # take into account when referring to their physical names.
        if 'br-' not in interface['ifname']:
            interface['ifname'] = 'br-{ifname}'.format(**interface)
        # Do not repeat bridge attributes (already processed above).
        for key in ('type', 'bridge_members', 'stp', 'gateway'):
            interface.pop(key, None)
    else:
        del interface['type']
    return interface
converts NetJSON bridge to UCI intermediate data structure
def get_bank_form(self, *args, **kwargs):
    """Pass through to provider BankAdminSession.get_bank_form_for_update"""
    # Heuristic dispatch (admittedly sketchy): a trailing list argument or an
    # explicit 'bank_record_types' keyword signals a create form; anything
    # else is treated as an update.
    wants_create = isinstance(args[-1], list) or 'bank_record_types' in kwargs
    target = (self.get_bank_form_for_create if wants_create
              else self.get_bank_form_for_update)
    return target(*args, **kwargs)
Pass through to provider BankAdminSession.get_bank_form_for_update
def generate_command(dag_id,
                     task_id,
                     execution_date,
                     mark_success=False,
                     ignore_all_deps=False,
                     ignore_depends_on_past=False,
                     ignore_task_deps=False,
                     ignore_ti_state=False,
                     local=False,
                     pickle_id=None,
                     file_path=None,
                     raw=False,
                     job_id=None,
                     pool=None,
                     cfg_path=None
                     ):
    """
    Generates the shell command required to execute this task instance.

    :param dag_id: DAG ID
    :type dag_id: unicode
    :param task_id: Task ID
    :type task_id: unicode
    :param execution_date: Execution date for the task
    :type execution_date: datetime
    :param mark_success: Whether to mark the task as successful
    :type mark_success: bool
    :param ignore_all_deps: Ignore all ignorable dependencies.
        Overrides the other ignore_* parameters.
    :type ignore_all_deps: bool
    :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
        (e.g. for Backfills)
    :type ignore_depends_on_past: bool
    :param ignore_task_deps: Ignore task-specific dependencies such as
        depends_on_past and trigger rule
    :type ignore_task_deps: bool
    :param ignore_ti_state: Ignore the task instance's previous failure/success
    :type ignore_ti_state: bool
    :param local: Whether to run the task locally
    :type local: bool
    :param pickle_id: If the DAG was serialized to the DB, the ID associated
        with the pickled DAG
    :type pickle_id: unicode
    :param file_path: path to the file containing the DAG definition
    :param raw: raw mode (needs more details)
    :param job_id: job ID (needs more details)
    :param pool: the Airflow pool that the task should run in
    :type pool: unicode
    :param cfg_path: the Path to the configuration file
    :type cfg_path: basestring
    :return: shell command that can be used to run the task instance
    """
    iso = execution_date.isoformat()
    cmd = ["airflow", "run", str(dag_id), str(task_id), str(iso)]
    # Plain conditionals instead of the original's `expr if cond else None`
    # expression statements, which were used purely for their side effects.
    # The flag order below is preserved exactly.
    if mark_success:
        cmd.append("--mark_success")
    if pickle_id:
        cmd.extend(["--pickle", str(pickle_id)])
    if job_id:
        cmd.extend(["--job_id", str(job_id)])
    if ignore_all_deps:
        cmd.append("-A")
    if ignore_task_deps:
        cmd.append("-i")
    if ignore_depends_on_past:
        cmd.append("-I")
    if ignore_ti_state:
        cmd.append("--force")
    if local:
        cmd.append("--local")
    if pool:
        cmd.extend(["--pool", pool])
    if raw:
        cmd.append("--raw")
    if file_path:
        cmd.extend(["-sd", file_path])
    if cfg_path:
        cmd.extend(["--cfg_path", cfg_path])
    return cmd
Generates the shell command required to execute this task instance. :param dag_id: DAG ID :type dag_id: unicode :param task_id: Task ID :type task_id: unicode :param execution_date: Execution date for the task :type execution_date: datetime :param mark_success: Whether to mark the task as successful :type mark_success: bool :param ignore_all_deps: Ignore all ignorable dependencies. Overrides the other ignore_* parameters. :type ignore_all_deps: bool :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for Backfills) :type ignore_depends_on_past: bool :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and trigger rule :type ignore_task_deps: bool :param ignore_ti_state: Ignore the task instance's previous failure/success :type ignore_ti_state: bool :param local: Whether to run the task locally :type local: bool :param pickle_id: If the DAG was serialized to the DB, the ID associated with the pickled DAG :type pickle_id: unicode :param file_path: path to the file containing the DAG definition :param raw: raw mode (needs more details) :param job_id: job ID (needs more details) :param pool: the Airflow pool that the task should run in :type pool: unicode :param cfg_path: the Path to the configuration file :type cfg_path: basestring :return: shell command that can be used to run the task instance
def read(self, src):
    """
    Download GeoJSON file of US counties from url (S3 bucket)

    Local paths are read directly; remote files are cached on the local
    filesystem and served from that cache on subsequent calls.
    """
    if not self.is_valid_src(src):
        error = "File < {0} > does not exists or does start with 'http'."
        raise ValueError(error.format(src))

    # Local file: read and normalise the encoding.  Use a context manager so
    # the file handle is always closed (the original leaked it).
    if not self.is_url(src):
        with open(src, 'r') as f:
            return f.read().decode('latin-1').encode('utf-8')

    tmp = self.get_location(src)
    # If src points to a url that was already downloaded,
    # read from the local cache file instead.
    if os.path.isfile(tmp):
        with open(tmp, 'r') as f:
            return f.read()

    # Download the file and write it to the local filesystem before
    # returning, closing the HTTP response even on decode errors.
    response = urllib2.urlopen(src)
    try:
        data = response.read().decode('latin-1').encode('utf-8')
    finally:
        response.close()
    with open(tmp, 'w') as f:
        f.write(data)
    return data
Download GeoJSON file of US counties from url (S3 bucket)
def update_user_ns(self, result): """Update user_ns with various things like _, __, _1, etc.""" # Avoid recursive reference when displaying _oh/Out if result is not self.shell.user_ns['_oh']: if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache: warn('Output cache limit (currently '+ `self.cache_size`+' entries) hit.\n' 'Flushing cache and resetting history counter...\n' 'The only history variables available will be _,__,___ and _1\n' 'with the current result.') self.flush() # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise # we cause buggy behavior for things like gettext). if '_' not in __builtin__.__dict__: self.___ = self.__ self.__ = self._ self._ = result self.shell.push({'_':self._, '__':self.__, '___':self.___}, interactive=False) # hackish access to top-level namespace to create _1,_2... dynamically to_main = {} if self.do_full_cache: new_result = '_'+`self.prompt_count` to_main[new_result] = result self.shell.push(to_main, interactive=False) self.shell.user_ns['_oh'][self.prompt_count] = result
Update user_ns with various things like _, __, _1, etc.
def CheckCronJobAccess(self, username, cron_job_id):
    """Checks whether a given user can access given cron job."""
    approval_type = (
        rdf_objects.ApprovalRequest.ApprovalType.APPROVAL_TYPE_CRON_JOB)
    self._CheckAccess(username, str(cron_job_id), approval_type)
Checks whether a given user can access given cron job.
def x_11paths_authorization(app_id, secret, context, utc=None):
    """
    Calculate the authentication headers to be sent with a request to the API.

    :param app_id:
    :param secret:
    :param context
    :param utc:
    :return: array a map with the Authorization and Date headers needed to
        sign a Latch API request
    """
    # Default to the date already placed in the request headers so the
    # signature and the transmitted Date header always agree.
    utc = utc or context.headers[X_11PATHS_DATE_HEADER_NAME]
    url_path = ensure_url_path_starts_with_slash(context.url_path)
    url_path_query = url_path
    if context.query_params:
        # Query params are serialized in sorted order so both ends of the
        # exchange build the same string to sign.
        url_path_query += "?%s" % (url_encode(context.query_params, sort=True))
    # The string to sign is: METHOD \n date \n serialized headers \n path?query
    string_to_sign = (context.method.upper().strip() + "\n" +
                      utc + "\n" +
                      _get_11paths_serialized_headers(context.headers) + "\n" +
                      url_path_query.strip())
    if context.body_params and isinstance(context.renderer, FormRenderer):
        # Form-encoded bodies are folded into the signature as well.
        # NOTE(review): the "&" separators are stripped from the encoded body
        # before signing -- presumably mandated by the 11paths signing spec;
        # confirm against the official documentation.
        string_to_sign = string_to_sign + "\n" + url_encode(context.body_params, sort=True).replace("&", "")
    authorization_header_value = (AUTHORIZATION_METHOD +
                                  AUTHORIZATION_HEADER_FIELD_SEPARATOR +
                                  app_id +
                                  AUTHORIZATION_HEADER_FIELD_SEPARATOR +
                                  _sign_data(secret, string_to_sign))
    return authorization_header_value
Calculate the authentication headers to be sent with a request to the API. :param app_id: :param secret: :param context :param utc: :return: array a map with the Authorization and Date headers needed to sign a Latch API request
def locked_context(self, key=None, default=dict):
    """
    Executor context is a shared memory object. All workers share this.
    It needs a lock. Its used like this:

        with executor.context() as context:
            visited = context['visited']
            visited.append(state.cpu.PC)
            context['visited'] = visited
    """
    # Only these container types may be auto-created for a missing key.
    assert default in (list, dict, set)
    with self._lock:
        if key is None:
            # No key given: expose the entire shared context under the lock.
            yield self._shared_context
        else:
            # Keyed access: lazily create the sub-context with `default()`,
            # hand it to the caller, then store it back afterwards --
            # presumably so manager/proxy-backed mappings observe the
            # caller's mutations (TODO confirm).
            sub_context = self._shared_context.get(key, None)
            if sub_context is None:
                sub_context = default()
            yield sub_context
            self._shared_context[key] = sub_context
Executor context is a shared memory object. All workers share this. It needs a lock. Its used like this: with executor.context() as context: visited = context['visited'] visited.append(state.cpu.PC) context['visited'] = visited
def download_and_calibrate_parallel(list_of_ids, n=None):
    """Download and calibrate in parallel.

    Parameters
    ----------
    list_of_ids : list, optional
        container with img_ids to process
    n : int
        Number of cores for the parallel processing. Default: n_cores_system//2
    """
    setup_cluster(n_cores=n)
    client = Client()
    view = client.load_balanced_view()
    # Fan the work out over the cluster without blocking on the results.
    view.map_async(download_and_calibrate, list_of_ids)
    subprocess.Popen(["ipcluster", "stop", "--quiet"])
Download and calibrate in parallel. Parameters ---------- list_of_ids : list, optional container with img_ids to process n : int Number of cores for the parallel processing. Default: n_cores_system//2
def _tp_relfq_name(tp, tp_name=None, assumed_globals=None, update_assumed_globals=None,
        implicit_globals=None):
    # _type: (type, Optional[Union[Set[Union[type, types.ModuleType]], Mapping[Union[type, types.ModuleType], str]]], Optional[bool]) -> str
    """Provides the fully qualified name of a type relative to a set of
    modules and types that is assumed as globally available.
    If assumed_globals is None this always returns the fully qualified name.
    If update_assumed_globals is True, this will return the plain type name,
    but will add the type to assumed_globals (expected to be a set).
    This way a caller can query how to generate an appropriate import section.
    If update_assumed_globals is False, assumed_globals can alternatively be
    a mapping rather than a set. In that case the mapping is expected to be an
    alias table, mapping modules or types to their alias names desired for
    displaying.
    update_assumed_globals can be None (default). In that case this will return the
    plain type name if assumed_globals is None as well (default).
    This mode is there to have a less involved default behavior.
    """
    if tp_name is None:
        tp_name = util.get_class_qualname(tp)
    if implicit_globals is None:
        implicit_globals = _implicit_globals
    else:
        implicit_globals = implicit_globals.copy()
        implicit_globals.update(_implicit_globals)
    if assumed_globals is None:
        if update_assumed_globals is None:
            return tp_name
        md = sys.modules[tp.__module__]
        if md in implicit_globals:
            return tp_name
        name = tp.__module__+'.'+tp_name
        pck = None
        if not (md.__package__ is None or md.__package__ == ''
                or name.startswith(md.__package__)):
            pck = md.__package__
        return name if pck is None else pck+'.'+name
    # assumed_globals may be a plain set (not subscriptable) or an alias
    # mapping; indexing a set raises TypeError.  The original used bare
    # "except:" clauses here, which also swallowed KeyboardInterrupt and
    # SystemExit -- narrowed to the exceptions that can actually occur.
    if tp in assumed_globals:
        try:
            return assumed_globals[tp]
        except (TypeError, KeyError):
            return tp_name
    elif hasattr(tp, '__origin__') and tp.__origin__ in assumed_globals:
        try:
            return assumed_globals[tp.__origin__]
        except (TypeError, KeyError):
            return tp_name
    # For some reason Callable does not have __origin__, so we special-case
    # it here. Todo: Find a cleaner solution.
    elif is_Callable(tp) and typing.Callable in assumed_globals:
        try:
            return assumed_globals[typing.Callable]
        except (TypeError, KeyError):
            return tp_name
    elif update_assumed_globals == True:
        if assumed_globals is not None:
            if hasattr(tp, '__origin__') and tp.__origin__ is not None:
                toadd = tp.__origin__
            elif is_Callable(tp):
                toadd = typing.Callable
            else:
                toadd = tp
            if not sys.modules[toadd.__module__] in implicit_globals:
                assumed_globals.add(toadd)
        return tp_name
    else:
        md = sys.modules[tp.__module__]
        if md in implicit_globals:
            return tp_name
        md_name = tp.__module__
        if md in assumed_globals:
            try:
                md_name = assumed_globals[md]
            except (TypeError, KeyError):
                pass
        else:
            if not (md.__package__ is None or md.__package__ == ''
                    or md_name.startswith(md.__package__)):
                md_name = md.__package__+'.'+tp.__module__
        return md_name+'.'+tp_name
Provides the fully qualified name of a type relative to a set of modules and types that is assumed as globally available. If assumed_globals is None this always returns the fully qualified name. If update_assumed_globals is True, this will return the plain type name, but will add the type to assumed_globals (expected to be a set). This way a caller can query how to generate an appropriate import section. If update_assumed_globals is False, assumed_globals can alternatively be a mapping rather than a set. In that case the mapping is expected to be an alias table, mapping modules or types to their alias names desired for displaying. update_assumed_globals can be None (default). In that case this will return the plain type name if assumed_globals is None as well (default). This mode is there to have a less involved default behavior.
def extract_operations(self, migrations):
    """
    Extract SQL operations from the given migrations
    """
    found = []
    for migration in migrations:
        for op in migration.operations:
            # Only raw-SQL migration operations are of interest.
            if not isinstance(op, RunSQL):
                continue
            for statement in sqlparse.parse(dedent(op.sql)):
                parsed = SqlObjectOperation.parse(statement)
                if not parsed:
                    continue
                found.append(parsed)
                if self.verbosity >= 2:
                    self.stdout.write(" > % -100s (%s)" % (parsed, migration))
    return found
Extract SQL operations from the given migrations
def geo_field(queryset):
    """Returns the GeometryField for a django or spillway GeoQuerySet."""
    for candidate in queryset.model._meta.fields:
        if isinstance(candidate, models.GeometryField):
            return candidate
    raise exceptions.FieldDoesNotExist('No GeometryField found')
Returns the GeometryField for a django or spillway GeoQuerySet.
def transform(self, flip_x, flip_y, swap_xy):
    """Transform view of the image.

    .. note:: Transforming the image is generally faster than rotating,
              if rotating in 90 degree increments. Also see :meth:`rotate`.

    Parameters
    ----------
    flipx, flipy : bool
        If `True`, flip the image in the X and Y axes, respectively

    swapxy : bool
        If `True`, swap the X and Y axes.
    """
    self.logger.debug("flip_x=%s flip_y=%s swap_xy=%s" % (
        flip_x, flip_y, swap_xy))

    new_settings = dict(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
    # Apply all three settings in one batch while redraws are suppressed.
    with self.suppress_redraw:
        self.t_.set(**new_settings)
Transform view of the image. .. note:: Transforming the image is generally faster than rotating, if rotating in 90 degree increments. Also see :meth:`rotate`. Parameters ---------- flipx, flipy : bool If `True`, flip the image in the X and Y axes, respectively swapxy : bool If `True`, swap the X and Y axes.
def resize_old(self, block_size, order=0, mode='constant', cval=False):
    '''
    geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True)

    Returns resized georaster
    '''
    # cval=False (the default) means "pad with NaN".
    if not cval:
        cval = np.nan
    # Rescale float rasters whose values fall outside [-1, 1] into [0, 1];
    # the min/max are re-used below to undo this normalization.
    if (self.raster.dtype.name.find('float') != -1 and
            np.max(np.abs([self.max(), self.min()])) > 1):
        raster2 = (self.raster-self.min())/(self.max()-self.min())
    else:
        raster2 = self.raster.copy()
    raster2 = raster2.astype(float)
    # Masked cells become NaN so interpolation does not bleed nodata values.
    raster2[self.raster.mask] = np.nan
    raster2 = resize(raster2, block_size, order=order, mode=mode, cval=cval)
    raster2 = np.ma.masked_array(raster2, mask=np.isnan(raster2),
                                 fill_value=self.raster.fill_value)
    # Undo the [0, 1] normalization.
    # NOTE(review): this de-normalization runs even when the branch above did
    # NOT normalize -- looks like it could corrupt unnormalized rasters;
    # confirm before relying on this (deprecated) method.
    raster2 = raster2*(self.max()-self.min())+self.min()
    raster2[raster2.mask] = self.nodata_value
    raster2.mask = np.logical_or(np.isnan(raster2.data),
                                 raster2.data == self.nodata_value)
    # Scale the pixel-size entries of the geotransform to the new shape.
    geot = list(self.geot)
    [geot[-1], geot[1]] = np.array([geot[-1], geot[1]])*self.shape/block_size
    return GeoRaster(raster2, tuple(geot), nodata_value=self.nodata_value,
                     projection=self.projection, datatype=self.datatype)
geo.resize(new_shape, order=0, mode='constant', cval=np.nan, preserve_range=True) Returns resized georaster
def get_listed_projects():
    """Find the projects listed in the Home Documentation's index.md file

    Returns:
        set(str): projects' names, with the '/' in their beginnings
    """
    index_path = Path().resolve() / "docs" / "index.md"
    with open(index_path, "r") as index_file:
        lines = index_file.readlines()

    listed_projects = set()
    in_project_section = False
    for line in lines:
        if line.find(PROJECT_KEY) >= 0:
            in_project_section = True
        if in_project_section:
            # Find the first markdown-link opener "](" on the line.
            start = line.find("](")
            if start > 0:
                # The project path runs up to the first ")" after the opener.
                closing_parenthesis = min(
                    m.start() for m in re.finditer(r"\)", line)
                    if m.start() > start)
                listed_projects.add(line[start + 2:closing_parenthesis])
        # If the Projects section is over, stop iterating.  Waiting for the
        # next "#" heading (rather than "##") lets descriptions inside the
        # section use a single "#" freely until a project has been found.
        if listed_projects and line.startswith("#"):
            return listed_projects
    return listed_projects
Find the projects listed in the Home Documentation's index.md file Returns: set(str): projects' names, with the '/' in their beginnings
def fetch_wallet_balances(wallets, fiat, **modes):
    """
    Wallets must be list of two item lists. First item is crypto, second item
    is the address. example:

    [
        ['btc', '1PZ3Ps9RvCmUW1s1rHE25FeR8vtKUrhEai'],
        ['ltc', 'Lb78JDGxMcih1gs3AirMeRW6jaG5V9hwFZ']
    ]
    """
    # Each distinct crypto symbol needs exactly one price lookup.
    price_fetch = set([x[0] for x in wallets])
    balances = {}
    prices = {}
    fetch_length = len(wallets) + len(price_fetch)
    helpers = {fiat.lower(): {}}

    if not modes.get('async', False):
        # synchronous fetching
        for crypto in price_fetch:
            try:
                p = get_current_price(
                    crypto, fiat, helper_prices=helpers, report_services=True, **modes
                )
                prices[crypto] = {'price': p}
                # These majors serve as conversion helpers for other coins.
                if crypto in ['btc', 'ltc', 'doge', 'uno']:
                    helpers[fiat.lower()][crypto] = p
            except NoService as exc:
                prices[crypto] = {'error': str(exc)}

        for crypto, address in wallets:
            # A purely numeric "address" is treated as a literal balance.
            if address.replace('.', '').isdigit():
                balances[address] = {'balance': float(address)}
                continue
            try:
                balances[address] = {'balance': get_address_balance(crypto, address.strip(), **modes)}
            except NoService as exc:
                balances[address] = {'error': str(exc)}
    else:
        # asynchronous fetching
        if modes.get('verbose', False):
            print("Need to make", fetch_length, "external calls")
        with futures.ThreadPoolExecutor(max_workers=int(fetch_length / 2)) as executor:
            future_to_key = dict(
                (executor.submit(
                    get_current_price, crypto, fiat, report_services=True, **modes
                ), crypto) for crypto in price_fetch
            )
            future_to_key.update(dict(
                (executor.submit(
                    get_address_balance, crypto, address.strip(), **modes
                ), address) for crypto, address in wallets
            ))
            done, not_done = futures.wait(future_to_key, return_when=futures.ALL_COMPLETED)
            if len(not_done) > 0:
                # NOTE(review): leftover debug scaffolding -- `import debug`
                # and the unconditional raise look like they should be
                # replaced with real error handling; confirm before shipping.
                print (not_done)
                import debug
                raise Exception("Broke")
                #not_done.pop().exception()
            for future in done:
                key = future_to_key[future]
                # NOTE(review): keys longer than 5 chars are assumed to be
                # addresses (balances) and shorter ones crypto symbols
                # (prices) -- this will break if a crypto symbol is longer
                # than 5 chars.  Also, the raw result is stored here without
                # the {'balance': ...}/{'price': ...} wrapper that the
                # aggregation below expects; confirm the async path works.
                if len(key) > 5:
                    which = balances
                else:
                    which = prices

                res = future.result()
                which[key] = res

    ret = []
    for crypto, address in wallets:
        error = None
        if 'balance' in balances[address]:
            crypto_value = balances[address]['balance']
        else:
            crypto_value = 0
            error = balances[address]['error']

        if 'price' in prices[crypto]:
            sources, fiat_price = prices[crypto]['price']
        else:
            sources, fiat_price = [], 0
            error = prices[crypto]['error']

        ret.append({
            'crypto': crypto,
            'address': address,
            'crypto_value': crypto_value,
            'fiat_value': (crypto_value or 0) * (fiat_price or 0),
            'conversion_price': fiat_price,
            'price_source': sources[0].name if sources else "None",
            'error': error
        })

    return ret
Wallets must be list of two item lists. First item is crypto, second item is the address. example: [ ['btc', '1PZ3Ps9RvCmUW1s1rHE25FeR8vtKUrhEai'], ['ltc', 'Lb78JDGxMcih1gs3AirMeRW6jaG5V9hwFZ'] ]
def real_ip(self):
    """ The actual public IP of this host. """
    cached = self._real_ip
    if cached is None:
        # Lazily resolve via the external service, then cache the answer.
        cached = self._get_response_text(get(ICANHAZIP))
        self._real_ip = cached
    return cached
The actual public IP of this host.
def get_supported_currency_choices(api_key):
    """
    Pull a stripe account's supported currencies and return a list of
    (value, display) choice pairs for those currencies.

    :param api_key: The api key associated with the account from which to pull data.
    :type api_key: str
    """
    import stripe

    stripe.api_key = api_key
    account = stripe.Account.retrieve()
    country_spec = stripe.CountrySpec.retrieve(account["country"])
    return [
        (currency, currency.upper())
        for currency in country_spec["supported_payment_currencies"]
    ]
Pull a stripe account's supported currencies and returns a choices tuple of those supported currencies. :param api_key: The api key associated with the account from which to pull data. :type api_key: str
def add_fields(
        layer, absolute_values, static_fields, dynamic_structure):
    """Function to add fields needed in the output layer.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param absolute_values: The absolute value structure.
    :type absolute_values: dict

    :param static_fields: The list of static fields to add.
    :type static_fields: list

    :param dynamic_structure: The list of dynamic fields to add to the layer.
        The list must be structured like this:
        dynamic_structure = [
            [exposure_count_field, unique_exposure]
        ]
        where "exposure_count_field" is the dynamic to field to add and
        "unique_exposure" is the list of unique values to associate with this
        dynamic field. Because dynamic_structure is a ordered list, you can
        add many dynamic fields.
    :type dynamic_structure: list
    """
    # Dynamic fields: one concrete field per unique value, with the value
    # substituted into both the key and field-name templates.
    for new_dynamic_field in dynamic_structure:
        field_definition = new_dynamic_field[0]
        unique_values = new_dynamic_field[1]
        for column in unique_values:
            # Normalise empty strings / QGIS NULL values to the literal
            # 'NULL' so the generated field name stays valid.
            if (column == ''
                    or (hasattr(column, 'isNull')
                        and column.isNull())):
                column = 'NULL'
            field = create_field_from_definition(field_definition, column)
            layer.addAttribute(field)
            key = field_definition['key'] % column
            value = field_definition['field_name'] % column
            layer.keywords['inasafe_fields'][key] = value

    # Static fields are added exactly as defined.
    for static_field in static_fields:
        field = create_field_from_definition(static_field)
        layer.addAttribute(field)
        # noinspection PyTypeChecker
        layer.keywords['inasafe_fields'][static_field['key']] = (
            static_field['field_name'])

    # For each absolute values
    for absolute_field in list(absolute_values.keys()):
        field_definition = definition(absolute_values[absolute_field][1])
        field = create_field_from_definition(field_definition)
        layer.addAttribute(field)
        key = field_definition['key']
        value = field_definition['field_name']
        layer.keywords['inasafe_fields'][key] = value
Function to add fields needed in the output layer. :param layer: The vector layer. :type layer: QgsVectorLayer :param absolute_values: The absolute value structure. :type absolute_values: dict :param static_fields: The list of static fields to add. :type static_fields: list :param dynamic_structure: The list of dynamic fields to add to the layer. The list must be structured like this: dynamic_structure = [ [exposure_count_field, unique_exposure] ] where "exposure_count_field" is the dynamic to field to add and "unique_exposure" is the list of unique values to associate with this dynamic field. Because dynamic_structure is a ordered list, you can add many dynamic fields. :type dynamic_structure: list
def __setWildcardSymbol(self, value):
    """self.__wildcardSymbol variable setter"""
    errors = []
    # The original tested `not value is str`, which compares `value` against
    # the `str` type object itself (True for every actual string) and then
    # crashed on `.split()` for non-string inputs.  The intended check, per
    # the error message, is "a non-blank string".
    if not isinstance(value, str) or not value.split():
        errors.append('wildcardSymbol_ERROR : Symbol : must be char or string!')
    else:
        self.__wildcardSymbol = value
    if errors:
        view.Tli.showErrors('SymbolError', errors)
self.__wildcardSymbol variable setter
def ignore_missing_email_protection_eku_cb(ok, ctx):
    """
    Custom verify callback tolerating signing certificates whose
    ExtendedKeyUsage extension lacks the 'emailProtection' OID.

    OpenSSL's PKCS7_verify(), which m2Crypto uses for PKCS7 signature
    verification, requires that OID whenever an EKU extension is present
    (S/MIME heritage).  M2Crypto currently offers no way to remove or modify
    an existing extension, so we ignore that specific failure instead.
    """
    # Only the X509_V_ERR_INVALID_PURPOSE error is of interest here.
    if ctx.get_error() != m2.X509_V_ERR_INVALID_PURPOSE:
        return ok

    # PKCS7_verify() imposes the EKU requirement only on the signing
    # certificate (depth 0); leave the rest of the chain alone.
    if ctx.get_error_depth() > 0:
        return ok

    # ERR_INVALID_PURPOSE can also be caused by an incompatible keyUsage;
    # keep the default behavior in that case.
    cert = ctx.get_current_cert()
    try:
        key_usage = cert.get_ext('keyUsage').get_value()
    except LookupError:
        key_usage = None
    if (key_usage is not None
            and 'digitalSignature' not in key_usage
            and 'nonRepudiation' not in key_usage):
        return ok

    # keyUsage is absent or contains the needed bit(s), so the failure came
    # from an EKU extension without 'emailProtection'.  Ignore this error.
    return 1
For verifying PKCS7 signature, m2Crypto uses OpenSSL's PKCS7_verify(). The latter requires that ExtendedKeyUsage extension, if present, contains 'emailProtection' OID. (Is it because S/MIME is/was the primary use case for PKCS7?) We do not want to fail the verification in this case. At present, M2Crypto lacks possibility of removing or modifying an existing extension. Let's assign a custom verification callback.
def interactive():
    """Interactive classifier endpoint.

    * GET with a non-empty ``heartbeat`` query parameter echoes that value
      back (simple health check).
    * POST is deprecated (the /worker endpoint replaced it) -- only a
      warning is logged and an empty response is returned.
    * Any other GET renders the drawing canvas page.
    """
    # Fix: removed the dead ``global n`` statement -- ``n`` was never read
    # or assigned anywhere in this function.
    if request.method == 'GET' and request.args.get('heartbeat', '') != "":
        return request.args.get('heartbeat', '')
    if request.method == 'POST':
        logging.warning('POST to /interactive is deprecated. '
                        'Use /worker instead')
    else:
        # Page where the user can enter a recording
        return render_template('canvas.html')
Interactive classifier.
def can_edit(self, user=None, request=None):
    """
    Define if a user can edit or not the instance, according to his account
    or the request.

    :param user: Django user (may be anonymous or None).
    :param request: current HTTP request, used for anonymous-owner checks.
    :return: True when editing is allowed, False otherwise.

    Side effect: when an anonymous-owned map is visited by an authenticated
    user, ownership is attached to that user and the instance is saved.
    """
    can = False
    if request and not self.owner:
        # Map has no owner yet: anonymous editing may be allowed by settings
        # if this request proves anonymous ownership (cookie/signature check
        # presumably lives in is_anonymous_owner -- confirm there).
        if (getattr(settings, "LEAFLET_STORAGE_ALLOW_ANONYMOUS", False)
                and self.is_anonymous_owner(request)):
            can = True
        if user and user.is_authenticated():
            # if user is authenticated, attach as owner
            self.owner = user
            self.save()
            msg = _("Your anonymous map has been attached to your account %s" % user)
            messages.info(request, msg)
    # Status/ownership checks, most permissive first.
    if self.edit_status == self.ANONYMOUS:
        can = True
    elif not user.is_authenticated():
        # NOTE(review): if user is None this raises AttributeError -- the
        # callers apparently always pass a user object; confirm.
        pass
    elif user == self.owner:
        can = True
    elif self.edit_status == self.EDITORS and user in self.editors.all():
        can = True
    return can
Define if a user can edit or not the instance, according to his account or the request.
def cache_cluster_exists(name, conn=None, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a cache cluster exists.

    Example:

    .. code-block:: bash

        salt myminion boto3_elasticache.cache_cluster_exists myelasticache
    '''
    # A non-empty describe result means the cluster is present.
    described = describe_cache_clusters(name=name, conn=conn, region=region,
                                        key=key, keyid=keyid, profile=profile)
    return bool(described)
Check to see if a cache cluster exists. Example: .. code-block:: bash salt myminion boto3_elasticache.cache_cluster_exists myelasticache
def tidy_all_the_variables(host, inventory_mgr):
    '''
    removes all overridden and inherited variables from hosts and groups

    Rebuilds the module-level ``_vars`` mapping from scratch: it is seeded
    with the host's own vars, then every group the host belongs to is
    walked and pruned by the helper functions.

    :param host: inventory host object whose groups are traversed
    :param inventory_mgr: inventory manager used to look up host/group vars
    :return: the rebuilt ``_vars`` dict (keyed by host)
    '''
    global _vars
    _vars = dict()
    _vars[host] = inventory_mgr.inventory.get_host_vars(host)
    for group in host.get_groups():
        # Both helpers mutate _vars in place; the exact pruning semantics
        # live in those helpers (defined elsewhere) -- see their docs.
        remove_inherited_and_overridden_vars(_vars[host], group, inventory_mgr)
        remove_inherited_and_overridden_group_vars(group, inventory_mgr)
    return _vars
removes all overridden and inherited variables from hosts and groups
def put_comments(self, resource, comment, timeout=None):
    """ Post a comment on a file or URL to VirusTotal Community.

    Comments may be malware analyses, false-positive flags, disinfection
    instructions, etc.  Automated setups can use this to publish their
    findings for samples or URLs they submit for characterization.

    :param resource: either a md5/sha1/sha256 hash of the file you want to
        review or the URL itself that you want to comment on.
    :param comment: the actual review; supports "#" hashtags
        (e.g. #disinfection #zbot) and "@" user references
        (e.g. @VirusTotalTeam).
    :param timeout: The amount of time in seconds the request should wait
        before timing out.
    :return: response code 1 when the comment was successfully posted,
        0 otherwise; on a transport failure a dict with an 'error' key.
    """
    payload = {
        'apikey': self.api_key,
        'resource': resource,
        'comment': comment,
    }
    try:
        response = requests.post(self.base + 'comments/put',
                                 params=payload,
                                 proxies=self.proxies,
                                 timeout=timeout)
    except requests.RequestException as e:
        # Network-level failure: surface the message instead of raising.
        return dict(error=str(e))
    return _return_response_and_status_code(response)
Post a comment on a file or URL. The initial idea of VirusTotal Community was that users should be able to make comments on files and URLs, the comments may be malware analyses, false positive flags, disinfection instructions, etc. Imagine you have some automatic setup that can produce interesting results related to a given sample or URL that you submit to VirusTotal for antivirus characterization, you might want to give visibility to your setup by automatically reviewing samples and URLs with the output of your automation. :param resource: either a md5/sha1/sha256 hash of the file you want to review or the URL itself that you want to comment on. :param comment: the actual review, you can tag it using the "#" twitter-like syntax (e.g. #disinfection #zbot) and reference users using the "@" syntax (e.g. @VirusTotalTeam). :param timeout: The amount of time in seconds the request should wait before timing out. :return: If the comment was successfully posted the response code will be 1, 0 otherwise.
def getdrawings():
    """Return every stored drawing as a JSON payload."""
    stored = Info.query.all()
    # Each Info row stores its sketch as a JSON string in `contents`.
    drawings = [json.loads(record.contents) for record in stored]
    return jsonify(drawings=drawings)
Get all the drawings.
def to_python(self, value: Union[Dict[str, int], int, None]) -> LocalizedIntegerValue:
    """Converts the value from a database value into a Python value.

    Delegates base deserialization to the parent field, then converts the
    per-locale values into a LocalizedIntegerValue.
    """
    # Parent handles the raw dict/int/None forms coming from the DB.
    db_value = super().to_python(value)
    return self._convert_localized_value(db_value)
Converts the value from a database value into a Python value.
def path(self, category=None, image=None, feature=None):
    """
    Constructs the path to categories, images and features.

    This path function assumes that the following storage scheme is used on
    the hard disk to access categories, images and features:

        - categories: /impath/category
        - images: /impath/category/category_image.png
        - features: /ftrpath/category/feature/category_image.mat

    The path function is called to query the location of categories,
    images and features before they are loaded. Thus, if your features
    are organized in a different way, you can simply replace this method
    such that it returns appropriate paths and the LoadFromDisk loader
    will use your naming scheme.
    """
    filename = None
    # Idiom fix: identity comparisons with None (`is not None`) replace the
    # original `not x is None` / `x != None` forms; behavior is unchanged.
    if category is not None:
        filename = join(self.impath, str(category))
    if image is not None:
        assert category is not None, "The category has to be given if the image is given"
        filename = join(filename, '%s_%s.png' % (str(category), str(image)))
    if feature is not None:
        assert category is not None and image is not None, "If a feature name is given the category and image also have to be given."
        filename = join(self.ftrpath, str(category), feature,
                        '%s_%s.mat' % (str(category), str(image)))
    return filename
Constructs the path to categories, images and features. This path function assumes that the following storage scheme is used on the hard disk to access categories, images and features: - categories: /impath/category - images: /impath/category/category_image.png - features: /ftrpath/category/feature/category_image.mat The path function is called to query the location of categories, images and features before they are loaded. Thus, if your features are organized in a different way, you can simply replace this method such that it returns appropriate paths, and the LoadFromDisk loader will use your naming scheme.
def run(command, parser, cl_args, unknown_args):
    """ run command

    Dispatches on how many components the 'cluster/[role]/[env]' argument
    contains: cluster only, cluster/role, or cluster/role/env.
    """
    parts = cl_args['cluster/[role]/[env]'].split('/')
    dispatch = {
        1: show_cluster,
        2: show_cluster_role,
        3: show_cluster_role_env,
    }
    handler = dispatch.get(len(parts))
    if handler is None:
        Log.error('Invalid topologies selection')
        return False
    return handler(cl_args, *parts)
run command
def Dependencies(lTOC, xtrapath=None, manifest=None):
    """
    Expand LTOC to include all the closure of binary dependencies.

    LTOC is a logical table of contents, ie, a seq of tuples (name, path).
    Return LTOC expanded by all the binary dependencies of the entries
    in LTOC, except those listed in the module global EXCLUDES

    manifest should be a winmanifest.Manifest instance on Windows, so
    that all dependent assemblies can be added
    """
    # NOTE: this deliberately appends to lTOC while iterating it -- a
    # worklist pattern, so dependencies discovered along the way are
    # themselves analyzed in later iterations. Do not convert to a copy.
    for nm, pth, typ in lTOC:
        # `seen` is a module-level cache keyed on upper-cased names so each
        # binary is analyzed only once (case-insensitive on purpose).
        if seen.get(nm.upper(), 0):
            continue
        logger.info("Analyzing %s", pth)
        seen[nm.upper()] = 1
        if is_win:
            # Windows: pull in dependent side-by-side assemblies as well.
            for ftocnm, fn in selectAssemblies(pth, manifest):
                lTOC.append((ftocnm, fn, 'BINARY'))
        for lib, npth in selectImports(pth, xtrapath):
            # Skip anything already scheduled, by either name or path.
            if seen.get(lib.upper(), 0) or seen.get(npth.upper(), 0):
                continue
            seen[npth.upper()] = 1
            lTOC.append((lib, npth, 'BINARY'))
    return lTOC
Expand LTOC to include all the closure of binary dependencies. LTOC is a logical table of contents, ie, a seq of tuples (name, path). Return LTOC expanded by all the binary dependencies of the entries in LTOC, except those listed in the module global EXCLUDES manifest should be a winmanifest.Manifest instance on Windows, so that all dependent assemblies can be added
def bench_serpy():
    """Benchmark serializing 1000 objects with 2 fields each via serpy."""
    class FooSerializer(serpy.DictSerializer):
        """The serializer schema definition."""
        # Use a Field subclass like IntField if you need more validation.
        attr_2 = serpy.IntField()
        attr_1 = serpy.StrField()

    serialized = []
    for obj in object_loader():
        serialized.append(FooSerializer(obj).data)
    return serialized
Benchmark for 1000 objects with 2 fields.
def to_float_with_default(value, default_value):
    """
    Converts value into float or returns default when conversion is not possible.

    :param value: the value to convert.
    :param default_value: the default value.

    :return: float value or default value when conversion is not supported.
    """
    result = FloatConverter.to_nullable_float(value)
    # Idiom fix: use identity comparison with the None sentinel instead of
    # the original `result != None` equality test.
    return result if result is not None else default_value
Converts value into float or returns default when conversion is not possible. :param value: the value to convert. :param default_value: the default value. :return: float value or default value when conversion is not supported.
def send_stats(self, start, environ, response_interception, exception=None): """Send the actual timing stats. :param start: start time in seconds since the epoch as a floating point number :type start: float :param environ: wsgi environment :type environ: dict :param response_interception: dictionary in form {'status': '<response status>', 'response_headers': [<response headers], 'exc_info': <exc_info>} This is the interception of what was passed to start_response handler. :type response_interception: dict :param exception: optional exception happened during the iteration of the response :type exception: Exception """ # It could happen that start_response wasn't called or it failed, so we might have an empty interception if response_interception: # Create the timer object and send the data to statsd. key_name = self.get_key_name(environ, response_interception, exception=exception) timer = self.statsd_client.timer(key_name) timer._start_time = start timer.stop()
Send the actual timing stats. :param start: start time in seconds since the epoch as a floating point number :type start: float :param environ: wsgi environment :type environ: dict :param response_interception: dictionary in form {'status': '<response status>', 'response_headers': [<response headers], 'exc_info': <exc_info>} This is the interception of what was passed to start_response handler. :type response_interception: dict :param exception: optional exception happened during the iteration of the response :type exception: Exception
def read(self):
    """Read a wire format DNS message and build a dns.message.Message
    object.

    Parses the 12-byte header, then the question, answer, authority and
    additional sections in wire order, advancing ``self.current``.

    Raises ShortHeader when fewer than 12 bytes are available and
    TrailingJunk when bytes remain after the declared sections.
    """
    l = len(self.wire)
    # A DNS header is exactly 12 bytes; anything shorter is malformed.
    if l < 12:
        raise ShortHeader
    # Header layout: id, flags, then the four per-section record counts,
    # all 16-bit big-endian.
    (self.message.id, self.message.flags, qcount, ancount,
     aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
    self.current = 12
    if dns.opcode.is_update(self.message.flags):
        self.updating = True
    self._get_question(qcount)
    # Caller may only need the question section (e.g. for matching).
    if self.question_only:
        return
    self._get_section(self.message.answer, ancount)
    self._get_section(self.message.authority, aucount)
    self._get_section(self.message.additional, adcount)
    # All declared records consumed; leftover bytes mean a corrupt message.
    if self.current != l:
        raise TrailingJunk
    # For multi-message TSIG streams, unsigned intermediate messages must
    # still be folded into the running TSIG context.
    if self.message.multi and self.message.tsig_ctx and \
           not self.message.had_tsig:
        self.message.tsig_ctx.update(self.wire)
Read a wire format DNS message and build a dns.message.Message object.