Dataset columns: code, a string with lengths 75 to 104k, and docstring, a string with lengths 1 to 46.9k.
def camera_list(self, **kwargs): """Return a list of cameras.""" api = self._api_info['camera'] payload = dict({ '_sid': self._sid, 'api': api['name'], 'method': 'List', 'version': api['version'], }, **kwargs) response = self._get_json_with_retry(api['url'], payload) cameras = [] for data in response['data']['cameras']: cameras.append(Camera(data, self._video_stream_url)) return cameras
Return a list of cameras.
def video_set_callbacks(self, lock, unlock, display, opaque): '''Set callbacks and private data to render decoded video to a custom area in memory. Use L{video_set_format}() or L{video_set_format_callbacks}() to configure the decoded format. @param lock: callback to lock video memory (must not be NULL). @param unlock: callback to unlock video memory (or NULL if not needed). @param display: callback to display video (or NULL if not needed). @param opaque: private pointer for the three callbacks (as first parameter). @version: LibVLC 1.1.1 or later. ''' return libvlc_video_set_callbacks(self, lock, unlock, display, opaque)
Set callbacks and private data to render decoded video to a custom area in memory. Use L{video_set_format}() or L{video_set_format_callbacks}() to configure the decoded format. @param lock: callback to lock video memory (must not be NULL). @param unlock: callback to unlock video memory (or NULL if not needed). @param display: callback to display video (or NULL if not needed). @param opaque: private pointer for the three callbacks (as first parameter). @version: LibVLC 1.1.1 or later.
def main(): """ NAME eqarea_magic.py DESCRIPTION makes equal area projections from declination/inclination data SYNTAX eqarea_magic.py [command line options] INPUT takes magic formatted sites, samples, specimens, or measurements OPTIONS -h prints help message and quits -f FILE: specify input magic format file from magic, default='sites.txt' supported types=[measurements, specimens, samples, sites] -fsp FILE: specify specimen file name, (required if you want to plot measurements by sample) default='specimens.txt' -fsa FILE: specify sample file name, (required if you want to plot specimens by site) default='samples.txt' -fsi FILE: specify site file name, default='sites.txt' -flo FILE: specify location file name, default='locations.txt' -obj OBJ: specify level of plot [all, sit, sam, spc], default is all -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is geographic, unspecified assumed geographic -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors -c plot as colour contour -cm CM use color map CM [default is coolwarm] -sav save plot and quit quietly -no-tilt data are unoriented, allows plotting of measurement dec/inc NOTE all: entire file; sit: site; sam: sample; spc: specimen """ # extract arguments from sys.argv if '-h' in sys.argv: print(main.__doc__) sys.exit() dir_path = pmag.get_named_arg("-WD", default_val=".") input_dir_path = pmag.get_named_arg('-ID', '') if not input_dir_path: input_dir_path = dir_path in_file = pmag.get_named_arg("-f", default_val="sites.txt") in_file = pmag.resolve_file_name(in_file, input_dir_path) if "-ID" not in sys.argv: input_dir_path = os.path.split(in_file)[0] plot_by = pmag.get_named_arg("-obj", default_val="all").lower() spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt") samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt") site_file = pmag.get_named_arg("-fsi", default_val="sites.txt") loc_file = pmag.get_named_arg("-flo", default_val="locations.txt") ignore_tilt = False if '-no-tilt' in sys.argv: ignore_tilt = True color_map = "coolwarm" if '-c' in sys.argv: contour = True if '-cm' in sys.argv: ind = sys.argv.index('-cm') color_map = sys.argv[ind+1] else: color_map = 'coolwarm' else: contour = False interactive = True save_plots = False if '-sav' in sys.argv: save_plots = True interactive = False plot_ell = False if '-ell' in sys.argv: plot_ell = pmag.get_named_arg("-ell", "F") crd = pmag.get_named_arg("-crd", default_val="g") fmt = pmag.get_named_arg("-fmt", "svg") ipmag.eqarea_magic(in_file, dir_path, input_dir_path, spec_file, samp_file, site_file, loc_file, plot_by, crd, ignore_tilt, save_plots, fmt, contour, color_map, plot_ell, "all", interactive)
NAME eqarea_magic.py DESCRIPTION makes equal area projections from declination/inclination data SYNTAX eqarea_magic.py [command line options] INPUT takes magic formatted sites, samples, specimens, or measurements OPTIONS -h prints help message and quits -f FILE: specify input magic format file from magic, default='sites.txt' supported types=[measurements, specimens, samples, sites] -fsp FILE: specify specimen file name, (required if you want to plot measurements by sample) default='specimens.txt' -fsa FILE: specify sample file name, (required if you want to plot specimens by site) default='samples.txt' -fsi FILE: specify site file name, default='sites.txt' -flo FILE: specify location file name, default='locations.txt' -obj OBJ: specify level of plot [all, sit, sam, spc], default is all -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is geographic, unspecified assumed geographic -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Bootstrap eigenvectors -c plot as colour contour -cm CM use color map CM [default is coolwarm] -sav save plot and quit quietly -no-tilt data are unoriented, allows plotting of measurement dec/inc NOTE all: entire file; sit: site; sam: sample; spc: specimen
def _set_query_data_fast_1(self, page): """ set less expensive action=query response data PART 1 """ self.data['pageid'] = page.get('pageid') assessments = page.get('pageassessments') if assessments: self.data['assessments'] = assessments extract = page.get('extract') if extract: self.data['extract'] = extract extext = html2text.html2text(extract) if extext: self.data['extext'] = extext.strip() fullurl = page.get('fullurl') if fullurl: self.data['url'] = fullurl self.data['url_raw'] = fullurl + '?action=raw' length = page.get('length') if length: self.data['length'] = length self._extend_data('links', utils.get_links(page.get('links'))) self._update_data('modified', 'page', page.get('touched')) pageprops = page.get('pageprops') if pageprops: wikibase = pageprops.get('wikibase_item') if wikibase: self.data['wikibase'] = wikibase self.data['wikidata_url'] = utils.wikidata_url(wikibase) if 'disambiguation' in pageprops: self.data['disambiguation'] = len(self.data['links'])
set less expensive action=query response data PART 1
def _merge_points(self, function_address): """ Return the ordered merge points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list """ # we are entering a new function. now it's time to figure out how to optimally traverse the control flow # graph by generating the sorted merge points try: new_function = self.kb.functions[function_address] except KeyError: # the function does not exist return [ ] if function_address not in self._function_merge_points: ordered_merge_points = CFGUtils.find_merge_points(function_address, new_function.endpoints, new_function.graph) self._function_merge_points[function_address] = ordered_merge_points return self._function_merge_points[function_address]
Return the ordered merge points for a specific function. :param int function_address: Address of the querying function. :return: A list of sorted merge points (addresses). :rtype: list
def from_df(cls, df_long, df_short): """ Builds TripleOrbitPopulation from DataFrame ``DataFrame`` objects must be of appropriate form to pass to :func:`OrbitPopulation.from_df`. :param df_long, df_short: :class:`pandas.DataFrame` objects to pass to :func:`OrbitPopulation.from_df`. """ pop = cls(1,1,1,1,1) #dummy population pop.orbpop_long = OrbitPopulation.from_df(df_long) pop.orbpop_short = OrbitPopulation.from_df(df_short) return pop
Builds TripleOrbitPopulation from DataFrame ``DataFrame`` objects must be of appropriate form to pass to :func:`OrbitPopulation.from_df`. :param df_long, df_short: :class:`pandas.DataFrame` objects to pass to :func:`OrbitPopulation.from_df`.
def str_presenter(dmpr, data): """Return correct str_presenter to write multiple lines to a yaml field. Source: http://stackoverflow.com/a/33300001 """ if is_multiline(data): return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|') return dmpr.represent_scalar('tag:yaml.org,2002:str', data)
Return correct str_presenter to write multiple lines to a yaml field. Source: http://stackoverflow.com/a/33300001
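A minimal usage sketch for str_presenter above, assuming PyYAML is installed; the is_multiline helper that the function references lives elsewhere in the source module, so a trivial stand-in is defined here:

import yaml

def is_multiline(s):
    # stand-in for the module's own helper referenced by str_presenter
    return "\n" in s

yaml.add_representer(str, str_presenter)
print(yaml.dump({"msg": "first line\nsecond line\n"}))
# msg: |
#   first line
#   second line

Registering the presenter makes every multi-line string dump in block (|) style.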
def get_tag(self, el): """Get tag.""" name = self.get_tag_name(el) return util.lower(name) if name is not None and not self.is_xml else name
Get tag.
def register(klass): """Registers a new optimizer. Once an optimizer is registered, we can create an instance of this optimizer with `create_optimizer` later. Examples -------- >>> @mx.optimizer.Optimizer.register ... class MyOptimizer(mx.optimizer.Optimizer): ... pass >>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer') >>> print(type(optim)) <class '__main__.MyOptimizer'> """ assert(isinstance(klass, type)) name = klass.__name__.lower() if name in Optimizer.opt_registry: warnings.warn('WARNING: New optimizer %s.%s is overriding ' 'existing optimizer %s.%s' % (klass.__module__, klass.__name__, Optimizer.opt_registry[name].__module__, Optimizer.opt_registry[name].__name__)) Optimizer.opt_registry[name] = klass return klass
Registers a new optimizer. Once an optimizer is registered, we can create an instance of this optimizer with `create_optimizer` later. Examples -------- >>> @mx.optimizer.Optimizer.register ... class MyOptimizer(mx.optimizer.Optimizer): ... pass >>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer') >>> print(type(optim)) <class '__main__.MyOptimizer'>
def status(self): """Gets the status of the job by querying the Python's future Returns: qiskit.providers.JobStatus: The current JobStatus Raises: JobError: If the future is in unexpected state concurrent.futures.TimeoutError: if timeout occurred. """ # The order is important here if self._future.running(): _status = JobStatus.RUNNING elif self._future.cancelled(): _status = JobStatus.CANCELLED elif self._future.done(): _status = JobStatus.DONE if self._future.exception() is None else JobStatus.ERROR else: # Note: There is an undocumented Future state: PENDING, that seems to show up when # the job is enqueued, waiting for someone to pick it up. We need to deal with this # state but there's no public API for it, so we are assuming that if the job is not # in any of the previous states, is PENDING, ergo INITIALIZING for us. _status = JobStatus.INITIALIZING return _status
Gets the status of the job by querying Python's future Returns: qiskit.providers.JobStatus: The current JobStatus Raises: JobError: If the future is in an unexpected state concurrent.futures.TimeoutError: if timeout occurred.
def GetStream(data=None): """ Get a MemoryStream instance. Args: data (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: MemoryStream: instance. """ if len(__mstreams_available__) == 0: if data: mstream = MemoryStream(data) mstream.seek(0) else: mstream = MemoryStream() __mstreams__.append(mstream) return mstream mstream = __mstreams_available__.pop() if data is not None and len(data): mstream.Cleanup() mstream.write(data) mstream.seek(0) return mstream
Get a MemoryStream instance. Args: data (bytes, bytearray, BytesIO): (Optional) data to create the stream from. Returns: MemoryStream: instance.
def parse_yaml(self, y): '''Parse a YAML specification of a message sending object into this object. ''' self._targets = [] if 'targets' in y: for t in y['targets']: if 'waitTime' in t['condition']: new_target = WaitTime() elif 'preceding' in t['condition']: new_target = Preceding() else: new_target = Condition() new_target.parse_yaml(t) self._targets.append(new_target) return self
Parse a YAML specification of a message sending object into this object.
def get_dataset(self, dataset_key): """Retrieve an existing dataset definition This method retrieves metadata about an existing :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :returns: Dataset definition, with all attributes :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> intro_dataset = api_client.get_dataset( ... 'jonloyens/an-intro-to-dataworld-dataset') # doctest: +SKIP >>> intro_dataset['title'] # doctest: +SKIP 'An Intro to data.world Dataset' """ try: return self._datasets_api.get_dataset( *(parse_dataset_key(dataset_key))).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
Retrieve an existing dataset definition This method retrieves metadata about an existing dataset :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :returns: Dataset definition, with all attributes :rtype: dict :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> intro_dataset = api_client.get_dataset( ... 'jonloyens/an-intro-to-dataworld-dataset') # doctest: +SKIP >>> intro_dataset['title'] # doctest: +SKIP 'An Intro to data.world Dataset'
def room(model, solution=None, linear=False, delta=0.03, epsilon=1E-03): """ Compute a single solution based on regulatory on/off minimization (ROOM). Compute a new flux distribution that minimizes the number of active reactions needed to accommodate a previous reference solution. Regulatory on/off minimization (ROOM) is generally used to assess the impact of knock-outs. Thus the typical usage is to provide a wildtype flux distribution as reference and a model in knock-out state. Parameters ---------- model : cobra.Model The model state to compute a ROOM-based solution for. solution : cobra.Solution, optional A (wildtype) reference solution. linear : bool, optional Whether to use the linear ROOM formulation or not (default False). delta: float, optional The relative tolerance range (additive) (default 0.03). epsilon: float, optional The absolute tolerance range (multiplicative) (default 0.001). Returns ------- cobra.Solution A flux distribution with minimal active reaction changes compared to the reference. See Also -------- add_room : add ROOM constraints and objective """ with model: add_room(model=model, solution=solution, linear=linear, delta=delta, epsilon=epsilon) solution = model.optimize() return solution
Compute a single solution based on regulatory on/off minimization (ROOM). Compute a new flux distribution that minimizes the number of active reactions needed to accommodate a previous reference solution. Regulatory on/off minimization (ROOM) is generally used to assess the impact of knock-outs. Thus the typical usage is to provide a wildtype flux distribution as reference and a model in knock-out state. Parameters ---------- model : cobra.Model The model state to compute a ROOM-based solution for. solution : cobra.Solution, optional A (wildtype) reference solution. linear : bool, optional Whether to use the linear ROOM formulation or not (default False). delta: float, optional The relative tolerance range (additive) (default 0.03). epsilon: float, optional The absolute tolerance range (multiplicative) (default 0.001). Returns ------- cobra.Solution A flux distribution with minimal active reaction changes compared to the reference. See Also -------- add_room : add ROOM constraints and objective
def secant(a, b, fn, epsilon): """ One of the fasest root-finding algorithms. The method calculates the slope of the function fn and this enables it to converge to a solution very fast. However, if started too far away from a root, the method may not converge (returning a None). For this reason, it is recommended that this function be used first in any guess-and-check workflow and, if it fails to find a root, the bisect() method should be used. Args: a: The lowest possible boundary of the value you are tying to find. b: The highest possible boundary of the value you are tying to find. fn: A function representing the relationship between the value you are trying to find and the target condition you are trying to satisfy. It should typically be structured in the following way: `def fn(value_trying_to_find): funct(value_trying_to_find) - target_desired_from_funct` ...but the subtraction should be swtiched if value_trying_to_find has a negative relationship with the funct. epsilon: The acceptable error in the target_desired_from_funct. Returns: root: The value that gives the target_desired_from_funct. References ---------- [1] Wikipedia contributors. (2018, December 29). Root-finding algorithm. In Wikipedia, The Free Encyclopedia. Retrieved 18:16, December 30, 2018, from https://en.wikipedia.org/wiki/Root-finding_algorithm#Secant_method """ f1 = fn(a) if abs(f1) <= epsilon: return a f2 = fn(b) if abs(f2) <= epsilon: return b for i in range(100): slope = (f2 - f1) / (b - a) c = b - f2 / slope f3 = fn(c) if abs(f3) < epsilon: return c a = b b = c f1 = f2 f2 = f3 return None
One of the fastest root-finding algorithms. The method calculates the slope of the function fn and this enables it to converge to a solution very fast. However, if started too far away from a root, the method may not converge (returning None). For this reason, it is recommended that this function be used first in any guess-and-check workflow and, if it fails to find a root, the bisect() method should be used. Args: a: The lowest possible boundary of the value you are trying to find. b: The highest possible boundary of the value you are trying to find. fn: A function representing the relationship between the value you are trying to find and the target condition you are trying to satisfy. It should typically be structured in the following way: `def fn(value_trying_to_find): funct(value_trying_to_find) - target_desired_from_funct` ...but the subtraction should be switched if value_trying_to_find has a negative relationship with the funct. epsilon: The acceptable error in the target_desired_from_funct. Returns: root: The value that gives the target_desired_from_funct. References ---------- [1] Wikipedia contributors. (2018, December 29). Root-finding algorithm. In Wikipedia, The Free Encyclopedia. Retrieved 18:16, December 30, 2018, from https://en.wikipedia.org/wiki/Root-finding_algorithm#Secant_method
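A quick sanity check of the secant routine above on a function with a known root (solving x**2 - 2 = 0 gives the square root of 2); the names here are illustrative only:

def fn(x):
    return x ** 2 - 2.0

root = secant(1.0, 2.0, fn, epsilon=1e-6)
print(root)  # ~1.414214, i.e. sqrt(2) to within epsilon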
def Lorentzian(x, a, x0, sigma, y0): """Lorentzian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: half width at half maximum ``y0``: additive constant Formula: -------- ``a/(1+((x-x0)/sigma)^2)+y0`` """ return a / (1 + ((x - x0) / sigma) ** 2) + y0
Lorentzian peak Inputs: ------- ``x``: independent variable ``a``: scaling factor (extremal value) ``x0``: center ``sigma``: half width at half maximum ``y0``: additive constant Formula: -------- ``a/(1+((x-x0)/sigma)^2)+y0``
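For example, evaluating the Lorentzian above on a small grid (assuming NumPy) shows the extremal value a + y0 reached at the center x0:

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
y = Lorentzian(x, a=2.0, x0=0.0, sigma=1.0, y0=0.5)
print(y.max())  # 2.5, i.e. a + y0 at x = x0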
def reduce_l1(attrs, inputs, proto_obj): """Reduce input tensor by l1 normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'}) new_attrs = translation_utils._add_extra_attributes(new_attrs, {'ord' : 1}) return 'norm', new_attrs, inputs
Reduce input tensor by l1 normalization.
def assemble(self,roboset=None,color=None,format=None,bgset=None,sizex=300,sizey=300): """ Build our Robot! Returns the robot image itself. """ # Allow users to manually specify a robot 'set' that they like. # Ensure that this is one of the allowed choices, or allow all # If they don't set one, take the first entry from sets above. if roboset == 'any': roboset = self.sets[self.hasharray[1] % len(self.sets) ] elif roboset in self.sets: roboset = roboset else: roboset = self.sets[0] # Only set1 is setup to be color-seletable. The others don't have enough pieces in various colors. # This could/should probably be expanded at some point.. # Right now, this feature is almost never used. ( It was < 44 requests this year, out of 78M reqs ) if roboset == 'set1': if color in self.colors: roboset = 'set1/' + color else: randomcolor = self.colors[self.hasharray[0] % len(self.colors) ] roboset = 'set1/' + randomcolor # If they specified a background, ensure it's legal, then give it to them. if bgset in self.bgsets: bgset = bgset elif bgset == 'any': bgset = self.bgsets[ self.hasharray[2] % len(self.bgsets) ] # If we set a format based on extension earlier, use that. Otherwise, PNG. if format is None: format = self.format # Each directory in our set represents one piece of the Robot, such as the eyes, nose, mouth, etc. # Each directory is named with two numbers - The number before the # is the sort order. # This ensures that they always go in the same order when choosing pieces, regardless of OS. # The second number is the order in which to apply the pieces. # For instance, the head has to go down BEFORE the eyes, or the eyes would be hidden. # First, we'll get a list of parts of our robot. roboparts = self._get_list_of_files(self.resourcedir + 'sets/' + roboset) # Now that we've sorted them by the first number, we need to sort each sub-category by the second. roboparts.sort(key=lambda x: x.split("#")[1]) if bgset is not None: bglist = [] backgrounds = natsort.natsorted(os.listdir(self.resourcedir + 'backgrounds/' + bgset)) backgrounds.sort() for ls in backgrounds: if not ls.startswith("."): bglist.append(self.resourcedir + 'backgrounds/' + bgset + "/" + ls) background = bglist[self.hasharray[3] % len(bglist)] # Paste in each piece of the Robot. roboimg = Image.open(roboparts[0]) roboimg = roboimg.resize((1024,1024)) for png in roboparts: img = Image.open(png) img = img.resize((1024,1024)) roboimg.paste(img,(0,0),img) # If we're a BMP, flatten the image. if format == 'bmp': #Flatten bmps r, g, b, a = roboimg.split() roboimg = Image.merge("RGB", (r, g, b)) if bgset is not None: bg = Image.open(background) bg = bg.resize((1024,1024)) bg.paste(roboimg,(0,0),roboimg) roboimg = bg self.img = roboimg.resize((sizex,sizey),Image.ANTIALIAS) self.format = format
Build our Robot! Returns the robot image itself.
def resize(self, width, height): """ Pyqt specific resize callback. """ if not self.fbo: return # pyqt reports sizes in actual buffer size self.width = width // self.widget.devicePixelRatio() self.height = height // self.widget.devicePixelRatio() self.buffer_width = width self.buffer_height = height super().resize(width, height)
Pyqt specific resize callback.
def scolor(self): """ Set a unique color from a series """ global palette color = palette[self.color_index] if len(palette) - 1 == self.color_index: self.color_index = 0 else: self.color_index += 1 self.color(color)
Set a unique color from a series
def get(self, *raw_args, **raw_kwargs): """ Return the data for this function (using the cache if possible). This method is not intended to be overidden """ # We pass args and kwargs through a filter to allow them to be # converted into values that can be pickled. args = self.prepare_args(*raw_args) kwargs = self.prepare_kwargs(**raw_kwargs) # Build the cache key and attempt to fetch the cached item key = self.key(*args, **kwargs) item = self.cache.get(key) call = Call(args=raw_args, kwargs=raw_kwargs) if item is None: # Cache MISS - we can either: # a) fetch the data immediately, blocking execution until # the fetch has finished, or # b) trigger an async refresh and return an empty result if self.should_missing_item_be_fetched_synchronously(*args, **kwargs): logger.debug(("Job %s with key '%s' - cache MISS - running " "synchronous refresh"), self.class_path, key) result = self.refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.MISS, sync_fetch=True) else: logger.debug(("Job %s with key '%s' - cache MISS - triggering " "async refresh and returning empty result"), self.class_path, key) # To avoid cache hammering (ie lots of identical tasks # to refresh the same cache item), we reset the cache with an # empty result which will be returned until the cache is # refreshed. result = self.empty() self.store(key, self.timeout(*args, **kwargs), result) self.async_refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.MISS, sync_fetch=False) expiry, data = item delta = time.time() - expiry if delta > 0: # Cache HIT but STALE expiry - we can either: # a) fetch the data immediately, blocking execution until # the fetch has finished, or # b) trigger a refresh but allow the stale result to be # returned this time. This is normally acceptable. if self.should_stale_item_be_fetched_synchronously( delta, *args, **kwargs): logger.debug( ("Job %s with key '%s' - STALE cache hit - running " "synchronous refresh"), self.class_path, key) result = self.refresh(*args, **kwargs) return self.process_result( result, call=call, cache_status=self.STALE, sync_fetch=True) else: logger.debug( ("Job %s with key '%s' - STALE cache hit - triggering " "async refresh and returning stale result"), self.class_path, key) # We replace the item in the cache with a 'timeout' expiry - this # prevents cache hammering but guards against a 'limbo' situation # where the refresh task fails for some reason. timeout = self.timeout(*args, **kwargs) self.store(key, timeout, data) self.async_refresh(*args, **kwargs) return self.process_result( data, call=call, cache_status=self.STALE, sync_fetch=False) else: logger.debug("Job %s with key '%s' - cache HIT", self.class_path, key) return self.process_result(data, call=call, cache_status=self.HIT)
Return the data for this function (using the cache if possible). This method is not intended to be overridden
def _get_template_list(self): " Get the hierarchy of templates belonging to the object/box_type given. " t_list = [] if hasattr(self.obj, 'category_id') and self.obj.category_id: cat = self.obj.category base_path = 'box/category/%s/content_type/%s/' % (cat.path, self.name) if hasattr(self.obj, 'slug'): t_list.append(base_path + '%s/%s.html' % (self.obj.slug, self.box_type,)) t_list.append(base_path + '%s.html' % (self.box_type,)) t_list.append(base_path + 'box.html') base_path = 'box/content_type/%s/' % self.name if hasattr(self.obj, 'slug'): t_list.append(base_path + '%s/%s.html' % (self.obj.slug, self.box_type,)) t_list.append(base_path + '%s.html' % (self.box_type,)) t_list.append(base_path + 'box.html') t_list.append('box/%s.html' % self.box_type) t_list.append('box/box.html') return t_list
Get the hierarchy of templates belonging to the object/box_type given.
def annot_heatmap(ax,dannot, xoff=0,yoff=0, kws_text={},# zip annot_left='(',annot_right=')', annothalf='upper', ): """ kws_text={'marker','s','linewidth','facecolors','edgecolors'} """ for xtli,xtl in enumerate(ax.get_xticklabels()): xtl=xtl.get_text() for ytli,ytl in enumerate(ax.get_yticklabels()): ytl=ytl.get_text() if annothalf=='upper': ax.text(xtli+0.5+xoff,ytli+0.5+yoff,dannot.loc[xtl,ytl],**kws_text,ha='center') else: ax.text(ytli+0.5+yoff,xtli+0.5+xoff,dannot.loc[xtl,ytl],**kws_text,ha='center') return ax
kws_text={'marker','s','linewidth','facecolors','edgecolors'}
def set_forbidden_uptodate(self, uptodate): """Set all forbidden uptodate values :param uptodate: a list with forbidden uptodate values :type uptodate: list :returns: None :rtype: None :raises: None """ if self._forbidden_uptodate == uptodate: return self._forbidden_uptodate = uptodate self.invalidateFilter()
Set all forbidden uptodate values :param uptodate: a list with forbidden uptodate values :type uptodate: list :returns: None :rtype: None :raises: None
def __getRefererUrl(self, url=None): """ gets the referer url for the token handler """ if url is None: url = "http://www.arcgis.com/sharing/rest/portals/self" params = { "f" : "json", "token" : self.token } val = self._get(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port) self._referer_url = "arcgis.com"#"http://%s.%s" % (val['urlKey'], val['customBaseUrl']) self._token = None return self._referer_url
gets the referer url for the token handler
def extract_db_info(self, obj, keys): """Extract metadata from serialized file""" objl = self.convert_in(obj) # FIXME: this is too complex if isinstance(objl, self.__class__): return objl.update_meta_info() try: with builtins.open(objl, mode='r') as fd: state = json.load(fd) except IOError as e: raise e result = super(BaseStructuredCalibration, self).extract_db_info(state, keys) try: minfo = state['meta_info'] result['mode'] = minfo['mode_name'] origin = minfo['origin'] date_obs = origin['date_obs'] except KeyError: origin = {} date_obs = "1970-01-01T00:00:00.00" result['instrument'] = state['instrument'] result['uuid'] = state['uuid'] result['tags'] = state['tags'] result['type'] = state['type'] result['observation_date'] = conv.convert_date(date_obs) result['origin'] = origin return result
Extract metadata from serialized file
def prior_transform(self, unit_coords, priors, prior_args=[]): """An example of one way to use the `Prior` objects below to go from unit cube to parameter space, for nested sampling. This takes and returns a list instead of an array, to accomodate possible vector parameters. Thus one will need something like ``theta_array=np.concatenate(*theta)`` :param unit_coords: Coordinates on the unit prior hyper-cube. Iterable. :param priors: A list of `Prior` objects, iterable of same length as `unit_coords`. :param prior_args: (optional) A list of dictionaries of prior function keyword arguments. :returns theta: A list of parameter values corresponding to the given coordinates on the prior unit hypercube. """ theta = [] for i, (u, p) in enumerate(zip(unit_coords, priors)): func = p.unit_transform try: kwargs = prior_args[i] except(IndexError): kwargs = {} theta.append(func(u, **kwargs)) return theta
An example of one way to use the `Prior` objects below to go from unit cube to parameter space, for nested sampling. This takes and returns a list instead of an array, to accommodate possible vector parameters. Thus one will need something like ``theta_array=np.concatenate(*theta)`` :param unit_coords: Coordinates on the unit prior hyper-cube. Iterable. :param priors: A list of `Prior` objects, iterable of same length as `unit_coords`. :param prior_args: (optional) A list of dictionaries of prior function keyword arguments. :returns theta: A list of parameter values corresponding to the given coordinates on the prior unit hypercube.
def getPixels(self): """ Return a stream of pixels from current Canvas. """ array = self.toArray() (width, height, depth) = array.size for x in range(width): for y in range(height): yield Pixel(array, x, y)
Return a stream of pixels from current Canvas.
def on_error(e): # pragma: no cover """Error handler RuntimeError or ValueError exceptions raised by commands will be handled by this function. """ exname = {'RuntimeError': 'Runtime error', 'ValueError': 'Value error'} sys.stderr.write('{}: {}\n'.format(exname[e.__class__.__name__], str(e))) sys.stderr.write('See file slam_error.log for additional details.\n') sys.exit(1)
Error handler RuntimeError or ValueError exceptions raised by commands will be handled by this function.
def get_config(self): """ function to get current configuration """ config = { 'location': self.location, 'language': self.language, 'topic': self.topic, } return config
function to get current configuration
def _machinectl(cmd, output_loglevel='debug', ignore_retcode=False, use_vt=False): ''' Helper function to run machinectl ''' prefix = 'machinectl --no-legend --no-pager' return __salt__['cmd.run_all']('{0} {1}'.format(prefix, cmd), output_loglevel=output_loglevel, ignore_retcode=ignore_retcode, use_vt=use_vt)
Helper function to run machinectl
def invoke_function(self, script_hash, operation, params, **kwargs): """ Invokes a contract's function with given parameters and returns the result. :param script_hash: contract script hash :param operation: name of the operation to invoke :param params: list of parameters to be passed in to the smart contract :type script_hash: str :type operation: str :type params: list :return: result of the invocation :rtype: dictionary """ contract_params = encode_invocation_params(params) raw_result = self._call( JSONRPCMethods.INVOKE_FUNCTION.value, [script_hash, operation, contract_params, ], **kwargs) return decode_invocation_result(raw_result)
Invokes a contract's function with given parameters and returns the result. :param script_hash: contract script hash :param operation: name of the operation to invoke :param params: list of parameters to be passed in to the smart contract :type script_hash: str :type operation: str :type params: list :return: result of the invocation :rtype: dictionary
def _get_ned_sources_needing_metadata( self): """*Get the names of 50000 or less NED sources that still require metabase in the database* **Return:** - ``len(self.theseIds)`` -- the number of NED IDs returned *Usage:* .. code-block:: python numberSources = stream._get_ned_sources_needing_metadata() """ self.log.debug( 'starting the ``_get_ned_sources_needing_metadata`` method') tableName = self.dbTableName # SELECT THE DATA FROM NED TABLE sqlQuery = u""" select ned_name from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null) limit 50000; """ % locals() rows = readquery( log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, quiet=False ) self.theseIds = [] self.theseIds[:] = [r["ned_name"] for r in rows] self.log.debug( 'completed the ``_get_ned_sources_needing_metadata`` method') return len(self.theseIds)
*Get the names of 50000 or fewer NED sources that still require metadata in the database* **Return:** - ``len(self.theseIds)`` -- the number of NED IDs returned *Usage:* .. code-block:: python numberSources = stream._get_ned_sources_needing_metadata()
def _all_feature_values( self, column, feature, distinct=True, contig=None, strand=None): """ Cached lookup of all values for a particular feature property from the database, caches repeated queries in memory and stores them as a CSV. Parameters ---------- column : str Name of property (e.g. exon_id) feature : str Type of entry (e.g. exon) distinct : bool, optional Keep only unique values contig : str, optional Restrict query to particular contig strand : str, optional Restrict results to "+" or "-" strands Returns a list constructed from query results. """ return self.db.query_feature_values( column=column, feature=feature, distinct=distinct, contig=contig, strand=strand)
Cached lookup of all values for a particular feature property from the database, caches repeated queries in memory and stores them as a CSV. Parameters ---------- column : str Name of property (e.g. exon_id) feature : str Type of entry (e.g. exon) distinct : bool, optional Keep only unique values contig : str, optional Restrict query to particular contig strand : str, optional Restrict results to "+" or "-" strands Returns a list constructed from query results.
def create_record(self, rtype=None, name=None, content=None, **kwargs): """ Create record. If record already exists with the same content, do nothing. """ if not rtype and kwargs.get('type'): warnings.warn('Parameter "type" is deprecated, use "rtype" instead.', DeprecationWarning) rtype = kwargs.get('type') return self._create_record(rtype, name, content)
Create record. If record already exists with the same content, do nothing.
def get_value(self, entry_name: Text, entry_lines: Sequence[Text]) -> Optional[Text]: """See base class method.""" # Search through all lines and return the first matching one for line in entry_lines: match = self._regex.match(line) if match: return match.group(1) # nothing matched return None
See base class method.
def put( self, item: _T, timeout: Union[float, datetime.timedelta] = None ) -> "Future[None]": """Put an item into the queue, perhaps waiting until there is room. Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. ``timeout`` may be a number denoting a time (on the same scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. """ future = Future() # type: Future[None] try: self.put_nowait(item) except QueueFull: self._putters.append((item, future)) _set_timeout(future, timeout) else: future.set_result(None) return future
Put an item into the queue, perhaps waiting until there is room. Returns a Future, which raises `tornado.util.TimeoutError` after a timeout. ``timeout`` may be a number denoting a time (on the same scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time.
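A minimal usage sketch, assuming Tornado's queues and ioloop modules; put() is awaited inside a coroutine and only blocks when the queue is full:

from tornado import ioloop, queues

async def main():
    q = queues.Queue(maxsize=2)
    await q.put("job-1")   # room available, so this resolves immediately
    print(await q.get())   # -> job-1

ioloop.IOLoop.current().run_sync(main)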
def delete(self, name): """ Deletes a given index. **Note**: This method is only supported in Splunk 5.0 and later. :param name: The name of the index to delete. :type name: ``string`` """ if self.service.splunk_version >= (5,): Collection.delete(self, name) else: raise IllegalOperationException("Deleting indexes via the REST API is " "not supported before Splunk version 5.")
Deletes a given index. **Note**: This method is only supported in Splunk 5.0 and later. :param name: The name of the index to delete. :type name: ``string``
def all(self): """ Returns list with all indexed identifiers. """ identifiers = [] query = text(""" SELECT identifier, type, name FROM identifier_index;""") for result in self.execute(query): vid, type_, name = result res = IdentifierSearchResult( score=1, vid=vid, type=type_, name=name) identifiers.append(res) return identifiers
Returns list with all indexed identifiers.
def sanitize(self): ''' Check if the current settings conform to the LISP specifications and fix them where possible. ''' # We override the MapRegisterMessage sa super(InfoMessage, self).sanitize() # R: R bit indicates this is a reply to an Info-Request (Info- # Reply). R bit is set to 0 in an Info-Request. When R bit is set # to 0, the AFI field (following the EID-prefix field) must be set # to 0. When R bit is set to 1, the packet contents follow the # format for an Info-Reply as described below. if not isinstance(self.is_reply, bool): raise ValueError('Is-reply flag must be a boolean') # Nonce: An 8-byte random value created by the sender of the Info- # Request. This nonce will be returned in the Info-Reply. The # nonce SHOULD be generated by a properly seeded pseudo-random (or # strong random) source. if len(bytes(self.nonce)) != 8: raise ValueError('Invalid nonce') # Key ID: A configured ID to find the configured Message # Authentication Code (MAC) algorithm and key value used for the # authentication function. See Section 14.4 for codepoint # assignments. if self.key_id not in (KEY_ID_NONE, KEY_ID_HMAC_SHA_1_96, KEY_ID_HMAC_SHA_256_128): raise ValueError('Invalid Key ID') # Authentication Data: The message digest used from the output of the # Message Authentication Code (MAC) algorithm. The entire Map- # Register payload is authenticated with this field preset to 0. # After the MAC is computed, it is placed in this field. # Implementations of this specification MUST include support for # HMAC-SHA-1-96 [RFC2404] and support for HMAC-SHA-256-128 [RFC6234] # is RECOMMENDED. if not isinstance(self.authentication_data, bytes): raise ValueError('Invalid authentication data') # TTL: The time in minutes the recipient of the Info-Reply will # store the RTR Information. if not isinstance(self.ttl, numbers.Integral) \ or self.ttl < 0 or self.ttl > 0xffffffff: raise ValueError('Invalid TTL') # EID-prefix: 4 octets if an IPv4 address-family, 16 octets if an IPv6 # address-family. if not isinstance(self.eid_prefix, (IPv4Network, IPv6Network)): raise ValueError('EID prefix must be IPv4 or IPv6') # When a Map-Server receives an Info-Request message, it responds with # an Info-Reply message. The Info-Reply message source port is 4342, # and destination port is taken from the source port of the triggering # Info-Request. Map-Server fills the NAT LCAF (LCAF Type = 7) fields # according to their description. The Map-Server uses AFI=0 for the # Private ETR RLOC Address field in the NAT LCAF. if self.is_reply: if not isinstance(self.reply, LCAFNATTraversalAddress): raise ValueError("An InfoMessage which is an Info-Reply must contain an LCAFNATTraversalAddress") else: if self.reply is not None: raise ValueError("An InfoMessage which is an Info-Request can not contain a reply")
Check if the current settings conform to the LISP specifications and fix them where possible.
def en_disable_breakpoint_by_number(self, bpnum, do_enable=True): "Enable or disable a breakpoint given its breakpoint number." success, msg, bp = self.get_breakpoint(bpnum) if not success: return success, msg if do_enable: endis = 'en' else: endis = 'dis' pass if bp.enabled == do_enable: return (False, ('Breakpoint (%r) previously %sabled' % (str(bpnum), endis,))) bp.enabled = do_enable return (True, '')
Enable or disable a breakpoint given its breakpoint number.
def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> List[List]: """ Convert vectors of probabilities to labels using confident threshold (if probability to belong with the class is bigger than confident_threshold, sample belongs with the class; if no probabilities bigger than confident threshold, sample belongs with the class with the biggest probability) Args: proba: list of samples where each sample is a vector of probabilities to belong with given classes confident_threshold (float): boundary of probability to belong with a class classes: array of classes' names Returns: list of lists of labels for each sample """ y = [] for sample in proba: to_add = np.where(sample > confident_threshold)[0] if len(to_add) > 0: y.append(np.array(classes)[to_add].tolist()) else: y.append(np.array([np.array(classes)[np.argmax(sample)]]).tolist()) return y
Convert vectors of probabilities to labels using a confidence threshold (if the probability of belonging to a class is greater than confident_threshold, the sample is assigned that class; if no probability exceeds the threshold, the sample is assigned the class with the highest probability) Args: proba: list of samples where each sample is a vector of probabilities of belonging to the given classes confident_threshold (float): probability boundary for assigning a class classes: array of class names Returns: list of lists of labels for each sample
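A small worked example for proba2labels (assuming NumPy): the first sample has a probability above the threshold, while the second falls back to the argmax class:

import numpy as np

proba = np.array([[0.1, 0.7, 0.2],
                  [0.3, 0.3, 0.4]])
classes = np.array(["cat", "dog", "bird"])
print(proba2labels(proba, confident_threshold=0.5, classes=classes))
# [['dog'], ['bird']]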
def _get_migration_files(self, path): """ Get all of the migration files in a given path. :type path: str :rtype: list """ files = glob.glob(os.path.join(path, "[0-9]*_*.py")) if not files: return [] files = list(map(lambda f: os.path.basename(f).replace(".py", ""), files)) files = sorted(files) return files
Get all of the migration files in a given path. :type path: str :rtype: list
def get_status(self): """Reads the server status, the Virtual CPU status and the number of the clients connected. :returns: server status, cpu status, client count """ logger.debug("get server status") server_status = ctypes.c_int() cpu_status = ctypes.c_int() clients_count = ctypes.c_int() error = self.library.Srv_GetStatus(self.pointer, ctypes.byref(server_status), ctypes.byref(cpu_status), ctypes.byref(clients_count)) check_error(error) logger.debug("status server %s cpu %s clients %s" % (server_status.value, cpu_status.value, clients_count.value)) return snap7.snap7types.server_statuses[server_status.value], \ snap7.snap7types.cpu_statuses[cpu_status.value], \ clients_count.value
Reads the server status, the Virtual CPU status and the number of the clients connected. :returns: server status, cpu status, client count
def transferReporter(self, xferId, message): ''' the callback method used by the Aspera sdk during transfer to notify progress, error or successful completion ''' if self.is_stopped(): return True _asp_message = AsperaMessage(message) if not _asp_message.is_msg_type( [enumAsperaMsgType.INIT, enumAsperaMsgType.DONE, enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR, enumAsperaMsgType.STATS]): return _session_id = _asp_message.get_session_id() _msg = self.debug_id(xferId, _session_id) + " : " + _asp_message._msg_type logger.info(_msg) with self._session_lock: if _asp_message.is_msg_type([enumAsperaMsgType.INIT]): assert(_session_id not in self._sessions) _session = AsperaSession(_session_id) self._sessions[_session_id] = _session self.notify_init() else: _session = self._sessions[_session_id] if _asp_message.is_msg_type([enumAsperaMsgType.DONE]): if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()): self.notify_progress() _session.set_success() self.notify_done() elif _asp_message.is_msg_type([enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR]): _session.set_error(_asp_message.get_error_descr()) self.notify_done(error=True) elif _asp_message.is_msg_type([enumAsperaMsgType.STATS]): if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()): self.notify_progress()
the callback method used by the Aspera sdk during transfer to notify progress, error or successful completion
def decide_child_program(args_executable, args_child_program): """Decide which the child program really is (if any).""" # We get the logger here because it's not defined at module level logger = logging.getLogger('fades') if args_executable: # if --exec given, check that it's just the executable name, # not absolute or relative paths if os.path.sep in args_child_program: logger.error( "The parameter to --exec must be a file name (to be found " "inside venv's bin directory), not a file path: %r", args_child_program) raise FadesError("File path given to --exec parameter") # indicated --execute, local and not analyzable for dependencies analyzable_child_program = None child_program = args_child_program elif args_child_program is not None: # normal case, the child program is to be analyzed (being it local or remote) if args_child_program.startswith(("http://", "https://")): args_child_program = helpers.download_remote_script(args_child_program) else: if not os.access(args_child_program, os.R_OK): logger.error("'%s' not found. If you want to run an executable " "file from a library installed in the virtualenv " "check the `--exec` option in the help.", args_child_program) raise FadesError("child program not found.") analyzable_child_program = args_child_program child_program = args_child_program else: # not indicated executable, not child program, "interpreter" mode analyzable_child_program = None child_program = None return analyzable_child_program, child_program
Decide which the child program really is (if any).
def set_filter(self, slices, values): """ Sets Fourier-space filters for the image. The image is filtered by subtracting values from the image at slices. Parameters ---------- slices : List of indices or slice objects. The q-values in Fourier space to filter. values : np.ndarray The complete array of Fourier space peaks to subtract off. values should be the same size as the FFT of the image; only the portions of values at slices will be removed. Examples -------- To remove a two Fourier peaks in the data at q=(10, 10, 10) & (245, 245, 245), where im is the residuals of a model: * slices = [(10,10,10), (245, 245, 245)] * values = np.fft.fftn(im) * im.set_filter(slices, values) """ self.filters = [[sl,values[sl]] for sl in slices]
Sets Fourier-space filters for the image. The image is filtered by subtracting values from the image at slices. Parameters ---------- slices : List of indices or slice objects. The q-values in Fourier space to filter. values : np.ndarray The complete array of Fourier space peaks to subtract off. values should be the same size as the FFT of the image; only the portions of values at slices will be removed. Examples -------- To remove two Fourier peaks in the data at q=(10, 10, 10) & (245, 245, 245), where im is the residuals of a model: * slices = [(10,10,10), (245, 245, 245)] * values = np.fft.fftn(im) * im.set_filter(slices, values)
def getAsKml(self, session): """ Retrieve the geometry in KML format. This method is a veneer for an SQL query that calls the ``ST_AsKml()`` function on the geometry column. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: str: KML string representation of geometry. """ statement = """ SELECT ST_AsKml({0}) AS kml FROM {1} WHERE id={2}; """.format(self.geometryColumnName, self.tableName, self.id) result = session.execute(statement) for row in result: return row.kml
Retrieve the geometry in KML format. This method is a veneer for an SQL query that calls the ``ST_AsKml()`` function on the geometry column. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. Returns: str: KML string representation of geometry.
def padded_sequence_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels everywhere (non-0).""" # If the last dimension is 1 then we're using L1/L2 loss. if common_layers.shape_list(predictions)[-1] == 1: return rounding_sequence_accuracy( predictions, labels, weights_fn=weights_fn) with tf.variable_scope( "padded_sequence_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) # Flatten, keeping batch dim (and num_classes dim for predictions) # TPU argmax can only deal with a limited number of dimensions predictions_shape = common_layers.shape_list(padded_predictions) batch_size = predictions_shape[0] num_classes = predictions_shape[-1] flat_size = common_layers.list_product( common_layers.shape_list(padded_labels)[1:]) padded_predictions = tf.reshape( padded_predictions, [batch_size, common_layers.list_product(predictions_shape[1:-1]), num_classes]) padded_labels = tf.reshape(padded_labels, [batch_size, flat_size]) weights = tf.reshape(weights, [batch_size, flat_size]) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights axis = list(range(1, len(outputs.get_shape()))) correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) return correct_seq, tf.constant(1.0)
Percentage of times that predictions matches labels everywhere (non-0).
def ge(self, value): """Construct a greater than or equal to (``>=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field """ self.op = '>=' self.negate_op = '<' self.value = self._value(value) return self
Construct a greater than or equal to (``>=``) filter. :param value: Filter value :return: :class:`filters.Field <filters.Field>` object :rtype: filters.Field
def response_cookies(self): """ This will return all cookies set :return: dict {name, value} """ try: ret = {} for cookie_base_uris in self.response.cookies._cookies.values(): for cookies in cookie_base_uris.values(): for cookie in cookies.keys(): ret[cookie] = cookies[cookie].value return ret except Exception as e: self.error = ApiError( "Exception in making Request with:: %s\n%s" % ( e, traceback.format_exc())) raise Exception(self.error)
This will return all cookies set :return: dict {name, value}
def arg(self, state, index, stack_base=None): """ Returns a bitvector expression representing the nth argument of a function. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless you've customized this CC. """ session = self.arg_session if self.args is None: arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1] else: arg_loc = self.args[index] return arg_loc.get_value(state, stack_base=stack_base)
Returns a bitvector expression representing the nth argument of a function. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless you've customized this CC.
def pretty_str(label, arr): """ Generates a pretty printed NumPy array with an assignment. Optionally transposes column vectors so they are drawn on one line. Strictly speaking arr can be any time convertible by `str(arr)`, but the output may not be what you want if the type of the variable is not a scalar or an ndarray. Examples -------- >>> pprint('cov', np.array([[4., .1], [.1, 5]])) cov = [[4. 0.1] [0.1 5. ]] >>> print(pretty_str('x', np.array([[1], [2], [3]]))) x = [[1 2 3]].T """ def is_col(a): """ return true if a is a column vector""" try: return a.shape[0] > 1 and a.shape[1] == 1 except (AttributeError, IndexError): return False if label is None: label = '' if label: label += ' = ' if is_col(arr): return label + str(arr.T).replace('\n', '') + '.T' rows = str(arr).split('\n') if not rows: return '' s = label + rows[0] pad = ' ' * len(label) for line in rows[1:]: s = s + '\n' + pad + line return s
Generates a pretty printed NumPy array with an assignment. Optionally transposes column vectors so they are drawn on one line. Strictly speaking arr can be any type convertible by `str(arr)`, but the output may not be what you want if the type of the variable is not a scalar or an ndarray. Examples -------- >>> print(pretty_str('cov', np.array([[4., .1], [.1, 5]]))) cov = [[4. 0.1] [0.1 5. ]] >>> print(pretty_str('x', np.array([[1], [2], [3]]))) x = [[1 2 3]].T
def prettify_json(json_string): """Given a JSON string, return it as safe, formatted HTML""" try: data = json.loads(json_string) html = '<pre>' + json.dumps(data, sort_keys=True, indent=4) + '</pre>' except: html = json_string return mark_safe(html)
Given a JSON string, return it as safe, formatted HTML
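A short sketch of prettify_json in use; it assumes Django is importable for mark_safe (no configured settings are needed) and that json is imported as in the function body:

raw = '{"b": 2, "a": 1}'
print(prettify_json(raw))
# <pre>{
#     "a": 1,
#     "b": 2
# }</pre>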
def break_around_binary_operator(logical_line, tokens): r""" Avoid breaks before binary operators. The preferred place to break around a binary operator is after the operator, not before it. W503: (width == 0\n + height == 0) W503: (width == 0\n and height == 0) Okay: (width == 0 +\n height == 0) Okay: foo(\n -x) Okay: foo(x\n []) Okay: x = '''\n''' + '' Okay: foo(x,\n -y) Okay: foo(x, # comment\n -y) Okay: var = (1 &\n ~2) Okay: var = (1 /\n -2) Okay: var = (1 +\n -1 +\n -2) """ def is_binary_operator(token_type, text): # The % character is strictly speaking a binary operator, but the # common usage seems to be to put it next to the format parameters, # after a line break. return ((token_type == tokenize.OP or text in ['and', 'or']) and text not in "()[]{},:.;@=%~") line_break = False unary_context = True # Previous non-newline token types and text previous_token_type = None previous_text = None for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: continue if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: line_break = True else: if (is_binary_operator(token_type, text) and line_break and not unary_context and not is_binary_operator(previous_token_type, previous_text)): yield start, "W503 line break before binary operator" unary_context = text in '([{,;' line_break = False previous_token_type = token_type previous_text = text
Avoid breaks before binary operators. The preferred place to break around a binary operator is after the operator, not before it. W503: (width == 0\n + height == 0) W503: (width == 0\n and height == 0) Okay: (width == 0 +\n height == 0) Okay: foo(\n -x) Okay: foo(x\n []) Okay: x = '''\n''' + '' Okay: foo(x,\n -y) Okay: foo(x, # comment\n -y) Okay: var = (1 &\n ~2) Okay: var = (1 /\n -2) Okay: var = (1 +\n -1 +\n -2)
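A hedged sketch of driving the W503 check above directly with the standard tokenize module; in pycodestyle the framework supplies the token stream, so this only illustrates how the generator yields an offset and a message:

import io
import tokenize

src = "income = (gross_wages\n          + taxable_interest)\n"
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
for offset, message in break_around_binary_operator(src, tokens):
    print(offset, message)
# (2, 10) W503 line break before binary operator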
def dpi(self): """ A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch resolution of this image. A default value of (72, 72) is used if the dpi is not specified in the image file. """ def int_dpi(dpi): """ Return an integer dots-per-inch value corresponding to *dpi*. If *dpi* is |None|, a non-numeric type, less than 1 or greater than 2048, 72 is returned. """ try: int_dpi = int(round(float(dpi))) if int_dpi < 1 or int_dpi > 2048: int_dpi = 72 except (TypeError, ValueError): int_dpi = 72 return int_dpi def normalize_pil_dpi(pil_dpi): """ Return a (horz_dpi, vert_dpi) 2-tuple corresponding to *pil_dpi*, the value for the 'dpi' key in the ``info`` dict of a PIL image. If the 'dpi' key is not present or contains an invalid value, ``(72, 72)`` is returned. """ if isinstance(pil_dpi, tuple): return (int_dpi(pil_dpi[0]), int_dpi(pil_dpi[1])) return (72, 72) return normalize_pil_dpi(self._pil_props[2])
A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch resolution of this image. A default value of (72, 72) is used if the dpi is not specified in the image file.
def list_build_configurations_for_product_version(product_id, version_id, page_size=200, page_index=0, sort="", q=""): """ List all BuildConfigurations associated with the given ProductVersion """ data = list_build_configurations_for_project_raw(product_id, version_id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
List all BuildConfigurations associated with the given ProductVersion
def _add_point_scalar(self, scalars, name, set_active=False, deep=True): """ Adds point scalars to the mesh Parameters ---------- scalars : numpy.ndarray Numpy array of scalars. Must match number of points. name : str Name of point scalars to add. set_active : bool, optional Sets the scalars to the active plotting scalars. Default False. deep : bool, optional Does not copy scalars when False. A reference to the scalars must be kept to avoid a segfault. """ if not isinstance(scalars, np.ndarray): raise TypeError('Input must be a numpy.ndarray') if scalars.shape[0] != self.n_points: raise Exception('Number of scalars must match the number of ' + 'points') # need to track which arrays are boolean as all boolean arrays # must be stored as uint8 if scalars.dtype == np.bool: scalars = scalars.view(np.uint8) if name not in self._point_bool_array_names: self._point_bool_array_names.append(name) if not scalars.flags.c_contiguous: scalars = np.ascontiguousarray(scalars) vtkarr = numpy_to_vtk(scalars, deep=deep) vtkarr.SetName(name) self.GetPointData().AddArray(vtkarr) if set_active or self.active_scalar_info[1] is None: self.GetPointData().SetActiveScalars(name) self._active_scalar_info = [POINT_DATA_FIELD, name]
Adds point scalars to the mesh Parameters ---------- scalars : numpy.ndarray Numpy array of scalars. Must match number of points. name : str Name of point scalars to add. set_active : bool, optional Sets the scalars to the active plotting scalars. Default False. deep : bool, optional Does not copy scalars when False. A reference to the scalars must be kept to avoid a segfault.
def _update_dict(data, default_data, replace_data=False): '''Update algorithm definition type dictionaries''' if not data: data = default_data.copy() return data if not isinstance(data, dict): raise TypeError('Value not dict type') if len(data) > 255: raise ValueError('More than 255 values defined') for i in data.keys(): if not isinstance(i, int): raise TypeError('Index not int type') if i < 0 or i > 255: raise ValueError('Index value out of range') if not replace_data: data.update(default_data) return data
Update algorithm definition type dictionaries
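A usage sketch for the merge behaviour described above (illustrative values; assumes the function is in scope):

defaults = {0: 'none', 1: 'gzip'}

print(_update_dict(None, defaults))          # falsy input -> copy of the defaults
print(_update_dict({2: 'lzma'}, defaults))   # user entry kept, defaults merged in
print(_update_dict({2: 'lzma'}, defaults, replace_data=True))  # defaults skipped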
def shot_chart_jointgrid(x, y, data=None, joint_type="scatter", title="", joint_color="b", cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", court_lw=1, outer_lines=False, flip_court=False, joint_kde_shade=True, gridsize=None, marginals_color="b", marginals_type="both", marginals_kde_shade=True, size=(12, 11), space=0, despine=False, joint_kws=None, marginal_kws=None, **kwargs): """ Returns a JointGrid object containing the shot chart. This function allows for more flexibility in customizing your shot chart than the ``shot_chart_jointplot`` function. Parameters ---------- x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as columns from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. joint_type : { "scatter", "kde", "hex" }, optional The type of shot chart for the joint plot. title : str, optional The title for the plot. joint_color : matplotlib color, optional Color used to plot the shots on the joint plot. cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the value passed to ``color``. Used for KDE and Hexbin joint plots. {x, y}lim : two-tuples, optional The axis limits of the plot. The defaults represent the out of bounds lines and half court line. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth the of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. joint_kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours on the joint plot. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. marginals_color : matplotlib color, optional Color used to plot the shots on the marginal plots. marginals_type : { "both", "hist", "kde"}, optional The type of plot for the marginal plots. marginals_kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours on the marginal plots. size : tuple, optional The width and height of the plot in inches. space : numeric, optional The space between the joint and marginal plots. despine : boolean, optional If ``True``, removes the spines. {joint, marginal}_kws : dicts Additional kewyord arguments for joint and marginal plot components. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- grid : JointGrid The JointGrid object with the shot chart plotted on it. 
""" # The joint_kws and marginal_kws idea was taken from seaborn # Create the default empty kwargs for joint and marginal plots if joint_kws is None: joint_kws = {} joint_kws.update(kwargs) if marginal_kws is None: marginal_kws = {} # If a colormap is not provided, then it is based off of the joint_color if cmap is None: cmap = sns.light_palette(joint_color, as_cmap=True) # Flip the court so that the hoop is by the bottom of the plot if flip_court: xlim = xlim[::-1] ylim = ylim[::-1] # Create the JointGrid to draw the shot chart plots onto grid = sns.JointGrid(x=x, y=y, data=data, xlim=xlim, ylim=ylim, space=space) # Joint Plot # Create the main plot of the joint shot chart if joint_type == "scatter": grid = grid.plot_joint(plt.scatter, color=joint_color, **joint_kws) elif joint_type == "kde": grid = grid.plot_joint(sns.kdeplot, cmap=cmap, shade=joint_kde_shade, **joint_kws) elif joint_type == "hex": if gridsize is None: # Get the number of bins for hexbin using Freedman-Diaconis rule # This is idea was taken from seaborn, which got the calculation # from http://stats.stackexchange.com/questions/798/ from seaborn.distributions import _freedman_diaconis_bins x_bin = _freedman_diaconis_bins(x) y_bin = _freedman_diaconis_bins(y) gridsize = int(np.mean([x_bin, y_bin])) grid = grid.plot_joint(plt.hexbin, gridsize=gridsize, cmap=cmap, **joint_kws) else: raise ValueError("joint_type must be 'scatter', 'kde', or 'hex'.") # Marginal plots # Create the plots on the axis of the main plot of the joint shot chart. if marginals_type == "both": grid = grid.plot_marginals(sns.distplot, color=marginals_color, **marginal_kws) elif marginals_type == "hist": grid = grid.plot_marginals(sns.distplot, color=marginals_color, kde=False, **marginal_kws) elif marginals_type == "kde": grid = grid.plot_marginals(sns.kdeplot, color=marginals_color, shade=marginals_kde_shade, **marginal_kws) else: raise ValueError("marginals_type must be 'both', 'hist', or 'kde'.") # Set the size of the joint shot chart grid.fig.set_size_inches(size) # Extract the the first axes, which is the main plot of the # joint shot chart, and draw the court onto it ax = grid.fig.get_axes()[0] draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines) # Get rid of the axis labels grid.set_axis_labels(xlabel="", ylabel="") # Get rid of all tick labels ax.tick_params(labelbottom="off", labelleft="off") # Set the title above the top marginal plot ax.set_title(title, y=1.2, fontsize=18) # Set the spines to match the rest of court lines, makes outer_lines # somewhate unnecessary for spine in ax.spines: ax.spines[spine].set_lw(court_lw) ax.spines[spine].set_color(court_color) # set the marginal spines to be the same as the rest of the spines grid.ax_marg_x.spines[spine].set_lw(court_lw) grid.ax_marg_x.spines[spine].set_color(court_color) grid.ax_marg_y.spines[spine].set_lw(court_lw) grid.ax_marg_y.spines[spine].set_color(court_color) if despine: ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["left"].set_visible(False) return grid
Returns a JointGrid object containing the shot chart. This function allows for more flexibility in customizing your shot chart than the ``shot_chart_jointplot`` function. Parameters ---------- x, y : strings or vector The x and y coordinates of the shots taken. They can be passed in as vectors (such as a pandas Series) or as columns from the pandas DataFrame passed into ``data``. data : DataFrame, optional DataFrame containing shots where ``x`` and ``y`` represent the shot location coordinates. joint_type : { "scatter", "kde", "hex" }, optional The type of shot chart for the joint plot. title : str, optional The title for the plot. joint_color : matplotlib color, optional Color used to plot the shots on the joint plot. cmap : matplotlib Colormap object or name, optional Colormap for the range of data values. If one isn't provided, the colormap is derived from the value passed to ``color``. Used for KDE and Hexbin joint plots. {x, y}lim : two-tuples, optional The axis limits of the plot. The defaults represent the out of bounds lines and half court line. court_color : matplotlib color, optional The color of the court lines. court_lw : float, optional The linewidth the of the court lines. outer_lines : boolean, optional If ``True`` the out of bound lines are drawn in as a matplotlib Rectangle. flip_court : boolean, optional If ``True`` orients the hoop towards the bottom of the plot. Default is ``False``, which orients the court where the hoop is towards the top of the plot. joint_kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours on the joint plot. gridsize : int, optional Number of hexagons in the x-direction. The default is calculated using the Freedman-Diaconis method. marginals_color : matplotlib color, optional Color used to plot the shots on the marginal plots. marginals_type : { "both", "hist", "kde"}, optional The type of plot for the marginal plots. marginals_kde_shade : boolean, optional Default is ``True``, which shades in the KDE contours on the marginal plots. size : tuple, optional The width and height of the plot in inches. space : numeric, optional The space between the joint and marginal plots. despine : boolean, optional If ``True``, removes the spines. {joint, marginal}_kws : dicts Additional kewyord arguments for joint and marginal plot components. kwargs : key, value pairs Keyword arguments for matplotlib Collection properties or seaborn plots. Returns ------- grid : JointGrid The JointGrid object with the shot chart plotted on it.
def set_scene_config(self, scene_id, config): """reconfigure a scene by scene ID""" if not scene_id in self.state.scenes: # does that scene_id exist? err_msg = "Requested to reconfigure scene {sceneNum}, which does not exist".format(sceneNum=scene_id) logging.info(err_msg) return(False, 0, err_msg) if scene_id == self.state.activeSceneId: pass # TODO: maybe calculate next frame, esp. if static scene self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(config=config) sequence_number = self.zmq_publisher.publish_scene_config(scene_id, config) logging.debug("Reconfigured scene {sceneNum}".format(sceneNum=scene_id)) return (True, sequence_number, "OK")
reconfigure a scene by scene ID
def events(self): """ Access the events :returns: twilio.rest.taskrouter.v1.workspace.event.EventList :rtype: twilio.rest.taskrouter.v1.workspace.event.EventList """ if self._events is None: self._events = EventList(self._version, workspace_sid=self._solution['sid'], ) return self._events
Access the events :returns: twilio.rest.taskrouter.v1.workspace.event.EventList :rtype: twilio.rest.taskrouter.v1.workspace.event.EventList
def LineWrap(text, omit_sgr=False): """Break line to fit screen width, factoring in ANSI/SGR escape sequences. Args: text: String to line wrap. omit_sgr: Bool, to omit counting ANSI/SGR sequences in the length. Returns: Text with additional line wraps inserted for lines grater than the width. """ def _SplitWithSgr(text_line): """Tokenise the line so that the sgr sequences can be omitted.""" token_list = sgr_re.split(text_line) text_line_list = [] line_length = 0 for (index, token) in enumerate(token_list): # Skip null tokens. if token is '': continue if sgr_re.match(token): # Add sgr escape sequences without splitting or counting length. text_line_list.append(token) text_line = ''.join(token_list[index +1:]) else: if line_length + len(token) <= width: # Token fits in line and we count it towards overall length. text_line_list.append(token) line_length += len(token) text_line = ''.join(token_list[index +1:]) else: # Line splits part way through this token. # So split the token, form a new line and carry the remainder. text_line_list.append(token[:width - line_length]) text_line = token[width - line_length:] text_line += ''.join(token_list[index +1:]) break return (''.join(text_line_list), text_line) # We don't use textwrap library here as it insists on removing # trailing/leading whitespace (pre 2.6). (_, width) = TerminalSize() text = str(text) text_multiline = [] for text_line in text.splitlines(): # Is this a line that needs splitting? while ((omit_sgr and (len(StripAnsiText(text_line)) > width)) or (len(text_line) > width)): # If there are no sgr escape characters then do a straight split. if not omit_sgr: text_multiline.append(text_line[:width]) text_line = text_line[width:] else: (multiline_line, text_line) = _SplitWithSgr(text_line) text_multiline.append(multiline_line) if text_line: text_multiline.append(text_line) return '\n'.join(text_multiline)
Break line to fit screen width, factoring in ANSI/SGR escape sequences.

Args:
  text: String to line wrap.
  omit_sgr: Bool, to omit counting ANSI/SGR sequences in the length.

Returns:
  Text with additional line wraps inserted for lines greater than the width.
def add_entry(self, src, dst, duration=3600, src_port1=None, src_port2=None, src_proto='predefined_tcp', dst_port1=None, dst_port2=None, dst_proto='predefined_tcp'): """ Create a blacklist entry. A blacklist can be added directly from the engine node, or from the system context. If submitting from the system context, it becomes a global blacklist. This will return the properly formatted json to submit. :param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any' :param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any' :param int duration: length of time to blacklist Both the system and engine context blacklist allow kw to be passed to provide additional functionality such as adding source and destination ports or port ranges and specifying the protocol. The following parameters define the ``kw`` that can be passed. The following example shows creating an engine context blacklist using additional kw:: engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600, src_port1=1000, src_port2=1500, src_proto='predefined_udp', dst_port1=3, dst_port2=3000, dst_proto='predefined_udp') :param int src_port1: start source port to limit blacklist :param int src_port2: end source port to limit blacklist :param str src_proto: source protocol. Either 'predefined_tcp' or 'predefined_udp'. (default: 'predefined_tcp') :param int dst_port1: start dst port to limit blacklist :param int dst_port2: end dst port to limit blacklist :param str dst_proto: dst protocol. Either 'predefined_tcp' or 'predefined_udp'. (default: 'predefined_tcp') .. note:: if blocking a range of ports, use both src_port1 and src_port2, otherwise providing only src_port1 is adequate. The same applies to dst_port1 / dst_port2. In addition, if you provide src_portX but not dst_portX (or vice versa), the undefined port side definition will default to all ports. """ self.entries.setdefault('entries', []).append(prepare_blacklist( src, dst, duration, src_port1, src_port2, src_proto, dst_port1, dst_port2, dst_proto))
Create a blacklist entry. A blacklist can be added directly from the engine node, or from the system context. If submitting from the system context, it becomes a global blacklist. This will return the properly formatted json to submit. :param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any' :param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any' :param int duration: length of time to blacklist Both the system and engine context blacklist allow kw to be passed to provide additional functionality such as adding source and destination ports or port ranges and specifying the protocol. The following parameters define the ``kw`` that can be passed. The following example shows creating an engine context blacklist using additional kw:: engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600, src_port1=1000, src_port2=1500, src_proto='predefined_udp', dst_port1=3, dst_port2=3000, dst_proto='predefined_udp') :param int src_port1: start source port to limit blacklist :param int src_port2: end source port to limit blacklist :param str src_proto: source protocol. Either 'predefined_tcp' or 'predefined_udp'. (default: 'predefined_tcp') :param int dst_port1: start dst port to limit blacklist :param int dst_port2: end dst port to limit blacklist :param str dst_proto: dst protocol. Either 'predefined_tcp' or 'predefined_udp'. (default: 'predefined_tcp') .. note:: if blocking a range of ports, use both src_port1 and src_port2, otherwise providing only src_port1 is adequate. The same applies to dst_port1 / dst_port2. In addition, if you provide src_portX but not dst_portX (or vice versa), the undefined port side definition will default to all ports.
def repr_def_class(self, class_data): """Create code like this:: class Person(Base): def __init__(self, person_id=None, name=None): self.person_id = person_id self.name = name """ classname = self.formatted_classname(class_data["classname"]) if classname not in self.classes: self.lines.append("") self.lines.append("class %s(%s):" % (classname, self.basename)) kwargs = list() setattr_arguments = list() for attr in self._classes[classname]: kwargs.append("%s=None" % attr) setattr_arguments.append( self.Tab2 + "self.%s = %s" % (attr, attr)) if len(kwargs): line = self.Tab + "def __init__(self, %s):" % ", ".join(kwargs) else: line = self.Tab + "def __init__(self):" self.lines.append(line) for setattr_argument in setattr_arguments: self.lines.append(setattr_argument) if len(setattr_arguments): self.lines.append("") self.classes.add(classname)
Create code like this:: class Person(Base): def __init__(self, person_id=None, name=None): self.person_id = person_id self.name = name
def addPolylineAnnot(self, points): """Add a 'Polyline' annotation for a sequence of points.""" CheckParent(self) val = _fitz.Page_addPolylineAnnot(self, points) if not val: return val.thisown = True val.parent = weakref.proxy(self) self._annot_refs[id(val)] = val return val
Add a 'Polyline' annotation for a sequence of points.
def visit_Call(self, node): ''' Resulting node alias to the return_alias of called function, if the function is already known by Pythran (i.e. it's an Intrinsic) or if Pythran already computed it's ``return_alias`` behavior. >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> fun = """ ... def f(a): return a ... def foo(b): c = f(b)""" >>> module = ast.parse(fun) The ``f`` function create aliasing between the returned value and its first argument. >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f(b) => ['b'] This also works with intrinsics, e.g ``dict.setdefault`` which may create alias between its third argument and the return value. >>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)' >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) __builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a'] Note that complex cases can arise, when one of the formal parameter is already known to alias to various values: >>> fun = """ ... def f(a, b): return a and b ... def foo(A, B, C, D): return f(A or B, C or D)""" >>> module = ast.parse(fun) >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Call) f((A or B), (C or D)) => ['A', 'B', 'C', 'D'] ''' self.generic_visit(node) f = node.func # special handler for bind functions if isinstance(f, ast.Attribute) and f.attr == "partial": return self.add(node, {node}) else: return_alias = self.call_return_alias(node) # expand collected aliases all_aliases = set() for value in return_alias: # no translation if isinstance(value, (ContainerOf, ast.FunctionDef, Intrinsic)): all_aliases.add(value) elif value in self.result: all_aliases.update(self.result[value]) else: try: ap = Aliases.access_path(value) all_aliases.update(self.aliases.get(ap, ())) except NotImplementedError: # should we do something better here? all_aliases.add(value) return self.add(node, all_aliases)
The resulting node aliases to the return_alias of the called function,
if the function is already known by Pythran (i.e. it's an Intrinsic)
or if Pythran already computed its ``return_alias`` behavior.

>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> fun = """
... def f(a): return a
... def foo(b): c = f(b)"""
>>> module = ast.parse(fun)

The ``f`` function creates aliasing between the returned value
and its first argument.

>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f(b) => ['b']

This also works with intrinsics, e.g. ``dict.setdefault`` which
may create an alias between its third argument and the return value.

>>> fun = 'def foo(a, d): __builtin__.dict.setdefault(d, 0, a)'
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
__builtin__.dict.setdefault(d, 0, a) => ['<unbound-value>', 'a']

Note that complex cases can arise, when one of the formal parameters
is already known to alias to various values:

>>> fun = """
... def f(a, b): return a and b
... def foo(A, B, C, D): return f(A or B, C or D)"""
>>> module = ast.parse(fun)
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Call)
f((A or B), (C or D)) => ['A', 'B', 'C', 'D']
def plot_mv_grid_topology(self, technologies=False, **kwargs): """ Plots plain MV grid topology and optionally nodes by technology type (e.g. station or generator). Parameters ---------- technologies : :obj:`Boolean` If True plots stations, generators, etc. in the grid in different colors. If False does not plot any nodes. Default: False. For more information see :func:`edisgo.tools.plots.mv_grid_topology`. """ if self.network.pypsa is None: try: timesteps = self.network.timeseries.timeindex self.network.pypsa = pypsa_io.to_pypsa( self.network, mode=None, timesteps=timesteps) except: logging.warning( "pypsa representation of MV grid needed to plot MV " "grid topology.") if self.network.pypsa is not None: plots.mv_grid_topology( self.network.pypsa, self.network.config, node_color='technology' if technologies is True else None, filename=kwargs.get('filename', None), grid_district_geom=kwargs.get('grid_district_geom', True), background_map=kwargs.get('background_map', True), xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None), title=kwargs.get('title', ''))
Plots plain MV grid topology and optionally nodes by technology type (e.g. station or generator). Parameters ---------- technologies : :obj:`Boolean` If True plots stations, generators, etc. in the grid in different colors. If False does not plot any nodes. Default: False. For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
def merge_las(*las_files): """ Merges multiple las files into one merged = merge_las(las_1, las_2) merged = merge_las([las_1, las_2, las_3]) Parameters ---------- las_files: Iterable of LasData or LasData Returns ------- pylas.lasdatas.base.LasBase The result of the merging """ if len(las_files) == 1: las_files = las_files[0] if not las_files: raise ValueError("No files to merge") if not utils.files_have_same_dtype(las_files): raise ValueError("All files must have the same point format") header = las_files[0].header num_pts_merged = sum(len(las.points) for las in las_files) # scaled x, y, z have to be set manually # to be sure to have a good offset in the header merged = create_from_header(header) # TODO extra dimensions should be manged better here for dim_name, dim_type in las_files[0].points_data.point_format.extra_dims: merged.add_extra_dim(dim_name, dim_type) merged.points = np.zeros(num_pts_merged, merged.points.dtype) merged_x = np.zeros(num_pts_merged, np.float64) merged_y = np.zeros(num_pts_merged, np.float64) merged_z = np.zeros(num_pts_merged, np.float64) offset = 0 for i, las in enumerate(las_files, start=1): slc = slice(offset, offset + len(las.points)) merged.points[slc] = las.points merged_x[slc] = las.x merged_y[slc] = las.y merged_z[slc] = las.z merged['point_source_id'][slc] = i offset += len(las.points) merged.x = merged_x merged.y = merged_y merged.z = merged_z return merged
Merges multiple las files into one merged = merge_las(las_1, las_2) merged = merge_las([las_1, las_2, las_3]) Parameters ---------- las_files: Iterable of LasData or LasData Returns ------- pylas.lasdatas.base.LasBase The result of the merging
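A hedged usage sketch; the file names are hypothetical and it assumes the pylas package, which this function comes from, is installed:

import pylas

las_a = pylas.read("tile_a.las")
las_b = pylas.read("tile_b.las")

merged = merge_las(las_a, las_b)      # or merge_las([las_a, las_b])
merged.write("merged.las")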
def edit_profile(): """Updates a profile""" if g.user is None: abort(401) form = dict(name=g.user.name, email=g.user.email) if request.method == 'POST': if 'delete' in request.form: User.get_collection().remove(g.user) session['openid'] = None flash(u'Profile deleted') return redirect(url_for('index')) form['name'] = request.form['name'] form['email'] = request.form['email'] if not form['name']: flash(u'Error: you have to provide a name') elif '@' not in form['email']: flash(u'Error: you have to enter a valid email address') else: flash(u'Profile successfully created') g.user.name = form['name'] g.user.email = form['email'] uid = User.get_collection().save(g.user) return redirect(url_for('edit_profile')) return render_template('edit_profile.html', form=form)
Updates a profile
def modify_replication_instance(ReplicationInstanceArn=None, AllocatedStorage=None, ApplyImmediately=None, ReplicationInstanceClass=None, VpcSecurityGroupIds=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AllowMajorVersionUpgrade=None, AutoMinorVersionUpgrade=None, ReplicationInstanceIdentifier=None): """ Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request. Some settings are applied during the maintenance window. See also: AWS API Documentation :example: response = client.modify_replication_instance( ReplicationInstanceArn='string', AllocatedStorage=123, ApplyImmediately=True|False, ReplicationInstanceClass='string', VpcSecurityGroupIds=[ 'string', ], PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AllowMajorVersionUpgrade=True|False, AutoMinorVersionUpgrade=True|False, ReplicationInstanceIdentifier='string' ) :type ReplicationInstanceArn: string :param ReplicationInstanceArn: [REQUIRED] The Amazon Resource Name (ARN) of the replication instance. :type AllocatedStorage: integer :param AllocatedStorage: The amount of storage (in gigabytes) to be allocated for the replication instance. :type ApplyImmediately: boolean :param ApplyImmediately: Indicates whether the changes should be applied immediately or during the next maintenance window. :type ReplicationInstanceClass: string :param ReplicationInstanceClass: The compute and memory capacity of the replication instance. Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance. (string) -- :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type MultiAZ: boolean :param MultiAZ: Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true . :type EngineVersion: string :param EngineVersion: The engine version number of the replication instance. :type AllowMajorVersionUpgrade: boolean :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the replication instance during the maintenance window. 
Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and AWS DMS has enabled auto patching for that engine version. :type ReplicationInstanceIdentifier: string :param ReplicationInstanceIdentifier: The replication instance identifier. This parameter is stored as a lowercase string. :rtype: dict :return: { 'ReplicationInstance': { 'ReplicationInstanceIdentifier': 'string', 'ReplicationInstanceClass': 'string', 'ReplicationInstanceStatus': 'string', 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'AvailabilityZone': 'string', 'ReplicationSubnetGroup': { 'ReplicationSubnetGroupIdentifier': 'string', 'ReplicationSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ] }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'ReplicationInstanceClass': 'string', 'AllocatedStorage': 123, 'MultiAZ': True|False, 'EngineVersion': 'string' }, 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'KmsKeyId': 'string', 'ReplicationInstanceArn': 'string', 'ReplicationInstancePublicIpAddress': 'string', 'ReplicationInstancePrivateIpAddress': 'string', 'ReplicationInstancePublicIpAddresses': [ 'string', ], 'ReplicationInstancePrivateIpAddresses': [ 'string', ], 'PubliclyAccessible': True|False, 'SecondaryAvailabilityZone': 'string' } } :returns: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. """ pass
Modifies the replication instance to apply new settings. You can change one or more parameters by specifying these parameters and the new values in the request. Some settings are applied during the maintenance window. See also: AWS API Documentation :example: response = client.modify_replication_instance( ReplicationInstanceArn='string', AllocatedStorage=123, ApplyImmediately=True|False, ReplicationInstanceClass='string', VpcSecurityGroupIds=[ 'string', ], PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AllowMajorVersionUpgrade=True|False, AutoMinorVersionUpgrade=True|False, ReplicationInstanceIdentifier='string' ) :type ReplicationInstanceArn: string :param ReplicationInstanceArn: [REQUIRED] The Amazon Resource Name (ARN) of the replication instance. :type AllocatedStorage: integer :param AllocatedStorage: The amount of storage (in gigabytes) to be allocated for the replication instance. :type ApplyImmediately: boolean :param ApplyImmediately: Indicates whether the changes should be applied immediately or during the next maintenance window. :type ReplicationInstanceClass: string :param ReplicationInstanceClass: The compute and memory capacity of the replication instance. Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance. (string) -- :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied. Default: Uses existing setting Format: ddd:hh24:mi-ddd:hh24:mi Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes :type MultiAZ: boolean :param MultiAZ: Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true . :type EngineVersion: string :param EngineVersion: The engine version number of the replication instance. :type AllowMajorVersionUpgrade: boolean :param AllowMajorVersionUpgrade: Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the replication instance's current version. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor version upgrades will be applied automatically to the replication instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true during the maintenance window, and a newer minor version is available, and AWS DMS has enabled auto patching for that engine version. 
:type ReplicationInstanceIdentifier: string :param ReplicationInstanceIdentifier: The replication instance identifier. This parameter is stored as a lowercase string. :rtype: dict :return: { 'ReplicationInstance': { 'ReplicationInstanceIdentifier': 'string', 'ReplicationInstanceClass': 'string', 'ReplicationInstanceStatus': 'string', 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'AvailabilityZone': 'string', 'ReplicationSubnetGroup': { 'ReplicationSubnetGroupIdentifier': 'string', 'ReplicationSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ] }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'ReplicationInstanceClass': 'string', 'AllocatedStorage': 123, 'MultiAZ': True|False, 'EngineVersion': 'string' }, 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'KmsKeyId': 'string', 'ReplicationInstanceArn': 'string', 'ReplicationInstancePublicIpAddress': 'string', 'ReplicationInstancePrivateIpAddress': 'string', 'ReplicationInstancePublicIpAddresses': [ 'string', ], 'ReplicationInstancePrivateIpAddresses': [ 'string', ], 'PubliclyAccessible': True|False, 'SecondaryAvailabilityZone': 'string' } } :returns: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
def newton_iterate(evaluate_fn, s, t): r"""Perform a Newton iteration. In this function, we assume that :math:`s` and :math:`t` are nonzero, this makes convergence easier to detect since "relative error" at ``0.0`` is not a useful measure. There are several tolerance / threshold quantities used below: * :math:`10` (:attr:`MAX_NEWTON_ITERATIONS`) iterations will be done before "giving up". This is based on the assumption that we are already starting near a root, so quadratic convergence should terminate quickly. * :math:`\tau = \frac{1}{4}` is used as the boundary between linear and superlinear convergence. So if the current error :math:`\|p_{n + 1} - p_n\|` is not smaller than :math:`\tau` times the previous error :math:`\|p_n - p_{n - 1}\|`, then convergence is considered to be linear at that point. * :math:`\frac{2}{3}` of all iterations must be converging linearly for convergence to be stopped (and moved to the next regime). This will only be checked after 4 or more updates have occurred. * :math:`\tau = 2^{-42}` (:attr:`NEWTON_ERROR_RATIO`) is used to determine that an update is sufficiently small to stop iterating. So if the error :math:`\|p_{n + 1} - p_n\|` smaller than :math:`\tau` times size of the term being updated :math:`\|p_n\|`, then we exit with the "correct" answer. It is assumed that ``evaluate_fn`` will use a Jacobian return value of :data:`None` to indicate that :math:`F(s, t)` is exactly ``0.0``. We **assume** that if the function evaluates to exactly ``0.0``, then we are at a solution. It is possible however, that badly parameterized curves can evaluate to exactly ``0.0`` for inputs that are relatively far away from a solution (see issue #21). Args: evaluate_fn (Callable[Tuple[float, float], tuple]): A callable which takes :math:`s` and :math:`t` and produces an evaluated function value and the Jacobian matrix. s (float): The (first) parameter where the iteration will start. t (float): The (second) parameter where the iteration will start. Returns: Tuple[bool, float, float]: The triple of * Flag indicating if the iteration converged. * The current :math:`s` value when the iteration stopped. * The current :math:`t` value when the iteration stopped. """ # Several quantities will be tracked throughout the iteration: # * norm_update_prev: ||p{n} - p{n-1}|| = ||dp{n-1}|| # * norm_update : ||p{n+1} - p{n} || = ||dp{n} || # * linear_updates : This is a count on the number of times that # ``dp{n}`` "looks like" ``dp{n-1}`` (i.e. # is within a constant factor of it). norm_update_prev = None norm_update = None linear_updates = 0 # Track the number of "linear" updates. current_s = s current_t = t for index in six.moves.xrange(MAX_NEWTON_ITERATIONS): jacobian, func_val = evaluate_fn(current_s, current_t) if jacobian is None: return True, current_s, current_t singular, delta_s, delta_t = _helpers.solve2x2( jacobian, func_val[:, 0] ) if singular: break norm_update_prev = norm_update norm_update = np.linalg.norm([delta_s, delta_t], ord=2) # If ||p{n} - p{n-1}|| > 0.25 ||p{n-1} - p{n-2}||, then that means # our convergence is acting linear at the current step. if index > 0 and norm_update > 0.25 * norm_update_prev: linear_updates += 1 # If ``>=2/3`` of the updates have been linear, we are near a # non-simple root. (Make sure at least 5 updates have occurred.) if index >= 4 and 3 * linear_updates >= 2 * index: break # Determine the norm of the "old" solution before updating. 
norm_soln = np.linalg.norm([current_s, current_t], ord=2) current_s -= delta_s current_t -= delta_t if norm_update < NEWTON_ERROR_RATIO * norm_soln: return True, current_s, current_t return False, current_s, current_t
r"""Perform a Newton iteration.

In this function, we assume that :math:`s` and :math:`t` are nonzero;
this makes convergence easier to detect since "relative error" at ``0.0``
is not a useful measure.

There are several tolerance / threshold quantities used below:

* :math:`10` (:attr:`MAX_NEWTON_ITERATIONS`) iterations will be done before
  "giving up". This is based on the assumption that we are already starting
  near a root, so quadratic convergence should terminate quickly.
* :math:`\tau = \frac{1}{4}` is used as the boundary between linear and
  superlinear convergence. So if the current error
  :math:`\|p_{n + 1} - p_n\|` is not smaller than :math:`\tau` times the
  previous error :math:`\|p_n - p_{n - 1}\|`, then convergence is considered
  to be linear at that point.
* :math:`\frac{2}{3}` of all iterations must be converging linearly for
  convergence to be stopped (and moved to the next regime). This will only
  be checked after 4 or more updates have occurred.
* :math:`\tau = 2^{-42}` (:attr:`NEWTON_ERROR_RATIO`) is used to determine
  that an update is sufficiently small to stop iterating. So if the error
  :math:`\|p_{n + 1} - p_n\|` is smaller than :math:`\tau` times the size of
  the term being updated :math:`\|p_n\|`, then we exit with the "correct"
  answer.

It is assumed that ``evaluate_fn`` will use a Jacobian return value of
:data:`None` to indicate that :math:`F(s, t)` is exactly ``0.0``. We
**assume** that if the function evaluates to exactly ``0.0``, then we are
at a solution. It is possible, however, that badly parameterized curves can
evaluate to exactly ``0.0`` for inputs that are relatively far away from a
solution (see issue #21).

Args:
    evaluate_fn (Callable[Tuple[float, float], tuple]): A callable which
        takes :math:`s` and :math:`t` and produces an evaluated function
        value and the Jacobian matrix.
    s (float): The (first) parameter where the iteration will start.
    t (float): The (second) parameter where the iteration will start.

Returns:
    Tuple[bool, float, float]: The triple of

    * Flag indicating if the iteration converged.
    * The current :math:`s` value when the iteration stopped.
    * The current :math:`t` value when the iteration stopped.
def node_from_xml(xmlfile, nodefactory=Node): """ Convert a .xml file into a Node object. :param xmlfile: a file name or file object open for reading """ root = parse(xmlfile).getroot() return node_from_elem(root, nodefactory)
Convert a .xml file into a Node object. :param xmlfile: a file name or file object open for reading
def build(args):
    """Build a target and its dependencies."""
    if len(args) != 1:
        log.error('One target required.')
        app.quit(1)
    target = address.new(args[0])
    log.info('Resolved target to: %s', target)
    try:
        bb = Butcher()
        bb.clean()
        bb.load_graph(target)
        bb.build(target)
    except (gitrepo.GitError, error.BrokenGraph, error.NoSuchTargetError) as err:
        log.fatal(err)
        app.quit(1)
    except error.OverallBuildFailure as err:
        log.fatal(err)
        log.fatal('Error list:')
        for e in bb.failure_log:
            log.fatal(' [%s]: %s', e.node, e)
        app.quit(1)
Build a target and its dependencies.
def on_draw(self, e): """Draw all visuals.""" gloo.clear() for visual in self.visuals: logger.log(5, "Draw visual `%s`.", visual) visual.on_draw()
Draw all visuals.
def multi(method):
    """Decorator for RestServer methods that take multiple addresses"""
    @functools.wraps(method)
    def multi(self, address=''):
        values = flask.request.values
        address = urllib.parse.unquote_plus(address)
        if address and values and not address.endswith('.'):
            address += '.'
        result = {}
        for a in values or '':
            try:
                if not self.project:
                    raise ValueError('No Project is currently loaded')
                ed = editor.Editor(address + a, self.project)
                result[address + a] = {'value': method(self, ed, a)}
            except Exception:
                if self.project:
                    traceback.print_exc()
                result[address + a] = {'error': 'Could not multi addr %s' % a}
        return flask.jsonify(result)
    return multi
Decorator for RestServer methods that take multiple addresses
def _parse_application_info(self, info_container): """ Parses the guild's application info. Parameters ---------- info_container: :class:`bs4.Tag` The parsed content of the information container. """ m = applications_regex.search(info_container.text) if m: self.open_applications = m.group(1) == "opened"
Parses the guild's application info. Parameters ---------- info_container: :class:`bs4.Tag` The parsed content of the information container.
def graph_from_dot_file(path): """Load graph as defined by a DOT file. The file is assumed to be in DOT format. It will be loaded, parsed and a Dot class will be returned, representing the graph. """ fd = open(path, 'rb') data = fd.read() fd.close() return graph_from_dot_data(data)
Load graph as defined by a DOT file. The file is assumed to be in DOT format. It will be loaded, parsed and a Dot class will be returned, representing the graph.
def days_at_time(days, t, tz, day_offset=0): """ Create an index of days at time ``t``, interpreted in timezone ``tz``. The returned index is localized to UTC. Parameters ---------- days : DatetimeIndex An index of dates (represented as midnight). t : datetime.time The time to apply as an offset to each day in ``days``. tz : pytz.timezone The timezone to use to interpret ``t``. day_offset : int The number of days we want to offset @days by Examples -------- In the example below, the times switch from 13:45 to 12:45 UTC because March 13th is the daylight savings transition for US/Eastern. All the times are still 8:45 when interpreted in US/Eastern. >>> import pandas as pd; import datetime; import pprint >>> dts = pd.date_range('2016-03-12', '2016-03-14') >>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern') >>> pprint.pprint([str(dt) for dt in dts_at_845]) ['2016-03-12 13:45:00+00:00', '2016-03-13 12:45:00+00:00', '2016-03-14 12:45:00+00:00'] """ days = pd.DatetimeIndex(days).tz_localize(None) if len(days) == 0: return days.tz_localize(UTC) # Offset days without tz to avoid timezone issues. delta = pd.Timedelta( days=day_offset, hours=t.hour, minutes=t.minute, seconds=t.second, ) return (days + delta).tz_localize(tz).tz_convert(UTC)
Create an index of days at time ``t``, interpreted in timezone ``tz``. The returned index is localized to UTC. Parameters ---------- days : DatetimeIndex An index of dates (represented as midnight). t : datetime.time The time to apply as an offset to each day in ``days``. tz : pytz.timezone The timezone to use to interpret ``t``. day_offset : int The number of days we want to offset @days by Examples -------- In the example below, the times switch from 13:45 to 12:45 UTC because March 13th is the daylight savings transition for US/Eastern. All the times are still 8:45 when interpreted in US/Eastern. >>> import pandas as pd; import datetime; import pprint >>> dts = pd.date_range('2016-03-12', '2016-03-14') >>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern') >>> pprint.pprint([str(dt) for dt in dts_at_845]) ['2016-03-12 13:45:00+00:00', '2016-03-13 12:45:00+00:00', '2016-03-14 12:45:00+00:00']
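A small extension of the docstring example showing ``day_offset`` (dates are illustrative; assumes the function and its UTC constant are in scope):

import datetime
import pandas as pd

dts = pd.date_range('2016-03-12', '2016-03-13')
# Shift every session forward one calendar day before applying the 8:45 offset.
shifted = days_at_time(dts, datetime.time(8, 45), 'US/Eastern', day_offset=1)
print([str(dt) for dt in shifted])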
def listMembers(self, id, headers=None, query_params=None, content_type="application/json"): """ Get a list of network members It is method for GET /network/{id}/member """ uri = self.client.base_url + "/network/"+id+"/member" return self.client.get(uri, None, headers, query_params, content_type)
Get a list of network members It is method for GET /network/{id}/member
def _build_table(self) -> Dict[State, Tuple[Multiplex, ...]]:
    """ Private method which builds the table mapping a State to its active multiplexes. """
    result: Dict[State, Tuple[Multiplex, ...]] = {}
    for state in self.influence_graph.all_states():
        result[state] = tuple(multiplex for multiplex in self.influence_graph.multiplexes
                              if multiplex.is_active(state))
    return result
Private method which builds the table mapping a State to its active multiplexes.
def pad_batch_dimension_for_multiple_chains( observed_time_series, model, chain_batch_shape): """"Expand the observed time series with extra batch dimension(s).""" # Running with multiple chains introduces an extra batch dimension. In # general we also need to pad the observed time series with a matching batch # dimension. # # For example, suppose our model has batch shape [3, 4] and # the observed time series has shape `concat([[5], [3, 4], [100])`, # corresponding to `sample_shape`, `batch_shape`, and `num_timesteps` # respectively. The model will produce distributions with batch shape # `concat([chain_batch_shape, [3, 4]])`, so we pad `observed_time_series` to # have matching shape `[5, 1, 3, 4, 100]`, where the added `1` dimension # between the sample and batch shapes will broadcast to `chain_batch_shape`. [ # Extract mask and guarantee `event_ndims=2`. observed_time_series, is_missing ] = canonicalize_observed_time_series_with_mask(observed_time_series) event_ndims = 2 # event_shape = [num_timesteps, observation_size=1] model_batch_ndims = ( model.batch_shape.ndims if model.batch_shape.ndims is not None else tf.shape(input=model.batch_shape_tensor())[0]) # Compute ndims from chain_batch_shape. chain_batch_shape = tf.convert_to_tensor( value=chain_batch_shape, name='chain_batch_shape', dtype=tf.int32) if not chain_batch_shape.shape.is_fully_defined(): raise ValueError('Batch shape must have static rank. (given: {})'.format( chain_batch_shape)) if chain_batch_shape.shape.ndims == 0: # expand int `k` to `[k]`. chain_batch_shape = chain_batch_shape[tf.newaxis] chain_batch_ndims = tf.compat.dimension_value(chain_batch_shape.shape[0]) def do_padding(observed_time_series_tensor): current_sample_shape = tf.shape( input=observed_time_series_tensor)[:-(model_batch_ndims + event_ndims)] current_batch_and_event_shape = tf.shape( input=observed_time_series_tensor)[-(model_batch_ndims + event_ndims):] return tf.reshape( tensor=observed_time_series_tensor, shape=tf.concat([ current_sample_shape, tf.ones([chain_batch_ndims], dtype=tf.int32), current_batch_and_event_shape], axis=0)) # Padding is only needed if the observed time series has sample shape. observed_time_series = prefer_static.cond( (dist_util.prefer_static_rank(observed_time_series) > model_batch_ndims + event_ndims), lambda: do_padding(observed_time_series), lambda: observed_time_series) if is_missing is not None: is_missing = prefer_static.cond( (dist_util.prefer_static_rank(is_missing) > model_batch_ndims + event_ndims), lambda: do_padding(is_missing), lambda: is_missing) return missing_values_util.MaskedTimeSeries(observed_time_series, is_missing=is_missing) return observed_time_series
Expand the observed time series with extra batch dimension(s).
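A tiny NumPy sketch of the shape manipulation described in the comments above (the shapes are the illustrative ones from that example, not anything required by the API):

import numpy as np

observed = np.zeros((5, 3, 4, 100))        # sample, model batch (3, 4), timesteps
chain_batch_ndims = 1                      # e.g. chain_batch_shape = [num_chains]

# Insert a length-1 dim between the sample and batch shapes so it can
# broadcast against the chain batch shape.
padded = observed.reshape((5,) + (1,) * chain_batch_ndims + (3, 4, 100))
print(padded.shape)                        # (5, 1, 3, 4, 100)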
def update_video(video_data): """ Called on to update Video objects in the database update_video is used to update Video objects by the given edx_video_id in the video_data. Args: video_data (dict): { url: api url to the video edx_video_id: ID of the video duration: Length of video in seconds client_video_id: client ID of video encoded_video: a list of EncodedVideo dicts url: url of the video file_size: size of the video in bytes profile: ID of the profile courses: Courses associated with this video } Raises: Raises ValVideoNotFoundError if the video cannot be retrieved. Raises ValCannotUpdateError if the video cannot be updated. Returns the successfully updated Video object """ try: video = _get_video(video_data.get("edx_video_id")) except Video.DoesNotExist: error_message = u"Video not found when trying to update video with edx_video_id: {0}".format(video_data.get("edx_video_id")) raise ValVideoNotFoundError(error_message) serializer = VideoSerializer(video, data=video_data) if serializer.is_valid(): serializer.save() return video_data.get("edx_video_id") else: raise ValCannotUpdateError(serializer.errors)
Called on to update Video objects in the database update_video is used to update Video objects by the given edx_video_id in the video_data. Args: video_data (dict): { url: api url to the video edx_video_id: ID of the video duration: Length of video in seconds client_video_id: client ID of video encoded_video: a list of EncodedVideo dicts url: url of the video file_size: size of the video in bytes profile: ID of the profile courses: Courses associated with this video } Raises: Raises ValVideoNotFoundError if the video cannot be retrieved. Raises ValCannotUpdateError if the video cannot be updated. Returns the successfully updated Video object
def build_save_containers(platforms, registry, load_cache) -> int: """ Entry point to build and upload all built dockerimages in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if error occurred, 0 otherwise """ from joblib import Parallel, delayed if len(platforms) == 0: return 0 platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")( delayed(_build_save_container)(platform, registry, load_cache) for platform in platforms) is_error = False for platform_result in platform_results: if platform_result is not None: logging.error('Failed to generate %s', platform_result) is_error = True return 1 if is_error else 0
Entry point to build and upload all built dockerimages in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if error occurred, 0 otherwise
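The joblib fan-out used above, reduced to a standalone sketch; the worker and its inputs are placeholders, not part of the original build code:

from joblib import Parallel, delayed

def _square(x):
    return x * x

# Dispatch one task per input across processes and collect results in input order.
results = Parallel(n_jobs=4, backend="multiprocessing")(
    delayed(_square)(i) for i in range(8))
print(results)   # [0, 1, 4, 9, 16, 25, 36, 49]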
def cancelPnL(self, account, modelCode: str = ''): """ Cancel PnL subscription. Args: account: Cancel for this account. modelCode: If specified, cancel for this account model. """ key = (account, modelCode) reqId = self.wrapper.pnlKey2ReqId.pop(key, None) if reqId: self.client.cancelPnL(reqId) self.wrapper.pnls.pop(reqId, None) else: self._logger.error( 'cancelPnL: No subscription for ' f'account {account}, modelCode {modelCode}')
Cancel PnL subscription. Args: account: Cancel for this account. modelCode: If specified, cancel for this account model.
def complete_object_value( self, return_type: GraphQLObjectType, field_nodes: List[FieldNode], info: GraphQLResolveInfo, path: ResponsePath, result: Any, ) -> AwaitableOrValue[Dict[str, Any]]: """Complete an Object value by executing all sub-selections.""" # If there is an `is_type_of()` predicate function, call it with the current # result. If `is_type_of()` returns False, then raise an error rather than # continuing execution. if return_type.is_type_of: is_type_of = return_type.is_type_of(result, info) if isawaitable(is_type_of): async def collect_and_execute_subfields_async(): if not await is_type_of: raise invalid_return_type_error( return_type, result, field_nodes ) return self.collect_and_execute_subfields( return_type, field_nodes, path, result ) return collect_and_execute_subfields_async() if not is_type_of: raise invalid_return_type_error(return_type, result, field_nodes) return self.collect_and_execute_subfields( return_type, field_nodes, path, result )
Complete an Object value by executing all sub-selections.
def _get_html_contents(html):
    """Process an HTML block and detect whether it is a code block,
    a math block, or a regular HTML block."""
    parser = MyHTMLParser()
    parser.feed(html)
    if parser.is_code:
        return ('code', parser.data.strip())
    elif parser.is_math:
        return ('math', parser.data.strip())
    else:
        return '', ''
Process an HTML block and detect whether it is a code block, a math block, or a regular HTML block.
def remove_file(self, filepath):
    """
    Removes the DataFrameModel from being registered.

    :param filepath: (str)
        The filepath to delete from the DataFrameModelManager.
    :return: None
    """
    self._models.pop(filepath)
    # dict.pop() takes no keyword arguments; pass the default positionally.
    self._updates.pop(filepath, None)
    self.signalModelDestroyed.emit(filepath)
Removes the DataFrameModel from being registered. :param filepath: (str) The filepath to delete from the DataFrameModelManager. :return: None
def to_bool(value): """Convert string value to bool.""" bool_value = False if str(value).lower() in ['1', 'true']: bool_value = True return bool_value
Convert string value to bool.
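For illustration, a few calls showing the coercion rule above (assumes the function is in scope):

print(to_bool("TRUE"))   # True  ("true" after lowercasing)
print(to_bool(1))        # True  (str(1) == '1')
print(to_bool("yes"))    # False (only '1' and 'true' count)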
def _from_string(cls, serialized):
    """
    Return a DefinitionLocator parsing the given serialized string
    :param serialized: the serialized string to parse
    """
    parse = cls.URL_RE.match(serialized)
    if not parse:
        raise InvalidKeyError(cls, serialized)

    parse = parse.groupdict()
    if parse['definition_id']:
        parse['definition_id'] = cls.as_object_id(parse['definition_id'])

    return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})
Return a DefinitionLocator parsing the given serialized string
:param serialized: the serialized string to parse
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    """
    Retrieves the release notes from the RELEASE_NOTES file (if in a package)
    or generates them from the git history.

    Args:
        project_dir(str): Path to the git repo of the project.
        bugtracker_url(str): Url to the bug tracker for the issues.

    Returns:
        str: release notes

    Raises:
        RuntimeError: If the release notes could not be retrieved
    """
    releasenotes = ''
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES')
    if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file):
        with open(releasenotes_file) as releasenotes_fd:
            releasenotes = releasenotes_fd.read()
    else:
        releasenotes = api.get_releasenotes(
            repo_path=project_dir,
            bugtracker_url=bugtracker_url,
        )
    return releasenotes
Retrieves the release notes from the RELEASE_NOTES file (if in a package)
or generates them from the git history.

Args:
    project_dir(str): Path to the git repo of the project.
    bugtracker_url(str): Url to the bug tracker for the issues.

Returns:
    str: release notes

Raises:
    RuntimeError: If the release notes could not be retrieved
def mset_list(item, index, value):
    'set multiple items via index of int, slice or list'
    if isinstance(index, (int, slice)):
        item[index] = value
    else:
        # apply each assignment eagerly; a bare map() is lazy on Python 3 and would do nothing
        for i, v in zip(index, value):
            item[i] = v
set multiple items via index of int, slice or list
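A short usage sketch of mset_list (assuming the helper above is in scope), showing the three supported index kinds:

data = [0, 1, 2, 3, 4]
mset_list(data, 1, 99)                  # int index       -> [0, 99, 2, 3, 4]
mset_list(data, slice(0, 2), [7, 8])    # slice index     -> [7, 8, 2, 3, 4]
mset_list(data, [3, 4], [30, 40])       # list of indices -> [7, 8, 2, 30, 40]
print(data)  # [7, 8, 2, 30, 40]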
def addNoise(vecs, percent=0.1, n=2048): """ Add noise to the given sequence of vectors and return the modified sequence. A percentage of the on bits are shuffled to other locations. """ noisyVecs = [] for vec in vecs: nv = vec.copy() for idx in vec: if numpy.random.random() <= percent: nv.discard(idx) nv.add(numpy.random.randint(n)) noisyVecs.append(nv) return noisyVecs
Add noise to the given sequence of vectors and return the modified sequence. A percentage of the on bits are shuffled to other locations.
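An illustrative call to addNoise (assuming the function above and numpy are available); each vector is a set of active bit indices, and roughly `percent` of them get replaced by random indices below `n`:

import numpy

numpy.random.seed(0)  # seeding only to make this sketch repeatable
vecs = [{1, 5, 7, 100}, {2, 3, 200}]
noisy = addNoise(vecs, percent=0.5, n=2048)
print(noisy)  # same number of sets, with about half the indices moved elsewhere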
def list_quota_volume(name): ''' List quotas of glusterfs volume name Name of the gluster volume CLI Example: .. code-block:: bash salt '*' glusterfs.list_quota_volume <volume> ''' cmd = 'volume quota {0}'.format(name) cmd += ' list' root = _gluster_xml(cmd) if not _gluster_ok(root): return None ret = {} for limit in _iter(root, 'limit'): path = limit.find('path').text ret[path] = _etree_to_dict(limit) return ret
List quotas of glusterfs volume name Name of the gluster volume CLI Example: .. code-block:: bash salt '*' glusterfs.list_quota_volume <volume>
def _map_content_types(archetype_tool, catalogs_definition):
    """
    Updates the mapping for content_types against catalogs
    :archetype_tool: an archetype_tool object
    :catalogs_definition: a dictionary like
        {
            CATALOG_ID: {
                'types':   ['ContentType', ...],
                'indexes': {
                    'UID': 'FieldIndex',
                    ...
                },
                'columns': [
                    'Title',
                    ...
                ]
            }
        }
    """
    # This will be a dictionary like {'content_type': ['catalog_id', ...]}
    ct_map = {}
    # This list will contain the catalog ids to be rebuilt
    to_reindex = []
    # getting the dictionary of mapped content_types in the catalog
    map_types = archetype_tool.catalog_map
    for catalog_id in catalogs_definition.keys():
        catalog_info = catalogs_definition.get(catalog_id, {})
        # Mapping the catalog with the defined types
        types = catalog_info.get('types', [])
        for t in types:
            tmp_l = ct_map.get(t, [])
            tmp_l.append(catalog_id)
            ct_map[t] = tmp_l
    # Mapping
    for t in ct_map.keys():
        catalogs_list = ct_map[t]
        # Getting the previous mapping
        prev_catalogs_list = archetype_tool.catalog_map.get(t, [])
        # If the mapping has changed, update it
        set1 = set(catalogs_list)
        set2 = set(prev_catalogs_list)
        if set1 != set2:
            archetype_tool.setCatalogsByType(t, catalogs_list)
            # Adding to reindex only the catalogs that have changed
            to_reindex = to_reindex + list(set1 - set2) + list(set2 - set1)
    return to_reindex
Updates the mapping for content_types against catalogs :archetype_tool: an archetype_tool object :catalogs_definition: a dictionary like { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } }
def is_device_connected(self, ip):
    """
    Check if a device identified by its IP is connected to the box

    :param ip: IP of the device you want to test
    :type ip: str
    :return: True if the device is connected, False if it's not
    :rtype: bool
    """
    all_devices = self.get_all_connected_devices()

    for device in all_devices:
        if ip == device['ipaddress']:
            return device['active'] == 1

    return False
Check if a device identified by its IP is connected to the box

:param ip: IP of the device you want to test
:type ip: str
:return: True if the device is connected, False if it's not
:rtype: bool
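The lookup boils down to a linear scan over the device list; below is a self-contained sketch with a stubbed list. The 'ipaddress'/'active' field names are taken from the method above, but the surrounding client class is only assumed here.

devices = [
    {'ipaddress': '192.168.1.10', 'active': 1},
    {'ipaddress': '192.168.1.11', 'active': 0},
]

def is_connected(ip, all_devices=devices):
    for device in all_devices:
        if ip == device['ipaddress']:
            return device['active'] == 1
    return False

print(is_connected('192.168.1.10'))  # True
print(is_connected('192.168.1.11'))  # False: known but inactive
print(is_connected('10.0.0.1'))      # False: not in the list at all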
def configure_room(self, form):
    """
    Configure the room using the provided data.
    Do nothing if the provided form is of type 'cancel'.

    :Parameters:
        - `form`: the configuration parameters. Should be a 'submit' form made by
          filling-in the configuration form retrieved using
          `self.request_configuration_form` or a 'cancel' form.
    :Types:
        - `form`: `Form`

    :return: id of the request stanza or `None` if a 'cancel' form was provided.
    :returntype: `unicode`
    """
    if form.type == "cancel":
        return None
    elif form.type != "submit":
        raise ValueError("A 'submit' form is required to configure a room")
    iq = Iq(to_jid=self.room_jid.bare(), stanza_type="set")
    query = iq.new_query(MUC_OWNER_NS, "query")
    form.as_xml(query)
    self.manager.stream.set_response_handlers(
        iq, self.process_configuration_success, self.process_configuration_error)
    self.manager.stream.send(iq)
    return iq.get_id()
Configure the room using the provided data.
Do nothing if the provided form is of type 'cancel'.

:Parameters:
    - `form`: the configuration parameters. Should be a 'submit' form made by
      filling-in the configuration form retrieved using
      `self.request_configuration_form` or a 'cancel' form.
:Types:
    - `form`: `Form`

:return: id of the request stanza or `None` if a 'cancel' form was provided.
:returntype: `unicode`
def insertOutputConfig(self, businput):
    """
    Method to insert the Output Config.
    app_name, release_version, pset_hash, global_tag and output_module_label are required.
    args:
        businput (dict): input dictionary.
    Updated Oct 12, 2011
    """
    if not ("app_name" in businput and "release_version" in businput
            and "pset_hash" in businput and "output_module_label" in businput
            and "global_tag" in businput):
        dbsExceptionHandler('dbsException-invalid-input',
                            "business/DBSOutputConfig/insertOutputConfig require: app_name, release_version, pset_hash, output_module_label and global_tag")

    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        # Proceed with o/p module insertion
        businput['scenario'] = businput.get("scenario", None)
        businput['pset_name'] = businput.get("pset_name", None)
        self.outmodin.execute(conn, businput, tran)
        tran.commit()
        tran = None
    except SQLAlchemyIntegrityError as ex:
        if str(ex).find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
            # if the violation is due to a unique constraint break in OUTPUT_MODULE_CONFIGS
            if str(ex).find("TUC_OMC_1") != -1:
                pass
            # otherwise, try again
            else:
                try:
                    self.outmodin.execute(conn, businput, tran)
                    tran.commit()
                    tran = None
                except SQLAlchemyIntegrityError as ex1:
                    if str(ex1).find("unique constraint") != -1 and str(ex1).find("TUC_OMC_1") != -1:
                        pass
                except Exception as e1:
                    if tran:
                        tran.rollback()
                    tran = None
                    raise
        else:
            raise
    except Exception as e:
        if tran:
            tran.rollback()
        raise
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
Method to insert the Output Config.
app_name, release_version, pset_hash, global_tag and output_module_label are required.
args:
    businput (dict): input dictionary.
Updated Oct 12, 2011
def import_csv(file_name, **kwargs): """ Reads control points from a CSV file and generates a 1-dimensional list of control points. It is possible to use a different value separator via ``separator`` keyword argument. The following code segment illustrates the usage of ``separator`` keyword argument. .. code-block:: python :linenos: # By default, import_csv uses 'comma' as the value separator ctrlpts = exchange.import_csv("control_points.csv") # Alternatively, it is possible to import a file containing tab-separated values ctrlpts = exchange.import_csv("control_points.csv", separator="\\t") The only difference of this function from :py:func:`.exchange.import_txt()` is skipping the first line of the input file which generally contains the column headings. :param file_name: file name of the text file :type file_name: str :return: list of control points :rtype: list :raises GeomdlException: an error occurred reading the file """ # File delimiters sep = kwargs.get('separator', ",") content = exch.read_file(file_name, skip_lines=1) return exch.import_text_data(content, sep)
Reads control points from a CSV file and generates a 1-dimensional list of control points. It is possible to use a different value separator via ``separator`` keyword argument. The following code segment illustrates the usage of ``separator`` keyword argument. .. code-block:: python :linenos: # By default, import_csv uses 'comma' as the value separator ctrlpts = exchange.import_csv("control_points.csv") # Alternatively, it is possible to import a file containing tab-separated values ctrlpts = exchange.import_csv("control_points.csv", separator="\\t") The only difference of this function from :py:func:`.exchange.import_txt()` is skipping the first line of the input file which generally contains the column headings. :param file_name: file name of the text file :type file_name: str :return: list of control points :rtype: list :raises GeomdlException: an error occurred reading the file
def points_from_x0y0x1y1(xyxy): """ Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1] """ x0 = xyxy[0] y0 = xyxy[1] x1 = xyxy[2] y1 = xyxy[3] return "%s,%s %s,%s %s,%s %s,%s" % ( x0, y0, x1, y0, x1, y1, x0, y1 )
Constructs a polygon representation from a rectangle described as a list [x0, y0, x1, y1]
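A usage sketch (assuming points_from_x0y0x1y1 from above is in scope); the four rectangle corners come back as a single space-separated string of 'x,y' pairs:

print(points_from_x0y0x1y1([10, 20, 110, 220]))
# -> 10,20 110,20 110,220 10,220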