def _initLayerCtors(self):
    '''
    Registration for built-in Layer ctors
    '''
    ctors = {
        'lmdb': s_lmdblayer.LmdbLayer,
        'remote': s_remotelayer.RemoteLayer,
    }
    self.layrctors.update(**ctors)
def _mk_uninit_array(self, bounds):
    """Given a list of bounds for the N dimensions of an array,
    _mk_uninit_array() creates and returns an N-dimensional array of
    the size specified by the bounds, with each element set to the
    value None."""
    if len(bounds) == 0:
        raise For2PyError("Zero-length arrays currently not handled!")
    this_dim = bounds[0]
    lo, hi = this_dim[0], this_dim[1]
    sz = hi - lo + 1
    if len(bounds) == 1:
        return [None] * sz
    sub_array = self._mk_uninit_array(bounds[1:])
    this_array = [copy.deepcopy(sub_array) for i in range(sz)]
    return this_array
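A quick usage sketch (``obj`` here is a hypothetical instance of the class hosting this method): bounds are (lo, hi) pairs per dimension, so [(1, 2), (1, 3)] describes a 2x3 array.

    # Hypothetical host object exposing _mk_uninit_array as defined above.
    arr = obj._mk_uninit_array([(1, 2), (1, 3)])
    assert arr == [[None, None, None], [None, None, None]]
    # deepcopy of the sub-array keeps rows from aliasing each other:
    arr[0][0] = 42
    assert arr[1][0] is None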
def create_id_token(token, user, aud, nonce='', at_hash='',
                    request=None, scope=None):
    """
    Creates the id_token dictionary.
    See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
    Return a dict.
    """
    if scope is None:
        scope = []
    sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)
    expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')

    # Convert datetimes into timestamps.
    now = int(time.time())
    iat_time = now
    exp_time = int(now + expires_in)
    user_auth_time = user.last_login or user.date_joined
    auth_time = int(dateformat.format(user_auth_time, 'U'))

    dic = {
        'iss': get_issuer(request=request),
        'sub': sub,
        'aud': str(aud),
        'exp': exp_time,
        'iat': iat_time,
        'auth_time': auth_time,
    }

    if nonce:
        dic['nonce'] = str(nonce)

    if at_hash:
        dic['at_hash'] = at_hash

    # Include (or not) user standard claims in the id_token.
    if settings.get('OIDC_IDTOKEN_INCLUDE_CLAIMS'):
        if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
            custom_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)
            claims = custom_claims.create_response_dic()
        else:
            claims = StandardScopeClaims(token).create_response_dic()
        dic.update(claims)

    dic = run_processing_hook(
        dic, 'OIDC_IDTOKEN_PROCESSING_HOOK',
        user=user, token=token, request=request)

    return dic
def hash_parameters(keys, minimize=True, to_int=None):
    """
    Calculates the parameters for a perfect hash. The result is returned
    as a HashInfo tuple which has the following fields:

    t
        The "table parameter". This is the minimum side length of the
        table used to create the hash. In practice, t**2 is the maximum
        size of the output hash.

    slots
        The original inputs mapped to a vector. This is the hash function.

    r
        The displacement vector. This is the displacement of the given
        row in the result vector. To find a given value, use
        ``x + r[y]``.

    offset
        The amount by which to offset all values (once converted to ints).

    to_int
        A function that converts the input to an int (if given).

    Keyword parameters:

    ``minimize``
        Whether or not to offset all integer keys internally by the
        minimum value. This typically results in smaller output.

    ``to_int``
        A callable that converts the input keys to ints. If not
        specified, all keys should be given as ints.

    >>> hash_parameters([1, 5, 7], minimize=False)
    HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
    >>> hash_parameters([1, 5, 7])
    HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)

    >>> l = (0, 3, 4, 7, 10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
    >>> phash = hash_parameters(l)
    >>> phash.slots
    (18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)

    For some values, the displacement vector will be rather empty:

    >>> hash_parameters('Andrea', to_int=ord).r
    (1, None, None, None, 0, -3, 4, None)
    """
    # If to_int is not assigned, simply use the identity function.
    if to_int is None:
        to_int = __identity

    key_to_original = {to_int(original): original for original in keys}

    # Create a set of all items to be hashed.
    items = list(key_to_original.keys())

    if minimize:
        offset = 0 - min(items)
        items = frozenset(x + offset for x in items)
    else:
        offset = 0

    # 1. Start with a square array (not stored) that is t units on each
    #    side. Choose a t such that t * t >= max(S).
    t = choose_best_t(items)
    assert t * t > max(items) and t * t >= len(items)

    # 2. Place each key K in the square at location (x, y), where
    #    x = K mod t, y = K / t.
    row_queue = place_items_in_square(items, t)

    # 3. Arrange rows so that they'll fit into one row and generate a
    #    displacement vector.
    final_row, displacement_vector = arrange_rows(row_queue, t)

    # Translate the internal keys to their original items.
    slots = tuple(key_to_original[item - offset] if item is not None else None
                  for item in final_row)

    # Return the parameters
    return HashInfo(
        t=t,
        slots=slots,
        r=displacement_vector,
        offset=offset,
        to_int=to_int if to_int is not __identity else None,
    )
def to_dict(self):
    """
    Return a dictionary representation of the dataset.
    """
    d = dict(doses=self.doses, ns=self.ns,
             means=self.means, stdevs=self.stdevs)
    d.update(self.kwargs)
    return d
async def activate_scene(self, scene_id: int):
    """Activate a scene.

    :param scene_id: Scene id.
    :return:
    """
    _scene = await self.get_scene(scene_id)
    await _scene.activate()
def run(self):
    '''Extends the run() method of threading.Thread
    '''
    self.connect()
    while True:
        for event in self.slack_client.rtm_read():
            logger.debug(event)
            if 'type' in event and event['type'] in self.supported_events:
                event_type = event['type']
                dispatcher = self.supported_events[event_type]
                message = dispatcher(event)
                logger.debug(message)
                self.baseplate.tell(message)
        self.keepalive()
        time.sleep(0.1)
    return
def _print_entity_intro(self, g=None, entity=None, first_time=True):
    """After a selection, prints on screen basic info about the ontology
    or entity, plus changes the prompt.

    2015-10-18: removed the sound
    2016-01-18: entity is the shell wrapper around the ontospy entity
    """
    if entity:
        self._clear_screen()
        obj = entity['object']
        self._print("Loaded %s: <%s>" %
                    (entity['type'].capitalize(), str(obj.uri)), "TIP")
        self._print("----------------", "TIP")
        # self._print(obj.bestDescription(), "TEXT")
        if first_time:
            self.prompt = _get_prompt(self.current['file'], self.currentEntity)
    elif g:
        self._printDescription(False)
        if first_time:
            self.prompt = _get_prompt(self.current['file'])
def cdf(arr, **kwargs):
    """
    ARGS
        arr         array to calculate cumulative distribution function
        **kwargs    Passed directly to numpy.histogram. Typical options
                    include:
                        bins = <num_bins>
                        normed = True|False

    DESC
        Determines the cumulative distribution function.
    """
    counts, bin_edges = histogram(arr, **kwargs)
    cdf = cumsum(counts)
    return cdf
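A minimal sketch of calling cdf, assuming ``histogram`` and ``cumsum`` were imported from NumPy at module level (as the bare names suggest):

    import numpy as np
    from numpy import histogram, cumsum  # names the function above relies on

    data = np.random.default_rng(0).normal(size=1000)
    counts = cdf(data, bins=20)
    # A CDF built from raw counts ends at the total sample size.
    assert counts[-1] == data.size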
def delete_alias(self, addressid, data):
    """Delete alias address"""
    return self.api_call(
        ENDPOINTS['aliases']['delete'],
        dict(addressid=addressid),
        body=data)
def login(self, email, password):
    """Login to the flightradar24 session

    The API currently uses flightradar24 as the primary data source. The
    site provides different levels of data based on user plans. For users
    who have signed up for a plan, this method allows them to log in with
    their flightradar24 credentials. The API obtains a token that will be
    passed on all subsequent requests; this obtains the data as per the
    plan limits.

    Args:
        email (str): The email ID which is used to login to flightradar24
        password (str): The password for the user ID

    Example::

        from pyflightdata import FlightData
        f = FlightData()
        f.login(myemail, mypassword)
    """
    response = FlightData.session.post(
        url=LOGIN_URL,
        data={
            'email': email,
            'password': password,
            'remember': 'true',
            'type': 'web'
        },
        headers={
            'Origin': 'https://www.flightradar24.com',
            'Referer': 'https://www.flightradar24.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'
        }
    )
    response = self._fr24.json_loads_byteified(
        response.content) if response.status_code == 200 else None
    if response:
        token = response['userData']['subscriptionKey']
        self.AUTH_TOKEN = token
def setStimReps(self):
    """Sets the reps of the StimulusModel from values pulled from this
    widget"""
    reps = self.ui.nrepsSpnbx.value()
    self.stimModel.setRepCount(reps)
def compare_zips(left, right):
    """
    Yields EVENT, ENTRY pairs describing the differences between
    left and right ZipFile instances.
    """
    ll = set(left.namelist())
    rl = set(right.namelist())

    for f in ll:
        if f in rl:
            rl.remove(f)
            if f[-1] == '/':
                # it's a directory entry
                pass
            elif _different(left, right, f):
                yield DIFF, f
            else:
                yield SAME, f
        else:
            yield LEFT, f

    for f in rl:
        yield RIGHT, f
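A usage sketch (the archive names are hypothetical; DIFF, SAME, LEFT and RIGHT are the module's event constants):

    import zipfile

    with zipfile.ZipFile('old.zip') as left, zipfile.ZipFile('new.zip') as right:
        for event, entry in compare_zips(left, right):
            if event == LEFT:
                print('only in old.zip:', entry)
            elif event == RIGHT:
                print('only in new.zip:', entry)
            elif event == DIFF:
                print('changed:', entry)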
async def wait_tasks(tasks, flatten=True):
    '''Gather a list of asynchronous tasks and wait for their completion.

    :param list tasks: A list of *asyncio* tasks wrapped in
        :func:`asyncio.ensure_future`.
    :param bool flatten: If ``True`` the returned results are flattened
        into one list if the tasks return iterable objects. The parameter
        does nothing if all the results are not iterable.
    :returns: The results of tasks as a list or as a flattened list
    '''
    rets = await asyncio.gather(*tasks)
    if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)):
        rets = list(itertools.chain(*rets))
    return rets
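A minimal sketch of driving wait_tasks (assuming asyncio and itertools are imported at module level, as the function body implies):

    import asyncio
    import itertools

    async def double(x):
        return [x, x]  # iterable result, so flattening applies

    async def main():
        tasks = [asyncio.ensure_future(double(i)) for i in range(3)]
        print(await wait_tasks(tasks))                 # [0, 0, 1, 1, 2, 2]
        tasks = [asyncio.ensure_future(double(i)) for i in range(3)]
        print(await wait_tasks(tasks, flatten=False))  # [[0, 0], [1, 1], [2, 2]]

    asyncio.run(main())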
def _timer_update(self):
    """Add some moving points to the dependency resolution text."""
    self._timer_counter += 1
    dot = self._timer_dots.pop(0)
    self._timer_dots = self._timer_dots + [dot]
    self._rows = [[_(u'Resolving dependencies') + dot, u'', u'', u'']]
    index = self.createIndex(0, 0)
    self.dataChanged.emit(index, index)
    if self._timer_counter > 150:
        self._timer.stop()
        self._timer_counter = 0
def rgba_to_rgb(color, bg='rgb(255,255,255)'):
    """
    Converts from rgba to rgb

    Parameters:
    -----------
        color : string
            Color representation in rgba
        bg : string
            Color representation in rgb

    Example:
        rgba_to_rgb('rgba(23,25,24,.4)')
    """
    def c_tup(c):
        return eval(c[c.find('('):])
    color = c_tup(color)
    bg = hex_to_rgb(normalize(bg))
    bg = c_tup(bg)
    a = color[3]
    r = [int((1 - a) * bg[i] + a * color[i]) for i in range(3)]
    return 'rgb' + str(tuple(r))
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    o = np.array(center)
    r = [np.array(c) - o for c in coords]
    r.append(r[0])
    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
    n.append(np.cross(r[1], r[0]))
    vals = []
    for i in range(len(n) - 1):
        v = -np.dot(n[i], n[i + 1]) \
            / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
        vals.append(acos(abs_cap(v)))
    phi = sum(vals)
    return phi + (3 - len(r)) * pi
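A quick sanity check. The spherical triangle spanned by the three coordinate-axis unit vectors covers one octant of the unit sphere, so its solid angle from the origin is 4*pi/8 = pi/2. The abs_cap below is a minimal stand-in for the module's helper (pymatgen-style); np, acos and pi are assumed bound as in the function above.

    import numpy as np
    from math import acos, pi

    def abs_cap(val, max_abs_val=1):
        # Clamp to [-1, 1] so floating-point noise cannot push acos()
        # out of its domain.
        return max(min(val, max_abs_val), -max_abs_val)

    omega = solid_angle([0, 0, 0], [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    assert abs(omega - pi / 2) < 1e-10  # one octant of the sphere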
def validate_json_field(dist, attr, value):
    """
    Check for json validity.
    """
    try:
        is_json_compat(value)
    except ValueError as e:
        raise DistutilsSetupError("%r %s" % (attr, e))

    return True
def validate(self):
    """Validate this object as Image API data.

    Raise IIIFInfoError with helpful message if not valid.
    """
    errors = []
    for param in self.required_params:
        if (not hasattr(self, param) or getattr(self, param) is None):
            errors.append("missing %s parameter" % (param))
    if (len(errors) > 0):
        raise IIIFInfoError("Bad data for info.json: " + ", ".join(errors))
    return True
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    best_inertia = np.infty
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
def available(name, limit=''):
    '''
    Return True if the named service is available. Use the ``limit`` param to
    restrict results to services of that type.

    CLI Examples:

    .. code-block:: bash

        salt '*' service.available sshd
        salt '*' service.available sshd limit=upstart
        salt '*' service.available sshd limit=sysvinit
    '''
    if limit == 'upstart':
        return _service_is_upstart(name)
    elif limit == 'sysvinit':
        return _service_is_sysv(name)
    else:
        return _service_is_upstart(name) or _service_is_sysv(name) \
            or _service_is_chkconfig(name)
def is_running(process):
    '''
    Check if process is running.

    Check if the given process name is running or not.

    Note:
        On a Linux system, kernel threads (like ``kthreadd`` etc.)
        are excluded.

    Args:
        process (str): The name of the process.

    Returns:
        bool: Is the process running?
    '''
    if os.name == 'nt':
        process_list = get_cmd_out(['tasklist', '/v'])
        return process in process_list
    else:
        process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
        for i in process_list.split('\n'):
            # Skip the 'COMMAND' column heading and [bracketed] entries,
            # which indicate kernel-level processes like kthreadd (the
            # manager of threads in the Linux kernel).
            if not (i == 'COMMAND' or i.startswith('[')):
                if i == process:
                    return True
                elif os.path.basename(i) == process:
                    # check i without the executable path: for example, if
                    # the 'process' argument is 'sshd' and '/usr/bin/sshd'
                    # is listed in ps, return True
                    return True
        return False
def Debugger_setScriptSource(self, scriptId, scriptSource, **kwargs):
    """
    Function path: Debugger.setScriptSource
        Domain: Debugger
        Method name: setScriptSource

        Parameters:
            Required arguments:
                'scriptId' (type: Runtime.ScriptId) -> Id of the script to edit.
                'scriptSource' (type: string) -> New content of the script.
            Optional arguments:
                'dryRun' (type: boolean) -> If true the change will not
                    actually be applied. Dry run may be used to get result
                    description without actually modifying the code.
        Returns:
            'callFrames' (type: array) -> New stack trace in case editing
                has happened while VM was stopped.
            'stackChanged' (type: boolean) -> Whether current call stack was
                modified after applying the changes.
            'asyncStackTrace' (type: Runtime.StackTrace) -> Async stack
                trace, if any.
            'exceptionDetails' (type: Runtime.ExceptionDetails) -> Exception
                details if any.

        Description: Edits JavaScript source live.
    """
    assert isinstance(scriptSource, (str,)), (
        "Argument 'scriptSource' must be of type '['str']'. "
        "Received type: '%s'" % type(scriptSource))
    if 'dryRun' in kwargs:
        assert isinstance(kwargs['dryRun'], (bool,)), (
            "Optional argument 'dryRun' must be of type '['bool']'. "
            "Received type: '%s'" % type(kwargs['dryRun']))
    expected = ['dryRun']
    passed_keys = list(kwargs.keys())
    assert all([(key in expected) for key in passed_keys]), (
        "Allowed kwargs are ['dryRun']. Passed kwargs: %s" % passed_keys)
    subdom_funcs = self.synchronous_command('Debugger.setScriptSource',
                                            scriptId=scriptId,
                                            scriptSource=scriptSource,
                                            **kwargs)
    return subdom_funcs
def _timedatectl():
    '''
    get the output of timedatectl
    '''
    ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)

    if ret['retcode'] != 0:
        msg = 'timedatectl failed: {0}'.format(ret['stderr'])
        raise CommandExecutionError(msg)

    return ret
def delete_bulk_device_enrollment(self, enrollment_identities, **kwargs):  # noqa: E501
    """Bulk delete  # noqa: E501

    With bulk delete, you can upload a `CSV` file containing a number of
    enrollment IDs to be deleted.

    **Example usage:**
    ```
    curl -X POST \\
    -H 'Authorization: Bearer <valid access token>' \\
    -F 'enrollment_identities=@/path/to/enrollments/enrollments.csv' \\
    https://api.us-east-1.mbedcloud.com/v3/device-enrollments-bulk-deletes
    ```

    **An example `CSV` file:**
    1. The first line is assumed to be the header. The content of the header is not validated.
    2. Each line can contain comma-separated values, where the first value is always assumed to be the Enrollment ID.
    3. Only one enrollment ID is expected per line.
    4. Valid enrollments begin with A followed by a - and 95 characters in the format as below.
    5. Valid enrollment identities may be enclosed within quotes.
    6. UTF-8 encoding is expected.
    ```
    "enrollment_identity"
    "A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:44:71:93:23:22:15:43:23:12",
    "A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:25:48:44:71:22:15:43:23:12",
    ```
    # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_bulk_device_enrollment(enrollment_identities, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
    :return: BulkResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs)  # noqa: E501
        return data
def is_adb_root(self):
    """True if adb is running as root for this device.
    """
    try:
        return '0' == self.adb.shell('id -u').decode('utf-8').strip()
    except adb.AdbError:
        # Wait a bit and retry to work around adb flakiness for this cmd.
        time.sleep(0.2)
        return '0' == self.adb.shell('id -u').decode('utf-8').strip()
def create(self, from_=values.unset, attributes=values.unset,
           date_created=values.unset, date_updated=values.unset,
           last_updated_by=values.unset, body=values.unset,
           media_sid=values.unset):
    """
    Create a new MessageInstance

    :param unicode from_: The identity of the new message's author
    :param unicode attributes: A valid JSON string that contains application-specific data
    :param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
    :param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
    :param unicode last_updated_by: The Identity of the User who last updated the Message
    :param unicode body: The message to send to the channel
    :param unicode media_sid: The Media Sid to be attached to the new Message

    :returns: Newly created MessageInstance
    :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
    """
    data = values.of({
        'From': from_,
        'Attributes': attributes,
        'DateCreated': serialize.iso8601_datetime(date_created),
        'DateUpdated': serialize.iso8601_datetime(date_updated),
        'LastUpdatedBy': last_updated_by,
        'Body': body,
        'MediaSid': media_sid,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return MessageInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        channel_sid=self._solution['channel_sid'],
    )
async def generate_wallet_key(config: Optional[str]) -> str:
    """
    Generate wallet master key.
    Returned key is compatible with the "RAW" key derivation method.
    It allows to avoid expensive key derivation for use cases when wallet
    keys can be stored in a secure enclave.

    :param config: (optional) key configuration json.
        {
            "seed": string, (optional) Seed that allows deterministic key
                    creation (if not set, a random one will be created).
                    Can be UTF-8, base64 or hex string.
        }
    :return: Generated wallet master key.
    """
    logger = logging.getLogger(__name__)
    logger.debug("generate_wallet_key: >>> config: %r", config)

    if not hasattr(generate_wallet_key, "cb"):
        logger.debug("generate_wallet_key: Creating callback")
        generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_config = c_char_p(config.encode('utf-8')) if config is not None else None

    key = await do_call('indy_generate_wallet_key',
                        c_config,
                        generate_wallet_key.cb)

    res = key.decode()

    logger.debug("generate_wallet_key: <<< res: %r", res)
    return res
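A small driver sketch, assuming the surrounding python3-indy bindings (do_call, create_cb) are importable in the same module; the seed value is made up:

    import asyncio
    import json

    async def main():
        # Deterministic key derived from a caller-chosen 32-byte seed.
        key = await generate_wallet_key(json.dumps({"seed": "0" * 32}))
        print("RAW wallet key:", key)

    asyncio.run(main())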
def write(self, chunk):
    """WSGI callable to write unbuffered data to the client.

    This method is also used internally by start_response (to write
    data from the iterable returned by the WSGI application).
    """
    if not self.started_response:
        raise AssertionError('WSGI write called before start_response.')

    chunklen = len(chunk)
    rbo = self.remaining_bytes_out
    if rbo is not None and chunklen > rbo:
        if not self.req.sent_headers:
            # Whew. We can send a 500 to the client.
            self.req.simple_response(
                '500 Internal Server Error',
                'The requested resource returned more bytes than the '
                'declared Content-Length.',
            )
        else:
            # Dang. We have probably already sent data. Truncate the chunk
            # to fit (so the client doesn't hang) and raise an error later.
            chunk = chunk[:rbo]

    self.req.ensure_headers_sent()
    self.req.write(chunk)

    if rbo is not None:
        rbo -= chunklen
        if rbo < 0:
            raise ValueError(
                'Response body exceeds the declared Content-Length.',
            )
def _extract_axes_for_slice(self, axes):
    """
    Return the slice dictionary for these axes.
    """
    return {self._AXIS_SLICEMAP[i]: a
            for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
                            axes)}
def append(self, filename_in_zip, file_contents):
    '''
    Appends a file with name filename_in_zip and contents of
    file_contents to the in-memory zip.
    '''
    # Set the file pointer to the end of the file
    self.in_memory_zip.seek(-1, io.SEEK_END)
    # Get a handle to the in-memory zip in append mode
    zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)

    # Write the file to the in-memory zip
    zf.writestr(filename_in_zip, file_contents)

    # Mark the files as having been created on Windows so that
    # Unix permissions are not inferred as 0000
    for zfile in zf.filelist:
        zfile.create_system = 0

    # Close the ZipFile
    zf.close()

    # Rewind the file
    self.in_memory_zip.seek(0)
    return self
def randomize_colors(im, keep_vals=[0]):
    r'''
    Takes a greyscale image and randomly shuffles the greyscale values,
    so that all voxels labeled X will be labelled Y, and all voxels labeled
    Y will be labeled Z, where X, Y, Z and so on are randomly selected from
    the values in the input image.

    This function is useful for improving the visibility of images with
    neighboring regions that are only incrementally different from each
    other, such as that returned by `scipy.ndimage.label`.

    Parameters
    ----------
    im : array_like
        An ND image of greyscale values.

    keep_vals : array_like
        Indicate which voxel values should NOT be altered. The default is
        `[0]` which is useful for leaving the background of the image
        untouched.

    Returns
    -------
    image : ND-array
        An image the same size and type as ``im`` but with the greyscale
        values reassigned. The unique values in both the input and output
        images will be identical.

    Notes
    -----
    If the greyscale values in the input image are not contiguous then
    neither will they be in the output.

    Examples
    --------
    >>> import porespy as ps
    >>> import scipy as sp
    >>> sp.random.seed(0)
    >>> im = sp.random.randint(low=0, high=5, size=[4, 4])
    >>> print(im)
    [[4 0 3 3]
     [3 1 3 2]
     [4 0 0 4]
     [2 1 0 1]]
    >>> im_rand = ps.tools.randomize_colors(im)
    >>> print(im_rand)
    [[2 0 4 4]
     [4 1 4 3]
     [2 0 0 2]
     [3 1 0 1]]

    As can be seen, the 2's have become 3, 3's have become 4, and 4's have
    become 2. 1's remained 1 by random accident. 0's remain zeros by
    default, but this can be controlled using the `keep_vals` argument.
    '''
    im_flat = im.flatten()
    keep_vals = sp.array(keep_vals)
    swap_vals = ~sp.in1d(im_flat, keep_vals)
    im_vals = sp.unique(im_flat[swap_vals])
    new_vals = sp.random.permutation(im_vals)
    im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
    im_map[im_vals] = new_vals
    im_new = im_map[im_flat]
    im_new = sp.reshape(im_new, newshape=sp.shape(im))
    return im_new
def current():
    """
    Returns the current environment manager for the projex system.

    :return     <EnvManager>
    """
    if not EnvManager._current:
        path = os.environ.get('PROJEX_ENVMGR_PATH')
        module = os.environ.get('PROJEX_ENVMGR_MODULE')
        clsname = os.environ.get('PROJEX_ENVMGR_CLASS')
        cls = EnvManager

        if module and clsname:
            # check if the user specified an import path
            if path:
                logger.info('Adding env manager path: %s' % path)
                sys.path.insert(0, path)

            logger.info('Loading env manager: %s.%s' % (module, clsname))

            try:
                __import__(module)
                mod = sys.modules[module]
                cls = getattr(mod, clsname)
            except ImportError:
                logger.error('Could not import env manager %s', module)
            except KeyError:
                logger.error('Could not import env manager %s', module)
            except AttributeError:
                msg = '%s is not a valid class of %s' % (clsname, module)
                logger.error(msg)

        EnvManager._current = cls()
    return EnvManager._current
def count(self):
    """ Compute count of group, excluding missing values """
    ids, _, ngroups = self.grouper.group_info
    val = self.obj.get_values()

    mask = (ids != -1) & ~isna(val)
    ids = ensure_platform_int(ids)
    minlength = ngroups or 0
    out = np.bincount(ids[mask], minlength=minlength)

    return Series(out,
                  index=self.grouper.result_index,
                  name=self._selection_name,
                  dtype='int64')
def get_firmware(self):
    """Get the current firmware version."""
    self.get_status()
    try:
        self.firmware = self.data['fw_version']
    except TypeError:
        self.firmware = 'Unknown'
    return self.firmware
def get_num_processors():
    """
    Return number of online processor cores.
    """
    # try different strategies and use first one that succeeds
    try:
        return os.cpu_count()  # Py3 only
    except AttributeError:
        pass
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except ImportError:
        # no multiprocessing?
        pass
    except NotImplementedError:
        # multiprocessing cannot determine CPU count
        pass
    try:
        # CalledProcessError must be imported too, or the except clause
        # below would raise NameError; check ImportError first so a
        # failed import never reaches the CalledProcessError clause.
        from subprocess32 import check_output, CalledProcessError
        ncpus = check_output('nproc')
        return int(ncpus)
    except ImportError:
        # no subprocess32?
        pass
    except CalledProcessError:
        # no `/usr/bin/nproc`
        pass
    except (ValueError, TypeError):
        # unexpected output from `nproc`
        pass
    try:
        from subprocess import check_output, CalledProcessError
        ncpus = check_output('nproc')
        return int(ncpus)
    except ImportError:
        # no subprocess.check_output (Py 2.6)
        pass
    except CalledProcessError:
        # no `/usr/bin/nproc`
        pass
    except (ValueError, TypeError):
        # unexpected output from `nproc`
        pass
    raise RuntimeError("Cannot determine number of processors")
def base36encode(number):
    """Converts an integer into a base36 string."""

    ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"

    base36 = ''
    sign = ''

    if number < 0:
        sign = '-'
        number = -number

    if 0 <= number < len(ALPHABET):
        return sign + ALPHABET[number]

    while number != 0:
        number, i = divmod(number, len(ALPHABET))
        base36 = ALPHABET[i] + base36

    return sign + base36
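A quick round-trip check; decoding needs no helper because Python's built-in int() already parses base 36:

    assert base36encode(0) == '0'
    assert base36encode(35) == 'z'
    assert base36encode(36) == '10'
    assert base36encode(-1296) == '-100'  # -(36**2)
    # Decoding is the built-in int() parser:
    assert int(base36encode(123456789), 36) == 123456789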
def return_tip(self, home_after=True):
    """
    Drop the pipette's current tip to its originating tip rack

    Notes
    -----
    This method requires one or more tip-rack :any:`Container`
    to be in this Pipette's `tip_racks` list (see :any:`Pipette`)

    Returns
    -------
    This instance of :class:`Pipette`.

    Examples
    --------
    ..
    >>> from opentrons import instruments, labware, robot # doctest: +SKIP
    >>> robot.reset() # doctest: +SKIP
    >>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP
    >>> p300 = instruments.P300_Single(mount='left',
    ...     tip_racks=[tiprack, tiprack2]) # doctest: +SKIP
    >>> p300.pick_up_tip() # doctest: +SKIP
    >>> p300.aspirate(50, plate[0]) # doctest: +SKIP
    >>> p300.dispense(plate[1]) # doctest: +SKIP
    >>> p300.return_tip() # doctest: +SKIP
    """
    if not self.tip_attached:
        log.warning("Cannot return tip without tip attached.")
    if not self.current_tip():
        self.robot.add_warning(
            'Pipette has no tip to return, dropping in place')

    self.drop_tip(self.current_tip(), home_after=home_after)
    return self
def advantage_as_result(self, move, val_scheme):
    """
    Calculates advantage after move is played

    :type move: Move
    :type val_scheme: PieceValues
    :rtype: double
    """
    test_board = cp(self)
    test_board.update(move)
    return test_board.material_advantage(move.color, val_scheme)
def render_POST(self, request):
    """
    Responds to events and starts the build process;
    different implementations can decide on what methods they will accept

    :arguments:
        request
            the http request object
    """
    try:
        d = self.getAndSubmitChanges(request)
    except Exception:
        d = defer.fail()

    def ok(_):
        request.setResponseCode(202)
        request.finish()

    def err(why):
        code = 500
        if why.check(ValueError):
            code = 400
            msg = unicode2bytes(why.getErrorMessage())
        else:
            log.err(why, "adding changes from web hook")
            msg = b'Error processing changes.'
        request.setResponseCode(code, msg)
        request.write(msg)
        request.finish()

    d.addCallbacks(ok, err)
    return server.NOT_DONE_YET
def _get_client():
    '''
    Return a cloud client
    '''
    client = salt.cloud.CloudClient(
        os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
        pillars=copy.deepcopy(__pillar__.get('cloud', {}))
    )
    return client
def parse_multipart_form(body, boundary):
    '''Parse a request body and return fields and files

    :param body: bytes request body
    :param boundary: bytes multipart boundary
    :return: fields (RequestParameters), files (RequestParameters)
    '''
    files = RequestParameters()
    fields = RequestParameters()

    form_parts = body.split(boundary)
    for form_part in form_parts[1:-1]:
        file_name = None
        file_type = None
        field_name = None
        line_index = 2
        line_end_index = 0
        while not line_end_index == -1:
            line_end_index = form_part.find(b'\r\n', line_index)
            form_line = form_part[line_index:line_end_index].decode('utf-8')
            line_index = line_end_index + 2

            if not form_line:
                break

            colon_index = form_line.index(':')
            form_header_field = form_line[0:colon_index].lower()
            form_header_value, form_parameters = parse_header(
                form_line[colon_index + 2:])

            if form_header_field == 'content-disposition':
                if 'filename' in form_parameters:
                    file_name = form_parameters['filename']
                field_name = form_parameters.get('name')
            elif form_header_field == 'content-type':
                file_type = form_header_value

        post_data = form_part[line_index:-4]
        if file_name or file_type:
            file = File(type=file_type, name=file_name, body=post_data)
            if field_name in files:
                files[field_name].append(file)
            else:
                files[field_name] = [file]
        else:
            value = post_data.decode('utf-8')
            if field_name in fields:
                fields[field_name].append(value)
            else:
                fields[field_name] = [value]

    return fields, files
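A sketch with a hand-built body. Note that the boundary argument excludes the leading dashes, which is why each split part ends with the four bytes b'\r\n--' that the function strips; RequestParameters stores a list of values per field name.

    boundary = b'myboundary'
    body = (b'--myboundary\r\n'
            b'Content-Disposition: form-data; name="greeting"\r\n'
            b'\r\n'
            b'hello\r\n'
            b'--myboundary--\r\n')
    fields, files = parse_multipart_form(body, boundary)
    print(fields['greeting'])  # ['hello']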
def _get_class_name(error_code):
    """
    Gets the corresponding class name for the given error code,
    this either being an integer (thus base error name) or str.
    """
    if isinstance(error_code, int):
        return KNOWN_BASE_CLASSES.get(
            error_code,
            'RPCError' + str(error_code).replace('-', 'Neg')
        )

    return snake_to_camel_case(
        error_code.replace('FIRSTNAME', 'FIRST_NAME').lower(),
        suffix='Error')
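For intuition, here is the function exercised against minimal stand-ins for the module-level helpers it references (the real KNOWN_BASE_CLASSES and snake_to_camel_case live elsewhere in the module; these values are hypothetical):

    # Minimal stand-ins for the module's helpers:
    KNOWN_BASE_CLASSES = {400: 'BadRequestError'}

    def snake_to_camel_case(name, suffix=''):
        return ''.join(part.title() for part in name.split('_')) + suffix

    print(_get_class_name(400))           # BadRequestError (known base class)
    print(_get_class_name(-503))          # RPCErrorNeg503 (generated fallback)
    print(_get_class_name('FLOOD_WAIT'))  # FloodWaitError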
def GetFileSystem(self, path_spec):
    """Retrieves a file system object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      FileSystem: a file system object or None if not cached.
    """
    identifier = self._GetFileSystemCacheIdentifier(path_spec)
    return self._file_system_cache.GetObject(identifier)
def distributions_for_instances(self, data):
    """
    Performs predictions, returning the class distributions.

    :param data: the Instances to get the class distributions for
    :type data: Instances
    :return: the class distribution matrix, None if not a batch predictor
    :rtype: ndarray
    """
    if self.is_batchpredictor:
        return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
    else:
        return None
def purge(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
    '''
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any emerge commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).

    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html

    Portage does not have a purge; this function calls remove followed
    by depclean to emulate a purge process.

    name
        The name of the package to be deleted.

    slot
        Restrict the remove to a specific slot. Ignored if name is None.

    fromrepo
        Restrict the remove to a specific repository. Ignored if ``name``
        is None.

    Multiple Package Options:

    pkgs
        Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are
        ignored if this argument is present. Must be passed as a python list.

    .. versionadded:: 0.16.0

    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.purge <package name>
        salt '*' pkg.purge <package name> slot=4.4
        salt '*' pkg.purge <package1>,<package2>,<package3>
        salt '*' pkg.purge pkgs='["foo", "bar"]'
    '''
    ret = remove(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs)
    ret.update(depclean(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs))
    return ret
def _change_kind(self, post_uid):
    '''
    Modify the category and kind of the post.
    '''
    post_data = self.get_post_data()
    logger.info('admin post update: {0}'.format(post_data))

    MPost.update_misc(post_uid, kind=post_data['kcat'])
    # self.update_category(post_uid)
    update_category(post_uid, post_data)
    self.redirect('/{0}/{1}'.format(router_post[post_data['kcat']], post_uid))
def success(item):
    '''Successful finish'''
    try:
        # mv to done
        trg_queue = item.queue
        os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
                  os.path.join(fsq_path.done(trg_queue, host=item.host),
                               item.id))
    except AttributeError, e:
        # DuckType TypeError'ing
        raise TypeError(u'item must be an FSQWorkItem, not:'
                        u' {0}'.format(item.__class__.__name__))
    except (OSError, IOError, ), e:
        raise FSQDoneError(e.errno, u'cannot mv item to done: {0}:'
                           u' {1}'.format(item.id, wrap_io_os_err(e)))
def tzname(self, dt):
    """datetime -> string name of time zone."""
    tt = _localtime(_mktime((dt.year, dt.month, dt.day,
                             dt.hour, dt.minute, dt.second,
                             dt.weekday(), 0, -1)))
    return _time.tzname[tt.tm_isdst > 0]
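The leading underscores suggest stdlib time-module aliases, as in CPython's documented local-timezone tzinfo examples; with those in place the method can be exercised as a plain function (self is unused):

    import time as _time
    from time import localtime as _localtime, mktime as _mktime
    from datetime import datetime

    print(tzname(None, datetime(2024, 1, 15)))  # e.g. 'EST' on a US/Eastern host
    print(tzname(None, datetime(2024, 7, 15)))  # e.g. 'EDT' when DST applies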
def encode_refresh_token(identity, secret, algorithm, expires_delta,
                         user_claims, csrf, identity_claim_key,
                         user_claims_key, json_encoder=None):
    """
    Creates a new encoded (utf-8) refresh token.

    :param identity: Some identifier used to identify the owner of this token
    :param secret: Secret key to encode the JWT with
    :param algorithm: Which algorithm to use for the token
    :param expires_delta: How far in the future this token should expire
                          (set to False to disable expiration)
    :type expires_delta: datetime.timedelta or False
    :param user_claims: Custom claims to include in this token. This data
                        must be json serializable
    :param csrf: Whether to include a csrf double submit claim in this
                 token (boolean)
    :param identity_claim_key: Which key should be used to store the identity
    :param user_claims_key: Which key should be used to store the user claims
    :return: Encoded refresh token
    """
    token_data = {
        identity_claim_key: identity,
        'type': 'refresh',
    }

    # Don't add extra data to the token if user_claims is empty.
    if user_claims:
        token_data[user_claims_key] = user_claims

    if csrf:
        token_data['csrf'] = _create_csrf_token()

    return _encode_jwt(token_data, expires_delta, secret, algorithm,
                       json_encoder=json_encoder)
def calc_timestep_statistic(self, statistic, time):
    """
    Calculate statistics from the primary attribute of the StObject.

    Args:
        statistic: statistic being calculated
        time: Timestep being investigated

    Returns:
        Value of the statistic
    """
    ti = np.where(self.times == time)[0][0]
    ma = np.where(self.masks[ti].ravel() == 1)
    if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
        stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
    elif statistic == 'median':
        stat_val = np.median(self.timesteps[ti].ravel()[ma])
    elif 'percentile' in statistic:
        per = int(statistic.split("_")[1])
        stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)
    elif 'dt' in statistic:
        stat_name = statistic[:-3]
        if ti == 0:
            stat_val = 0
        else:
            stat_val = self.calc_timestep_statistic(stat_name, time) - \
                self.calc_timestep_statistic(stat_name, time - 1)
    else:
        stat_val = np.nan
    return stat_val
def ObjectTransitionedEventHandler(obj, event):
    """Object has been transitioned to a new state
    """
    # only snapshot supported objects
    if not supports_snapshots(obj):
        return

    # default transition entry
    entry = {
        "modified": DateTime().ISO(),
        "action": event.action,
    }

    # get the last history item
    history = api.get_review_history(obj, rev=True)
    if history:
        entry = history[0]

    # make transitions also a modification entry
    timestamp = entry.pop("time", DateTime())
    entry["modified"] = timestamp.ISO()
    entry["action"] = event.action

    # take a new snapshot
    take_snapshot(obj, **entry)

    # reindex the object in the auditlog catalog
    reindex_object(obj)
def _GetUtf8Contents(self, file_name):
    """Check for errors in file_name and return a string for csv reader."""
    contents = self._FileContents(file_name)
    if not contents:  # Missing file
        return

    # Check for errors that will prevent csv.reader from working
    if len(contents) >= 2 and contents[0:2] in (codecs.BOM_UTF16_BE,
                                                codecs.BOM_UTF16_LE):
        self._problems.FileFormat("appears to be encoded in utf-16",
                                  (file_name, ))
        # Convert and continue, so we can find more errors
        contents = codecs.getdecoder('utf-16')(contents)[0].encode('utf-8')

    null_index = contents.find('\0')
    if null_index != -1:
        # It is easier to get some surrounding text than calculate the
        # exact row_num
        m = re.search(r'.{,20}\0.{,20}', contents, re.DOTALL)
        self._problems.FileFormat(
            "contains a null in text \"%s\" at byte %d" %
            (codecs.getencoder('string_escape')(m.group()), null_index + 1),
            (file_name, ))
        return

    # strip out any UTF-8 Byte Order Marker (otherwise it'll be
    # treated as part of the first column name, causing a mis-parse)
    contents = contents.lstrip(codecs.BOM_UTF8)
    return contents
def ontologyClassTree(self):
    """
    Returns a dict representing the ontology tree.
    Top level = {0: [top classes]}
    Multi inheritance is represented explicitly.
    """
    treedict = {}
    if self.all_classes:
        treedict[0] = self.toplayer_classes
        for element in self.all_classes:
            if element.children():
                treedict[element] = element.children()
        return treedict
    return treedict
def set_flag(self, info, code=None):
    """Flag this instance for investigation."""
    self.flag = True
    self.flag_info += info
    if code is not None:
        self.flag_code = code
def get_os_file_names(files):
    """
    Returns file names.

    :param files: list of strings and/or :class:`file_configuration_t`
        instances.
    :type files: list
    """
    fnames = []
    for f in files:
        if utils.is_str(f):
            fnames.append(f)
        elif isinstance(f, file_configuration_t):
            if f.content_type in (
                    file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE,
                    file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE):
                fnames.append(f.data)
        else:
            pass
    return fnames
def adjust_text(x, y, texts, ax=None, expand_text=(1.2, 1.2),
                expand_points=(1.2, 1.2), autoalign=True, va='center',
                ha='center', force_text=1., force_points=1.,
                lim=100, precision=0, only_move={}, text_from_text=True,
                text_from_points=True, save_steps=False, save_prefix='',
                save_format='png', add_step_numbers=True, draggable=True,
                repel_from_axes=False, min_arrow_sep=0.0, *args, **kwargs):
    """
    Iteratively adjusts the locations of texts. First moves all texts that
    are outside the axes limits inside. Then in each iteration moves all
    texts away from each other and from points. In the end hides texts and
    substitutes them with annotations to link them to the respective points.

    Args:
        x (seq): x-coordinates of labelled points
        y (seq): y-coordinates of labelled points
        texts (list): a list of text.Text objects to adjust
        ax (obj): axes object with the plot; if not provided is determined
            by plt.gca()
        expand_text (seq): a tuple/list/... with 2 numbers (x, y) to expand
            texts when repelling them from each other; default (1.2, 1.2)
        expand_points (seq): a tuple/list/... with 2 numbers (x, y) to
            expand texts when repelling them from points; default (1.2, 1.2)
        autoalign (bool): If True, the best alignment of all texts will be
            determined automatically before running the iterative
            adjustment; if 'x' will only align horizontally, if 'y' -
            vertically; overrides va and ha
        va (str): vertical alignment of texts
        ha (str): horizontal alignment of texts
        force_text (float): the repel force from texts is multiplied by
            this value; default 1.0
        force_points (float): the repel force from points is multiplied by
            this value; default 1.0
        lim (int): limit of number of iterations
        precision (float): up to which sum of all overlaps along both x and
            y to iterate; may need to increase for complicated situations;
            default 0, so no overlaps with anything.
        only_move (dict): a dict to restrict movement of texts to only
            certain axes. Valid keys are 'points' and 'text'; for each of
            them valid values are 'x', 'y' and 'xy'. This way you can
            forbid moving texts along either of the axes due to overlaps
            with points, but let it happen if there is an overlap with
            texts: only_move={'points': 'y', 'text': 'xy'}. Default: None,
            so everything is allowed.
        text_from_text (bool): whether to repel texts from each other;
            default True
        text_from_points (bool): whether to repel texts from points;
            default True; can be helpful to switch off in extremely
            crowded plots
        save_steps (bool): whether to save intermediate steps as images;
            default False
        save_prefix (str): a path and/or prefix to the saved steps;
            default ''
        save_format (str): a format to save the steps into; default 'png'
        *args and **kwargs: any arguments will be fed into plt.annotate
            after all the optimization is done just for plotting
        add_step_numbers (bool): whether to add step numbers as titles to
            the images of saving steps
        draggable (bool): whether to make the annotations draggable;
            default True
    """
    if ax is None:
        ax = plt.gca()
    r = ax.get_figure().canvas.get_renderer()
    orig_xy = [text.get_position() for text in texts]
    orig_x = [xy[0] for xy in orig_xy]
    orig_y = [xy[1] for xy in orig_xy]
    for text in texts:
        text.set_va(va)
        text.set_ha(ha)
    if save_steps:
        if add_step_numbers:
            plt.title('0a')
        plt.savefig(save_prefix + '0a.' + save_format, format=save_format)
    if autoalign:
        if autoalign is not True:
            texts = optimally_align_text(x, y, texts, direction=autoalign,
                                         expand=expand_points, renderer=r,
                                         ax=ax)
        else:
            texts = optimally_align_text(orig_x, orig_y, texts,
                                         expand=expand_points, renderer=r,
                                         ax=ax)
    if save_steps:
        if add_step_numbers:
            plt.title('0b')
        plt.savefig(save_prefix + '0b.' + save_format, format=save_format)
    if repel_from_axes is True:
        texts = repel_text_from_axes(texts, ax, renderer=r,
                                     expand=expand_points)
    history = [np.inf] * 5
    for i in xrange(lim):
        q1, q2 = np.inf, np.inf
        if text_from_text:
            d_x_text, d_y_text, q1 = repel_text(texts, renderer=r, ax=ax,
                                                expand=expand_text)
        else:
            d_x_text, d_y_text, q1 = [0] * len(texts), [0] * len(texts), 0
        if text_from_points:
            d_x_points, d_y_points, q2 = repel_text_from_points(
                x, y, texts, ax=ax, renderer=r, expand=expand_points)
        else:
            d_x_points, d_y_points, q2 = [0] * len(texts), [0] * len(texts), 0
        if only_move:
            if 'text' in only_move:
                if 'x' not in only_move['text']:
                    d_x_text = np.zeros_like(d_x_text)
                if 'y' not in only_move['text']:
                    d_y_text = np.zeros_like(d_y_text)
            if 'points' in only_move:
                if 'x' not in only_move['points']:
                    d_x_points = np.zeros_like(d_x_points)
                if 'y' not in only_move['points']:
                    d_y_points = np.zeros_like(d_y_points)
        dx = np.array(d_x_text) + np.array(d_x_points)
        dy = np.array(d_y_text) + np.array(d_y_points)
        q = round(np.sum(np.array([q1, q2])[np.array([q1, q2]) < np.inf]), 5)
        if q > precision and q < np.max(history):
            history.pop(0)
            history.append(q)
            move_texts(texts, dx * force_text, dy * force_points,
                       bboxes=get_bboxes(texts, r, (1, 1)), ax=ax)
            if save_steps:
                if add_step_numbers:
                    plt.title(i + 1)
                plt.savefig(save_prefix + str(i + 1) + '.' + save_format,
                            format=save_format)
        else:
            break
    bboxes = get_bboxes(texts, r, (1, 1))
    originLW = kwargs["arrowprops"]["lw"]
    for j, text in enumerate(texts):
        cx, cy = get_midpoint(bboxes[j])
        one = (orig_xy[j][0] - cx) ** 2
        two = (orig_xy[j][1] - cy) ** 2
        sep = (one + two) ** 0.5
        print text.get_text(), sep
        try:
            # Suppress the arrow when the label ended up close to its point.
            if sep < min_arrow_sep:
                kwargs["arrowprops"]["lw"] = 0.
            else:
                kwargs["arrowprops"]["lw"] = originLW
        except Exception, e:
            print e
        a = ax.annotate(text.get_text(), xy=(orig_xy[j]),
                        xytext=text.get_position(), *args, **kwargs)
        a.__dict__.update(text.__dict__)
        if draggable:
            a.draggable()
        texts[j].remove()
    if save_steps:
        if add_step_numbers:
            plt.title(i + 1)
        plt.savefig(save_prefix + str(i + 1) + '.' + save_format,
                    format=save_format)
Iteratively adjusts the locations of texts. First moves all texts that are outside the axes limits inside. Then in each iteration moves all texts away from each other and from points. In the end hides texts and substitutes them with annotations to link them to the respective points. Args: x (seq): x-coordinates of labelled points y (seq): y-coordinates of labelled points texts (list): a list of text.Text objects to adjust ax (obj): axes object with the plot; if not provided is determined by plt.gca() expand_text (seq): a tuple/list/... with 2 numbers (x, y) to expand texts when repelling them from each other; default (1.2, 1.2) expand_points (seq): a tuple/list/... with 2 numbers (x, y) to expand texts when repelling them from points; default (1.2, 1.2) autoalign (bool): If True, the best alignment of all texts will be determined automatically before running the iterative adjustment; if 'x' will only align horizontally, if 'y' - vertically; overrides va and ha va (str): vertical alignment of texts ha (str): horizontal alignment of texts force_text (float): the repel force from texts is multiplied by this value; default 1.0 force_points (float): the repel force from points is multiplied by this value; default 1.0 lim (int): limit of number of iterations precision (float): up to which sum of all overlaps along both x and y to iterate; may need to increase for complicated situations; default 0, so no overlaps with anything. only_move (dict): a dict to restrict movement of texts to only certain axes. Valid keys are 'points' and 'text', for each of them valid values are 'x', 'y' and 'xy'. This way you can forbid moving texts along either of the axes due to overlaps with points, but let it happen if there is an overlap with texts: only_move={'points':'y', 'text':'xy'}. Default: None, so everything is allowed. text_from_text (bool): whether to repel texts from each other; default True text_from_points (bool): whether to repel texts from points; default True; can be helpful to switch off in extremely crowded plots save_steps (bool): whether to save intermediate steps as images; default False save_prefix (str): a path and/or prefix to the saved steps; default '' save_format (str): a format to save the steps into; default 'png' *args and **kwargs: any arguments will be fed into plt.annotate after all the optimization is done just for plotting add_step_numbers (bool): whether to add step numbers as titles to the images of the saved steps draggable (bool): whether to make the annotations draggable; default True repel_from_axes (bool): whether to repel texts from the axes edges before iterating; default False min_arrow_sep (float): minimum distance between a text and its point below which the connecting arrow is hidden; default 0.0
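A minimal usage sketch for adjust_text above; the data, labels, and arrow styling are illustrative only, and the helpers it calls (optimally_align_text, repel_text, move_texts, etc.) are assumed to live in the same module:

import numpy as np
import matplotlib.pyplot as plt

np.random.seed(0)
x = np.random.rand(20)
y = np.random.rand(20)

fig, ax = plt.subplots()
ax.scatter(x, y)
# One Text object per point; adjust_text then untangles the overlaps
texts = [ax.text(xi, yi, 'point %d' % i)
         for i, (xi, yi) in enumerate(zip(x, y))]
adjust_text(x, y, texts, ax=ax,
            arrowprops=dict(arrowstyle='->', color='gray', lw=0.5))
plt.show()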
def buildlist(self, category=tpb.CATEGORIES.VIDEO.TV_SHOWS, limit=1000):
    """
    Build the torrent list
    Return a list of (title, seeders, id) tuples
    The id can be used to retrieve the torrent associated with it
    [[<title>, <Seeders>, <id>] ...]
    """
    try:
        s = self.source.search(self.title.lower(), limit)
    except Exception as e:
        logging.error("Can not send search request to the t411 server")
        logging.error(e)
        sys.exit(1)
    try:
        for t in s.items():
            pass
    except Exception:
        logging.error("t411 server returned an invalid result")
        sys.exit(1)
    torrentlist = []
    for torrent in s['torrents']:
        if isinstance(torrent, dict):
            # logging.debug("Compare regex to: %s" % torrent['name'].lower())
            if (re.search(self.regexp, torrent['name'].lower()) and
                    (int(torrent['seeders']) >= self.seeders_min)):
                # logging.debug("Matched")
                torrentlist.append((torrent['name'], torrent['seeders'],
                                    torrent['id']))
    logging.debug("Found %d matching items " % (len(torrentlist)))

    # Return the list
    return torrentlist
Build the torrent list Return a list of (title, seeders, id) tuples The id can be used to retrieve the torrent associated with it [[<title>, <Seeders>, <id>] ...]
def entities(self, tc_data, resource_type):
    """
    Yields an entity. Takes either a list of indicators/groups or an
    individual indicator/group response.

    example formats:
    {
        "status":"Success",
        "data":{
            "resultCount":984240,
            "address":[
                {
                    "id":4222035,
                    "ownerName":"System",
                    "dateAdded":"2019-03-28T10:32:05-04:00",
                    "lastModified":"2019-03-28T11:02:46-04:00",
                    "rating":4,
                    "confidence":90,
                    "threatAssessRating":4,
                    "threatAssessConfidence":90,
                    "webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
                    "ip":"221.123.32.14"
                },
                {
                    "id":4221517,
                    "ownerName":"System",
                    "dateAdded":"2018-11-05T14:24:54-05:00",
                    "lastModified":"2019-03-07T12:38:36-05:00",
                    "threatAssessRating":0,
                    "threatAssessConfidence":0,
                    "webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.12",
                    "ip":"221.123.32.12"
                }
            ]
        }
    }
    or:
    {
        "status": "Success",
        "data": {
            "address": {
                "id": 4222035,
                "owner": {
                    "id": 1,
                    "name": "System",
                    "type": "Organization"
                },
                "dateAdded": "2019-03-28T10:32:05-04:00",
                "lastModified": "2019-03-28T11:02:46-04:00",
                "rating": 4,
                "confidence": 90,
                "threatAssessRating": 4,
                "threatAssessConfidence": 90,
                "webLink": "{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
                "ip": "221.123.32.14"
            }
        }
    }

    Args:
        tc_data: a single API response dict, or a list of them.
        resource_type: the ThreatConnect resource type of the data.

    Yields:
        dict: the next entity built from the response data.
    """
    if not isinstance(tc_data, list):
        tc_data = [tc_data]

    for d in tc_data:
        entity = {'id': d.get('id'), 'webLink': d.get('webLink')}
        values = []
        value = None
        if resource_type in self.tcex.group_types:
            r = self.tcex.ti.group(group_type=resource_type, name=d.get('name'))
            value = d.get('name')
        elif resource_type in self.tcex.indicator_types:
            r = self.tcex.ti.indicator(indicator_type=resource_type)
            r._set_unique_id(d)
            value = r.unique_id
        elif resource_type.lower() in ['victim']:
            r = self.tcex.ti.victim(d.get('name'))
            value = d.get('name')
        else:
            self.tcex.handle_error(925, ['type', 'entities', 'type', 'type', resource_type])

        if 'summary' in d:
            values.append(d.get('summary'))
        else:
            values.append(value)
        entity['value'] = ' : '.join(values)

        if r.is_group() or r.is_indicator():
            if 'owner' in d:
                entity['ownerName'] = d['owner']['name']
            else:
                entity['ownerName'] = d.get('ownerName')
            entity['dateAdded'] = d.get('dateAdded')
        if r.is_victim():
            entity['ownerName'] = d.get('org')
        if r.is_indicator():
            entity['confidence'] = d.get('confidence')
            entity['rating'] = d.get('rating')
            entity['threatAssessConfidence'] = d.get('threatAssessConfidence')
            entity['threatAssessRating'] = d.get('threatAssessRating')
            entity['dateLastModified'] = d.get('lastModified')

        # type
        if d.get('type') is not None:
            entity['type'] = d.get('type')
        else:
            entity['type'] = resource_type

        yield entity
Yields an entity. Takes either a list of indicators/groups or an individual indicator/group response. example formats: { "status":"Success", "data":{ "resultCount":984240, "address":[ { "id":4222035, "ownerName":"System", "dateAdded":"2019-03-28T10:32:05-04:00", "lastModified":"2019-03-28T11:02:46-04:00", "rating":4, "confidence":90, "threatAssessRating":4, "threatAssessConfidence":90, "webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.14", "ip":"221.123.32.14" }, { "id":4221517, "ownerName":"System", "dateAdded":"2018-11-05T14:24:54-05:00", "lastModified":"2019-03-07T12:38:36-05:00", "threatAssessRating":0, "threatAssessConfidence":0, "webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.12", "ip":"221.123.32.12" } ] } } or: { "status": "Success", "data": { "address": { "id": 4222035, "owner": { "id": 1, "name": "System", "type": "Organization" }, "dateAdded": "2019-03-28T10:32:05-04:00", "lastModified": "2019-03-28T11:02:46-04:00", "rating": 4, "confidence": 90, "threatAssessRating": 4, "threatAssessConfidence": 90, "webLink": "{host}/auth/indicators/details/address.xhtml?address=221.123.32.14", "ip": "221.123.32.14" } } } Args: tc_data: a single API response dict, or a list of them. resource_type: the ThreatConnect resource type of the data. Yields: dict: the next entity built from the response data.
def open_outside_spyder(self, fnames):
    """Open file outside Spyder with the appropriate application
    If this fails, the file is opened inside Spyder as a text file"""
    for path in sorted(fnames):
        path = file_uri(path)
        ok = programs.start_file(path)
        if not ok:
            self.sig_edit.emit(path)
Open file outside Spyder with the appropriate application If this fails, the file is opened inside Spyder as a text file
def bodn2c(name):
    """
    Translate the name of a body or object to the corresponding SPICE
    integer ID code.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodn2c_c.html

    :param name: Body name to be translated into a SPICE ID code.
    :type name: str
    :return: SPICE integer ID code for the named body, and a flag
             indicating whether the translation succeeded.
    :rtype: tuple
    """
    name = stypes.stringToCharP(name)
    code = ctypes.c_int(0)
    found = ctypes.c_int(0)
    libspice.bodn2c_c(name, ctypes.byref(code), ctypes.byref(found))
    return code.value, bool(found.value)
Translate the name of a body or object to the corresponding SPICE integer ID code. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodn2c_c.html :param name: Body name to be translated into a SPICE ID code. :type name: str :return: SPICE integer ID code for the named body, and a flag indicating whether the translation succeeded. :rtype: tuple
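A short usage sketch; 'EARTH' is a body name built into SPICE itself, so the lookup needs no extra kernels:

code, found = bodn2c('EARTH')
if found:
    print('NAIF ID for EARTH:', code)  # 399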
def process_post_author(self, bulk_mode, api_author): """ Create or update an Author related to a post. :param bulk_mode: If True, minimize db operations by bulk creating post objects :param api_author: the data in the api for the Author :return: the up-to-date Author object """ # get from the ref data map if in bulk mode, else look it up from the db if bulk_mode: author = self.ref_data_map["authors"].get(api_author["ID"]) if author: self.update_existing_author(author, api_author) else: # if the author wasn't found (likely because it's a Byline or guest author, not a user), # go ahead and create the author now author = Author.objects.create(site_id=self.site_id, wp_id=api_author["ID"], **self.api_object_data("author", api_author)) else: # do a direct db lookup if we're not in bulk mode author, created = self.get_or_create_author(api_author) if author and not created: self.update_existing_author(author, api_author) # add to the ref data map so we don't try to create it again if author: self.ref_data_map["authors"][api_author["ID"]] = author return author
Create or update an Author related to a post. :param bulk_mode: If True, minimize db operations by bulk creating post objects :param api_author: the data in the api for the Author :return: the up-to-date Author object
def forget_canvas(canvas): """ Forget about the given canvas. Used by the canvas when closed. """ cc = [c() for c in canvasses if c() is not None] while canvas in cc: cc.remove(canvas) canvasses[:] = [weakref.ref(c) for c in cc]
Forget about the given canvas. Used by the canvas when closed.
def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p
Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest
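A usage sketch against the public requests API, showing how session-level settings are merged into the prepared request (the URL and header values are illustrative):

import requests

s = requests.Session()
s.headers['User-Agent'] = 'example-agent'

req = requests.Request('GET', 'https://httpbin.org/get', params={'q': 'demo'})
prepped = s.prepare_request(req)

print(prepped.url)                    # query string already encoded
print(prepped.headers['User-Agent'])  # session header merged in

resp = s.send(prepped)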
def attach_zoom(ax, scaling=2.0): """ Attach an event handler that supports zooming within a plot using the mouse scroll wheel. Parameters ---------- ax : :class:`matplotlib.axes.Axes` object Axes to which event handling is to be attached scaling : float, optional (default 2.0) Scaling factor for zooming in and out Returns ------- zoom : function Mouse scroll wheel event handler function """ # See https://stackoverflow.com/questions/11551049 def zoom(event): # Get the current x and y limits cur_xlim = ax.get_xlim() cur_ylim = ax.get_ylim() # Get event location xdata = event.xdata ydata = event.ydata # Return if cursor is not over valid region of plot if xdata is None or ydata is None: return if event.button == 'up': # Deal with zoom in scale_factor = 1.0 / scaling elif event.button == 'down': # Deal with zoom out scale_factor = scaling # Get distance from the cursor to the edge of the figure frame x_left = xdata - cur_xlim[0] x_right = cur_xlim[1] - xdata y_top = ydata - cur_ylim[0] y_bottom = cur_ylim[1] - ydata # Calculate new x and y limits new_xlim = (xdata - x_left * scale_factor, xdata + x_right * scale_factor) new_ylim = (ydata - y_top * scale_factor, ydata + y_bottom * scale_factor) # Ensure that x limit range is no larger than that of the reference if np.diff(new_xlim) > np.diff(zoom.xlim_ref): new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim) # Ensure that lower x limit is not less than that of the reference if new_xlim[0] < zoom.xlim_ref[0]: new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0]) # Ensure that upper x limit is not greater than that of the reference if new_xlim[1] > zoom.xlim_ref[1]: new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1]) # Ensure that ylim tuple has the smallest value first if zoom.ylim_ref[1] < zoom.ylim_ref[0]: ylim_ref = zoom.ylim_ref[::-1] new_ylim = new_ylim[::-1] else: ylim_ref = zoom.ylim_ref # Ensure that y limit range is no larger than that of the reference if np.diff(new_ylim) > np.diff(ylim_ref): new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim) # Ensure that lower y limit is not less than that of the reference if new_ylim[0] < ylim_ref[0]: new_ylim += np.array(ylim_ref[0] - new_ylim[0]) # Ensure that upper y limit is not greater than that of the reference if new_ylim[1] > ylim_ref[1]: new_ylim -= np.array(new_ylim[1] - ylim_ref[1]) # Return the ylim tuple to its original order if zoom.ylim_ref[1] < zoom.ylim_ref[0]: new_ylim = new_ylim[::-1] # Set new x and y limits ax.set_xlim(new_xlim) ax.set_ylim(new_ylim) # Force redraw ax.figure.canvas.draw() # Record reference x and y limits prior to any zooming zoom.xlim_ref = ax.get_xlim() zoom.ylim_ref = ax.get_ylim() # Get figure for specified axes and attach the event handler fig = ax.get_figure() fig.canvas.mpl_connect('scroll_event', zoom) return zoom
Attach an event handler that supports zooming within a plot using the mouse scroll wheel. Parameters ---------- ax : :class:`matplotlib.axes.Axes` object Axes to which event handling is to be attached scaling : float, optional (default 2.0) Scaling factor for zooming in and out Returns ------- zoom : function Mouse scroll wheel event handler function
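A usage sketch for attach_zoom; keeping the returned handler in a variable is a precaution so it is not garbage collected while the figure is open:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
t = np.linspace(0, 2 * np.pi, 200)
ax.plot(t, np.sin(t))

# Scroll wheel now zooms in/out around the cursor position
zoom_handler = attach_zoom(ax, scaling=1.5)
plt.show()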
def _check_user(user, group): ''' Checks if the named user and group are present on the minion ''' err = '' if user: uid = __salt__['file.user_to_uid'](user) if uid == '': err += 'User {0} is not available '.format(user) if group: gid = __salt__['file.group_to_gid'](group) if gid == '': err += 'Group {0} is not available'.format(group) return err
Checks if the named user and group are present on the minion
def process_boolean(self, tag): """Process Boolean type tags""" tag.set_address(self.normal_register.current_bit_address) self.normal_register.move_to_next_bit_address()
Process Boolean type tags
def create_ticket(self, ticket=None, **kwargs): """ Create a new ``Ticket``. Additional arguments are passed to the ``create()`` function. Return the newly created ``Ticket``. """ if not ticket: ticket = self.create_ticket_str() if 'service' in kwargs: kwargs['service'] = clean_service_url(kwargs['service']) if 'expires' not in kwargs: expires = now() + timedelta(seconds=self.model.TICKET_EXPIRE) kwargs['expires'] = expires t = self.create(ticket=ticket, **kwargs) logger.debug("Created %s %s" % (t.name, t.ticket)) return t
Create a new ``Ticket``. Additional arguments are passed to the ``create()`` function. Return the newly created ``Ticket``.
async def expand_foreign_keys(self, database, table, column, values):
    "Returns dict mapping (column, value) -> label"
    foreign_keys = await self.foreign_keys_for_table(database, table)
    # Find the foreign_key for this column
    try:
        fk = [
            foreign_key for foreign_key in foreign_keys
            if foreign_key["column"] == column
        ][0]
    except IndexError:
        return {}
    label_column = await self.label_column_for_table(database, fk["other_table"])
    if not label_column:
        return {
            (fk["column"], value): str(value)
            for value in values
        }
    labeled_fks = {}
    sql = '''
        select {other_column}, {label_column}
        from {other_table}
        where {other_column} in ({placeholders})
    '''.format(
        other_column=escape_sqlite(fk["other_column"]),
        label_column=escape_sqlite(label_column),
        other_table=escape_sqlite(fk["other_table"]),
        placeholders=", ".join(["?"] * len(set(values))),
    )
    try:
        results = await self.execute(
            database, sql, list(set(values))
        )
    except InterruptedError:
        pass
    else:
        for id, value in results:
            labeled_fks[(fk["column"], id)] = value
    return labeled_fks
Returns dict mapping (column, value) -> label
def position(axis, hardware, cp=None):
    """
    Read position from driver into a tuple and map the third value
    to the axis of the pipette currently in use
    """
    if not ff.use_protocol_api_v2():
        p = hardware._driver.position
        return (p['X'], p['Y'], p[axis])
    else:
        p = hardware.gantry_position(axis, critical_point=cp)
        return (p.x, p.y, p.z)
Read position from driver into a tuple and map the third value to the axis of the pipette currently in use
def get_masquerade(zone=None, permanent=True): ''' Show if masquerading is enabled on a zone. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.get_masquerade zone ''' zone_info = list_all(zone, permanent) if 'no' in [zone_info[i]['masquerade'][0] for i in zone_info]: return False return True
Show if masquerading is enabled on a zone. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.get_masquerade zone
def get_bio(self, section, language=None): """ Returns a section of the bio. section can be "content", "summary" or "published" (for published date) """ if language: params = self._get_params() params["lang"] = language else: params = None return self._extract_cdata_from_request( self.ws_prefix + ".getInfo", section, params )
Returns a section of the bio. section can be "content", "summary" or "published" (for published date)
def _backup_file(path): """ Backup a file but never overwrite an existing backup file """ backup_base = '/var/local/woven-backup' backup_path = ''.join([backup_base,path]) if not exists(backup_path): directory = ''.join([backup_base,os.path.split(path)[0]]) sudo('mkdir -p %s'% directory) sudo('cp %s %s'% (path,backup_path))
Backup a file but never overwrite an existing backup file
def split_glides(n_samples, dur, fs_a, GL, min_dur=None):
    '''Get start/stop indices of each `dur` length sub-glide for glides in GL

    Args
    ----
    n_samples: int
        Number of samples in the sensor data; sub-glides falling outside
        `[0, n_samples)` are discarded
    dur: int
        Desired duration of glides
    fs_a: float
        Sampling frequency of the accelerometer data (Hz)
    GL: ndarray, (n, 2)
        Matrix containing the start time (first column) and end time
        (2nd column) of any glides. Times are in seconds.
    min_dur: int, default (bool False)
        Minimum number of seconds for a sub-glide. Default value is `False`,
        which makes `min_dur` equal to `dur`, ignoring sub-glides smaller
        than `dur`.

    Attributes
    ----------
    gl_ind_diff: ndarray, (n,3)
        GL, with additional column of difference between the first two
        columns

    Returns
    -------
    SGL: ndarray, (n, 2)
        Matrix containing the start time (first column) and end time
        (2nd column) of the generated sub-glides. All glides must have
        duration equal to the given dur value. Times are in seconds.
    data_sgl_mask: ndarray, (n_samples,)
        Boolean mask of the sensor data samples covered by the generated
        sub-glides
    '''
    import numpy

    # Convert `dur` in seconds to duration in number of samples `ndur`
    ndur = dur * fs_a

    # If minimum duration not passed, set `min_ndur` to skip slices < `dur`
    if not min_dur:
        min_ndur = dur * fs_a
    else:
        min_ndur = min_dur * fs_a

    # `GL` plus column for total duration of glide, seconds
    gl_ind_diff = numpy.vstack((GL.T, GL[:, 1] - GL[:, 0])).T

    # Split all glides in `GL`
    SGL_started = False
    for i in range(len(GL)):
        gl_ndur = gl_ind_diff[i, 2]

        # Split into sub glides if longer than duration
        if abs(gl_ndur) > ndur:

            # Make list of index lengths to start of each sub-glide
            n_sgl = int(gl_ndur // ndur)
            sgl_ndur = numpy.ones(n_sgl) * ndur
            sgl_start = numpy.arange(n_sgl) * (ndur + 1)

            # Add remainder as a sub-glide, skips if `min_ndur` not passed
            if (gl_ndur % ndur > min_ndur):
                last_ndur = numpy.floor(gl_ndur % ndur)
                sgl_ndur = numpy.hstack([sgl_ndur, last_ndur])
                last_start = (len(sgl_start) * ndur) + ndur
                sgl_start = numpy.hstack([sgl_start, last_start])

            # Get start and end index positions for each sub-glide
            for k in range(len(sgl_start)):
                # starting at original glide start...
                # sgl_start_ind: add index increments of ndur+1 for next start idx
                next_start_ind = (gl_ind_diff[i, 0] + sgl_start[k]).astype(int)

                # end_glide: add `ndur` to that to get ending idx
                next_end_ind = (next_start_ind + sgl_ndur[k]).astype(int)

                # If first iteration, set equal to first set of indices
                if not SGL_started:
                    sgl_start_ind = next_start_ind
                    sgl_end_ind = next_end_ind
                    SGL_started = True
                else:
                    # Concatenate 1D arrays together, shape (n,)
                    sgl_start_ind = numpy.hstack((sgl_start_ind,
                                                  next_start_ind))
                    sgl_end_ind = numpy.hstack((sgl_end_ind, next_end_ind))

    # Guard against the case where no glide was long enough to split
    if not SGL_started:
        SGL = numpy.empty((0, 2), dtype=int)
    else:
        # Stack and transpose indices into shape (n, 2)
        SGL = numpy.vstack((sgl_start_ind, sgl_end_ind)).T

    # Filter out sub-glides that fall outside of sensor data indices
    SGL = SGL[(SGL[:, 0] >= 0) & (SGL[:, 1] < n_samples)]

    # Check that all sub-glides have a duration of `ndur` samples
    sgl_ndur = SGL[:, 1] - SGL[:, 0]

    # If sub-glide `min_ndur` set, make sure all above `min_ndur`, below `ndur`
    if min_dur:
        assert numpy.all((sgl_ndur <= ndur) & (sgl_ndur >= min_ndur))
    # Else make sure all sample number durations equal to `ndur`
    else:
        assert numpy.all(sgl_ndur == ndur)

    # Create `data_sgl_mask`
    data_sgl_mask = numpy.zeros(n_samples, dtype=bool)
    for start, stop in SGL.astype(int):
        data_sgl_mask[start:stop] = True

    return SGL, data_sgl_mask
Get start/stop indices of each `dur` length sub-glide for glides in GL Args ---- n_samples: int Number of samples in the sensor data; sub-glides falling outside `[0, n_samples)` are discarded dur: int Desired duration of glides fs_a: float Sampling frequency of the accelerometer data (Hz) GL: ndarray, (n, 2) Matrix containing the start time (first column) and end time (2nd column) of any glides. Times are in seconds. min_dur: int, default (bool False) Minimum number of seconds for a sub-glide. Default value is `False`, which makes `min_dur` equal to `dur`, ignoring sub-glides smaller than `dur`. Attributes ---------- gl_ind_diff: ndarray, (n,3) GL, with additional column of difference between the first two columns Returns ------- SGL: ndarray, (n, 2) Matrix containing the start time (first column) and end time (2nd column) of the generated sub-glides. All glides must have duration equal to the given dur value. Times are in seconds. data_sgl_mask: ndarray, (n_samples,) Boolean mask of the sensor data samples covered by the generated sub-glides
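A usage sketch with made-up glide data; the start/end values are given as sample indices, which is how the function actually consumes them:

import numpy as np

fs_a = 16                        # accelerometer sampling rate, Hz
n_samples = fs_a * 600           # ten minutes of data
GL = np.array([[100, 1800],      # two glides as (start, stop) indices
               [2500, 2900]])

# Split into 5 s sub-glides, keeping remainders of at least 2 s
SGL, mask = split_glides(n_samples, dur=5, fs_a=fs_a, GL=GL, min_dur=2)
print(SGL.shape, mask.sum())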
def trailing_window(rows, group_by=None, order_by=None): """Create a trailing window for use with aggregate window functions. Parameters ---------- rows : int Number of trailing rows to include. 0 includes only the current row group_by : expressions, default None Either specify here or with TableExpr.group_by order_by : expressions, default None For analytic functions requiring an ordering, specify here, or let Ibis determine the default ordering (for functions like rank) Returns ------- Window """ return Window( preceding=rows, following=0, group_by=group_by, order_by=order_by )
Create a trailing window for use with aggregate window functions. Parameters ---------- rows : int Number of trailing rows to include. 0 includes only the current row group_by : expressions, default None Either specify here or with TableExpr.group_by order_by : expressions, default None For analytic functions requiring an ordering, specify here, or let Ibis determine the default ordering (for functions like rank) Returns ------- Window
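A usage sketch with the ibis expression API; the table schema here is invented for the example:

import ibis

t = ibis.table([('g', 'string'), ('ts', 'timestamp'), ('amount', 'double')],
               name='t')

# Trailing 3-row sum per group, ordered by timestamp
w = trailing_window(3, group_by=t.g, order_by=t.ts)
expr = t.mutate(rolling_amount=t.amount.sum().over(w))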
def enable_all_breakpoints(self):
    """
    Enables all disabled breakpoints in all processes.

    @see:
        enable_code_breakpoint,
        enable_page_breakpoint,
        enable_hardware_breakpoint
    """

    # enable code breakpoints
    for (pid, bp) in self.get_all_code_breakpoints():
        if bp.is_disabled():
            self.enable_code_breakpoint(pid, bp.get_address())

    # enable page breakpoints
    for (pid, bp) in self.get_all_page_breakpoints():
        if bp.is_disabled():
            self.enable_page_breakpoint(pid, bp.get_address())

    # enable hardware breakpoints
    for (tid, bp) in self.get_all_hardware_breakpoints():
        if bp.is_disabled():
            self.enable_hardware_breakpoint(tid, bp.get_address())
Enables all disabled breakpoints in all processes. @see: enable_code_breakpoint, enable_page_breakpoint, enable_hardware_breakpoint
def add_conditional_clause(self, clause):
    """
    Adds an iff clause to this statement

    :param clause: The clause that will be added to the iff statement
    :type clause: ConditionalClause
    """
    clause.set_context_id(self.context_counter)
    self.context_counter += clause.get_context_size()
    self.conditionals.append(clause)
Adds an iff clause to this statement :param clause: The clause that will be added to the iff statement :type clause: ConditionalClause
def resolve_font(name): """Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass """ if os.path.exists(name): return os.path.abspath(name) fonts = get_font_files() if name in fonts: return fonts[name] raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name)
Turns font names into absolute filenames This is case sensitive. The extension should be omitted. For example:: >>> path = resolve_font('NotoSans-Bold') >>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts') >>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf') >>> noto_path = os.path.abspath(noto_path) >>> assert path == noto_path Absolute paths are allowed:: >>> resolve_font(noto_path) == noto_path True Raises :exc:`FontNotFound` on failure:: >>> try: ... resolve_font('blahahaha') ... assert False ... except FontNotFound: ... pass
def metric_path(cls, project, metric): """Return a fully-qualified metric string.""" return google.api_core.path_template.expand( "projects/{project}/metrics/{metric}", project=project, metric=metric )
Return a fully-qualified metric string.
def getScan(self, title, peptide=None):
    """
    allows random lookup
    """
    if title in self.ra:
        self.filename.seek(self.ra[title][0], 0)
        toRead = self.ra[title][1] - self.ra[title][0]
        info = self.filename.read(toRead)
        scan = self.parseScan(info)
    else:
        return None
    return scan
allows random lookup
def _parse_block(self, parser, allow_pluralize): """Parse until the next block tag with a given name.""" referenced = [] buf = [] while 1: if parser.stream.current.type == 'data': buf.append(parser.stream.current.value.replace('%', '%%')) next(parser.stream) elif parser.stream.current.type == 'variable_begin': next(parser.stream) name = parser.stream.expect('name').value referenced.append(name) buf.append('%%(%s)s' % name) parser.stream.expect('variable_end') elif parser.stream.current.type == 'block_begin': next(parser.stream) if parser.stream.current.test('name:endtrans'): break elif parser.stream.current.test('name:pluralize'): if allow_pluralize: break parser.fail('a translatable section can have only one ' 'pluralize section') parser.fail('control structures in translatable sections are ' 'not allowed') elif parser.stream.eos: parser.fail('unclosed translation block') else: assert False, 'internal parser error' return referenced, concat(buf)
Parse until the next block tag with a given name.
def _handle_sub_action(self, input_dict, handler):
    """
    Handles resolving replacements in the Sub action based on the handler
    that is passed as an input.

    :param input_dict: Dictionary to be resolved
    :param handler: handler that is specific to each implementation.
    :return: Resolved value of the Sub dictionary
    """
    if not self.can_handle(input_dict):
        return input_dict

    key = self.intrinsic_name
    sub_value = input_dict[key]

    input_dict[key] = self._handle_sub_value(sub_value, handler)

    return input_dict
Handles resolving replacements in the Sub action based on the handler that is passed as an input. :param input_dict: Dictionary to be resolved :param handler: handler that is specific to each implementation. :return: Resolved value of the Sub dictionary
def sum_2_dictionaries(dicta, dictb):
    """Given two dictionaries of totals, where each total refers to a key
       in the dictionary, add the totals.
       E.g.:  dicta = { 'a' : 3, 'b' : 1 }
              dictb = { 'a' : 1, 'c' : 5 }
              dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 }
       @param dicta: (dictionary)
       @param dictb: (dictionary)
       @return: (dictionary) - the sum of the 2 dictionaries
    """
    dict_out = dicta.copy()
    for key in dictb.keys():
        if key in dict_out:
            # Add the sum for key in dictb to that of dict_out:
            dict_out[key] += dictb[key]
        else:
            # the key is not in the first dictionary - add it directly:
            dict_out[key] = dictb[key]
    return dict_out
Given two dictionaries of totals, where each total refers to a key in the dictionary, add the totals. E.g.: dicta = { 'a' : 3, 'b' : 1 } dictb = { 'a' : 1, 'c' : 5 } dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 } @param dicta: (dictionary) @param dictb: (dictionary) @return: (dictionary) - the sum of the 2 dictionaries
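With the key lookup fixed (the original tested the literal string 'key'), a quick check reproduces the docstring example:

dicta = {'a': 3, 'b': 1}
dictb = {'a': 1, 'c': 5}
print(sum_2_dictionaries(dicta, dictb))  # {'a': 4, 'b': 1, 'c': 5}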
def spark_config(self): """ config spark :return: """ configs = [ 'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'.format( bigdata_conf.hadoop_home ), 'export SPARK_LOCAL_IP={0}'.format(env.host_string) ] append(bigdata_conf.global_env_home, configs, use_sudo=True) run('source {0}'.format(bigdata_conf.global_env_home))
config spark :return:
def continuous_partition_data(data, bins='auto', n_bins=10): """Convenience method for building a partition object on continuous data Args: data (list-like): The data from which to construct the estimate. bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins) n_bins (int): Ignored if bins is auto. Returns: A new partition_object:: { "bins": (list) The endpoints of the partial partition of reals, "weights": (list) The densities of the bins implied by the partition. } """ if bins == 'uniform': bins = np.linspace(start=np.min(data), stop=np.max(data), num=n_bins+1) elif bins == 'ntile': bins = np.percentile(data, np.linspace( start=0, stop=100, num=n_bins+1)) elif bins != 'auto': raise ValueError("Invalid parameter for bins argument") hist, bin_edges = np.histogram(data, bins, density=False) return { "bins": bin_edges, "weights": hist / len(data) }
Convenience method for building a partition object on continuous data Args: data (list-like): The data from which to construct the estimate. bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins) n_bins (int): Ignored if bins is auto. Returns: A new partition_object:: { "bins": (list) The endpoints of the partial partition of reals, "weights": (list) The densities of the bins implied by the partition. }
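A usage sketch with synthetic data; the weights are bin counts normalized by the sample size, so they sum to 1:

import numpy as np

data = np.random.normal(loc=0.0, scale=1.0, size=1000)

partition = continuous_partition_data(data, bins='uniform', n_bins=5)
print(partition['bins'])     # 6 endpoints delimiting 5 uniform bins
print(partition['weights'])  # fraction of samples in each bin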
def astype(self, dtype):
    """Return a copy of this space with new ``dtype``.

    Parameters
    ----------
    dtype :
        Scalar data type of the returned space. Can be provided
        in any way the `numpy.dtype` constructor understands, e.g.
        as built-in type or as a string. Data types with non-trivial
        shapes are not allowed.

    Returns
    -------
    newspace : `ProductSpace`
        Version of this space with given data type.
    """
    if dtype is None:
        # Need to filter this out since Numpy interprets it as 'float'
        raise ValueError('`None` is not a valid data type')

    dtype = np.dtype(dtype)
    current_dtype = getattr(self, 'dtype', object)

    if dtype == current_dtype:
        return self
    else:
        return ProductSpace(*[space.astype(dtype)
                              for space in self.spaces])
Return a copy of this space with new ``dtype``. Parameters ---------- dtype : Scalar data type of the returned space. Can be provided in any way the `numpy.dtype` constructor understands, e.g. as built-in type or as a string. Data types with non-trivial shapes are not allowed. Returns ------- newspace : `ProductSpace` Version of this space with given data type.
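A usage sketch assuming the surrounding ODL package, where component spaces such as odl.rn expose astype themselves:

import odl

pspace = odl.ProductSpace(odl.rn(3), odl.rn(4))
pspace32 = pspace.astype('float32')
print(pspace32[0].dtype)  # float32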
def load_molecule(name, format=None): '''Read a `~chemlab.core.Molecule` from a file. .. seealso:: `chemlab.io.datafile` ''' mol = datafile(name, format=format).read('molecule') display_system(System([mol]))
Read a `~chemlab.core.Molecule` from a file. .. seealso:: `chemlab.io.datafile`
def _convert_from_pandas(self, pdf, schema, timezone): """ Convert a pandas.DataFrame to list of records that can be used to make a DataFrame :return list of records """ if timezone is not None: from pyspark.sql.types import _check_series_convert_timestamps_tz_local copied = False if isinstance(schema, StructType): for field in schema: # TODO: handle nested timestamps, such as ArrayType(TimestampType())? if isinstance(field.dataType, TimestampType): s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone) if s is not pdf[field.name]: if not copied: # Copy once if the series is modified to prevent the original # Pandas DataFrame from being updated pdf = pdf.copy() copied = True pdf[field.name] = s else: for column, series in pdf.iteritems(): s = _check_series_convert_timestamps_tz_local(series, timezone) if s is not series: if not copied: # Copy once if the series is modified to prevent the original # Pandas DataFrame from being updated pdf = pdf.copy() copied = True pdf[column] = s # Convert pandas.DataFrame to list of numpy records np_records = pdf.to_records(index=False) # Check if any columns need to be fixed for Spark to infer properly if len(np_records) > 0: record_dtype = self._get_numpy_record_dtype(np_records[0]) if record_dtype is not None: return [r.astype(record_dtype).tolist() for r in np_records] # Convert list of numpy records to python lists return [r.tolist() for r in np_records]
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame :return list of records
def warn(self, cmd, desc=''): ''' Style for warning message. ''' return self._label_desc(cmd, desc, self.warn_color)
Style for warning message.
def compute_invar(self): """ Compute the three invariants (I0, I1, I2) of the tensor, as well as the quantity I = -(I2/2)**2 / (I1/3)**3. """ self.i0 = self.vxx + self.vyy + self.vzz self.i1 = (self.vxx*self.vyy + self.vyy*self.vzz + self.vxx*self.vzz - self.vxy**2 - self.vyz**2 - self.vxz**2) self.i2 = (self.vxx*(self.vyy*self.vzz - self.vyz**2) + self.vxy*(self.vyz*self.vxz - self.vxy*self.vzz) + self.vxz*(self.vxy*self.vyz - self.vxz*self.vyy)) self.i = (-1.) * (self.i2 / 2.)**2 self.i.data[1:, :] /= (self.i1.data[1:, :] / 3.)**3
Compute the three invariants (I0, I1, I2) of the tensor, as well as the quantity I = -(I2/2)**2 / (I1/3)**3.
def find_keywords(self, string, **kwargs): """ Returns a sorted list of keywords in the given string. """ return find_keywords(string, parser = self, top = kwargs.pop("top", 10), frequency = kwargs.pop("frequency", {}), **kwargs )
Returns a sorted list of keywords in the given string.
def _ellipsoid_phantom_3d(space, ellipsoids): """Create an ellipsoid phantom in 3d space. Parameters ---------- space : `DiscreteLp` Space in which the phantom should be generated. If ``space.shape`` is 1 in an axis, a corresponding slice of the phantom is created (instead of squashing the whole phantom into the slice). ellipsoids : list of lists Each row should contain the entries :: 'value', 'axis_1', 'axis_2', 'axis_3', 'center_x', 'center_y', 'center_z', 'rotation_phi', 'rotation_theta', 'rotation_psi' The provided ellipsoids need to be specified relative to the reference cube ``[-1, -1, -1] x [1, 1, 1]``. Angles are to be given in radians. Returns ------- phantom : ``space`` element 3D ellipsoid phantom in ``space``. See Also -------- shepp_logan : The typical use-case for this function. """ # Blank volume p = np.zeros(space.shape, dtype=space.dtype) minp = space.grid.min_pt maxp = space.grid.max_pt # Create the pixel grid grid_in = space.grid.meshgrid # Move points to [-1, 1] grid = [] for i in range(3): mean_i = (minp[i] + maxp[i]) / 2.0 # Where space.shape = 1, we have minp = maxp, so we set diff_i = 1 # to avoid division by zero. Effectively, this allows constructing # a slice of a 3D phantom. diff_i = (maxp[i] - minp[i]) / 2.0 or 1.0 grid.append((grid_in[i] - mean_i) / diff_i) for ellip in ellipsoids: assert len(ellip) == 10 intensity = ellip[0] a_squared = ellip[1] ** 2 b_squared = ellip[2] ** 2 c_squared = ellip[3] ** 2 x0 = ellip[4] y0 = ellip[5] z0 = ellip[6] phi = ellip[7] theta = ellip[8] psi = ellip[9] scales = [1 / a_squared, 1 / b_squared, 1 / c_squared] center = (np.array([x0, y0, z0]) + 1.0) / 2.0 # Create the offset x,y and z values for the grid if any([phi, theta, psi]): # Rotate the points to the expected coordinate system. cphi = np.cos(phi) sphi = np.sin(phi) ctheta = np.cos(theta) stheta = np.sin(theta) cpsi = np.cos(psi) spsi = np.sin(psi) mat = np.array([[cpsi * cphi - ctheta * sphi * spsi, cpsi * sphi + ctheta * cphi * spsi, spsi * stheta], [-spsi * cphi - ctheta * sphi * cpsi, -spsi * sphi + ctheta * cphi * cpsi, cpsi * stheta], [stheta * sphi, -stheta * cphi, ctheta]]) # Calculate the points that could possibly be inside the volume # Since the points are rotated, we cannot do anything directional # without more logic max_radius = np.sqrt( np.abs(mat).dot([a_squared, b_squared, c_squared])) idx, shapes = _getshapes_3d(center, max_radius, space.shape) subgrid = [g[idi] for g, idi in zip(grid, shapes)] offset_points = [vec * (xi - x0i)[..., None] for xi, vec, x0i in zip(subgrid, mat.T, [x0, y0, z0])] rotated = offset_points[0] + offset_points[1] + offset_points[2] np.square(rotated, out=rotated) radius = np.dot(rotated, scales) else: # Calculate the points that could possibly be inside the volume max_radius = np.sqrt([a_squared, b_squared, c_squared]) idx, shapes = _getshapes_3d(center, max_radius, space.shape) subgrid = [g[idi] for g, idi in zip(grid, shapes)] squared_dist = [ai * (xi - x0i) ** 2 for xi, ai, x0i in zip(subgrid, scales, [x0, y0, z0])] # Parentheses to get best order for broadcasting radius = squared_dist[0] + (squared_dist[1] + squared_dist[2]) # Find the points within the ellipse inside = radius <= 1 # Add the ellipse intensity to those points p[idx][inside] += intensity return space.element(p)
Create an ellipsoid phantom in 3d space. Parameters ---------- space : `DiscreteLp` Space in which the phantom should be generated. If ``space.shape`` is 1 in an axis, a corresponding slice of the phantom is created (instead of squashing the whole phantom into the slice). ellipsoids : list of lists Each row should contain the entries :: 'value', 'axis_1', 'axis_2', 'axis_3', 'center_x', 'center_y', 'center_z', 'rotation_phi', 'rotation_theta', 'rotation_psi' The provided ellipsoids need to be specified relative to the reference cube ``[-1, -1, -1] x [1, 1, 1]``. Angles are to be given in radians. Returns ------- phantom : ``space`` element 3D ellipsoid phantom in ``space``. See Also -------- shepp_logan : The typical use-case for this function.
def deactivate_license(key_name=None):
    '''
    Deactivates an installed license. Requires version 7.0.0 or greater.

    key_name(str): The file name of the license key installed.

    CLI Example:

    .. code-block:: bash

        salt '*' panos.deactivate_license key_name=License_File_Name.key

    '''
    _required_version = '7.0.0'
    if not __proxy__['panos.is_required_version'](_required_version):
        return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version)

    if not key_name:
        return False, 'You must specify a key_name.'
    else:
        query = {'type': 'op', 'cmd': '<request><license><deactivate><key><features><member>{0}</member></features>'
                                      '</key></deactivate></license></request>'.format(key_name)}

    return __proxy__['panos.call'](query)
Deactivates an installed license. Requires version 7.0.0 or greater. key_name(str): The file name of the license key installed. CLI Example: .. code-block:: bash salt '*' panos.deactivate_license key_name=License_File_Name.key
def add_receiver(
        self, consumer_group, partition, offset=None, prefetch=300,
        operation=None, keep_alive=30, auto_reconnect=True):
    """
    Add a receiver to the client for a particular consumer group and
    partition.

    :param consumer_group: The name of the consumer group.
    :type consumer_group: str
    :param partition: The ID of the partition.
    :type partition: str
    :param offset: The offset from which to start receiving.
    :type offset: ~azure.eventhub.common.Offset
    :param prefetch: The message prefetch count of the receiver. Default
     is 300.
    :type prefetch: int
    :param operation: An optional operation to be appended to the hostname
     in the source URL. The value must start with `/` character.
    :type operation: str
    :rtype: ~azure.eventhub.receiver.Receiver
    """
    path = self.address.path + operation if operation else self.address.path
    source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format(
        self.address.hostname, path, consumer_group, partition)
    handler = Receiver(
        self, source_url, offset=offset, prefetch=prefetch,
        keep_alive=keep_alive, auto_reconnect=auto_reconnect)
    self.clients.append(handler)
    return handler
Add a receiver to the client for a particular consumer group and partition. :param consumer_group: The name of the consumer group. :type consumer_group: str :param partition: The ID of the partition. :type partition: str :param offset: The offset from which to start receiving. :type offset: ~azure.eventhub.common.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str :rtype: ~azure.eventhub.receiver.Receiver
def align_subplot_array(axes, xlim=None, ylim=None):
    """
    Make all of the axes in the array have the same limits, turn off
    unnecessary ticks. Use plt.subplots() to get an array of axes.
    """
    # find sensible xlim, ylim
    if xlim is None:
        xlim = [np.inf, -np.inf]
        for ax in axes.flatten():
            xlim[0] = min(xlim[0], ax.get_xlim()[0])
            xlim[1] = max(xlim[1], ax.get_xlim()[1])
    if ylim is None:
        ylim = [np.inf, -np.inf]
        for ax in axes.flatten():
            ylim[0] = min(ylim[0], ax.get_ylim()[0])
            ylim[1] = max(ylim[1], ax.get_ylim()[1])

    N, M = axes.shape
    for i, ax in enumerate(axes.flatten()):
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        if (i) % M:
            ax.set_yticks([])
        else:
            removeRightTicks(ax)
        if i < (M * (N - 1)):
            ax.set_xticks([])
        else:
            removeUpperTicks(ax)
Make all of the axes in the array have the same limits, turn off unnecessary ticks. Use plt.subplots() to get an array of axes.
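A usage sketch; removeRightTicks and removeUpperTicks are assumed to be helpers defined alongside this function:

import numpy as np
import matplotlib.pyplot as plt

fig, axes = plt.subplots(2, 3)
for ax in axes.flatten():
    ax.plot(np.random.randn(50).cumsum())

# Give every panel the same limits and drop redundant inner tick labels
align_subplot_array(axes)
plt.show()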
def response(self, action=None): """ returns cached response (as dict) for given action, or list of cached actions """ if action in self.cache: return utils.json_loads(self.cache[action]['response']) return self.cache.keys() or None
returns cached response (as dict) for given action, or list of cached actions
def commit_or_abort(self, ctx, timeout=None, metadata=None, credentials=None): """Runs commit or abort operation.""" return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata, credentials=credentials)
Runs commit or abort operation.
def get_python_args(fname, python_args, interact, debug, end_args): """Construct Python interpreter arguments""" p_args = [] if python_args is not None: p_args += python_args.split() if interact: p_args.append('-i') if debug: p_args.extend(['-m', 'pdb']) if fname is not None: if os.name == 'nt' and debug: # When calling pdb on Windows, one has to replace backslashes by # slashes to avoid confusion with escape characters (otherwise, # for example, '\t' will be interpreted as a tabulation): p_args.append(osp.normpath(fname).replace(os.sep, '/')) else: p_args.append(fname) if end_args: p_args.extend(shell_split(end_args)) return p_args
Construct Python interpreter arguments
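A usage sketch; shell_split is assumed to be the helper imported by this module, and the forward-slash normalization of the path only happens on a Windows host:

args = get_python_args('C:\\work\\script.py', '-O', interact=False,
                       debug=True, end_args='--verbose 2')
# On Windows -> ['-O', '-m', 'pdb', 'C:/work/script.py', '--verbose', '2']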
def size(self): """Get the number of elements in the DataFrame. Returns: The number of elements in the DataFrame. """ return len(self._query_compiler.index) * len(self._query_compiler.columns)
Get the number of elements in the DataFrame. Returns: The number of elements in the DataFrame.
def get_polygon_constraints_m(self, polygons_m, print_out=False):
    """
    :param polygons_m: list of polygons to constrain; each polygon is a
                       sequence of m corner indices.
    :param print_out: if True, print diagnostic information.

    :return A, b: the constraints on the theta-vector of the form
                  A*theta = b
    """
    rows_b = []
    rows_A = []

    m = len(polygons_m[0])
    rows_b.append((m - 2) * pi * np.ones(len(polygons_m), ))
    for p in polygons_m:
        row = np.zeros((self.theta.shape[0], ))
        for k in range(m):
            index = get_index(self.corners, p[1], (p[0], p[2]))
            row[index] = 1
            p = np.roll(p, 1)
        assert np.sum(row) == m
        rows_A.append(row)

    A = np.vstack(rows_A)
    b = np.hstack(rows_b)
    num_constraints = A.shape[0]
    A_repeat = np.repeat(A.astype(bool), 3).reshape((1, -1))
    corners = self.corners.reshape((1, -1))
    corners_tiled = np.tile(corners, num_constraints)
    if print_out:
        print('shape of A {}'.format(A.shape))
    if print_out:
        print('chosen angles m={}:\n{}'.format(
            m, (corners_tiled)[A_repeat].reshape((-1, m * 3))))
    if print_out:
        print('{}-polygons: {}'.format(m, rows_A))

    self.A = A
    self.b = b
    return A, b
:param polygons_m: list of polygons to constrain; each polygon is a sequence of m corner indices. :param print_out: if True, print diagnostic information. :return A, b: the constraints on the theta-vector of the form A*theta = b