code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def not26(func):
    """Decorator that disables *func* on Python versions older than 2.7.

    On Python < 2.7 the wrapped function raises :exc:`NotImplementedError`
    when called; on 2.7+ the original function is returned untouched.
    """
    @wraps(func)
    def errfunc(*args, **kwargs):
        raise NotImplementedError
    return errfunc if hexversion < 0x02070000 else func
Function decorator for methods not implemented in Python 2.6.
def terminate(self):
    """Stop the server process.

    Only valid when state is READY; transitions through TERMINATING and
    ends in TERMINATED_CLEANLY. A no-op if already terminated.

    :raises ClientStateError: if called while waiting for a result or
        while already terminating.
    """
    logger.debug('client.terminate() called (state=%s)', self.strstate)
    if self.state == ClientState.WAITING_FOR_RESULT:
        # bug fix: error message previously said 'terimate()'
        raise ClientStateError('terminate() called while state=' + self.strstate)
    if self.state == ClientState.TERMINATING:
        raise ClientStateError('terminate() called while state=' + self.strstate)
    elif self.state in ClientState.TerminatedSet:
        # already dead: nothing to do
        assert not self._server_process.is_alive()
        return
    elif self.state == ClientState.READY:
        self._assert_alive()
        self.state = ClientState.TERMINATING
        # ask the server to raise its terminate sentinel
        self._delegate_channel.put(FunctionCallDelegate(_raise_terminate))
        try:
            self._read_result(num_retries=5)
        except ProcessTerminationError:
            pass
        except ChannelError:
            logger.debug('client failed to read sentinel from channel after 5 retries - will terminate anyway')
        self.state = ClientState.TERMINATED_CLEANLY
Stop the server process and change our state to TERMINATING. Only valid if state=READY.
def safe_str_to_class(s):
    """Map a dotted string class name to the class object.

    :param s: e.g. ``'package.module.ClassName'``; a bare name is treated
        as a module of the same name.
    :return: the class attribute of the imported module.
    :raises ImportError: if the module does not define the class.
    """
    module_path, _, klass = s.rpartition(".")
    if not module_path:
        # bare class name: treat the name itself as the module to import
        module_path = klass
    mod = my_import(module_path)
    if hasattr(mod, klass):
        return getattr(mod, klass)
    # bug fix: previously raised ImportError('') with an empty message
    raise ImportError("class {0} not found in module {1}".format(klass, module_path))
Helper function to map string class names to module classes.
def reset(self):
    """Drop the index table.

    NOTE(review): ``query`` is referenced on the right-hand side before it
    is ever assigned, so calling this raises NameError (or silently uses a
    stale module-level ``query``) -- the actual DROP statement appears to
    be missing. TODO: confirm the intended SQL before fixing.
    """
    query = self.backend.library.database.connection.execute(query)
Drops index table.
def patch(self, request):
    """Update the status of a video.

    Expects ``edx_video_id`` and ``status`` in the request data; returns
    400 when either is missing, the status is invalid, or no video with
    the given id exists, and 200 with an empty payload on success.
    """
    required = ('edx_video_id', 'status')
    absent = [field for field in required if field not in request.data]
    if absent:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{missing}" params must be specified.'.format(missing=' and '.join(absent))}
        )
    edx_video_id = request.data['edx_video_id']
    video_status = request.data['status']
    if video_status not in VALID_VIDEO_STATUSES:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={'message': u'"{status}" is not a valid Video status.'.format(status=video_status)}
        )
    try:
        video = Video.objects.get(edx_video_id=edx_video_id)
    except Video.DoesNotExist:
        return Response(
            status=status.HTTP_400_BAD_REQUEST,
            data={
                'message': u'Video is not found for specified edx_video_id: {edx_video_id}'.format(
                    edx_video_id=edx_video_id
                )
            },
        )
    video.status = video_status
    video.save()
    return Response(status=status.HTTP_200_OK, data={})
Update the status of a video.
def resetSession(self, username=None, password=None, verify=True) :
    """Disconnect the current session and replace it with a fresh
    AikidoSession built from the given credentials."""
    self.disconnectSession()
    fresh = AikidoSession(username, password, verify)
    self.session = fresh
resets the session
def list_all():
    """Lists all nspawn containers.

    CLI Example:

    .. code-block:: bash

        salt myminion nspawn.list_all
    """
    containers = []
    if _sd_version() >= 219:
        # newer systemd: ask machinectl for the registered images
        for line in _machinectl('list-images')['stdout'].splitlines():
            fields = line.split()
            if fields:
                containers.append(fields[0])
    else:
        # older systemd: fall back to scanning the container root directory
        root = _root()
        try:
            containers.extend(
                name for name in os.listdir(root)
                if os.path.isdir(os.path.join(root, name))
            )
        except OSError:
            pass
    return containers
Lists all nspawn containers CLI Example: .. code-block:: bash salt myminion nspawn.list_all
def listDatawraps() :
    """Lists all the datawraps pyGeno comes with (tarballs found in the
    bundled bootstrap_data genome and SNP folders)."""
    available = {"Genomes": [], "SNPs": []}
    genome_dir = os.path.join(this_dir, "bootstrap_data/genomes")
    available["Genomes"] = [name for name in os.listdir(genome_dir)
                            if name.find(".tar.gz") > -1]
    snp_dir = os.path.join(this_dir, "bootstrap_data/SNPs")
    available["SNPs"] = [name for name in os.listdir(snp_dir)
                         if name.find(".tar.gz") > -1]
    return available
Lists all the datawraps pyGeno comes with
def get_nodedata(self, sort_names=False):
    """Get dc node data (idx, names, voltages) from a solved power flow.

    :param sort_names: if True, sort the three sequences together by idx.
    :return: ``(idx, names, V)``; ``None`` when there are no nodes;
        a 7-tuple of ``False`` when the power flow is not solved.
        NOTE(review): when ``sort_names`` is True the result is a generator
        of lists rather than a tuple -- confirm callers always unpack it.
    """
    if not self.Node.n:
        return
    if not self.pflow.solved:
        logger.error('Power flow not solved when getting bus data.')
        return tuple([False] * 7)
    idx = self.Node.idx
    names = self.Node.name
    # voltage solution for each node, read out of the DAE y vector
    V = [self.dae.y[x] for x in self.Node.v]
    if sort_names:
        # sort all three sequences in lockstep, keyed on node idx
        ret = (list(x) for x in zip(*sorted(zip(idx, names, V), key=itemgetter(0))))
    else:
        ret = idx, names, V
    return ret
get dc node data from solved power flow
def add_state(self, state, storage_load=False):
    """Adds a state to the container state.

    :param state: the state to add; its id is regenerated until unique.
    :param storage_load: True if the state was loaded from the filesystem
        (not otherwise used in this method).
    :return: the state_id of the added state.
    :raises AttributeError: if the id already exists in ``self._states``.
    """
    assert isinstance(state, State)
    # regenerate the id until it collides with neither this container's
    # own id nor any existing child state id
    while state.state_id == self.state_id or state.state_id in self.states:
        state.change_state_id()
    # NOTE(review): the loop above consults ``self.states`` while this
    # check consults ``self._states`` -- confirm both are always in sync.
    if state.state_id in self._states.keys():
        raise AttributeError("State id %s already exists in the container state", state.state_id)
    else:
        state.parent = self
        self._states[state.state_id] = state
        return state.state_id
Adds a state to the container state. :param state: the state that is going to be added :param storage_load: True if the state was directly loaded from filesystem :return: the state_id of the new state :raises exceptions.AttributeError: if state.state_id already exists
def get_agent_settings():
    """Determine the value of the SNMP sysContact, sysLocation, and
    sysServices settings.

    Returns:
        dict: keys 'contact', 'location', and 'services' (sorted names).

    CLI Example:

    .. code-block:: bash

        salt '*' win_snmp.get_agent_settings
    """
    ret = dict()
    # sort service types by descending bitmask so the greedy subtraction
    # below peels off the largest flags first
    sorted_types = sorted(_SERVICE_TYPES.items(), key=lambda x: (-x[1], x[0]))
    ret['services'] = list()
    ret['contact'] = (__utils__['reg.read_value'](
        _HKEY, _AGENT_KEY, 'sysContact'))['vdata']
    ret['location'] = (__utils__['reg.read_value'](
        _HKEY, _AGENT_KEY, 'sysLocation'))['vdata']
    current_bitmask = (__utils__['reg.read_value'](
        _HKEY, _AGENT_KEY, 'sysServices'))['vdata']
    if current_bitmask == 0:
        # zero maps to the sentinel "no services" entry, which sorts last
        # because of the negative-bitmask sort key
        ret['services'].append(sorted_types[-1][0])
    else:
        for service, bitmask in sorted_types:
            if current_bitmask is not None and current_bitmask > 0:
                # greedy flag decomposition: keep any bitmask that fits
                remaining_bitmask = current_bitmask - bitmask
                if remaining_bitmask >= 0:
                    current_bitmask = remaining_bitmask
                    ret['services'].append(service)
            else:
                # bitmask exhausted (or unreadable): stop scanning
                break
    ret['services'] = sorted(ret['services'])
    return ret
Determine the value of the SNMP sysContact, sysLocation, and sysServices settings. Returns: dict: A dictionary of the agent settings. CLI Example: .. code-block:: bash salt '*' win_snmp.get_agent_settings
def namespace_lower(self, namespace):
    """Return a copy with only the keys from a given namespace, lower-cased.

    Syntactic sugar for calling :meth:`namespace` with
    ``key_transform=lambda key: key.lower()``; the lowered keys can then
    be passed directly as keyword arguments.

    :arg namespace: Common prefix.
    :return: New config dict.
    """
    def _lower(key):
        return key.lower()
    return self.namespace(namespace, key_transform=_lower)
Return a copy with only the keys from a given namespace, lower-cased. The keys in the returned dict will be transformed to lower case after filtering, so they can be easily passed as keyword arguments to other functions. This is just syntactic sugar for calling :meth:`~ConfigLoader.namespace` with ``key_transform=lambda key: key.lower()``. Example:: >>> from configloader import ConfigLoader >>> config = ConfigLoader( ... MY_APP_SETTING1='a', ... EXTERNAL_LIB_SETTING1='b', ... EXTERNAL_LIB_SETTING2='c', ... ) >>> config.namespace_lower('EXTERNAL_LIB') ConfigLoader({'setting1': 'b', 'setting2': 'c'}) :arg namespace: Common prefix. :return: New config dict. :rtype: :class:`ConfigLoader`
def _parse_boolean(element_text, state):
    """Parse the raw XML string as a boolean value.

    Case-insensitive 'true'/'false' map to True/False; anything else is
    reported through ``state.raise_error``.
    """
    lowered = element_text.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    state.raise_error(InvalidPrimitiveValue, 'Invalid boolean value "{}"'.format(element_text))
Parse the raw XML string as a boolean value.
def build_submit_description(executable, output, error, user_log, query_params):
    """Build up the contents of a condor submit description file.

    Merges ``query_params`` over the default query classad, then appends
    the executable/output/error/log lines and the trailing ``queue``.
    Returns the newline-joined description text.
    """
    merged = DEFAULT_QUERY_CLASSAD.copy()
    merged.update(query_params)
    lines = ['%s = %s' % (key, value) for key, value in merged.items()]
    lines.append('executable = ' + executable)
    lines.append('output = ' + output)
    lines.append('error = ' + error)
    lines.append('log = ' + user_log)
    lines.append('queue')
    return '\n'.join(lines)
Build up the contents of a condor submit description file. >>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul') >>> submit_args['query_params'] = dict() >>> default_description = build_submit_description(**submit_args) >>> assert 'executable = /path/to/script' in default_description >>> assert 'output = o' in default_description >>> assert 'error = e' in default_description >>> assert 'queue' in default_description >>> assert 'universe = vanilla' in default_description >>> assert 'universe = standard' not in default_description >>> submit_args['query_params'] = dict(universe='standard') >>> std_description = build_submit_description(**submit_args) >>> assert 'universe = vanilla' not in std_description >>> assert 'universe = standard' in std_description
def run(self, n_iter=-1, bg=False):
    """Run the experiment.

    :param int n_iter: number of iterations; -1 means run until one past
        the last evaluation step (requires ``eval_at`` to be set).
    :param bool bg: run in a background thread instead of blocking.
    """
    if n_iter == -1:
        if not self.eval_at:
            raise ValueError('Set n_iter or define evaluate_at.')
        n_iter = self.eval_at[-1] + 1
    self._running.set()
    if not bg:
        self._run(n_iter)
        return
    self._t = threading.Thread(target=lambda: self._run(n_iter))
    self._t.start()
Run the experiment. :param int n_iter: Number of run iterations, by default will run until the last evaluation step. :param bool bg: whether to run in background (using a Thread)
def create_node(self, *args, **kwargs):
    """
    Creates a new IOU VM.

    :returns: IOUVM instance
    """
    # serialize creation so two concurrent creates cannot be handed the
    # same application id
    with (yield from self._iou_id_lock):
        application_id = get_next_application_id(self.nodes)
        node = yield from super().create_node(*args, application_id=application_id, **kwargs)
    return node
Creates a new IOU VM. :returns: IOUVM instance
def __eliminate_unused_constraits (self, objects):
    """Eliminate constraints which mention objects not in 'objects'.

    In graph-theory terms, this is finding the subgraph induced by the
    ordered vertices.
    """
    return [constraint for constraint in self.constraints_
            if constraint[0] in objects and constraint[1] in objects]
Eliminate constraints which mention objects not in 'objects'. In graph-theory terms, this is finding subgraph induced by ordered vertices.
def create_queue_service(self):
    """Creates a QueueService object with the settings specified in the
    CloudStorageAccount.

    :return: A service object.
    :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
    """
    try:
        from azure.storage.queue.queueservice import QueueService
    except ImportError:
        raise Exception('The package azure-storage-queue is required. ' +
                        'Please install it using "pip install azure-storage-queue"')
    return QueueService(self.account_name, self.account_key,
                        sas_token=self.sas_token,
                        is_emulated=self.is_emulated)
Creates a QueueService object with the settings specified in the CloudStorageAccount. :return: A service object. :rtype: :class:`~azure.storage.queue.queueservice.QueueService`
def last_written_resolver(riak_object):
    """A conflict-resolution function that keeps only the most
    recently-modified sibling (by ``last_modified`` timestamp).

    :param riak_object: an object-in-conflict that will be resolved
    """
    newest = max(riak_object.siblings, key=lambda sibling: sibling.last_modified)
    riak_object.siblings = [newest]
A conflict-resolution function that resolves by selecting the most recently-modified sibling by timestamp. :param riak_object: an object-in-conflict that will be resolved :type riak_object: :class:`RiakObject <riak.riak_object.RiakObject>`
def get_key_by_job_id(cls, mapreduce_id):
    """Retrieves the Key for a Job.

    Args:
      mapreduce_id: The job to retrieve.

    Returns:
      Datastore Key that can be used to fetch the MapreduceState.
    """
    # key path is (entity kind, job id as string)
    return db.Key.from_path(cls.kind(), str(mapreduce_id))
Retrieves the Key for a Job. Args: mapreduce_id: The job to retrieve. Returns: Datastore Key that can be used to fetch the MapreduceState.
def _add_dispatcher(self, path_regex, dispatch_function):
    """Register a dispatch handler for a request path.

    Args:
      path_regex: string regex matched against incoming request paths.
      dispatch_function: callable taking (request, start_response) and
        returning the response body contents.
    """
    compiled = re.compile(path_regex)
    self._dispatchers.append((compiled, dispatch_function))
Add a request path and dispatch handler. Args: path_regex: A string regex, the path to match against incoming requests. dispatch_function: The function to call for these requests. The function should take (request, start_response) as arguments and return the contents of the response body.
def preprocess(cls, cat):
    """Normalize a catalog input: open string paths as catalogs, pass
    anything else through unchanged."""
    if isinstance(cat, str):
        return intake.open_catalog(cat)
    return cat
Function to run on each cat input
def execution_timer(self, *path):
    """Record the time spent inside an arbitrary code block.

    Yields once; on exit (even via exception) the elapsed time is
    submitted with :meth:`record_timing` under the given metric *path*.
    The ``max`` guards against the clock moving backwards.
    """
    started = time.time()
    try:
        yield
    finally:
        elapsed = max(started, time.time()) - started
        self.record_timing(elapsed, *path)
Record the time it takes to perform an arbitrary code block. :param path: elements of the metric path to record This method returns a context manager that records the amount of time spent inside of the context and submits a timing metric to the specified `path` using (:meth:`record_timing`).
def inspect_swarm(self):
    """Retrieve low-level information about the current swarm.

    Returns:
        A dictionary containing data about the swarm.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    response = self._get(self._url('/swarm'))
    return self._result(response, True)
Retrieve low-level information about the current swarm. Returns: A dictionary containing data about the swarm. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def _parse_queue_list(list_output):
    """Parse the queue listing into a dict of name -> URL (the name is the
    final path segment of each URL in the command's stdout)."""
    return {url.split('/')[-1]: url for url in list_output['stdout']}
Parse the queue to get a dict of name -> URL
def pickle_from_param(elem, name):
    """Retrieve a pickled Python object from the document tree rooted at elem.

    :param elem: root element of the document tree to search.
    :param name: parameter name, looked up under the ``pickle:`` prefix.
    :return: the unpickled object.

    .. warning:: uses ``pickle.loads`` -- unsafe if the document comes
       from an untrusted source (arbitrary code execution).
    """
    return pickle.loads(str(get_pyvalue(elem, u"pickle:%s" % name)))
Retrieve a pickled Python object from the document tree rooted at elem.
def field_for(self, field_id):
    """Fetches the field for the given Field ID.

    :param field_id: ID for Field to fetch.
    :return: the matching :class:`ContentTypeField` or None.
    """
    return next((field for field in self.fields if field.id == field_id), None)
Fetches the field for the given Field ID. :param field_id: ID for Field to fetch. :return: :class:`ContentTypeField <ContentTypeField>` object. :rtype: contentful.ContentTypeField
def search_schema_path(self, index, **options):
    """Builds a Yokozuna search Solr schema URL.

    :param index: name of a yz solr schema
    :type index: string
    :param options: optional additional query arguments
    :rtype: URL string
    """
    if not self.yz_wm_schema:
        raise RiakError("Yokozuna search is unsupported by this Riak node")
    quoted = quote_plus(index)
    return mkpath(self.yz_wm_schema, "schema", quoted, **options)
Builds a Yokozuna search Solr schema URL. :param index: a name of a yz solr schema :type index: string :param options: optional list of additional arguments :type index: dict :rtype URL string
def _download_movielens(dest_path):
    """Download the MovieLens 100k dataset zip archive to *dest_path*.

    Streams the response in 64 KiB chunks. The previous version iterated
    ``iter_content()`` with its default ``chunk_size=1``, i.e. one byte at
    a time, which is extremely slow for a multi-megabyte file.
    """
    url = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
    req = requests.get(url, stream=True)
    with open(dest_path, 'wb') as fd:
        for chunk in req.iter_content(chunk_size=65536):
            fd.write(chunk)
Download the dataset.
def DbGetDeviceAliasList(self, argin):
    """Get device alias names matching a filter.

    :param argin: the filter (empty/None means match everything)
    :type: tango.DevString
    :return: device alias list
    :rtype: tango.DevVarStringArray
    """
    self._log.debug("In DbGetDeviceAliasList()")
    # empty filter -> SQL wildcard; otherwise translate user wildcards
    pattern = replace_wildcard(argin) if argin else "%"
    return self.db.get_device_alias_list(pattern)
Get device alias name with a specific filter :param argin: The filter :type: tango.DevString :return: Device alias list :rtype: tango.DevVarStringArray
def frame_size(self):
    """The byte size of a single frame of this format.

    :raises ValueError: for sample types other than S16NativeEndian.
    """
    if self.sample_type == SampleType.S16NativeEndian:
        return self.sample_size * self.channels
    # bug fix: the message was passed as two separate args to ValueError
    # (logging style) and was never %-formatted
    raise ValueError('Unknown sample type: %d' % self.sample_type)
The byte size of a single frame of this format.
def load_library(self):
    """Load chartkick configuration options from the chartkick.json
    template, returning an empty dict when the template is missing."""
    try:
        template = self.environment.get_template('chartkick.json')
    except TemplateNotFound:
        return {}
    options = Options()
    options.load(template.filename)
    return options
loads configuration options
def rename(df, **kwargs):
    """Renames columns of *df*.

    Keyword argument names are the NEW column names; their values are the
    CURRENT column names.

    Args:
        df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe.

    Kwargs:
        **kwargs: new_name=current_name pairs.
    """
    mapping = {current: new for new, current in kwargs.items()}
    return df.rename(columns=mapping)
Renames columns, where keyword argument values are the current names of columns and keys are the new names. Args: df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe. Kwargs: **kwargs: key:value pairs where keys are new names for columns and values are current names of columns.
def restart(uuid, **kwargs):
    """Restart the workflow from a given workflow engine UUID and return
    the new engine UUID as text."""
    from .worker_engine import restart_worker
    restarted = restart_worker(uuid, **kwargs)
    return text_type(restarted.uuid)
Restart the workflow from a given workflow engine UUID.
def unload_fixture(apps, schema_editor):
    """Brutally delete all entries for the blog Post model."""
    post_model = apps.get_model("blog", "Post")
    post_model.objects.all().delete()
Brutally deleting all entries for this model...
def _hasattr(self, fieldname):
    """Returns True if this packet contains fieldname, False otherwise.

    'history' and 'raw' are always considered present.
    """
    if fieldname in ('history', 'raw'):
        return True
    defn = self._defn
    return fieldname in defn.fieldmap or fieldname in defn.derivationmap
Returns True if this packet contains fieldname, False otherwise.
def list_rbac_policies(self, retrieve_all=True, **_params):
    """Fetch a list of all RBAC policies for a project.

    :param retrieve_all: if True, fetch every page of results.
    :param _params: extra query parameters forwarded to the API call.
    """
    return self.list('rbac_policies', self.rbac_policies_path,
                     retrieve_all, **_params)
Fetch a list of all RBAC policies for a project.
def submit(self):
    """Submit traced TCP flows.

    Deep-copies each buffered flow, attaches an output file path when a
    dump extension is configured (otherwise drops the 'fpout' key),
    freezes the index as a tuple, and returns the collected flows plus any
    already-streamed ones as a tuple of ``Info`` objects.
    """
    self._newflg = False
    ret = list()
    for buf in self._buffer.values():
        # work on a copy so the live buffer entry is not mutated
        buf = copy.deepcopy(buf)
        if self._fdpext:
            # output path: <root>/<label>.<extension>
            buf['fpout'] = f"{self._fproot}/{buf['label']}.{self._fdpext}"
        else:
            del buf['fpout']
        # freeze the index so the resulting Info record is immutable
        buf['index'] = tuple(buf['index'])
        ret.append(Info(buf))
    ret += self._stream
    return tuple(ret)
Submit traced TCP flows.
def logout(request):
    """Logs out the user.

    Validates the payload, optionally ends the session and/or revokes the
    user's auth token, and returns an OK response.

    :raises BadRequest: if token revocation is requested but the user has
        no token.
    """
    user = request.user
    serializer = LogoutSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    data = serializer.validated_data
    if should_authenticate_session():
        # session-based auth configured: terminate the Django session
        auth.logout(request)
    if should_retrieve_token() and data['revoke_token']:
        try:
            user.auth_token.delete()
        except Token.DoesNotExist:
            raise BadRequest('Cannot remove non-existent token')
    return get_ok_response('Logout successful')
Logs out the user. returns an error if the user is not authenticated.
def init(args=None, lib='standard'):
    """Initialize the rabit module; call this once before using anything.

    Parameters
    ----------
    args: list of str, optional
        Arguments used to initialize rabit; defaults to sys.argv.
    lib: {'standard', 'mock', 'mpi'}
        Type of library to load.
    """
    actual_args = sys.argv if args is None else args
    _loadlib(lib)
    # marshal the argument strings into a C char* array for RabitInit
    argv_array = (ctypes.c_char_p * len(actual_args))()
    argv_array[:] = actual_args
    _LIB.RabitInit(len(actual_args), argv_array)
Initialize the rabit module; call this once before using anything. Parameters ---------- args: list of str, optional The list of arguments used to initialize rabit; you usually need to pass in sys.argv. Defaults to sys.argv when it is None. lib: {'standard', 'mock', 'mpi'} Type of library we want to load
def bestDescription(self, prefLanguage="en"):
    """Extract the best available description for an entity, trying
    rdfs:comment, dcterms:description, dc:description and skos:definition
    in that order; returns "" when none is found.

    NOTE(review): ``prefLanguage`` is currently unused -- confirm whether
    language filtering was intended.
    """
    candidate_preds = [
        rdflib.RDFS.comment,
        rdflib.namespace.DCTERMS.description,
        rdflib.namespace.DC.description,
        rdflib.namespace.SKOS.definition,
    ]
    for pred in candidate_preds:
        values = self.getValuesForProperty(pred)
        if values:
            return addQuotes(firstEnglishStringInList(values))
    return ""
Facility for extracting the best available description for an entity. This checks RDFS.comment, DCTERMS.description, DC.description and finally SKOS.definition.
def handle_abort(self, obj):
    """Handle an incoming ``Data`` abort processing request.

    Only forwards the abort command to the worker via the channels
    consumer; the ``Data`` object itself is not modified here.

    :param obj: The Channels message object; the target data id is read
        from ``obj[ExecutorProtocol.DATA_ID]``.
    """
    # bridge from this synchronous context into the async consumer
    async_to_sync(consumer.send_event)({
        WorkerProtocol.COMMAND: WorkerProtocol.ABORT,
        WorkerProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID],
        WorkerProtocol.FINISH_COMMUNICATE_EXTRA: {
            # fall back to the local executor when none is configured
            'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'),
        },
    })
Handle an incoming ``Data`` abort processing request. .. IMPORTANT:: This only makes manager's state consistent and doesn't affect Data object in any way. Any changes to the Data must be applied over ``handle_update`` method. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'abort', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command was triggered by], }
def get_group_line(self, data):
    """Get the next group-style key's line.

    :param data: the data to process
    :returns: the smallest non-negative line number among all group keys,
        or -1 when no key is found.
    """
    best = -1
    for key in self.groups:
        line = self.get_group_key_line(data, key)
        if line != -1 and (best == -1 or line < best):
            best = line
    return best
Get the next group-style key's line. :param data: the data to proceed :returns: the line number
def add_to_replication_queue(source_node_urn, sysmeta_pyxb):
    """Add a replication request issued by a CN to a queue that is
    processed asynchronously.

    Preconditions:
    - sysmeta_pyxb.identifier is verified to be available for create.

    Postconditions:
    - The database tracks a new replica with initial status "queued", and
      the PID from sysmeta_pyxb is reserved for the replica.
    """
    replica_info_model = d1_gmn.app.models.replica_info(
        status_str='queued', source_node_urn=source_node_urn
    )
    local_replica_model = d1_gmn.app.models.local_replica(
        pid=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
        replica_info_model=replica_info_model,
    )
    d1_gmn.app.models.replication_queue(
        local_replica_model=local_replica_model, size=sysmeta_pyxb.size
    )
Add a replication request issued by a CN to a queue that is processed asynchronously. Preconditions: - sysmeta_pyxb.identifier is verified to be available for create. E.g., with d1_gmn.app.views.is_valid_pid_for_create(pid). Postconditions: - The database is set up to track a new replica, with initial status, "queued". - The PID provided in the sysmeta_pyxb is reserved for the replica.
def start_circle_left(self, radius_m, velocity=VELOCITY):
    """Start a circular motion to the left. Returns immediately.

    :param radius_m: radius of the circle (meters)
    :param velocity: velocity of the motion (meters/second)
    :return:
    """
    circumference = 2 * radius_m * math.pi
    # full revolution (360 deg) takes circumference/velocity seconds
    turn_rate = 360.0 * velocity / circumference
    self._set_vel_setpoint(velocity, 0.0, 0.0, -turn_rate)
Start a circular motion to the left. This function returns immediately. :param radius_m: The radius of the circle (meters) :param velocity: The velocity of the motion (meters/second) :return:
def get_name_from_name_hash128( self, name ):
    """Get the name from a name hash, resolved against the last block."""
    cursor = self.db.cursor()
    return namedb_get_name_from_name_hash128(cursor, name, self.lastblock)
Get the name from a name hash
def routers_removed_from_hosting_device(self, context, router_ids,
                                        hosting_device):
    """Notify cfg agent that routers have been removed from hosting device.

    :param context: information about tenant, user etc.
    :param router_ids: list of router ids
    :param hosting_device: device hosting the routers
    """
    self._agent_notification_bulk(
        context, 'router_removed_from_hosting_device', router_ids,
        hosting_device, operation=None)
Notify cfg agent that routers have been removed from hosting device. @param: context - information about tenant, user etc @param: router-ids - list of ids @param: hosting_device - device hosting the routers
def cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc, wDesc):
    """Return the (n, c, h, w) dimensions of the output tensor of a 2D
    convolution, given the convolution, input tensor and filter
    descriptors. Useful for allocating the correctly-sized output tensor
    before launching the convolution.
    """
    # out-parameters filled in by the cuDNN C call
    n = ctypes.c_int()
    c = ctypes.c_int()
    h = ctypes.c_int()
    w = ctypes.c_int()
    status = _libcudnn.cudnnGetConvolution2dForwardOutputDim(convDesc, inputTensorDesc, wDesc,
                                                             ctypes.byref(n), ctypes.byref(c),
                                                             ctypes.byref(h), ctypes.byref(w))
    # raises on any non-success cuDNN status code
    cudnnCheckStatus(status)
    return n.value, c.value, h.value, w.value
Return the dimensions of the output tensor given a convolution descriptor. This function returns the dimensions of the resulting 4D tensor of a 2D convolution, given the convolution descriptor, the input tensor descriptor and the filter descriptor. This function can help to setup the output tensor and allocate the proper amount of memory prior to launching the actual convolution. Parameters ---------- convDesc : cudnnConvolutionDescriptor Handle to a previously created convolution descriptor. inputTensorDesc: cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. wDesc: cudnnFilterDescriptor Handle to a previously initialized filter descriptor. Returns ------- n : int Number of output images. c : int Number of output feature maps per image. h : int Height of each output feature map. w : int Width of each output feature map.
def can_allow_multiple_input_shapes(spec):
    """Examine a model specification and determine whether it can compute
    results for more than one input shape.

    :param spec: the protobuf specification of the model.
    :return: True if any input's shape range is flexible, False otherwise.
    :raises Exception: if the spec is not a neural network or its shapes
        cannot be computed.
    """
    # validate that the spec actually contains a neural network
    # (bug fix: bare `except:` clauses replaced with `except Exception:`
    # so KeyboardInterrupt/SystemExit are no longer swallowed)
    try:
        _get_nn_layers(spec)
    except Exception:
        raise Exception('Unable to verify that this model contains a neural network.')
    try:
        shaper = NeuralNetworkShaper(spec, False)
    except Exception:
        raise Exception('Unable to compute shapes for this neural network.')
    for name in _get_input_names(spec):
        shape = NeuralNetworkMultiArrayShapeRange(shaper.shape(name))
        if shape.isFlexible():
            return True
    return False
Examines a model specification and determines if it can compute results for more than one output shape. :param spec: MLModel The protobuf specification of the model. :return: Bool Returns True if the model can allow multiple input shapes, False otherwise.
def map_copy(source: tcod.map.Map, dest: tcod.map.Map) -> None:
    """Copy map data from `source` to `dest`.

    .. deprecated:: 4.5
        Use Python's copy module, or see :any:`tcod.map.Map` and assign
        between array attributes manually.
    """
    if source.width != dest.width or source.height != dest.height:
        # re-initialize dest in place so its dimensions match source
        dest.__init__(
            source.width, source.height, source._order
        )
    # name-mangled access to Map.__buffer; bulk-copies the backing array
    dest._Map__buffer[:] = source._Map__buffer[:]
Copy map data from `source` to `dest`. .. deprecated:: 4.5 Use Python's copy module, or see :any:`tcod.map.Map` and assign between array attributes manually.
def maybe_download_and_extract():
    """Download the CIFAR tarball from DATA_URL into /tmp/cifar (skipping
    the download when the archive already exists) and extract it."""
    dest_directory = "/tmp/cifar"
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # simple carriage-return progress meter on stdout
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # always (re-)extract, even when the archive was already present
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
Download and extract the tarball from Alex's website.
def remove(self, game_object: Hashable) -> None:
    """Remove the given object from the container.

    The object is removed from the main set, from every kind bucket along
    its MRO, and discarded from all tag sets.

    Example:
        container.remove(myObject)
    """
    self.all.remove(game_object)
    for klass in type(game_object).mro():
        self.kinds[klass].remove(game_object)
    for tagged in self.tags.values():
        tagged.discard(game_object)
Remove the given object from the container. game_object: A hashable contained by container. Example: container.remove(myObject)
def _traverse_relationship_objs(self, rel2src2dsts, goobj_child, goids_seen):
    """Traverse from a source GO term up its relationships, recording
    reltype -> relationship-GO-id -> set of child ids into
    ``rel2src2dsts``. ``goids_seen`` prevents revisiting terms.
    """
    child_id = goobj_child.id
    goids_seen.add(child_id)
    # alt ids are aliases of the same term; mark them as seen too
    for goid_altid in goobj_child.alt_ids:
        goids_seen.add(goid_altid)
    for reltype, recs in goobj_child.relationship.items():
        # only follow the relationship types configured on this object
        if reltype in self.relationships:
            for relationship_obj in recs:
                relationship_id = relationship_obj.id
                rel2src2dsts[reltype][relationship_id].add(child_id)
                # recurse upward unless the target was already visited
                if relationship_id not in goids_seen:
                    self._traverse_relationship_objs(rel2src2dsts, relationship_obj, goids_seen)
Traverse from source GO up relationships.
def list_styles(self):
    """Print available styles (marking the default) and exit."""
    known = sorted(self.defaults.known_styles)
    if not known:
        err_exit('No styles', 0)
    default = self.defaults.default_style
    for style in known:
        if style == default:
            print(style, '(default)')
        else:
            print(style)
    sys.exit(0)
Print available styles and exit.
def tail(ctx):
    """Show the last 10 lines of the log file."""
    click.echo('tailing logs')
    for entry in ctx.tail()[-10:]:
        # log timestamps are in milliseconds since the epoch
        ts = datetime.utcfromtimestamp(entry['timestamp'] // 1000).isoformat()
        click.echo("{}: {}".format(ts, entry['message']))
    click.echo('done')
Show the last 10 lines of the log file
def remove_user(self, workspace, params=None, **options):
    """Remove a user from a workspace (the caller must be a workspace
    admin). Returns an empty data record.

    :param workspace: the workspace or organization id.
    :param params: request data; 'user' may be an email address, the
        user's gid, or the keyword 'me'.
    :param options: extra request options.
    """
    # bug fix: the default was the shared mutable `params={}`
    if params is None:
        params = {}
    path = "/workspaces/%s/removeUser" % (workspace)
    return self.client.post(path, params, **options)
The user making this call must be an admin in the workspace. Returns an empty data record. Parameters ---------- workspace : {Id} The workspace or organization to invite the user to. [data] : {Object} Data for the request - user : {String} An identifier for the user. Can be one of an email address, the globally unique identifier for the user, or the keyword `me` to indicate the current user making the request.
def _get_order_by(self, request):
    """Return an SA order_by clause built from the request's 'sort' (or
    'order_by') and 'dir' parameters, or None when the attribute is
    unknown on the mapped class."""
    attr = request.params.get('sort', request.params.get('order_by'))
    if attr is None or not hasattr(self.mapped_class, attr):
        return None
    column = getattr(self.mapped_class, attr)
    descending = request.params.get('dir', '').upper() == 'DESC'
    return desc(column) if descending else asc(column)
Return an SA order_by
def filter_data(data, filter_dict):
    """Filter *data* in place, keeping only values matching the filters.

    For each top-level key in *filter_dict*, list values are kept when the
    value matches the regex, dict entries are kept when the KEY matches.
    Unknown top-level keys are logged and skipped.

    :raises MiuraException: when the targeted value is neither list nor dict.
    """
    for key, match_string in filter_dict.items():
        if key not in data:
            logger.warning("{0} doesn't match a top level key".format(key))
            continue
        values = data[key]
        matcher = re.compile(match_string)
        if isinstance(values, list):
            filtered = [v for v in values if matcher.search(v)]
        elif isinstance(values, dict):
            filtered = {k: v for k, v in values.items() if matcher.search(k)}
        else:
            raise MiuraException("cannot filter a {0}".format(type(values)))
        data[key] = filtered
filter a data dictionary for values only matching the filter
def _make_callsites(self, stack_pointer_tracker=None):
    """Simplify all function call statements.

    Runs a reaching-definitions analysis over the whole function, rewrites
    each block's call sites, re-simplifies the blocks that changed, and
    finally refreshes the graph.

    :param stack_pointer_tracker: optional tracker forwarded to the block
        simplifier.
    :return: None
    """
    # observe_all=True: def-use info is needed at every program point
    rd = self.project.analyses.ReachingDefinitions(func=self.function, func_graph=self.graph, observe_all=True)
    for key in self._blocks:
        block = self._blocks[key]
        csm = self.project.analyses.AILCallSiteMaker(block, reaching_definitions=rd)
        if csm.result_block:
            # the call-site maker produced a rewritten block; simplify it
            ail_block = csm.result_block
            simp = self.project.analyses.AILBlockSimplifier(ail_block, stack_pointer_tracker=stack_pointer_tracker)
            self._blocks[key] = simp.result_block
    self._update_graph()
Simplify all function call statements. :return: None
def unbound_dimensions(streams, kdims, no_duplicates=True):
    """Return a list of dimensions that have not been associated with any
    streams."""
    bound = stream_parameters(streams, no_duplicates)
    return [dim for dim in kdims if dim not in bound]
Return a list of dimensions that have not been associated with any streams.
def list_folder(cls, session, mailbox, folder):
    """Return conversations in a specific folder of a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox (helpscout.models.Mailbox): Mailbox that folder is in.
        folder (helpscout.models.Folder): Folder to list.

    Returns:
        RequestPaginator(output_type=helpscout.models.Conversation):
            Conversations iterator.
    """
    return cls(
        '/mailboxes/%d/folders/%s/conversations.json' % (
            mailbox.id, folder.id,
        ),
        session=session,
    )
Return conversations in a specific folder of a mailbox. Args: session (requests.sessions.Session): Authenticated session. mailbox (helpscout.models.Mailbox): Mailbox that folder is in. folder (helpscout.models.Folder): Folder to list. Returns: RequestPaginator(output_type=helpscout.models.Conversation): Conversations iterator.
def update_properties(self, properties, email_to_addresses=None,
                      email_cc_addresses=None, email_insert=None):
    """Update writeable properties of this storage volume on the HMC via
    the "Modify Storage Group Properties" operation, optionally emailing
    storage administrators about the requested change.

    Parameters:
      properties (dict): New property values for the volume (fields of the
        "storage-volume-request-info" nested object for "modify" -- see
        the HMC API book).
      email_to_addresses (iterable of string): Recipients to notify; if
        None/empty, no email is sent.
      email_cc_addresses (iterable of string): CC recipients; only allowed
        together with email_to_addresses.
      email_insert (string): Extra text for the email (may contain HTML);
        only allowed together with email_to_addresses.

    Raises:
      zhmcclient.HTTPError, zhmcclient.ParseError, zhmcclient.AuthError,
      zhmcclient.ConnectionError;
      ValueError: if cc/insert are given without to-addresses.
    """
    # build the single array item for the 'storage-volumes' request field;
    # deep-copy first so the caller's dict is never mutated
    volreq_obj = copy.deepcopy(properties)
    volreq_obj['operation'] = 'modify'
    volreq_obj['element-uri'] = self.uri
    body = {
        'storage-volumes': [volreq_obj],
    }
    if email_to_addresses:
        body['email-to-addresses'] = email_to_addresses
        if email_cc_addresses:
            body['email-cc-addresses'] = email_cc_addresses
        if email_insert:
            body['email-insert'] = email_insert
    else:
        if email_cc_addresses:
            raise ValueError("email_cc_addresses must not be specified if "
                             "there is no email_to_addresses: %r" %
                             email_cc_addresses)
        if email_insert:
            raise ValueError("email_insert must not be specified if "
                             "there is no email_to_addresses: %r" %
                             email_insert)
    self.manager.session.post(
        self.manager.storage_group.uri + '/operations/modify',
        body=body)
    # mirror the requested changes into the local property cache
    self.properties.update(copy.deepcopy(properties))
Update writeable properties of this storage volume on the HMC, and optionally send emails to storage administrators requesting modification of the storage volume on the storage subsystem and of any resources related to the storage volume. This method performs the "Modify Storage Group Properties" operation, requesting modification of the volume. Authorization requirements: * Object-access permission to the storage group owning this storage volume. * Task permission to the "Configure Storage - System Programmer" task. Parameters: properties (dict): New property values for the volume. Allowable properties are the fields defined in the "storage-volume-request-info" nested object for the "modify" operation. That nested object is described in section "Request body contents" for operation "Modify Storage Group Properties" in the :term:`HMC API` book. The properties provided in this parameter will be copied and then amended with the `operation="modify"` and `element-uri` properties, and then used as a single array item for the `storage-volumes` field in the request body of the "Modify Storage Group Properties" operation. email_to_addresses (:term:`iterable` of :term:`string`): Email addresses of one or more storage administrator to be notified. If `None` or empty, no email will be sent. email_cc_addresses (:term:`iterable` of :term:`string`): Email addresses of one or more storage administrator to be copied on the notification email. If `None` or empty, nobody will be copied on the email. Must be `None` or empty if `email_to_addresses` is `None` or empty. email_insert (:term:`string`): Additional text to be inserted in the notification email. The text can include HTML formatting tags. If `None`, no additional text will be inserted. Must be `None` or empty if `email_to_addresses` is `None` or empty. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def extractSurface(image, radius=0.5):
    """``vtkExtractSurface`` filter. Input is a ``vtkImageData``.

    Generates a zero-crossing isosurface from a truncated signed distance
    volume and returns it wrapped in an ``Actor``.
    """
    extractor = vtk.vtkExtractSurface()
    extractor.SetInputData(image)
    extractor.SetRadius(radius)
    extractor.Update()
    return Actor(extractor.GetOutput())
``vtkExtractSurface`` filter. Input is a ``vtkImageData``. Generate zero-crossing isosurface from truncated signed distance volume.
def replay_sync(self, live=False):
    """Replay all commands in log, from the top, with progress reporting;
    aborts cleanly on cancellation or on a failing command."""
    self.cursorRowIndex = 0
    CommandLog.currentReplay = self
    with Progress(total=len(self.rows)) as prog:
        while self.cursorRowIndex < len(self.rows):
            # currentReplay is cleared externally to request cancellation
            if CommandLog.currentReplay is None:
                status('replay canceled')
                return
            vd().statuses.clear()
            try:
                # NOTE(review): a truthy replayOne result aborts the
                # replay -- confirm the failure semantics
                if self.replayOne(self.cursorRow):
                    self.cancel()
                    return
            except Exception as e:
                self.cancel()
                exceptionCaught(e)
                status('replay canceled')
                return
            self.cursorRowIndex += 1
            prog.addProgress(1)
            # live mode executes synchronously; otherwise flush queued work
            sync(1 if live else 0)
            # wait out the configured inter-command delay
            while not self.delay():
                pass
    status('replay complete')
    CommandLog.currentReplay = None
Replay all commands in log.
def _kill_process(proc):
    """Try to kill the process; on AccessDenied just log a debug message
    (the process will be killed when thefuck terminates).

    :type proc: Process
    """
    try:
        proc.kill()
    except AccessDenied:
        logs.debug(u'Rerun: process PID {} ({}) could not be terminated'.format(
            proc.pid, proc.exe()))
Tries to kill the process otherwise just logs a debug message, the process will be killed when thefuck terminates. :type proc: Process
def skip_connection_distance(a, b):
    """The distance between two skip-connections.

    Each connection is a (start, end, type) triple. Connections of
    different types are maximally distant (1.0); otherwise the distance
    combines the start offsets and the span-length difference, normalized
    by the larger start and span.
    """
    if a[2] != b[2]:
        return 1.0
    span_a = abs(a[1] - a[0])
    span_b = abs(b[1] - b[0])
    numerator = abs(a[0] - b[0]) + abs(span_a - span_b)
    denominator = max(a[0], b[0]) + max(span_a, span_b)
    return numerator / denominator
The distance between two skip-connections.
def get_channel_comment(self, name=None, group=None, index=None):
    """Gets a channel's comment.

    The channel can be selected either by *name*, or by *group* and
    *index* (both 0-based); ambiguity is handled by
    ``_validate_channel_selection``.

    :return: the channel comment string.
    """
    gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
    channel = self.groups[gp_nr].channels[ch_nr]
    return extract_cncomment_xml(channel.comment)
Gets channel comment. Channel can be specified in two ways: * using the first positional argument *name* * if there are multiple occurrences for this channel then the *group* and *index* arguments can be used to select a specific group. * if there are multiple occurrences for this channel and either the *group* or *index* arguments is None then a warning is issued * using the group number (keyword argument *group*) and the channel number (keyword argument *index*). Use *info* method for group and channel numbers If the *raster* keyword argument is not *None* the output is interpolated accordingly. Parameters ---------- name : string name of channel group : int 0-based group index index : int 0-based channel index Returns ------- comment : str found channel comment
def check_driver_dependencies(driver, dependencies):
    """Check if the driver's dependencies are available.

    .. versionadded:: 2015.8.0

    :param driver: name of the driver being checked.
    :param dependencies: mapping of dependency name -> imported module (or
        any truthy value) when available, or the literal ``False`` when
        missing.  Only an explicit ``False`` counts as missing.
    :return: True if all dependencies are available, False otherwise.
    """
    ret = True
    # dict.items() works identically on Python 2 and 3; the six.iteritems
    # shim is unnecessary here.
    for key, value in dependencies.items():
        if value is False:
            log.warning(
                "Missing dependency: '%s'. The %s driver requires "
                "'%s' to be installed.",
                key, driver, key
            )
            ret = False
    return ret
Check if the driver's dependencies are available. .. versionadded:: 2015.8.0 driver The name of the driver. dependencies The dictionary of dependencies to check.
def page(self, category=values.unset, start_date=values.unset, end_date=values.unset,
         include_subaccounts=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """Retrieve a single page of DailyInstance records from the API.

    The request is executed immediately.

    :param category: usage category of the records to read.
    :param start_date: only include usage on or after this date.
    :param end_date: only include usage on or before this date.
    :param include_subaccounts: whether to include subaccount usage.
    :param page_token: PageToken provided by the API.
    :param page_number: page number (client-side state only).
    :param page_size: number of records to return.
    :returns: Page of DailyInstance (a ``DailyPage``).
    """
    # values.of() normalizes the parameter dict (unset entries are
    # presumably filtered out before the request -- library behavior).
    params = values.of({
        'Category': category,
        # Dates are serialized to ISO-8601 for the API.
        'StartDate': serialize.iso8601_date(start_date),
        'EndDate': serialize.iso8601_date(end_date),
        'IncludeSubaccounts': include_subaccounts,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })

    response = self._version.page(
        'GET',
        self._uri,
        params=params,
    )

    return DailyPage(self._version, response, self._solution)
Retrieve a single page of DailyInstance records from the API. Request is executed immediately :param DailyInstance.Category category: The usage category of the UsageRecord resources to read :param date start_date: Only include usage that has occurred on or after this date :param date end_date: Only include usage that occurred on or before this date :param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of DailyInstance :rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
def _function_add_call_edge(self, addr, src_node, function_addr, syscall=False,
                            stmt_idx=None, ins_addr=None):
    """Add a call edge to the function transition map.

    :param int addr:          Address being called (the callee).
    :param CFGNode src_node:  Source CFG node (the caller), or None if unknown.
    :param int function_addr: Address of the calling function.
    :param bool syscall:      Whether this is a call to a syscall.
    :param stmt_idx:          Statement ID of this call (int or str), if known.
    :param ins_addr:          Instruction address of this call, if known.
    :return: True if the edge was added, False if a SimMemoryError or
             SimEngineError was raised.
    :rtype: bool
    """
    try:
        if src_node is None:
            # No caller information available: only register the callee node.
            self.kb.functions._add_node(function_addr, addr, syscall=syscall)
        else:
            src_snippet = self._to_snippet(cfg_node=src_node)

            # NOTE(review): the return target is not tracked here -- the
            # return snippet is always None and return_to_outside is always
            # False in this code path.
            return_to_outside = False
            ret_snippet = None

            self.kb.functions._add_call_to(function_addr, src_snippet, addr, ret_snippet,
                                           syscall=syscall, stmt_idx=stmt_idx,
                                           ins_addr=ins_addr,
                                           return_to_outside=return_to_outside,
                                           )
        return True
    except (SimMemoryError, SimEngineError):
        return False
Add a call edge to the function transition map. :param int addr: Address that is being called (callee). :param CFGNode src_node: The source CFG node (caller). :param int ret_addr: Address that returns to (in case the function returns). :param int function_addr: Function address.. :param bool syscall: If this is a call to a syscall or not. :param int or str stmt_idx: Statement ID of this call. :param int or None ins_addr: Instruction address of this call. :return: True if the edge is added. False if any exception occurred. :rtype: bool
def _max_args(self, f): if f.func_defaults is None: return f.func_code.co_argcount return f.func_code.co_argcount + len(f.func_defaults)
Returns maximum number of arguments accepted by given function.
def close(self):
    """Close the underlying HTTP session, if one was created."""
    session = self._session
    if not isinstance(session, requests.Session):
        # Nothing to close (no real session was ever established).
        return
    session.close()
Close http session.
def connect_float_text(instance, prop, widget, fmt="{:g}"):
    """Connect a numerical callback property with a Qt text widget.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to.
    prop : str
        The name of the callback property.
    widget : QtWidget
        Widget implementing ``setText``/``text`` and, optionally, the
        ``editingFinished`` signal.
    fmt : str or callable
        Either a ``{}``-style format string or a function mapping a number
        to a string.
    """
    # Normalize fmt into a value -> text function.
    if callable(fmt):
        format_func = fmt
    else:
        def format_func(x):
            return fmt.format(x)

    # Widget -> property: parse the text as float, falling back to 0.
    def update_prop():
        val = widget.text()
        try:
            setattr(instance, prop, float(val))
        except ValueError:
            setattr(instance, prop, 0)

    # Property -> widget: format the value (None displays as 0).
    def update_widget(val):
        if val is None:
            val = 0.
        widget.setText(format_func(val))

    add_callback(instance, prop, update_widget)
    try:
        widget.editingFinished.connect(update_prop)
    except AttributeError:
        # Widget has no editingFinished signal: one-way binding only.
        pass
    # Initialize the widget with the property's current value.
    update_widget(getattr(instance, prop))
Connect a numerical callback property with a Qt widget containing text. Parameters ---------- instance : object The class instance that the callback property is attached to prop : str The name of the callback property widget : QtWidget The Qt widget to connect. This should implement the ``setText`` and ``text`` methods as well optionally the ``editingFinished`` signal. fmt : str or func This should be either a format string (in the ``{}`` notation), or a function that takes a number and returns a string.
def get_instructions(self): tmp_ins = [] idx = 0 for i in self.method.get_instructions(): if idx >= self.start and idx < self.end: tmp_ins.append(i) idx += i.get_length() return tmp_ins
Get all instructions from a basic block. :return: list of the instructions in the current basic block :rtype: list
def distance_to_angle(distance, units='metric'):
    """Convert a distance into an angle along a great circle.

    Args:
        distance (float): Distance to convert to degrees
        units (str): Unit type to be used for distances

    Returns:
        float: Angle in degrees

    Raises:
        ValueError: Unknown value for ``units``
    """
    if units in ('sm', 'imperial', 'US customary'):
        distance *= STATUTE_MILE
    elif units in ('nm', 'nautical'):
        distance *= NAUTICAL_MILE
    elif units not in ('km', 'metric'):
        raise ValueError('Unknown units type %r' % units)
    # Metric input needs no scaling; divide by the body radius to get the
    # subtended angle in radians, then convert to degrees.
    return math.degrees(distance / BODY_RADIUS)
Convert a distance in to an angle along a great circle. Args: distance (float): Distance to convert to degrees units (str): Unit type to be used for distances Returns: float: Angle in degrees Raises: ValueError: Unknown value for ``units``
def select_by_key(self, key): self._selected_key = None self._selected_item = None for item in self.children.values(): item.attributes['selected'] = False if key in self.children: self.children[key].attributes['selected'] = True self._selected_key = key self._selected_item = self.children[key]
Selects an item by its key. Args: key (str): The unique string identifier of the item that have to be selected.
def truepath_relative(path, otherpath=None):
    """Normalize *path* and return it relative to *otherpath*.

    Args:
        path (str): path to a file or directory.
        otherpath (str, optional): base path; defaults to the current
            working directory.

    Returns:
        str: the normalized relative path.
    """
    base = otherpath if otherpath is not None else os.getcwd()
    base = truepath(base)
    return normpath(relpath(path, base))
Normalizes and returns absolute path with so specs Args: path (str): path to file or directory otherpath (None): (default = None) Returns: str: path_ CommandLine: python -m utool.util_path --exec-truepath_relative --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> import utool as ut >>> path = 'C:/foobar/foobiz' >>> otherpath = 'C:/foobar' >>> path_ = truepath_relative(path, otherpath) >>> result = ('path_ = %s' % (ut.repr2(path_),)) >>> print(result) path_ = 'foobiz'
def wrap(self, text, **kwargs):
    """Wrap each paragraph in *text* individually.

    Paragraphs are split on blank lines.  Paragraphs that start with an
    ordered-list prefix (e.g. ``1. `` or ``a) ``) get their continuation
    lines indented to align under the item text.

    NOTE(review): ``kwargs`` is accepted but never used -- the TextWrapper
    is configured from ``vars(self)`` instead.  Confirm whether this is
    intentional.

    Parameters
    ----------
    text : str

    Returns
    -------
    list of str
        The wrapped lines of all paragraphs.
    """
    # Paragraph separator: a blank (possibly whitespace-only) line.  The
    # capturing group keeps the separators in the split result.
    pilcrow = re.compile(r'(\n\s*\n)', re.MULTILINE)
    # Ordered-list prefixes such as "1. " or "a) ".
    list_prefix = re.compile(r'\s*(?:\w|[0-9]+)[\.\)]\s+')
    paragraphs = pilcrow.split(text)

    wrapped_lines = []
    for paragraph in paragraphs:
        if paragraph.isspace():
            # Captured separator: keep an empty line between paragraphs.
            wrapped_lines.append('')
        else:
            # A fresh wrapper per paragraph; configuration comes from this
            # object's attributes (assumed to match TextWrapper kwargs --
            # see vars(self)).
            wrapper = textwrap.TextWrapper(**vars(self))
            list_item = re.match(list_prefix, paragraph)
            if list_item:
                # Align continuation lines under the list item's text.
                wrapper.subsequent_indent += ' ' * len(list_item.group(0))
            wrapped_lines.extend(wrapper.wrap(paragraph))
    return wrapped_lines
Wraps each paragraph in ``text`` individually. Parameters ---------- text : str Returns ------- str Single string containing the wrapped paragraphs.
def Reynolds_valve(nu, Q, D1, FL, Fd, C):
    r'''Calculate the valve Reynolds number per IEC 60534:

    .. math::
        Re_v = \frac{N_4 F_d Q}{\nu \sqrt{C F_L}}\left(\frac{F_L^2 C^2}
        {N_2 D^4} + 1\right)^{1/4}

    :param nu: kinematic viscosity [m^2/s]
    :param Q:  volumetric flow rate [m^3/s]
    :param D1: upstream pipe diameter [m]
    :param FL: liquid pressure recovery factor [-]
    :param Fd: valve style modifier [-]
    :param C:  metric Kv flow coefficient [m^3/hr]
    :return:   valve Reynolds number [-]
    '''
    # Same operation order as the single-expression form, split for clarity.
    base = N4*Fd*Q/nu/(C*FL)**0.5
    correction = (FL**2*C**2/(N2*D1**4) + 1)**0.25
    return base*correction
r'''Calculates Reynolds number of a control valve for a liquid or gas flowing through it at a specified Q, for a specified D1, FL, Fd, C, and with kinematic viscosity `nu` according to IEC 60534 calculations. .. math:: Re_v = \frac{N_4 F_d Q}{\nu \sqrt{C F_L}}\left(\frac{F_L^2 C^2} {N_2D^4} +1\right)^{1/4} Parameters ---------- nu : float Kinematic viscosity, [m^2/s] Q : float Volumetric flow rate of the fluid [m^3/s] D1 : float Diameter of the pipe before the valve [m] FL : float, optional Liquid pressure recovery factor of a control valve without attached fittings [] Fd : float Valve style modifier [-] C : float Metric Kv valve flow coefficient (flow rate of water at a pressure drop of 1 bar) [m^3/hr] Returns ------- Rev : float Valve reynolds number [-] Examples -------- >>> Reynolds_valve(3.26e-07, 360, 150.0, 0.9, 0.46, 165) 2966984.7525455453 References ---------- .. [1] IEC 60534-2-1 / ISA-75.01.01-2007
def set_value(self, value): v = 0 measure_unit = 'px' try: v = int(float(value.replace('px', ''))) except ValueError: try: v = int(float(value.replace('%', ''))) measure_unit = '%' except ValueError: pass self.numInput.set_value(v) self.dropMeasureUnit.set_value(measure_unit)
The value has to be in the form '10px' or '10%': a numeric value plus a measure unit
def from_samples(cls, samples_like, vectors, info, vartype, variable_labels=None):
    """Build a response from samples.

    Args:
        samples_like: an array_like of samples, or an iterable of sample
            dicts (handled by ``_samples_dicts_to_array``).
        vectors (dict): additional per-sample data vectors; passed through
            to ``data_struct_array`` as keyword arguments.
        info (dict): information about the response as a whole.
        vartype: variable type for the response.
        variable_labels (list, optional): variable labels; when samples are
            given as an array and this is None, index labels are used.

    Returns:
        A new instance of this class.
    """
    try:
        # Fast path: array-like input, coerced to int8 samples.
        samples = np.asarray(samples_like, dtype=np.int8)
    except TypeError:
        # Iterable of dicts: convert, deriving labels in the process.
        samples, variable_labels = _samples_dicts_to_array(samples_like, variable_labels)

    assert samples.dtype == np.int8, 'sanity check'

    record = data_struct_array(samples, **vectors)

    if variable_labels is None:
        # Default to 0-based index labels, one per sample column.
        __, num_variables = record.sample.shape
        variable_labels = list(range(num_variables))

    return cls(record, variable_labels, info, vartype)
Build a response from samples. Args: samples_like: A collection of samples. 'samples_like' is an extension of NumPy's array_like to include an iterable of sample dictionaries (as returned by :meth:`.Response.samples`). data_vectors (dict[field, :obj:`numpy.array`/list]): Additional per-sample data as a dict of vectors. Each vector is the same length as `samples_matrix`. The key 'energy' and it's vector is required. info (dict): Information about the response as a whole formatted as a dict. vartype (:class:`.Vartype`/str/set): Variable type for the response. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` variable_labels (list, optional): Determines the variable labels if samples_like is not an iterable of dictionaries. If samples_like is not an iterable of dictionaries and if variable_labels is not provided then index labels are used. Returns: :obj:`.Response` Examples: From dicts >>> import dimod ... >>> samples = [{'a': -1, 'b': +1}, {'a': -1, 'b': -1}] >>> response = dimod.Response.from_samples(samples, {'energy': [-1, 0]}, {}, dimod.SPIN) From an array >>> import dimod >>> import numpy as np ... >>> samples = np.ones((2, 3), dtype='int8') # 2 samples, 3 variables >>> response = dimod.Response.from_samples(samples, {'energy': [-1.0, -1.0]}, {}, ... dimod.SPIN, variable_labels=['a', 'b', 'c'])
def to_global(s):
    """Format a global variable name (camelCase with a lowered first letter)."""
    # Special-case the GPSTime prefix so it becomes 'Gps...' first.
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    # snake_case -> CamelCase (capitalize() lowercases the rest of each chunk).
    if '_' in s:
        s = ''.join(chunk.capitalize() for chunk in s.split('_'))
    return s[0].lower() + s[1:]
Format a global variable name.
def follow(user, obj, send_action=True, actor_only=True, flag='', **kwargs):
    """Create a Follow relationship so *obj*'s activities appear in *user*'s stream.

    Returns the ``Follow`` instance (existing or newly created).  When
    *send_action* is True and the relationship was newly created, a
    "started following" (or "started <flag>") action signal is sent; extra
    keyword arguments are passed to ``action.send``.  With *actor_only*
    True only actions where the object is the actor appear in the stream.
    """
    # Validate that obj is followable (project-level check).
    check(obj)
    instance, created = apps.get_model('actstream', 'follow').objects.get_or_create(
        user=user, object_id=obj.pk, flag=flag,
        content_type=ContentType.objects.get_for_model(obj),
        actor_only=actor_only
    )
    # Only signal on first creation, never for an existing relationship.
    if send_action and created:
        if not flag:
            action.send(user, verb=_('started following'), target=obj, **kwargs)
        else:
            action.send(user, verb=_('started %s' % flag), target=obj, **kwargs)
    return instance
Creates a relationship allowing the object's activities to appear in the user's stream. Returns the created ``Follow`` instance. If ``send_action`` is ``True`` (the default) then a ``<user> started following <object>`` action signal is sent. Extra keyword arguments are passed to the action.send call. If ``actor_only`` is ``True`` (the default) then only actions where the object is the actor will appear in the user's activity stream. Set to ``False`` to also include actions where this object is the action_object or the target. If ``flag`` not an empty string then the relationship would marked by this flag. Example:: follow(request.user, group, actor_only=False) follow(request.user, group, actor_only=False, flag='liking')
def load(self, shapefile=None):
    """Open a shapefile given its file name.

    Opens the .shp, .shx and .dbf siblings of *shapefile* (any extension
    on the argument is ignored) and reads the .shp/.dbf headers.  Normally
    called by the constructor with the file name as an argument.

    Raises ShapefileException if any of the three files cannot be opened.
    """
    if shapefile:
        # The three component files share a base name; drop the extension.
        (shapeName, ext) = os.path.splitext(shapefile)
        self.shapeName = shapeName
        try:
            self.shp = open("%s.shp" % shapeName, "rb")
        except IOError:
            raise ShapefileException("Unable to open %s.shp" % shapeName)
        try:
            self.shx = open("%s.shx" % shapeName, "rb")
        except IOError:
            raise ShapefileException("Unable to open %s.shx" % shapeName)
        try:
            self.dbf = open("%s.dbf" % shapeName, "rb")
        except IOError:
            raise ShapefileException("Unable to open %s.dbf" % shapeName)
    # Read headers for whichever handles are set (they may also have been
    # provided by other code paths before load() is called).
    if self.shp:
        self.__shpHeader()
    if self.dbf:
        self.__dbfHeader()
Opens a shapefile from a filename or file-like object. Normally this method would be called by the constructor with the file object or file name as an argument.
def calc_downsample(w, h, target=400):
    """Calculate the downsample factor: shorter image side divided by *target*."""
    if h < w:
        return h / target
    if w <= h:
        return w / target
Calculate downsampling value.
def get_values(self, set, selected_meta):
    """Retrieve the selected metadata values for the samples in *set*.

    NOTE(review): the parameter name ``set`` shadows the builtin; renaming
    it would change the keyword interface, so it is kept as-is.

    :param set: cluster data; the last level of its index must hold the
        sample ids (for a single index, the index itself is the sample id).
    :param selected_meta: the metadata column of ``self.meta`` to extract.
    :return: the selected values, cast to float when possible (returned
        unchanged if the cast fails).
    """
    warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n"
                  "In case of single index, the index itself should be the sample_id")
    # Last index level = sample ids (see the warning above).
    sample_ids = set.index.get_level_values(-1)
    # Align metadata rows with the cluster's samples.
    corresponding_meta = self.meta.loc[sample_ids]
    values = corresponding_meta[selected_meta]
    try:
        values = values.astype(float)
    except ValueError:
        # Non-numeric metadata: notify and return the values as-is.
        print("the values should be numeric")
    return values
Retrieves the selected metadata values of the given set :param set: cluster that contains the data :param selected_meta: the values of the selected_meta :return: the values of the selected meta of the cluster
def target(self):
    """Find the target name for this build.

    :returns: deferred that when fired returns the build task's target
        name, or None if the build task could not be determined.
    """
    # NOTE(review): written in inlineCallbacks style; the decorator is
    # applied outside this view.
    task = yield self.task()
    if not task:
        # No task found: resolve to None.
        yield defer.succeed(None)
        defer.returnValue(None)
    defer.returnValue(task.target)
Find the target name for this build. :returns: deferred that when fired returns the build task's target name. If we could not determine the build task, or the task's target, return None.
def async_run(self, keyword, *args, **kwargs): handle = self._last_thread_handle thread = self._threaded(keyword, *args, **kwargs) thread.start() self._thread_pool[handle] = thread self._last_thread_handle += 1 return handle
Executes the provided Robot Framework keyword in a separate thread and immediately returns a handle to be used with async_get
def setInstrument(self, instrument, override_analyses=False):
    """Assign *instrument* to this worksheet's analyses.

    Only analyses that allow the instrument, and that either have no
    instrument yet or are being overridden, are updated.  When the
    instrument's first method is allowed for an analysis and the analysis'
    current method is not among the instrument's methods, the method is
    switched as well.

    :param instrument: the instrument to assign.
    :param override_analyses: also overwrite already-assigned instruments.
    :return: the number of analyses updated.
    """
    # Candidates: instrument-less (or override requested) AND compatible.
    analyses = [an for an in self.getAnalyses()
                if (not an.getInstrument() or override_analyses) and
                an.isInstrumentAllowed(instrument)]
    total = 0
    for an in analyses:
        # Use the instrument's first method when the analysis allows it
        # and its current method is not supported by this instrument.
        instr_methods = instrument.getMethods()
        meth = instr_methods[0] if instr_methods else None
        if meth and an.isMethodAllowed(meth):
            if an.getMethod() not in instr_methods:
                an.setMethod(meth)
        an.setInstrument(instrument)
        total += 1
    # Record the instrument on the worksheet itself.
    self.getField('Instrument').set(self, instrument)
    return total
Sets the specified instrument to the Analysis from the Worksheet. Only sets the instrument if the Analysis allows it, according to its Analysis Service and Method. If an analysis has already assigned an instrument, it won't be overriden. The Analyses that don't allow the instrument specified will not be modified. Returns the number of analyses affected
def get_prop_value(name, props, default=None):
    """Return the value of a property, or *default* if absent.

    :param name: name of the property.
    :param props: dictionary of properties (may be None or empty).
    :param default: value returned when the property cannot be found.
    :return: the property value or the default.
    """
    if props:
        try:
            return props[name]
        except KeyError:
            pass
    return default
Returns the value of a property or the default one :param name: Name of a property :param props: Dictionary of properties :param default: Default value :return: The value of the property or the default one
def open_file_like(f, mode):
    """Yield a file handle for *f*, opening (and later closing) it when
    *f* is a path; file-like objects are passed through unchanged and left
    open for the caller.
    """
    should_close = isinstance(f, (str, pathlib.Path))
    handle = open(f, mode) if should_close else f
    try:
        yield handle
    finally:
        # Only close handles we opened ourselves.
        if should_close:
            handle.close()
Wrapper for opening a file
def focusd(task):
    """Fork the current process as a daemon to run *task*.

    :param task: ``Task`` instance for the task to run.
    """
    if registration.get_registered(event_hooks=True, root_access=True):
        # Root-access hooks are registered: start the command server only
        # when actually running as root.
        start_cmd_srv = (os.getuid() == 0)
    else:
        start_cmd_srv = False
    _run = lambda: Focusd(task).run(start_cmd_srv)
    # Daemonize using the task's pidfile and working directory.
    daemonize(get_daemon_pidfile(task), task.task_dir, _run)
Forks the current process as a daemon to run a task. `task` ``Task`` instance for the task to run.
def __ordinal(self, num):
    """Return the ordinal string of *num*: 1 -> '1st', 2 -> '2nd', 11 -> '11th'."""
    # 11-13 (and 111-113, ...) always take 'th', regardless of last digit.
    remainder = num % 100
    if 10 <= remainder < 20:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
    return '{}{}'.format(num, suffix)
Returns the ordinal number of a given integer, as a string. eg. 1 -> 1st, 2 -> 2nd, 3 -> 3rd, etc.
def execute(self, query, *multiparams, **params):
    """Execute a SQL query (string or sqlalchemy expression) with optional
    bound parameters.

    Returns an awaitable context manager wrapping the execution coroutine;
    awaiting it yields the result proxy.
    """
    return _SAConnectionContextManager(
        self._execute(query, *multiparams, **params))
Executes a SQL query with optional parameters. query - a SQL query string or any sqlalchemy expression. *multiparams/**params - represent bound parameter values to be used in the execution. Typically, the format is a dictionary passed to *multiparams: await conn.execute( table.insert(), {"id":1, "value":"v1"}, ) ...or individual key/values interpreted by **params:: await conn.execute( table.insert(), id=1, value="v1" ) In the case that a plain SQL string is passed, a tuple or individual values in \*multiparams may be passed:: await conn.execute( "INSERT INTO table (id, value) VALUES (%d, %s)", (1, "v1") ) await conn.execute( "INSERT INTO table (id, value) VALUES (%s, %s)", 1, "v1" ) Returns ResultProxy instance with results of SQL query execution.
def disable_cors(self):
    """Switch CORS off.

    :returns: the resulting CORS status in JSON format.
    """
    # Fully reset the CORS configuration: disabled, no credentials, and an
    # empty origin list that overwrites whatever was configured before.
    disabled = dict(
        enable_cors=False,
        allow_credentials=False,
        origins=[],
        overwrite_origins=True,
    )
    return self.update_cors_configuration(**disabled)
Switches CORS off. :returns: CORS status in JSON format
def chart(self, x=None, y=None, chart_type=None, opts=None, style=None,
          label=None, options=None, **kwargs):
    """Build a chart and store it on ``self.chart_obj``.

    Delegates to ``self._chart``; on failure the error is reported via
    ``self.err`` instead of propagating.

    :param x, y: data for the chart.
    :param chart_type: type of chart to build.
    :param opts, style, label, options: passed through to ``self._chart``.
    """
    # Use None as the default instead of the original mutable ``{}`` to
    # avoid the shared-mutable-default pitfall; a fresh dict is created
    # per call, so behavior for callers is unchanged.
    if options is None:
        options = {}
    try:
        self.chart_obj = self._chart(x, y, chart_type, opts, style, label,
                                     options=options, **kwargs)
    except Exception as e:
        self.err(e, self.chart, "Can not create chart")
Get a chart
def _lookup_attributes(glyph_name, data):
    """Look up glyph attributes by name, alternative name or production
    name (in that order); return an empty dict when nothing matches.

    The fallback lookups support legacy projects (see issue #232).
    """
    for source in (data.names, data.alternative_names, data.production_names):
        found = source.get(glyph_name)
        if found:
            return found
    return {}
Look up glyph attributes in data by glyph name, alternative name or production name in order or return empty dictionary. Look up by alternative and production names for legacy projects and because of issue #232.
def read_string_data(file, number_values, endianness):
    """Read string raw data: an array of cumulative end offsets followed by
    the contiguous UTF-8 string bytes.

    :param file: binary file object positioned at the offset table.
    :param number_values: number of strings to read.
    :param endianness: byte order used for the offset integers.
    :return: list of decoded strings.
    """
    # Offsets are cumulative end positions; prepend 0 as the first start.
    offsets = [0]
    for _ in range(number_values):
        offsets.append(types.Uint32.read(file, endianness))
    decoded = []
    for start, stop in zip(offsets, offsets[1:]):
        raw = file.read(stop - start)
        decoded.append(raw.decode('utf-8'))
    return decoded
Read string raw data This is stored as an array of offsets followed by the contiguous string data.
def loss(logits, labels):
    """Build the total loss: mean cross entropy plus everything already in
    the 'losses' collection (e.g. weight decay terms added elsewhere).

    Also registers the cross-entropy mean in the 'losses' collection.
    TF1 graph-mode code.

    Args:
      logits: Logits from inference().
      labels: 1-D labels tensor of shape [batch_size].

    Returns:
      Loss tensor of type float.
    """
    labels = tf.cast(labels, tf.int64)
    # Per-example cross entropy on raw (unscaled) logits; the sparse op
    # takes integer class ids, not one-hot vectors.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # Total loss = cross entropy + all other collected losses.
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float.
def read_html_file(filename):
    """Read the contents of an html file in the static html directory.

    :param filename: name of the file inside the ``html/`` directory.
    :return: contents of the specified file.
    """
    # The format template was corrupted to 'html/(unknown)' (no placeholder,
    # so the filename argument was silently ignored); restore the
    # '{filename}' placeholder so the requested file is actually opened.
    path = os.path.join(get_static_directory(),
                        'html/{filename}'.format(filename=filename))
    with open(path) as f:
        return f.read()
Reads the contents of an html file in the html directory @return: Contents of the specified file