Columns: Unnamed: 0 (int64, 0 to 389k) · code (string, lengths 26 to 79.6k) · docstring (string, lengths 1 to 46.9k)
374,100
def visit_BitVecSub(self, expression, *operands): left = expression.operands[0] right = expression.operands[1] if isinstance(left, BitVecAdd): if self._same_constant(left.operands[0], right): return left.operands[1] elif self._same_constant(left.operands[1], right): return left.operands[0]
a - 0 ==> a (a + b) - b ==> a (b + a) - b ==> a
374,101
def serialize(self, content): content = super(JSONPEmitter, self).serialize(content) callback = self.request.GET.get('callback', 'callback') return u'%s(%s)' % (callback, content)
Serialize to JSONP. :return string: serialized JSONP
374,102
def set_privkey_compressed(privkey, compressed=True): if len(privkey) != 64 and len(privkey) != 66: raise ValueError("expected 32-byte private key as a hex string") if compressed and len(privkey) == 64: privkey += '01' if not compressed and len(privkey) == 66: if privkey[-2:] != '01': raise ValueError("private key does not end in 01") privkey = privkey[:-2] return privkey
Make sure the private key given is in the requested compressed or uncompressed form
374,103
def dump(self): for modpath in sorted(self.map): title = '%s:' % modpath print('\n' + title + '\n' + '-' * len(title)) for name, value in sorted(self.map.get(modpath, {}).items()): print('  %s: %s' % (name, ', '.join(sorted(value))))  # formatting string literals reconstructed; the originals were stripped in extraction
Prints out the contents of the import map.
374,104
def element_at(index): if index < 0: raise IndexError("element_at used with illegal index {}".format(index)) def element_at_transducer(reducer): return ElementAt(reducer, index) return element_at_transducer
Create a transducer which obtains the item at the specified index.
374,105
def classinstances(cls): l = [i for i in cls.allinstances() if type(i) == cls] return l
Return all instances of the current class. JB_Gui will not return the instances of subclasses. A subclass will only return the instances that have the same type as the subclass, so it won't return instances of further subclasses. :returns: all instances of the current class :rtype: list :raises: None
374,106
def setWidth(self, typeID, width): self._connection._sendDoubleCmd( tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_WIDTH, typeID, width)
setWidth(string, double) -> None Sets the width in m of vehicles of this type.
374,107
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator): for permission in exceptions_dict: if permission not in self._EXCEPTIONS_KEYS: continue exception_dict = exceptions_dict.get(permission, {}) for urls, url_dict in exception_dict.items(): last_used = url_dict.get('last_used', None) if not last_used: continue primary_url, secondary_url = urls.split(',') event_data = ChromeContentSettingsExceptionsEventData() event_data.permission = permission event_data.primary_url = primary_url event_data.secondary_url = secondary_url timestamp = int(last_used * 1000000) date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_LAST_VISITED) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts site specific events. Args: exceptions_dict (dict): Permission exceptions data from Preferences file. parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
374,108
def decode(self, encoded_packet): b64 = False if not isinstance(encoded_packet, binary_types): encoded_packet = encoded_packet.encode() elif not isinstance(encoded_packet, bytes): encoded_packet = bytes(encoded_packet) self.packet_type = six.byte2int(encoded_packet[0:1]) if self.packet_type == 98: self.binary = True encoded_packet = encoded_packet[1:] self.packet_type = six.byte2int(encoded_packet[0:1]) self.packet_type -= 48 b64 = True elif self.packet_type >= 48: self.packet_type -= 48 self.binary = False else: self.binary = True self.data = None if len(encoded_packet) > 1: if self.binary: if b64: self.data = base64.b64decode(encoded_packet[1:]) else: self.data = encoded_packet[1:] else: try: self.data = self.json.loads( encoded_packet[1:].decode()) if isinstance(self.data, int): raise ValueError except ValueError: self.data = encoded_packet[1:].decode()
Decode a transmitted package.
374,109
def _process_request(self, request, client_address): try: self.finish_request(request, client_address) except Exception: self.handle_error(request, client_address) finally: self.shutdown_request(request)
Actually processes the request.
374,110
def get_task_runner(local_task_job): if _TASK_RUNNER == "StandardTaskRunner": return StandardTaskRunner(local_task_job) elif _TASK_RUNNER == "CgroupTaskRunner": from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner return CgroupTaskRunner(local_task_job) else: raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
Get the task runner that can be used to run the given job. :param local_task_job: The LocalTaskJob associated with the TaskInstance that needs to be executed. :type local_task_job: airflow.jobs.LocalTaskJob :return: The task runner to use to run the task. :rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
374,111
def read(cls, proto): regionImpl = proto.regionImpl.as_struct(cls.getSchema()) return cls.readFromProto(regionImpl)
Calls :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto` on subclass after converting proto to specific type using :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`. :param proto: PyRegionProto capnproto object
374,112
def timeline(self, timeline="home", max_id=None, min_id=None, since_id=None, limit=None): if max_id != None: max_id = self.__unpack_id(max_id) if min_id != None: min_id = self.__unpack_id(min_id) if since_id != None: since_id = self.__unpack_id(since_id) params_initial = locals() if timeline == "local": timeline = "public" params_initial[] = True params = self.__generate_params(params_initial, []) url = .format(timeline) return self.__api_request(, url, params)
Fetch statuses, most recent ones first. `timeline` can be 'home', 'local', 'public', 'tag/hashtag' or 'list/id'. See the respective functions' documentation for what those do. Local hashtag timelines are supported via the `timeline_hashtag()`_ function. The default timeline is the "home" timeline. Media-only queries are supported via the `timeline_public()`_ and `timeline_hashtag()`_ functions. Returns a list of `toot dicts`_.
374,113
def align(s1, s2, test=False, seqfmt='nuc', psm=None, pmm=None, pgo=None, pge=None, matrix=None, outscore=False): import operator from Bio import pairwise2 if seqfmt == 'nuc': if any([p is None for p in [psm, pmm, pgo, pge]]): alignments = pairwise2.align.localxx(s1.upper(), s2.upper()) else: alignments = pairwise2.align.localms(s1.upper(), s2.upper(), psm, pmm, pgo, pge) elif seqfmt == 'pro': from Bio.pairwise2 import format_alignment from Bio.SubsMat import MatrixInfo if matrix is None: matrix = MatrixInfo.blosum62 alignments = pairwise2.align.globaldx(s1, s2, matrix) if test: print(alignments) alignsymb = np.nan score = np.nan sorted_alignments = sorted(alignments, key=operator.itemgetter(2)) for a in alignments: alignstr = pairwise2.format_alignment(*a) alignsymb = alignstr.split('\n')[1] score = a[2] if test: print(alignstr) break if not outscore: return alignsymb.replace(' ', ''), score else: return score  # the 'nuc'/'pro' mode names and other stripped string literals are reconstructed from context
Creates pairwise local alignment between sequences. Get the visualization and alignment scores. :param s1: sequence 1 :param s2: sequence 2 REF: http://biopython.org/DIST/docs/api/Bio.pairwise2-module.html The match parameters are: CODE DESCRIPTION x No parameters. Identical characters have score of 1, otherwise 0. m A match score is the score of identical chars, otherwise mismatch score. d A dictionary returns the score of any pair of characters. c A callback function returns scores. The gap penalty parameters are: CODE DESCRIPTION x No gap penalties. s Same open and extend gap penalties for both sequences. d The sequences have different open and extend gap penalties. c A callback function returns the gap penalties. -- DNA: localms: psm=2,pmm=0.5,pgo=-3,pge=-1): Protein: http://resources.qiagenbioinformatics.com/manuals/clcgenomicsworkbench/650/Use_scoring_matrices.html
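A hypothetical call matching the DNA defaults quoted above; the 'nuc' mode name is an assumption, since the original seqfmt literal was stripped:
aln, score = align('ACGTACGT', 'ACGTCGT', seqfmt='nuc', psm=2, pmm=0.5, pgo=-3, pge=-1)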
374,114
def bind_top_down(lower, upper, __fval=None, **fval): if __fval is not None: fval.update(__fval) upper._overload_fields = upper._overload_fields.copy() upper._overload_fields[lower] = fval
Bind 2 layers for building. When the upper layer is added as a payload of the lower layer, all the arguments # noqa: E501 will be applied to them. ex: >>> bind_top_down(Ether, SNAP, type=0x1234) >>> Ether()/SNAP() <Ether type=0x1234 |<SNAP |>>
374,115
def unsplit_query(query): def unsplit_assignment(assignment): x, y = assignment if (x is not None) and (y is not None): return x + '=' + y elif x is not None: return x elif y is not None: return '=' + y else: return '' return '&'.join(map(unsplit_assignment, query))
Create a query string from a tuple-based query in the format returned by split_query()
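A hypothetical round-trip, assuming the '=' and '&' separators reconstructed in the code above, with None marking a missing side of an assignment:
unsplit_query([('a', '1'), ('b', None), (None, '2')])  # -> 'a=1&b&=2'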
374,116
def autoExpand(self, level=None): return self._autoExpand.get(level, self._autoExpand.get(None, False))
Returns whether or not to expand for the given level. :param level | <int> || None :return <bool>
374,117
def get(self, url): r = requests.get(self._format_url(url), headers=self.headers, timeout=TIMEOUT) self._check_response(r, 200) return r.json()
Do a GET request
374,118
def destroy(name, call=None): if call == 'function': raise SaltCloudSystemExit('The destroy action must be called with -d, --destroy, -a or --action.') __utils__['cloud.fire_event']('event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) vm_properties = ["name", "summary.runtime.powerState"] vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: if vm["name"] == name: if vm["summary.runtime.powerState"] != "poweredOff": try: log.info('Powering Off VM %s', name) task = vm["object"].PowerOff() salt.utils.vmware.wait_for_task(task, name, 'power off') except Exception as exc: log.error('Error while powering off VM %s: %s', name, exc, exc_info_on_loglevel=logging.DEBUG) return 'failed to destroy' try: log.info('Destroying VM %s', name) task = vm["object"].Destroy_Task() salt.utils.vmware.wait_for_task(task, name, 'destroy') except Exception as exc: log.error('Error while destroying VM %s: %s', name, exc, exc_info_on_loglevel=logging.DEBUG) return 'failed to destroy' __utils__['cloud.fire_event']('event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) return True  # stripped string literals reconstructed following salt-cloud event/log conventions
To destroy a VM from the VMware environment CLI Example: .. code-block:: bash salt-cloud -d vmname salt-cloud --destroy vmname salt-cloud -a destroy vmname
374,119
def _get_dependency_order(g, node_list): access_ = accessibility(g) deps = dict((k, set(v) - set([k])) for k, v in access_.iteritems()) nodes = node_list + list(set(g.nodes()) - set(node_list)) ordered_nodes = [] while nodes: n_ = nodes[0] n_deps = deps.get(n_) if (n_ in ordered_nodes) or (n_deps is None): nodes = nodes[1:] continue moved = False for i, n in enumerate(nodes[1:]): if n in n_deps: nodes = [nodes[i + 1]] + nodes[:i + 1] + nodes[i + 2:] moved = True break if not moved: ordered_nodes.append(n_) nodes = nodes[1:] return ordered_nodes
Return list of nodes as close as possible to the ordering in node_list, but with child nodes earlier in the list than parents.
374,120
def computeRange(corners): x = corners[:, 0] y = corners[:, 1] _xrange = (np.minimum.reduce(x), np.maximum.reduce(x)) _yrange = (np.minimum.reduce(y), np.maximum.reduce(y)) return _xrange, _yrange
Determine the range spanned by an array of pixel positions.
374,121
def parallel_starfeatures_lcdir(lcdir, outdir, lc_catalog_pickle, neighbor_radius_arcsec, fileglob=None, maxobjects=None, deredden=True, custom_bandpasses=None, lcformat='hat-sql', lcformatdir=None, nworkers=NCPUS, recursive=True): try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None if not fileglob: fileglob = dfileglob LOGINFO('searching for %s light curves in %s ...' % (lcformat, lcdir)) if recursive is False: matching = glob.glob(os.path.join(lcdir, fileglob)) else: matching = glob.glob(os.path.join(lcdir, '**', fileglob), recursive=True) if matching and len(matching) > 0: LOGINFO('found %s light curves, getting starfeatures...' % len(matching)) return parallel_starfeatures(matching, outdir, lc_catalog_pickle, neighbor_radius_arcsec, deredden=deredden, custom_bandpasses=custom_bandpasses, maxobjects=maxobjects, lcformat=lcformat, lcformatdir=lcformatdir, nworkers=nworkers) else: LOGERROR("no light curve files in '%s' format found in '%s'" % (lcformat, lcdir)) return None  # body reconstructed from the recoverable fragments; this cell was garbled with interleaved docstring text in extraction
This runs parallel star feature extraction for a directory of LCs. Parameters ---------- lcdir : str The directory to search for light curves. outdir : str The output directory where the results will be placed. lc_catalog_pickle : str The path to a catalog pickle containing a dict with at least: - an object ID array accessible with `dict['objects']['objectid']` - an LC filename array accessible with `dict['objects']['lcfname']` - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding neighbors for each object accessible with `dict['kdtree']` A catalog pickle of the form needed can be produced using :py:func:`astrobase.lcproc.catalogs.make_lclist` or :py:func:`astrobase.lcproc.catalogs.filter_lclist`. neighbor_radius_arcsec : float This indicates the radius in arcsec to search for neighbors for this object using the light curve catalog's `kdtree`, `objlist`, `lcflist`, and in GAIA. fileglob : str The UNIX file glob to use to search for the light curves in `lcdir`. If None, the default value for the light curve format specified will be used. maxobjects : int The number of objects to process from `lclist`. deredden : bool This controls if the colors and any color classifications will be dereddened using 2MASS DUST. custom_bandpasses : dict or None This is a dict used to define any custom bandpasses in the `in_objectinfo` dict you want to make this function aware of and generate colors for. Use the format below for this dict:: { '<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>', 'label':'<band_label_1>', 'colors':[['<bandkey1>-<bandkey2>', '<BAND1> - <BAND2>'], ['<bandkey3>-<bandkey4>', '<BAND3> - <BAND4>']]}, . ... . '<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>', 'label':'<band_label_N>', 'colors':[['<bandkey1>-<bandkey2>', '<BAND1> - <BAND2>'], ['<bandkey3>-<bandkey4>', '<BAND3> - <BAND4>']]}, } Where: `bandpass_key` is a key to use to refer to this bandpass in the `objectinfo` dict, e.g. 'sdssg' for SDSS g band `twomass_dust_key` is the key to use in the 2MASS DUST result table for reddening per band-pass. For example, given the following DUST result table (using http://irsa.ipac.caltech.edu/applications/DUST/):: |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD| |char |float |float |float |float |float| | |microns| |mags | |mags | CTIO U 0.3734 4.107 0.209 4.968 0.253 CTIO B 0.4309 3.641 0.186 4.325 0.221 CTIO V 0.5517 2.682 0.137 3.240 0.165 . . ... The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to skip DUST lookup and want to pass in a specific reddening magnitude for your bandpass, use a float for the value of `twomass_dust_key`. If you want to skip DUST lookup entirely for this bandpass, use None for the value of `twomass_dust_key`. `band_label` is the label to use for this bandpass, e.g. 'W1' for WISE-1 band, 'u' for SDSS u, etc. The 'colors' list contains color definitions for all colors you want to generate using this bandpass. This list contains elements of the form:: ['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>'] where the first item is the bandpass keys making up this color, and the second item is the label for this color to be used by the frontends. An example:: ['sdssu-sdssg','u - g'] lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory where you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. nworkers : int The number of parallel workers to launch. Returns ------- dict A dict with key:val pairs of the input light curve filename and the output star features pickle for each LC processed.
374,122
def _convert_iterable(self, iterable): if not callable(self._wrapper): return iterable return [self._wrapper(x) for x in iterable]
Converts elements returned by an iterable into instances of self._wrapper
374,123
def list_pools(self): search_opts = {'router:external': True} return [FloatingIpPool(pool) for pool in self.client.list_networks(**search_opts).get('networks')]
Fetches a list of all floating IP pools. :returns: List of FloatingIpPool objects
374,124
def max_age(self, value): option = Option() option.number = defines.OptionRegistry.MAX_AGE.number option.value = int(value) self.del_option_by_number(defines.OptionRegistry.MAX_AGE.number) self.add_option(option)
Set the MaxAge of the response. :type value: int :param value: the MaxAge option
374,125
def get_possible_initializer_keys( cls, use_peepholes=False, use_batch_norm_h=True, use_batch_norm_x=False, use_batch_norm_c=False): possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy() if not use_peepholes: possible_keys.difference_update( {cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG}) if not use_batch_norm_h: possible_keys.remove(cls.GAMMA_H) if not use_batch_norm_x: possible_keys.remove(cls.GAMMA_X) if not use_batch_norm_c: possible_keys.difference_update({cls.GAMMA_C, cls.BETA_C}) return possible_keys
Returns the keys the dictionary of variable initializers may contain. The set of all possible initializer keys are: w_gates: weight for gates b_gates: bias of gates w_f_diag: weight for prev_cell -> forget gate peephole w_i_diag: weight for prev_cell -> input gate peephole w_o_diag: weight for prev_cell -> output gate peephole gamma_h: batch norm scaling for previous_hidden -> gates gamma_x: batch norm scaling for input -> gates gamma_c: batch norm scaling for cell -> output beta_c: batch norm bias for cell -> output Args: cls:The class. use_peepholes: Boolean that indicates whether peephole connections are used. use_batch_norm_h: Boolean that indicates whether to apply batch normalization at the previous_hidden -> gates contribution. If you are experimenting with batch norm then this may be the most effective to turn on. use_batch_norm_x: Boolean that indicates whether to apply batch normalization at the input -> gates contribution. use_batch_norm_c: Boolean that indicates whether to apply batch normalization at the cell -> output contribution. Returns: Set with strings corresponding to the strings that may be passed to the constructor.
374,126
def all_coarse_grains_for_blackbox(blackbox): for partition in all_partitions(blackbox.output_indices): for grouping in all_groupings(partition): coarse_grain = CoarseGrain(partition, grouping) try: validate.blackbox_and_coarse_grain(blackbox, coarse_grain) except ValueError: continue yield coarse_grain
Generator over all |CoarseGrains| for the given blackbox. If a box has multiple outputs, those outputs are partitioned into the same coarse-grain macro-element.
374,127
def minion_sign_in_payload(self): payload = {} payload['cmd'] = '_auth' payload['id'] = self.opts['id'] if 'autosign_grains' in self.opts: autosign_grains = {} for grain in self.opts['autosign_grains']: autosign_grains[grain] = self.opts['grains'].get(grain, None) payload['autosign_grains'] = autosign_grains try: pubkey_path = os.path.join(self.opts['pki_dir'], self.mpub) pub = get_rsa_pub_key(pubkey_path) if HAS_M2: payload['token'] = pub.public_encrypt(self.token, RSA.pkcs1_oaep_padding) else: cipher = PKCS1_OAEP.new(pub) payload['token'] = cipher.encrypt(self.token) except Exception: pass with salt.utils.files.fopen(self.pub_path) as f: payload['pub'] = f.read() return payload
Generates the payload used to authenticate with the master server. This payload consists of the passed in id_ and the ssh public key to encrypt the AES key sent back from the master. :return: Payload dictionary :rtype: dict
374,128
def plot_sections(self, fout_dir=".", **kws_usr): kws_plt, _ = self._get_kws_plt(None, **kws_usr) PltGroupedGos(self).plot_sections(fout_dir, **kws_plt)
Plot groups of GOs which have been placed in sections.
374,129
def hexedit(x): x = bytes(x) fname = get_temp_file() with open(fname,"wb") as f: f.write(x) subprocess.call([conf.prog.hexedit, fname]) with open(fname, "rb") as f: x = f.read() return x
Run external hex editor on a packet or bytes. Set editor in conf.prog.hexedit
374,130
def metadata(self): params = { self.PCTYPE: self.CTYPE_XML } response = self.call(self.CGI_BUG, params) return response
Get metadata information in XML format.
374,131
def build_option_parser(parser): parser.add_argument("--os-data-processing-api-version", metavar="<data-processing-api-version>", default=utils.env('OS_DATA_PROCESSING_API_VERSION', default=DEFAULT_DATA_PROCESSING_API_VERSION), help=("Data processing API version, default=" + DEFAULT_DATA_PROCESSING_API_VERSION + ' (Env: OS_DATA_PROCESSING_API_VERSION)')) parser.add_argument("--os-data-processing-url", default=utils.env("OS_DATA_PROCESSING_URL"), help=("Data processing API URL, " "(Env: OS_DATA_PROCESSING_API_URL)")) return parser
Hook to add global options.
374,132
def reducing(reducer, init=UNSET): reducer2 = reducer def reducing_transducer(reducer): return Reducing(reducer, reducer2, init) return reducing_transducer
Create a reducing transducer with the given reducer. Args: reducer: A two-argument function which will be used to combine the partial cumulative result in the first argument with the next item from the input stream in the second argument. Returns: A reducing transducer: A single argument function which, when passed a reducing function, returns a new reducing function which entirely reduces the input stream using 'reducer' before passing the result to the reducing function passed to the transducer.
374,133
def adsSyncReadReqEx2( port, address, index_group, index_offset, data_type, return_ctypes=False ): sync_read_request = _adsDLL.AdsSyncReadReqEx2 ams_address_pointer = ctypes.pointer(address.amsAddrStruct()) index_group_c = ctypes.c_ulong(index_group) index_offset_c = ctypes.c_ulong(index_offset) if data_type == PLCTYPE_STRING: data = (STRING_BUFFER * PLCTYPE_STRING)() else: data = data_type() data_pointer = ctypes.pointer(data) data_length = ctypes.c_ulong(ctypes.sizeof(data)) bytes_read = ctypes.c_ulong() bytes_read_pointer = ctypes.pointer(bytes_read) error_code = sync_read_request( port, ams_address_pointer, index_group_c, index_offset_c, data_length, data_pointer, bytes_read_pointer, ) if error_code: raise ADSError(error_code) if data_type != PLCTYPE_STRING and bytes_read.value != data_length.value: raise RuntimeError( "Insufficient data (expected {0} bytes, {1} were read).".format( data_length.value, bytes_read.value ) ) if return_ctypes: return data if data_type == PLCTYPE_STRING: return data.value.decode("utf-8") if type(data_type).__name__ == "PyCArrayType": return [i for i in data] if hasattr(data, "value"): return data.value return data
Read data synchronous from an ADS-device. :param int port: local AMS port as returned by adsPortOpenEx() :param pyads.structs.AmsAddr address: local or remote AmsAddr :param int index_group: PLC storage area, according to the INDEXGROUP constants :param int index_offset: PLC storage address :param Type data_type: type of the data given to the PLC, according to PLCTYPE constants :param bool return_ctypes: return ctypes instead of python types if True (default: False) :rtype: data_type :return: value: **value**
374,134
def infer_call(self, context=None): callcontext = contextmod.copy_context(context) callcontext.callcontext = contextmod.CallContext( args=self.args, keywords=self.keywords ) callcontext.boundnode = None if context is not None: callcontext.extra_context = _populate_context_lookup(self, context.clone()) for callee in self.func.infer(context): if callee is util.Uninferable: yield callee continue try: if hasattr(callee, "infer_call_result"): yield from callee.infer_call_result(caller=self, context=callcontext) except exceptions.InferenceError: continue return dict(node=self, context=context)
infer a Call node by trying to guess what the function returns
374,135
def _get_cache_key(self): keys = list(self.params.keys()) keys.sort() cache_key = str() for key in keys: if key != "api_sig" and key != "api_key" and key != "sk": cache_key += key + self.params[key] return hashlib.sha1(cache_key.encode("utf-8")).hexdigest()
The cache key is a string of concatenated sorted names and values.
374,136
def checkQueryRange(self, start, end): condition = ( (start < 0 or end > self.getLength()) or start > end or start == end) if condition: raise exceptions.ReferenceRangeErrorException( self.getId(), start, end)
Checks to ensure that the query range is valid within this reference. If not, raise ReferenceRangeErrorException.
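A sketch of the failure modes, assuming `ref` is a reference of length 100 (the object name is illustrative):
ref.checkQueryRange(0, 100)  # valid
ref.checkQueryRange(10, 5)   # start > end -> raises ReferenceRangeErrorException
ref.checkQueryRange(10, 10)  # empty window -> raises ReferenceRangeErrorException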
374,137
def taskfile_created_data(file_, role): if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole: dt = file_.date_created return dt_to_qdatetime(dt)
Return the data for created date :param file_: the file that holds the data :type file_: :class:`jukeboxcore.djadapter.models.File` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the created date :rtype: depending on role :raises: None
374,138
def path(self): if (self.source_file.startswith('a/') and self.target_file.startswith('b/')): filepath = self.source_file[2:] elif (self.source_file.startswith('a/') and self.target_file == '/dev/null'): filepath = self.source_file[2:] elif (self.target_file.startswith('b/') and self.source_file == '/dev/null'): filepath = self.target_file[2:] else: filepath = self.source_file return filepath
Return the file path abstracted from VCS.
374,139
def add_prefix(self, ncname: str) -> None: if ncname not in self.prefixmap: uri = cu.expand_uri(ncname + ':', self.curi_maps) if uri and '://' in uri: self.prefixmap[ncname] = uri else: print(f"Unrecognized prefix: {ncname}", file=sys.stderr) self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
Look up ncname and add it to the prefix map if necessary @param ncname: name to add
374,140
def _ensure_create_ha_compliant(self, router, router_type): details = router.pop(ha.DETAILS, {}) if details == ATTR_NOT_SPECIFIED: details = {} res = {ha.ENABLED: router.pop(ha.ENABLED, ATTR_NOT_SPECIFIED), ha.DETAILS: details} if not is_attr_set(res[ha.ENABLED]): res[ha.ENABLED] = router_type['ha_enabled_by_default'] if res[ha.ENABLED] and not cfg.CONF.ha.ha_support_enabled: raise ha.HADisabled() if not res[ha.ENABLED]: return res if not is_attr_set(details.get(ha.TYPE, ATTR_NOT_SPECIFIED)): details[ha.TYPE] = cfg.CONF.ha.default_ha_mechanism if details[ha.TYPE] in cfg.CONF.ha.disabled_ha_mechanisms: raise ha.HADisabledHAType(ha_type=details[ha.TYPE]) if not is_attr_set(details.get(ha.REDUNDANCY_LEVEL, ATTR_NOT_SPECIFIED)): details[ha.REDUNDANCY_LEVEL] = ( cfg.CONF.ha.default_ha_redundancy_level) if not is_attr_set(details.get(ha.PROBE_CONNECTIVITY, ATTR_NOT_SPECIFIED)): details[ha.PROBE_CONNECTIVITY] = ( cfg.CONF.ha.connectivity_probing_enabled_by_default) if not is_attr_set(details.get(ha.PROBE_TARGET, ATTR_NOT_SPECIFIED)): details[ha.PROBE_TARGET] = cfg.CONF.ha.default_probe_target if not is_attr_set(details.get(ha.PROBE_INTERVAL, ATTR_NOT_SPECIFIED)): details[ha.PROBE_INTERVAL] = cfg.CONF.ha.default_ping_interval return res  # the router_type key name is reconstructed; the original literal was stripped
To be called in create_router() BEFORE router is created in DB.
374,141
def person_details(self, person_id, standardize=False): resp = self._request(path.join(ENDPOINTS['DIRECTORY'], person_id)) if standardize: resp['result_data'] = [self.standardize(res) for res in resp['result_data']] return resp  # endpoint and result-key names reconstructed; the original literals were stripped
Get a detailed person object :param person_id: String corresponding to the person's id. >>> instructor = d.person('jhs878sfd03b38b0d463b16320b5e438')
374,142
def get_start_time(self): if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA: dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION else: dwAccess = win32.PROCESS_QUERY_INFORMATION hProcess = self.get_handle(dwAccess) CreationTime = win32.GetProcessTimes(hProcess)[0] return win32.FileTimeToSystemTime(CreationTime)
Determines when has this process started running. @rtype: win32.SYSTEMTIME @return: Process start time.
374,143
def get(cls, name, raise_exc=True): element = cls.objects.filter(name, exact_match=True).first() if name is not None else None if not element and raise_exc: raise ElementNotFound('Cannot find specified element: %s, type: %s' % (name, cls.__name__)) return element
Get the element by name. Does an exact match by element type. :param str name: name of element :param bool raise_exc: optionally disable exception. :raises ElementNotFound: if element does not exist :rtype: Element
374,144
def nth(lst, n): expect_type(n, (String, Number), unit=None) if isinstance(n, String): if n.value.lower() == 'first': i = 0 elif n.value.lower() == 'last': i = -1 else: raise ValueError("Invalid index %r" % (n,)) else: i = n.to_python_index(len(lst), circular=True) return lst[i]
Return the nth item in the list.
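Hypothetical usage, assuming pyScss-style String/Number value types are importable alongside nth:
nth(['a', 'b', 'c'], String('first'))  # -> 'a'
nth(['a', 'b', 'c'], String('last'))   # -> 'c'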
374,145
def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False, version_id=None, override_num_retries=None, response_headers=None, callback=None): if cb: if num_cb > 2: cb_count = self.size / self.BufferSize / (num_cb-2) elif num_cb < 0: cb_count = -1 else: cb_count = 0 i = total_bytes = 0 cb(total_bytes, self.size) save_debug = self.bucket.connection.debug if self.bucket.connection.debug == 1: self.bucket.connection.debug = 0 query_args = [] if torrent: query_args.append('torrent') override_num_retries=override_num_retries, callback=file_got)
Retrieves a file from an S3 Key :type fp: file :param fp: File pointer to put the data into :type headers: string :param headers: headers to send when retrieving the files :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the to be transmitted object. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type torrent: bool :param torrent: Flag for whether to get a torrent for the file :type override_num_retries: int :param override_num_retries: If not None will override configured num_retries parameter for underlying GET. :type response_headers: dict :param response_headers: A dictionary containing HTTP headers/values that will override any headers associated with the stored object in the response. See http://goo.gl/EWOPb for details.
374,146
def act(self, event, *args, **kwargs): if not isinstance(event, LifeCycleEvents): raise ValueError("'event' must be an instance of LifeCycleEvents class") method_name = "on_" + event.name for plugin in self._plugins: if not hasattr(plugin, method_name): raise NameError("'{}' method is not found in the plugin with name '{}'".format(method_name, plugin.name)) try: getattr(plugin, method_name)(*args, **kwargs) except InvalidResourceException as ex: raise ex except Exception as ex: logging.exception("Plugin '%s' raised an exception: %s", plugin.name, ex) raise ex
Act on the specific life cycle event. The action here is to invoke the hook function on all registered plugins. *args and **kwargs will be passed directly to the plugin's hook functions :param samtranslator.plugins.LifeCycleEvents event: Event to act upon :return: Nothing :raises ValueError: If event is not a valid life cycle event :raises NameError: If a plugin does not have the hook method defined :raises Exception: Any exception that a plugin raises
374,147
def estimate_maximum_read_length(fastq_file, quality_format="fastq-sanger", nreads=1000): in_handle = SeqIO.parse(open_fastq(fastq_file), quality_format) lengths = [] for _ in range(nreads): try: lengths.append(len(next(in_handle).seq)) except StopIteration: break in_handle.close() return max(lengths)
estimate the maximum read length of a fastq file from its first `nreads` reads
374,148
def set_attribute(self, element, attribute, value): self.browser.execute_script('arguments[0].setAttribute("%s", %s);' % (attribute, self.__type2js(value=value)), element)  # script template reconstructed; the original literal was stripped
:Description: Modify the given attribute of the target element. :param element: Element for browser instance to target. :type element: WebElement :param attribute: Attribute of target element to modify. :type attribute: string :param value: Value of target element's attribute to modify. :type value: None, bool, int, float, string
374,149
def remove_translation(self, context_id, translation_id): return self.context.deleteAddressTranslation(translation_id, id=context_id)
Removes a translation entry from a tunnel context. :param int context_id: The id-value representing the context instance. :param int translation_id: The id-value representing the translation. :return bool: True if translation entry removal was successful.
374,150
def put(self, data, block=True): self.start(test_connection=False) while True: response = self._req_rep(QueuingServerMessageListener.SPACE) if response == QueuingServerMessageListener.SPACE_AVAILABLE: self._req_rep((QueuingServerMessageListener.DATA, data)) break else: time.sleep(0.01)
If there is space, sends data to the server. If there is no space in the queue, it retries every 10 milliseconds until space becomes available.
374,151
def createFile( self, fileName, desiredAccess, shareMode, creationDisposition, flagsAndAttributes, dokanFileInfo, ): return self.operations('createFile', fileName)
Creates a file. :param fileName: name of file to create :type fileName: ctypes.c_wchar_p :param desiredAccess: desired access flags :type desiredAccess: ctypes.c_ulong :param shareMode: share mode flags :type shareMode: ctypes.c_ulong :param creationDisposition: creation disposition flags :type creationDisposition: ctypes.c_ulong :param flagsAndAttributes: creation flags and attributes :type flagsAndAttributes: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
374,152
def column_names(self): if self._is_vertex_frame(): return self.__graph__.__proxy__.get_vertex_fields() elif self._is_edge_frame(): return self.__graph__.__proxy__.get_edge_fields()
Returns the column names. Returns ------- out : list[string] Column names of the SFrame.
374,153
def set_value(self, instance, value, parent=None): self.resolve_base(instance) value = self.deserialize(value, parent) instance.values[self.alias] = value self._trigger_changed(instance, value)
Set prop value :param instance: :param value: :param parent: :return:
374,154
def query_file(self, path, fetchall=False, **params): with self.get_connection() as conn: return conn.query_file(path, fetchall, **params)
Like Database.query, but takes a filename to load a query from.
374,155
def get_saved_rules(conf_file=None): if _conf() and not conf_file: conf_file = _conf() with salt.utils.files.fopen(conf_file) as fp_: lines = salt.utils.data.decode(fp_.readlines()) rules = [] for line in lines: tmpline = line.strip() if not tmpline: continue if tmpline.startswith('#'): continue rules.append(line) return rules
Return a data structure of the rules in the conf file CLI Example: .. code-block:: bash salt '*' nftables.get_saved_rules
374,156
def _broadcast_shapes(s1, s2): n1 = len(s1) n2 = len(s2) n = max(n1, n2) res = [1] * n for i in range(n): if i >= n1: c1 = 1 else: c1 = s1[n1-1-i] if i >= n2: c2 = 1 else: c2 = s2[n2-1-i] if c1 == 1: rc = c2 elif c2 == 1 or c1 == c2: rc = c1 else: raise ValueError('shapes %s and %s are not broadcastable' % (s1, s2)) res[n-1-i] = rc return tuple(res)
Given array shapes `s1` and `s2`, compute the shape of the array that would result from broadcasting them together.
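A quick check of the broadcasting rule described above against NumPy's own machinery (NumPy assumed available):
import numpy as np
assert np.broadcast(np.empty((3, 1, 5)), np.empty((4, 5))).shape == (3, 4, 5)  # same result as _broadcast_shapes((3, 1, 5), (4, 5))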
374,157
def to_type(self, dtype: type, *cols, **kwargs): try: allcols = self.df.columns.values for col in cols: if col not in allcols: self.err("Column " + col + " not found") return self.df[col] = self.df[col].astype(dtype, **kwargs) except Exception as e: self.err(e, "Can not convert to type")
Convert colums values to a given type in the main dataframe :param dtype: a type to convert to: ex: ``str`` :type dtype: type :param \*cols: names of the colums :type \*cols: str, at least one :param \*\*kwargs: keyword arguments for ``df.astype`` :type \*\*kwargs: optional :example: ``ds.to_type(str, "mycol")``
374,158
def get_formfield(model, field): class_field = model._meta.get_field(field) if hasattr(class_field, "field"): formfield = class_field.field.formfield() else: formfield = class_field.formfield() if isinstance(formfield, ChoiceField): formfield.choices = class_field.get_choices() return formfield
Return the formfield associated with the field of the model
374,159
def get_toolbar_buttons(self): buttons = [] if self.stop_button is None: self.stop_button = create_toolbutton( self, text=_("Stop"), icon=self.stop_icon, tip=_("Stop the current command")) self.disable_stop_button() self.stop_button.clicked.connect(self.stop_button_click_handler) if is_dark_interface(): self.stop_button.setStyleSheet("QToolButton{padding: 3px;}") if self.stop_button is not None: buttons.append(self.stop_button) if self.reset_button is None: self.reset_button = create_toolbutton( self, text=_("Remove"), icon=ima.icon('editdelete'), tip=_("Remove all variables"), triggered=self.reset_namespace) if is_dark_interface(): self.reset_button.setStyleSheet("QToolButton{padding: 3px;}") if self.reset_button is not None: buttons.append(self.reset_button) if self.options_button is None: options = self.get_options_menu() if options: self.options_button = create_toolbutton(self, text=_('Options'), icon=ima.icon('tooloptions')) self.options_button.setPopupMode(QToolButton.InstantPopup) menu = QMenu(self) add_actions(menu, options) self.options_button.setMenu(menu) if self.options_button is not None: buttons.append(self.options_button) return buttons
Return toolbar buttons list.
374,160
def shrink(self, fraction=0.85): poly = self.polydata(True) shrink = vtk.vtkShrinkPolyData() shrink.SetInputData(poly) shrink.SetShrinkFactor(fraction) shrink.Update() return self.updateMesh(shrink.GetOutput())
Shrink the triangle polydata in the representation of the input mesh. Example: .. code-block:: python from vtkplotter import * pot = load(datadir + 'shapes/teapot.vtk').shrink(0.75) s = Sphere(r=0.2).pos(0,0,-0.5) show(pot, s) |shrink| |shrink.py|_
374,161
def add_replica(self, partition_name, count=1): try: partition = self.cluster_topology.partitions[partition_name] except KeyError: raise InvalidPartitionError( "Partition name {name} not found".format(name=partition_name), ) if partition.replication_factor + count > len(self.cluster_topology.brokers): raise InvalidReplicationFactorError( "Cannot increase replication factor to {0}. There are only " "{1} brokers." .format( partition.replication_factor + count, len(self.cluster_topology.brokers), ) ) non_full_rgs = [ rg for rg in self.cluster_topology.rgs.values() if rg.count_replica(partition) < len(rg.brokers) ] for _ in range(count): total_replicas = sum( rg.count_replica(partition) for rg in non_full_rgs ) opt_replicas, _ = compute_optimum( len(non_full_rgs), total_replicas, ) under_replicated_rgs = [ rg for rg in non_full_rgs if rg.count_replica(partition) < opt_replicas ] candidate_rgs = under_replicated_rgs or non_full_rgs rg = min(candidate_rgs, key=lambda rg: len(rg.partitions)) rg.add_replica(partition) if rg.count_replica(partition) >= len(rg.brokers): non_full_rgs.remove(rg)
Increase the replication-factor for a partition. The replication-group to add to is determined as follows: 1. Find all replication-groups that have brokers not already replicating the partition. 2. Of these, find replication-groups that have fewer than the average number of replicas for this partition. 3. Choose the replication-group with the fewest overall partitions. :param partition_name: (topic_id, partition_id) of the partition to add replicas of. :param count: The number of replicas to add. :raises InvalidReplicationFactorError when the resulting replication factor is greater than the number of brokers in the cluster.
374,162
def eval_gpr(expr, knockouts): if isinstance(expr, Expression): return eval_gpr(expr.body, knockouts) elif isinstance(expr, Name): return expr.id not in knockouts elif isinstance(expr, BoolOp): op = expr.op if isinstance(op, Or): return any(eval_gpr(i, knockouts) for i in expr.values) elif isinstance(op, And): return all(eval_gpr(i, knockouts) for i in expr.values) else: raise TypeError("unsupported operation " + op.__class__.__name__) elif expr is None: return True else: raise TypeError("unsupported operation " + repr(expr))
evaluate compiled ast of gene_reaction_rule with knockouts Parameters ---------- expr : Expression The ast of the gene reaction rule knockouts : DictList, set Set of genes that are knocked out Returns ------- bool True if the gene reaction rule is true with the given knockouts otherwise false
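A minimal sketch of evaluating a rule, assuming it is compiled with Python's ast module in 'eval' mode as cobra does:
import ast
expr = ast.parse('(g1 and g2) or g3', mode='eval')
eval_gpr(expr, {'g2'})        # True: the g3 branch of the Or is still active
eval_gpr(expr, {'g2', 'g3'})  # False: both branches are knocked out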
374,163
def _is_valid_datatype(datatype_instance): global _simple_type_remap if datatype_instance in _simple_type_remap: return True if isinstance(datatype_instance, (Int64, Double, String, Array)): return True elif isinstance(datatype_instance, Dictionary): kt = datatype_instance.key_type if isinstance(kt, (Int64, String)): return True return False
Returns true if datatype_instance is a valid datatype object and false otherwise.
374,164
def twitch_receive_messages(self): self._push_from_buffer() result = [] while True: try: msg = self.s.recv(4096).decode() except socket.error as e: err = e.args[0] if err == errno.EAGAIN or err == errno.EWOULDBLOCK: return result else: self.connect() return result else: if self.verbose: print(msg) rec = [self._parse_message(line) for line in filter(None, msg.split('\r\n'))] rec = [r for r in rec if r] result.extend(rec)
Call this function to process everything received by the socket. This needs to be called frequently enough (~10s) because Twitch logs off users not replying to ping commands. :return: list of chat messages received. Each message is a dict with the keys ['channel', 'username', 'message']
374,165
def create_from_tuple(cls, volume): if isinstance(volume, six.string_types): return Volume(target=volume) elif len(volume) == 2: return Volume(source=volume[0], target=volume[1]) elif len(volume) == 3: return Volume(source=volume[0], target=volume[1], mode=volume[2]) else: logger.debug("Cannot create volume instance from {}." "It has to be tuple of form target x source,target x source,target,mode.".format(volume)) raise ConuException("Cannot create volume instance.")
Create instance from tuple. :param volume: tuple in one of the following forms: target | source,target | source,target,mode :return: instance of Volume
374,166
def roll(self, count=0, func=sum): if count: return [func([die.roll() for die in self._dice]) for x in range(0, count)] else: return func([die.roll() for die in self._dice])
Roll some dice! :param count: [0] Return list of sums :param func: [sum] Apply func to list of individual die rolls func([]) :return: A single sum or list of ``count`` sums
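Hypothetical usage, assuming a bag object wrapping two six-sided dice (the Dice/Die constructor names are assumptions):
d = Dice(Die(6), Die(6))
d.roll()          # one sum of both dice, e.g. 7
d.roll(count=3)   # a list of three such sums, e.g. [5, 9, 6]
d.roll(func=max)  # the higher individual die instead of the sum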
374,167
def get_tuple(nuplet, index, default=None): if nuplet is None: return default try: return nuplet[index] except IndexError: return default
:param tuple nuplet: A tuple :param int index: An index :param default: An optional default value :return: ``nuplet[index]`` if defined, else ``default`` (possibly ``None``)
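For example:
t = ('a', 'b')
get_tuple(t, 0)             # -> 'a'
get_tuple(t, 5, 'missing')  # -> 'missing'
get_tuple(None, 0)          # -> None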
374,168
def _get_property_values_with_defaults(self, classname, property_values): final_values = self.get_default_property_values(classname) final_values.update(property_values) return final_values
Return the property values for the class, with default values applied where needed.
374,169
def get_objects_from_from_queues(self): _t0 = time.time() had_some_objects = False for module in self.modules_manager.get_external_instances(): queue = module.from_q if not queue: continue while True: queue_size = queue.qsize() if queue_size: statsmgr.gauge('queues.from.%s.size' % module.get_name(), queue_size) try: obj = queue.get_nowait() except Full: logger.warning("Module %s from queue is full", module.get_name()) except Empty: break except (IOError, EOFError) as exp: logger.warning("Module %s from queue is no more available: %s", module.get_name(), str(exp)) except Exception as exp: logger.error("An external module queue got a problem: %s", str(exp)) else: had_some_objects = True self.add(obj) statsmgr.timer('queues.from.time', time.time() - _t0) return had_some_objects  # metric names reconstructed; the original literals were stripped
Get objects from "from" queues and add them. :return: True if we got something in the queue, False otherwise. :rtype: bool
374,170
def act(self): g = get_root(self).globals g.clog.debug('Stop button pressed') self.stopping = False self.stopped_ok = False t = threading.Thread(target=stop_in_background) t.daemon = True t.start() self.after(500, self.check)
Carries out the action associated with Stop button
374,171
def p_propertyDeclaration_3(p): p[0] = CIMProperty(p[2], None, type=p[1], is_array=True, array_size=p[3])
propertyDeclaration_3 : dataType propertyName array ';'
374,172
def send_feedback(self, document_id: str, feedback: List[Field]) -> dict: return self.post_document_id(document_id, feedback)
Send feedback to the model. This method takes care of sending feedback related to document specified by document_id. Feedback consists of ground truth values for the document specified as a list of Field instances. >>> from las import ApiClient >>> api_client = ApiClient(endpoint='<api endpoint>') >>> feedback = [Field(label='total_amount', value='120.00'), Field(label='purchase_date', value='2019-03-10')] >>> api_client.send_feedback('<document id>', feedback) :param document_id: The document id of the document that will receive the feedback :type document_id: str :param feedback: A list of :py:class:`~las.Field` representing the ground truth values for the document :type feedback: List[Field] :return: Feedback response :rtype: dict :raises InvalidCredentialsException: If the credentials are invalid :raises TooManyRequestsException: If limit of requests per second is reached :raises LimitExceededException: If limit of total requests per month is reached :raises requests.exception.RequestException: If error was raised by requests
374,173
def estimate(data, fit_offset="mean", fit_profile="tilt", border_px=0, from_mask=None, ret_mask=False): if fit_profile not in VALID_FIT_PROFILES: msg = "`fit_profile` must be one of {}, got '{}'".format(VALID_FIT_PROFILES, fit_profile) raise ValueError(msg) if fit_offset not in VALID_FIT_OFFSETS: msg = "`fit_offset` must be one of {}, got '{}'".format(VALID_FIT_OFFSETS, fit_offset) raise ValueError(msg) if from_mask is not None: assert isinstance(from_mask, np.ndarray) mask = from_mask.copy() else: mask = np.ones_like(data, dtype=bool) if border_px > 0: border_px = int(np.round(border_px)) mask_px = np.zeros_like(mask) mask_px[:border_px, :] = True mask_px[-border_px:, :] = True mask_px[:, :border_px] = True mask_px[:, -border_px:] = True np.logical_and(mask, mask_px, out=mask) if fit_profile == "tilt": bgimg = profile_tilt(data, mask) elif fit_profile == "poly2o": bgimg = profile_poly2o(data, mask) else: bgimg = np.zeros_like(data, dtype=float) if fit_offset == "fit": if fit_profile == "offset": msg = "`fit_offset=='fit'` only valid when `fit_profile!='offset'`" raise ValueError(msg) elif fit_offset == "gauss": bgimg += offset_gaussian((data - bgimg)[mask]) elif fit_offset == "mean": bgimg += np.mean((data - bgimg)[mask]) elif fit_offset == "mode": bgimg += offset_mode((data - bgimg)[mask]) if ret_mask: ret = (bgimg, mask) else: ret = bgimg return ret
Estimate the background value of an image Parameters ---------- data: np.ndarray Data from which to compute the background value fit_profile: str The type of background profile to fit: - "offset": offset only - "poly2o": 2D 2nd order polynomial with mixed terms - "tilt": 2D linear tilt with offset (default) fit_offset: str The method for computing the profile offset - "fit": offset as fitting parameter - "gauss": center of a gaussian fit - "mean": simple average - "mode": mode (see `qpimage.bg_estimate.mode`) border_px: float Assume that a frame of `border_px` pixels around the image is background. from_mask: boolean np.ndarray or None Use a boolean array to define the background area. The boolean mask must have the same shape as the input data. `True` elements are used for background estimation. ret_mask: bool Return the boolean mask used to compute the background. Notes ----- If both `border_px` and `from_mask` are given, the intersection of the two is used, i.e. the positions where both, the frame mask and `from_mask`, are `True`.
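A minimal sketch of calling estimate on synthetic data with a linear tilt; shapes and noise are illustrative, and the profile/offset helper functions are assumed to be available in the same module:
import numpy as np
y, x = np.mgrid[0:50, 0:50]
data = 0.01 * x + 0.02 * y + np.random.rand(50, 50)
bg = estimate(data, fit_profile="tilt", fit_offset="mean", border_px=5)
corrected = data - bg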
374,174
def password_attributes_max_retry(self, **kwargs): config = ET.Element("config") password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa") max_retry = ET.SubElement(password_attributes, "max-retry") max_retry.text = kwargs.pop('max_retry') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
374,175
def push(self, x): if isinstance(x, Frame): frame = x else: frame = Frame(x) self.stack.append(frame) return frame
Push an I{object} onto the stack. @param x: An object to push. @type x: L{Frame} @return: The pushed frame. @rtype: L{Frame}
374,176
def delete(self, session, commit=True, soft=True): if soft: self.time_removed = sqlalchemy.func.unix_timestamp() else: session.delete(self) if commit: session.commit()
Delete a row from the DB. :param session: flask_sqlalchemy session object :param commit: whether to issue the commit :param soft: whether this is a soft delete (i.e., update time_removed)
374,177
def last_version(): try: last_update, version, success = last_version._cache except AttributeError: last_update = 0 version = None success = False cache_delta = 24 * 3600 if success else 600 if (time.time() - last_update) < cache_delta: return version else: try: req = requests.get(settings.CAS_NEW_VERSION_JSON_URL) data = json.loads(req.text) version = data["info"]["version"] last_version._cache = (time.time(), version, True) return version except ( KeyError, ValueError, requests.exceptions.RequestException ) as error: logger.error( "Unable to fetch %s: %s" % (settings.CAS_NEW_VERSION_JSON_URL, error) ) last_version._cache = (time.time(), version, False)
Fetch the last version from pypi and return it. On successful fetch from pypi, the response is cached 24h, on error, it is cached 10 min. :return: the last django-cas-server version :rtype: unicode
374,178
def load_tab_data(self): for tab in self._tabs.values(): if tab.load and not tab.data_loaded: try: tab._data = tab.get_context_data(self.request) except Exception: tab._data = False exceptions.handle(self.request)
Preload all data for the tabs that will be displayed.
374,179
def disabled(name): ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} stat = __salt__['rdp.status']() if stat: if __opts__['test']: ret['result'] = None ret['comment'] = 'RDP will be disabled' return ret ret['result'] = __salt__['rdp.disable']() ret['changes'] = {'RDP was disabled': True} return ret ret['comment'] = 'RDP is disabled' return ret
Disable the RDP service
374,180
def assign_reads_to_database(query, database_fasta, out_path, params=None): if params is None: params = {} params['-f'] = out_path if 'algorithm' not in params: raise InvalidArgumentApplicationError("Must specify which algorithm to use ('bwa-short' or 'bwasw')") elif params['algorithm'] not in ('bwa-short', 'bwasw'): raise InvalidArgumentApplicationError("Unknown algorithm '%s'. Please enter either 'bwa-short' or 'bwasw'." % params['algorithm']) if 'temp_dir' not in params: params['temp_dir'] = '/tmp' if params['algorithm'] == 'bwa-short': bwa_aln = BWA_aln(params=params['aln_params']) aln_files = {'prefix': index_prefix, 'fastq_in': query} sai_file_path = bwa_aln(aln_files)['output'].name bwa = BWA_samse(params=subcommand_params) files = {'prefix': index_prefix, 'sai_in': sai_file_path, 'fastq_in': query} result = bwa(files) return result['output']  # this cell was truncated in extraction: index_prefix and subcommand_params are defined in the elided portion, and the key names here are reconstructed
Assign a set of query sequences to a reference database database_fasta_fp: absolute file path to the reference database query_fasta_fp: absolute file path to query sequences output_fp: absolute file path of the file to be output params: dict of BWA specific parameters. * Specify which algorithm to use (bwa-short or bwasw) using the dict key "algorithm" * if algorithm is bwasw, specify params for the bwa bwasw subcommand * if algorithm is bwa-short, specify params for the bwa samse subcommand * if algorithm is bwa-short, must also specify params to use with bwa aln, which is used to get the sai file necessary to run samse. bwa aln params should be passed in using dict key "aln_params" and the associated value should be a dict of params for the bwa aln subcommand * if a temporary directory is not specified in params using dict key "temp_dir", it will be assumed to be /tmp This method returns an open file object (SAM format).
374,181
def get_changes(self, checks=None, imports=None, resources=None, task_handle=taskhandle.NullTaskHandle()): if checks is not None: warnings.warn('The use of checks parameter is deprecated; use the args parameter of the constructor instead.', DeprecationWarning, stacklevel=2) for name, value in checks.items(): self.args[name] = similarfinder._pydefined_to_str(value) if imports is not None: warnings.warn('The use of imports parameter is deprecated; use the imports parameter of the constructor instead.', DeprecationWarning, stacklevel=2) self.imports = imports changes = change.ChangeSet('Restructuring <%s> to <%s>' % (self.pattern, self.goal)) if resources is not None: files = [resource for resource in resources if libutils.is_python_file(self.project, resource)] else: files = self.project.get_python_files() job_set = task_handle.create_jobset('Collecting Changes', len(files)) for resource in files: job_set.started_job(resource.path) pymodule = self.project.get_pymodule(resource) finder = similarfinder.SimilarFinder(pymodule, wildcards=self.wildcards) matches = list(finder.get_matches(self.pattern, self.args)) computer = self._compute_changes(matches, pymodule) result = computer.get_changed() if result is not None: imported_source = self._add_imports(resource, result, self.imports) changes.add_change(change.ChangeContents(resource, imported_source)) job_set.finished_job() return changes
Get the changes needed by this restructuring `resources` can be a list of `rope.base.resources.File`\s to apply the restructuring on. If `None`, the restructuring will be applied to all python files. `checks` argument has been deprecated. Use the `args` argument of the constructor. The usage of:: strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B', 'obj3.object': 'mod.C'} checks = restructuring.make_checks(strchecks) can be replaced with:: args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B', 'obj3': 'object=mod.C'} where obj1, obj2 and obj3 are wildcard names that appear in restructuring pattern.
374,182
def split_by_idxs(self, train_idx, valid_idx): "Split the data between `train_idx` and `valid_idx`." return self.split_by_list(self[train_idx], self[valid_idx])
Split the data between `train_idx` and `valid_idx`.
374,183
def check_reaction_consistency(database, solver, exchange=set(), checked=set(), zeromass=set(), weights={}): prob = solver.create_problem() compound_set = _non_localized_compounds(database) mass_compounds = compound_set.difference(zeromass) m = prob.namespace(mass_compounds, lower=1) z = prob.namespace(database.reactions, lower=0) r = prob.namespace(database.reactions) objective = z.expr((reaction_id, weights.get(reaction_id, 1)) for reaction_id in database.reactions) prob.set_objective(objective) rs = r.set(database.reactions) zs = z.set(database.reactions) prob.add_linear_constraints(zs >= rs, rs >= -zs) massbalance_lhs = {reaction_id: 0 for reaction_id in database.reactions} for (compound, reaction_id), value in iteritems(database.matrix): if compound not in zeromass: mass_var = m(compound.in_compartment(None)) massbalance_lhs[reaction_id] += mass_var * value for reaction_id, lhs in iteritems(massbalance_lhs): if reaction_id not in exchange: if reaction_id not in checked: prob.add_linear_constraints(lhs + r(reaction_id) == 0) else: prob.add_linear_constraints(lhs == 0) try: prob.solve(lp.ObjectiveSense.Minimize) except lp.SolverError as e: raise_from(MassConsistencyError('Unable to solve mass consistency: {}'.format(e)), e) def iterate_reactions(): for reaction_id in database.reactions: residual = r.value(reaction_id) yield reaction_id, residual def iterate_compounds(): for compound in mass_compounds: yield compound, m.value(compound) return iterate_reactions(), iterate_compounds()
Check inconsistent reactions by minimizing mass residuals Return a reaction iterable, and compound iterable. The reaction iterable yields reaction ids and mass residuals. The compound iterable yields compound ids and mass assignments. Each compound is assigned a mass of at least one, and the masses are balanced using the stoichiometric matrix. In addition, each reaction has a residual mass that is included in the mass balance equations. The L1-norm of the residuals is minimized. Reactions in the checked set are assumed to have been manually checked and therefore have the residual fixed at zero.
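A sketch of consuming the two iterables returned above; `database` and `solver` are assumed to be an already-built psamm metabolic database and LP solver wrapper matching the interface used in the function:

reactions, compounds = check_reaction_consistency(database, solver)
for reaction_id, residual in reactions:
    if abs(residual) > 1e-6:
        print('mass-inconsistent reaction:', reaction_id, residual)
for compound, mass in compounds:
    print(compound, 'assigned mass', mass)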
374,184
def minkowski_distance(x, y, p=2): from math import pow assert len(y) == len(x) assert len(x) >= 1 sum = 0 for i in range(len(x)): sum += abs(x[i] - y[i]) ** p return pow(sum, 1.0 / float(p))
Calculates the minkowski distance between two points.

:param x: the first point
:param y: the second point
:param p: the order of the minkowski algorithm. If *p=1* it is equal
    to the Manhattan distance, if *p=2* it is equal to the Euclidean
    distance. The higher the order, the closer it converges to the
    Chebyshev distance, which has *p=infinity*.
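A runnable check of the special cases called out above:

p1, p2 = (0.0, 0.0), (3.0, 4.0)
print(minkowski_distance(p1, p2, p=1))   # 7.0  (Manhattan)
print(minkowski_distance(p1, p2, p=2))   # 5.0  (Euclidean)
print(minkowski_distance(p1, p2, p=10))  # ~4.02, approaching the Chebyshev limit max(3, 4) = 4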
374,185
async def connect(self, client_id, conn_string): conn_id = self.adapter.unique_conn_id() self._client_info(client_id) await self.adapter.connect(conn_id, conn_string) self._hook_connect(conn_string, conn_id, client_id)
Connect to a device on behalf of a client. See :meth:`AbstractDeviceAdapter.connect`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter to connect. Raises: DeviceServerError: There is an issue with your client_id. DeviceAdapterError: The adapter had an issue connecting.
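A client-side sketch; `server` is assumed to be an instance of the device-server class exposing the coroutine above, and the client id and connection string are illustrative:

import asyncio

async def main():
    await server.connect('client-1', 'device/0x1234')
    # ... interact with the connected device, then release it through
    # the matching disconnect API

asyncio.get_event_loop().run_until_complete(main())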
374,186
def isNumber(self, value):
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-castable inputs such as None
        return False
Validate whether a value is a number or not
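A quick behavior sketch; `Validator` is a hypothetical host class, since isNumber is written as a method:

class Validator:
    isNumber = isNumber  # bind the function defined above as a method

v = Validator()
print(v.isNumber('3.14'))  # True
print(v.isNumber('1e-3'))  # True
print(v.isNumber('abc'))   # False
print(v.isNumber(None))    # False (float(None) raises TypeError)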
374,187
def _step(self, dataset): if dataset is None: values = [self.f_step()] else: values = [self.f_step(*x) for x in dataset] return collections.OrderedDict( zip(self._monitor_names, np.mean(values, axis=0)))
Advance the state of the optimizer by one step. Parameters ---------- dataset : :class:`Dataset <downhill.dataset.Dataset>` A dataset for optimizing the model. Returns ------- train_monitors : dict A dictionary mapping monitor names to values.
374,188
def get_centroids(self, ridx): centroids = [] with h5py.File(self.source_file, "r") as hdf5: for idx in ridx: trace = "{:s}/{:s}".format(self.idx_set["sec"], str(idx)) centroids.append(hdf5[trace + "/Centroids"].value) return numpy.concatenate(centroids)
:returns: array of centroids for the given rupture indices
374,189
def extract_params():
    uri = _get_uri_from_request(request)
    http_method = request.method
    headers = dict(request.headers)
    if 'wsgi.input' in headers:
        del headers['wsgi.input']
    if 'wsgi.errors' in headers:
        del headers['wsgi.errors']
    if request.authorization:
        headers['Authorization'] = request.authorization
    body = request.form.to_dict()
    return uri, http_method, body, headers
Extract request params.
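A sketch of how an OAuth token endpoint might consume this helper; it assumes a Flask request context and that extract_params and its _get_uri_from_request companion live in the same module:

from flask import Flask

app = Flask(__name__)

@app.route('/oauth/token', methods=['POST'])
def token():
    uri, http_method, body, headers = extract_params()
    # hand the normalized request parts to an oauthlib endpoint here
    return 'ok'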
374,190
def pack(self, value=None):
    if isinstance(value, type(self)):
        return value.pack()
    if value is None:
        value = self.value
    elif 'value' in dir(value):
        # value is an instance of a type exposing a .value attribute
        value = value.value
    try:
        return struct.pack(self._fmt, value)
    except struct.error:
        expected_type = type(self).__name__
        actual_type = type(value).__name__
        msg_args = expected_type, value, actual_type
        msg = '{} could not pack {} of type {}.'.format(*msg_args)
        raise PackException(msg)
r"""Pack the value as a binary representation. Considering an example with UBInt8 class, that inherits from GenericType: >>> from pyof.foundation.basic_types import UBInt8 >>> objectA = UBInt8(1) >>> objectB = 5 >>> objectA.pack() b'\x01' >>> objectA.pack(objectB) b'\x05' Args: value: If the value is None, then we will pack the value of the current instance. Otherwise, if value is an instance of the same type as the current instance, then we call the pack of the value object. Otherwise, we will use the current instance pack method on the passed value. Returns: bytes: The binary representation. Raises: :exc:`~.exceptions.BadValueException`: If the value does not fit the binary format.
374,191
def register(self, resource=None, **meta):
    # Allow usage as a decorator with keyword arguments
    if resource is None:
        def wrapper(resource):
            return self.register(resource, **meta)
        return wrapper

    # Must be a subclass of ResourceView
    if not issubclass(resource, ResourceView):
        raise AssertionError("%s not subclass of ResourceView" % resource)

    # Cannot be abstract
    if resource._meta.abstract:
        raise AssertionError(
            "Attempt register of abstract resource: %s." % resource)

    # Fabric of resources
    meta = dict(self.meta, **meta)
    meta['name'] = meta.get('name', resource._meta.name)
    options = type('Meta', tuple(), meta)

    params = dict(api=self, Meta=options, **meta)
    params['__module__'] = '%s.%s' % (
        self.prefix, self.str_version.replace('.', '_'))
    params['__doc__'] = resource.__doc__

    new_resource = type(
        '%s%s' % (resource.__name__, len(self.resources)),
        (resource,), params)

    if self.resources.get(new_resource._meta.url_name):
        logger.warning(
            "A resource '%r' is replacing the existing record for '%s'",
            new_resource, self.resources.get(new_resource._meta.url_name))

    self.resources[new_resource._meta.url_name] = new_resource

    return resource
Add resource to the API. :param resource: Resource class for registration :param **meta: Redefine Meta options for the resource :return adrest.views.Resource: Generated resource.
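A registration sketch, assuming an adrest-style Api instance and a concrete ResourceView subclass; all names here are illustrative:

api = Api(version='1.0.0')  # hypothetical Api construction

@api.register
class BookResource(ResourceView):
    class Meta:
        model = 'books.book'

# Meta options can also be overridden per registration:
api.register(BookResource, url_regex='library/books/')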
374,192
def split (s, delimter, trim = True, limit = 0):
    ret      = []
    # bracket pairs that suppress splitting while open
    special1 = ['(', ')', '[', ']', '{', '}']
    # quote characters that suppress splitting while open
    special2 = ["'", '"']
    # escape character
    special3 = '\\'
    flags1   = [0, 0, 0]
    flags2   = [False, False]
    flags3   = False
    start    = 0
    nlim     = 0
    for i, c in enumerate(s):
        if c == special3:
            flags3 = not flags3
        elif not flags3:
            if c in special1:
                index = special1.index(c)
                if index % 2 == 0:
                    flags1[int(index/2)] += 1
                else:
                    flags1[int(index/2)] -= 1
            elif c in special2:
                index = special2.index(c)
                flags2[index] = not flags2[index]
            elif c == delimter and not any(flags1) and not any(flags2):
                r = s[start:i]
                if trim: r = r.strip()
                ret.append(r)
                start = i + 1
                nlim  = nlim + 1
                if limit and nlim >= limit:
                    break
        else:
            flags3 = False
    r = s[start:]
    if trim: r = r.strip()
    ret.append(r)
    return ret
Split a string using a single-character delimter @params: `s`: the string `delimter`: the single-character delimter `trim`: whether to trim each part. Default: True @examples: ```python ret = split("'a,b',c", ",") # ret == ["'a,b'", "c"] # ',' inside quotes will be recognized. ``` @returns: The list of substrings
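The limit parameter is not covered by the docstring example; once limit delimiters have been consumed, the remainder of the string is returned untouched as the final element:

print(split("a, b, c, d", ","))           # ['a', 'b', 'c', 'd']
print(split("a, b, c, d", ",", limit=2))  # ['a', 'b', 'c, d']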
374,193
def make_unique_str(num_chars=20):
    from random import choice
    chars = 'abcdefghijklmnopqrstuvwxyz'
    all_chars = chars + chars.upper() + '0123456789'
    picks = list(all_chars)
    return ''.join([choice(picks) for i in range(num_chars)])
make a random string of characters for a temp filename
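A small usage sketch building a temp-file name, as the docstring suggests:

import os
import tempfile

name = 'scratch-' + make_unique_str(8) + '.txt'
path = os.path.join(tempfile.gettempdir(), name)
print(path)  # e.g. /tmp/scratch-qZk3TbX9.txt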
374,194
def create(self, file_or_path, **kwargs):
    opened = False
    if isinstance(file_or_path, str_type()):
        file_or_path = open(file_or_path, 'rb')
        opened = True
    elif not getattr(file_or_path, 'read', False):
        raise Exception(
            "A file or path to a file is required for this operation.")
    try:
        return self.client._post(
            self._url(),
            file_or_path,
            headers=self._resource_class.create_headers({}),
            file_upload=True
        )
    finally:
        # only close files we opened ourselves
        if opened:
            file_or_path.close()
Creates an upload for the given file or path.
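An upload sketch; `client.uploads` is assumed to be the resource manager exposing the create() method above, and the file path is illustrative:

upload = client.uploads.create('data/report.csv')

# An already-open file object also works, and is not closed for you:
with open('data/report.csv', 'rb') as fh:
    upload = client.uploads.create(fh)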
374,195
def data_worker(**kwargs):
    if "function" in kwargs:
        function = kwargs["function"]
    else:
        raise Exception("Invalid arguments, no function specified")
    if "input" in kwargs:
        input_queue = kwargs["input"]
    else:
        raise Exception("Invalid arguments, no input queue")
    if "output" in kwargs:
        output_map = kwargs["output"]
    else:
        raise Exception("Invalid arguments, no output map")
    # Build the argument dict expected by the fetch function
    if "token" in kwargs:
        argsdict = {"quandl_token": kwargs["token"]}
    elif "Quandl" in function.__module__:
        raise Exception("Invalid arguments, no Quandl token")
    elif "source" in kwargs and "begin" in kwargs and "end" in kwargs:
        argsdict = {"data_source": kwargs["source"],
                    "begin": kwargs["begin"],
                    "end": kwargs["end"]}
    elif "source" in kwargs:
        argsdict = {"data_source": kwargs["source"]}
    elif "pandas.io.data" in function.__module__:
        raise Exception("Invalid arguments, no pandas data source specified")
    else:
        raise Exception("Invalid arguments")
    retries = 5
    # drain the shared queue, fetching one dataframe per key
    while not input_queue.empty():
        data_key = input_queue.get()
        get_data(function, data_key, output_map, retries, argsdict)
Function to be spawned concurrently, consume data keys from input queue, and push the resulting dataframes to output map
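A concurrency sketch: three workers draining one queue of tickers into a shared dict. The fetch function here is the legacy pandas.io.data.DataReader, matching the "pandas.io.data" branch above; get_data is assumed to be defined alongside data_worker:

import threading
try:
    from queue import Queue   # Python 3
except ImportError:
    from Queue import Queue   # Python 2

from pandas.io.data import DataReader  # legacy module assumed by the code

tickers = Queue()
for symbol in ['AAPL', 'MSFT', 'GOOG']:
    tickers.put(symbol)
frames = {}

workers = [threading.Thread(target=data_worker,
                            kwargs={'function': DataReader,
                                    'input': tickers,
                                    'output': frames,
                                    'source': 'yahoo'})
           for _ in range(3)]
for w in workers:
    w.start()
for w in workers:
    w.join()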
374,196
def add_repo(self, repo):
    url = self._build_url('repos', repo, base_url=self._api)
    return self._boolean(self._put(url), 204, 404)
Add ``repo`` to this team. :param str repo: (required), form: 'user/repo' :returns: bool
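A usage sketch; `team` is assumed to be an authenticated github3.py Team instance:

if team.add_repo('my-org/my-repo'):
    print('repository added to team')
else:
    print('request rejected (check permissions and the user/repo form)')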
374,197
def polfit_residuals_with_sigma_rejection(
        x, y, deg, times_sigma_reject,
        color='b', size=75,
        xlim=None, ylim=None,
        xlabel=None, ylabel=None, title=None,
        use_r=None,
        geometry=(0, 0, 640, 480),
        debugplot=0):
    # protections
    if type(x) is not np.ndarray:
        raise ValueError("x=" + str(x) + " must be a numpy.ndarray")
    elif x.ndim != 1:
        raise ValueError("x.ndim=" + str(x.ndim) + " must be 1")
    if type(y) is not np.ndarray:
        raise ValueError("y=" + str(y) + " must be a numpy.ndarray")
    elif y.ndim != 1:
        raise ValueError("y.ndim=" + str(y.ndim) + " must be 1")
    npoints = x.size
    if npoints != y.size:
        raise ValueError("x.size != y.size")
    if not isinstance(deg, (int, np.integer)):
        raise ValueError("deg=" + str(deg) + " is not a valid integer")
    if deg >= npoints:
        raise ValueError("Polynomial degree=" + str(deg) +
                         " cannot be fitted with npoints=" + str(npoints))
    # initial fit using all the points
    reject = np.zeros(npoints, dtype=bool)
    poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject)
    # iterative rejection of suspicious points
    if times_sigma_reject is not None:
        while np.sum(np.logical_not(reject)) > deg + 1:
            # standard deviation of residuals of the surviving points
            sigma = np.std(yres[np.logical_not(reject)])
            # locate the most deviant surviving point
            absres = np.abs(yres)
            absres[reject] = -1.0
            imax = np.argmax(absres)
            if absres[imax] <= times_sigma_reject * sigma:
                break
            reject[imax] = True  # suspicious point
            poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject)
    return poly, yres, reject
Polynomial fit with iterative rejection of points. This function makes use of function polfit_residuals for display purposes. Parameters ---------- x : 1d numpy array, float X coordinates of the data being fitted. y : 1d numpy array, float Y coordinates of the data being fitted. deg : int Degree of the fitting polynomial. times_sigma_reject : float or None Number of times the standard deviation to reject points iteratively. If None, the fit does not reject any point. color : single character or 1d numpy array of characters Color for all the symbols (single character) or for each individual symbol (array of color names with the same length as 'x' or 'y'). If 'color' is a single character, the rejected points are displayed in red color, whereas when 'color' is an array of color names, rejected points are displayed with the color provided in this array. size : int Marker size for all the symbols (single character) or for each individual symbol (array of integers with the same length as 'x' or 'y'). xlim : tuple (floats) Plot limits in the X axis. ylim : tuple (floats) Plot limits in the Y axis. xlabel : string Character string for label in X axis. ylabel : string Character string for label in y axis. title : string Character string for graph title. use_r : bool If True, the function computes several fits, using R, to polynomials of degree deg, deg+1 and deg+2 (when possible). geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Return ------ poly : instance of Polynomial (numpy) Result from the polynomial fit using numpy Polynomial. Only points not flagged as rejected are employed in the fit. yres : 1d numpy array, float Residuals from polynomial fit. Note that the residuals are computed for all the points, including the rejected ones. In this way the dimension of this array is the same as the dimensions of the input 'x' and 'y' arrays. reject : 1d numpy array, bool Boolean array indicating rejected points.
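An end-to-end sketch on synthetic data with one planted outlier; it assumes this function and numpy are importable as in the module above:

import numpy as np

rng = np.random.RandomState(0)
x = np.linspace(0.0, 10.0, 25)
y = 1.0 + 2.0 * x + rng.normal(scale=0.1, size=x.size)
y[12] += 5.0  # planted outlier

poly, yres, reject = polfit_residuals_with_sigma_rejection(
    x, y, deg=1, times_sigma_reject=3.0)
print('rejected points:', np.flatnonzero(reject))  # expected: [12]
print('coefficients:', poly.coef)                  # close to (1.0, 2.0)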
374,198
def _get_devices_by_activation_state(self, state): devices_with_state = [] for device in self.devices: act = device.tm.cm.devices.device.load( name=get_device_info(device).name, partition=self.partition ) if act.failoverState == state: devices_with_state.append(device) return devices_with_state
Get a list of bigips by activation state.

:param state: str -- state to filter the returned list of devices
:returns: list -- list of devices that are in the given state
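A sketch of how a cluster manager might use the helper above; 'active' and 'standby' are the failoverState values of interest, and `cm` is an assumed instance of the surrounding class:

active = cm._get_devices_by_activation_state('active')
standby = cm._get_devices_by_activation_state('standby')
print('%d active, %d standby' % (len(active), len(standby)))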
374,199
def _compute_mean(self, C, f0, f1, f2, SC, mag, rrup, idxs, mean,
                  scale_fac):
    mean[idxs] = (C['c1'] +
                  C['c2'] * mag +
                  C['c3'] * (mag ** 2) +
                  (C['c4'] + C['c5'] * mag) * f1[idxs] +
                  (C['c6'] + C['c7'] * mag) * f2[idxs] +
                  (C['c8'] + C['c9'] * mag) * f0[idxs] +
                  C['c10'] * rrup[idxs] +
                  self._compute_stress_drop_adjustment(SC, mag, scale_fac))
Compute mean value (for a set of indexes) without site amplification terms. This is equation (5), p. 2191, without S term.