repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
hmmlearn/hmmlearn
lib/hmmlearn/base.py
https://github.com/hmmlearn/hmmlearn/blob/e86fe4349bce78ad6b3d3eb53e3545902d59abbd/lib/hmmlearn/base.py#L378-L394
def predict_proba(self, X, lengths=None): """Compute the posterior probability for each state in the model. X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. Returns ------- posteriors : array, shape (n_samples, n_components) State-membership probabilities for each sample from ``X``. """ _, posteriors = self.score_samples(X, lengths) return posteriors
[ "def", "predict_proba", "(", "self", ",", "X", ",", "lengths", "=", "None", ")", ":", "_", ",", "posteriors", "=", "self", ".", "score_samples", "(", "X", ",", "lengths", ")", "return", "posteriors" ]
Compute the posterior probability for each state in the model. X : array-like, shape (n_samples, n_features) Feature matrix of individual samples. lengths : array-like of integers, shape (n_sequences, ), optional Lengths of the individual sequences in ``X``. The sum of these should be ``n_samples``. Returns ------- posteriors : array, shape (n_samples, n_components) State-membership probabilities for each sample from ``X``.
[ "Compute", "the", "posterior", "probability", "for", "each", "state", "in", "the", "model", "." ]
python
train
thespacedoctor/qubits
qubits/results.py
https://github.com/thespacedoctor/qubits/blob/3c02ace7226389841c6bb838d045c11bed61a3c2/qubits/results.py#L780-L945
def log_the_survey_settings( log, pathToYamlFile): """ *Create a MD log of the survey settings* **Key Arguments:** - ``log`` -- logger - ``pathToYamlFile`` -- yaml results file **Return:** - None """ ################ > IMPORTS ################ ## STANDARD LIB ## ## THIRD PARTY ## import yaml ## LOCAL APPLICATION ## from datetime import datetime, date, time now = datetime.now() now = now.strftime("%Y%m%dt%H%M%S") ################ >ACTION(S) ################ # IMPORT THE SIMULATION SETTINGS fileName = pathToYamlFile stream = file(fileName, 'r') yamlContent = yaml.load(stream) snSurveyDiscoveryTimes = yamlContent['Discoveries Relative to Survey Year'] lightCurveDiscoveryTimes = yamlContent[ 'Discoveries Relative to Peak Magnitudes'] snTypes = yamlContent['SN Types'] redshifts = yamlContent['Redshifts'] cadenceDictionary = yamlContent['Cadence Dictionary'] peakAppMagList = yamlContent['Peak Apparent Magnitudes'] snCampaignLengthList = yamlContent['Campaign Length'] programSettings = yamlContent["Program Settings"] limitingMags = yamlContent["Limiting Magnitudes"] # for key in limitingMags: # log.debug('filter: %s, limit: %s' % (key, limitingMags[key])) sampleNumber = yamlContent["Simulation Sample"] peakMagnitudeDistributions = yamlContent[ "SN Absolute Peak-Magnitude Distributions"] #log.debug('snDistributions[magnitude] %s' % (snDistributions["magnitude"],)) #log.debug('snDistributions[sigma] %s' % (snDistributions["sigma"],)) relativeRatesSet = yamlContent["Relative Rate Set to Use"] relativeSNRates = yamlContent["Relative SN Rates"][relativeRatesSet] #log.debug('relativeSNRates %s' % (relativeSNRates,)) lowerRedshiftLimit = yamlContent["Lower Redshift Limit"] upperRedshiftLimit = yamlContent["Upper Redshift Limit"] #log.debug('upperRedshiftLimit %s' % (upperRedshiftLimit,)) redshiftResolution = yamlContent["Redshift Resolution"] extinctionSettings = yamlContent["Extinctions"] extinctionType = extinctionSettings["constant or random"] hostExtinctionDistributions = 
extinctionSettings["host"] #log.debug('hostExtinctionDistributions %s' % (hostExtinctionDistributions,)) galacticExtinctionDistribution = extinctionSettings["galactic"] #log.debug('galacticExtinctionDistribution %s' % (galacticExtinctionDistribution,)) surveyCadenceSettings = yamlContent["Survey Cadence"] #log.debug('surveyCadenceSettings %s' % (surveyCadenceSettings,)) snLightCurves = yamlContent["Lightcurves"] #log.debug('snlightCurves %s' % (snlightCurves,)) surveyArea = yamlContent["Sky Area of the Survey (square degrees)"] extraSurveyConstraints = yamlContent["Extra Survey Constraints"] weatherLossFraction = surveyCadenceSettings[ "Fraction of Year Lost to Weather etc"] observableFraction = surveyCadenceSettings["Observable Fraction of Year"] extinctionConstant = extinctionSettings["constant E(b-v)"] CCSNRateFraction = yamlContent[ "CCSN Progenitor Population Fraction of IMF"] transientToCCSNRateFraction = yamlContent["Transient to CCSN Ratio"] restFrameFilter = yamlContent["Rest Frame Filter for K-corrections"] peakMagLimit = extraSurveyConstraints["Faint-Limit of Peak Magnitude"] campaignLengthLimit = extraSurveyConstraints[ "Observable for at least ? number of days"] # CALCULATE THE SURVEY VOLUME c = converter(log=log) dists = c.redshift_to_distance( z=float(upperRedshiftLimit), WM=0.3, WV=0.7, H0=70.0 ) dl_mpc = dists["dl_mpc"] sphereOuter = (4. / 3.) * math.pi * dl_mpc**3 if float(lowerRedshiftLimit) == 0.: surveyVolume = sphereOuter else: dists = c.redshift_to_distance( z=float(lowerRedshiftLimit), WM=0.3, WV=0.7, H0=70.0 ) dl_mpc = dists["dl_mpc"] sphereInner = (4. / 3.) 
* math.pi * dl_mpc**3 surveyVolume = sphereOuter - sphereInner # NOW SCALE TO SKY-AREA surveyVolume = surveyVolume * surveyArea / 41253 surveyVolume = surveyVolume / (1000.)**3 if surveyVolume < 1: surveyVolume = "%(surveyVolume)0.4f" % locals() elif surveyVolume < 100: surveyVolume = "%(surveyVolume)0.2f" % locals() else: surveyVolume = "%(surveyVolume)0.1f" % locals() # CALCULATE OVERALL DETECTION FRACTIONS discoveryFraction, tooFaintFraction, shortCampaignFraction = calculate_fraction_of_sn_discovered( log, surveyCadenceSettings, snSurveyDiscoveryTimes, redshifts, peakAppMagList, snCampaignLengthList, extraSurveyConstraints, lowerRedshiftLimit, upperRedshiftLimit) discoveryFraction = discoveryFraction * 100. if discoveryFraction < 1: discoveryFraction = "%(discoveryFraction)0.4f" % locals() elif discoveryFraction < 10: discoveryFraction = "%(discoveryFraction)0.2f" % locals() else: discoveryFraction = "%(discoveryFraction)0.1f" % locals() # log.info('yamlContent %s' % (yamlContent,)) stream.close() settings_log = """ # SN Survey Simulation Results - %s The *%s*-band liming magnitudes of this simulated survey are: | Filter | Magnitude | |:---|:----| """ % (now, restFrameFilter) for k, v in limitingMags.iteritems(): settings_log += """| %s | %s |\n""" % (k, v,) settings_log += """ A total of **%s** transients where simulated in the survey, within a **redshift-range of %s-%s** and **survey footprint of %s deg<sup>2</sup>** (a **total volume of %s Gpc<sup>-3</sup> yr<sup>-1</sup>**). **%s%% of these simulated transients had solid 'discoveries'**. A constant galactic extinction of `E(B-V) = %s` is used, it's assumed that any given field in the sky is visible for %s of the survey year and the typical fraction of survey time lost to weather of %s is accounted for. 
Here are the relative rates and peak magnitude distributions of the SN used in the survey: | SN Type | Relative Rate | Peak Magnitude | Sigma Peak | |:---|:---|:---|:---| """ % (sampleNumber, lowerRedshiftLimit, upperRedshiftLimit, surveyArea, surveyVolume, discoveryFraction, extinctionConstant, observableFraction, weatherLossFraction,) for k, v in peakMagnitudeDistributions['magnitude'].iteritems(): settings_log += """| %s | %s | %s | %s |\n""" % ( k, relativeSNRates[k], v, peakMagnitudeDistributions['sigma'][k]) settings_log += """ If a transient is detected by the simulated survey, extra constraints are placed upon the detected object to secure positive identification as the object. 1. The peak apparent magnitude of the object must be brighter than **%s mag** 2. The object must be detectable for long enough to complete a follow-up campaign of longer than **%s days** within 1 survey year with any single filter. The transient rate for the survey volume is estimated by assuming a rate of **%s** times that of the CCSN rate (itself a fraction of **%s** of the total SFR). """ % (peakMagLimit, campaignLengthLimit, transientToCCSNRateFraction, CCSNRateFraction) return settings_log
[ "def", "log_the_survey_settings", "(", "log", ",", "pathToYamlFile", ")", ":", "################ > IMPORTS ################", "## STANDARD LIB ##", "## THIRD PARTY ##", "import", "yaml", "## LOCAL APPLICATION ##", "from", "datetime", "import", "datetime", ",", "date", ",", ...
*Create a MD log of the survey settings* **Key Arguments:** - ``log`` -- logger - ``pathToYamlFile`` -- yaml results file **Return:** - None
[ "*", "Create", "a", "MD", "log", "of", "the", "survey", "settings", "*" ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/gcp_hub_client.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/gcp_hub_client.py#L271-L286
def _MainThreadProc(self): """Entry point for the worker thread.""" registration_required = True while not self._shutdown: if registration_required: service = self._BuildService() registration_required, delay = self._RegisterDebuggee(service) if not registration_required: registration_required, delay = self._ListActiveBreakpoints(service) if self.on_idle is not None: self.on_idle() if not self._shutdown: time.sleep(delay)
[ "def", "_MainThreadProc", "(", "self", ")", ":", "registration_required", "=", "True", "while", "not", "self", ".", "_shutdown", ":", "if", "registration_required", ":", "service", "=", "self", ".", "_BuildService", "(", ")", "registration_required", ",", "delay...
Entry point for the worker thread.
[ "Entry", "point", "for", "the", "worker", "thread", "." ]
python
train
dmlc/gluon-nlp
scripts/bert/create_pretraining_data.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/create_pretraining_data.py#L533-L548
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): """Truncates a pair of sequences to a maximum sequence length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b assert len(trunc_tokens) >= 1 # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if rng.random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop()
[ "def", "truncate_seq_pair", "(", "tokens_a", ",", "tokens_b", ",", "max_num_tokens", ",", "rng", ")", ":", "while", "True", ":", "total_length", "=", "len", "(", "tokens_a", ")", "+", "len", "(", "tokens_b", ")", "if", "total_length", "<=", "max_num_tokens",...
Truncates a pair of sequences to a maximum sequence length.
[ "Truncates", "a", "pair", "of", "sequences", "to", "a", "maximum", "sequence", "length", "." ]
python
train
eXamadeus/godaddypy
godaddypy/client.py
https://github.com/eXamadeus/godaddypy/blob/67820604ffe233a67ef9f6b3a59ab85b02653e57/godaddypy/client.py#L284-L305
def update_record(self, domain, record, record_type=None, name=None): """Call to GoDaddy API to update a single DNS record :param name: only required if the record is None (deletion) :param record_type: only required if the record is None (deletion) :param domain: the domain where the DNS belongs to (eg. 'example.com') :param record: dict with record info (ex. {'name': 'dynamic', 'ttl': 3600, 'data': '1.1.1.1', 'type': 'A'}) :return: True if no exceptions occurred """ if record_type is None: record_type = record['type'] if name is None: name = record['name'] url = self.API_TEMPLATE + self.RECORDS_TYPE_NAME.format(domain=domain, type=record_type, name=name) self._put(url, json=[record]) self.logger.info( 'Updated record. Domain {} name {} type {}'.format(domain, str(record['name']), str(record['type']))) # If we didn't get any exceptions, return True to let the user know return True
[ "def", "update_record", "(", "self", ",", "domain", ",", "record", ",", "record_type", "=", "None", ",", "name", "=", "None", ")", ":", "if", "record_type", "is", "None", ":", "record_type", "=", "record", "[", "'type'", "]", "if", "name", "is", "None"...
Call to GoDaddy API to update a single DNS record :param name: only required if the record is None (deletion) :param record_type: only required if the record is None (deletion) :param domain: the domain where the DNS belongs to (eg. 'example.com') :param record: dict with record info (ex. {'name': 'dynamic', 'ttl': 3600, 'data': '1.1.1.1', 'type': 'A'}) :return: True if no exceptions occurred
[ "Call", "to", "GoDaddy", "API", "to", "update", "a", "single", "DNS", "record" ]
python
train
vladsaveliev/TargQC
targqc/summarize.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/summarize.py#L193-L223
def _correct_qualimap_insert_size_histogram(work_dir, samples): """ replacing Qualimap insert size histogram with Picard one. """ for s in samples: qualimap1_dirname = dirname(s.qualimap_ins_size_hist_fpath).replace('raw_data_qualimapReport', 'raw_data') qualimap2_dirname = dirname(s.qualimap_ins_size_hist_fpath) if exists(qualimap1_dirname): if not exists(qualimap2_dirname): shutil.move(qualimap1_dirname, qualimap2_dirname) else: shutil.rmtree(qualimap1_dirname) elif not exists(qualimap2_dirname): continue # no data from both Qualimap v.1 and Qualimap v.2 # if qualimap histogram exits and reuse_intermediate, skip if verify_file(s.qualimap_ins_size_hist_fpath, silent=True) and tc.reuse_intermediate: pass else: if verify_file(s.picard_ins_size_hist_txt_fpath): with open(s.picard_ins_size_hist_txt_fpath, 'r') as picard_f: one_line_to_stop = False for line in picard_f: if one_line_to_stop: break if line.startswith('## HISTOGRAM'): one_line_to_stop = True with file_transaction(work_dir, s.qualimap_ins_size_hist_fpath) as tx: with open(tx, 'w') as qualimap_f: for line in picard_f: qualimap_f.write(line)
[ "def", "_correct_qualimap_insert_size_histogram", "(", "work_dir", ",", "samples", ")", ":", "for", "s", "in", "samples", ":", "qualimap1_dirname", "=", "dirname", "(", "s", ".", "qualimap_ins_size_hist_fpath", ")", ".", "replace", "(", "'raw_data_qualimapReport'", ...
replacing Qualimap insert size histogram with Picard one.
[ "replacing", "Qualimap", "insert", "size", "histogram", "with", "Picard", "one", "." ]
python
train
hmmlearn/hmmlearn
lib/hmmlearn/base.py
https://github.com/hmmlearn/hmmlearn/blob/e86fe4349bce78ad6b3d3eb53e3545902d59abbd/lib/hmmlearn/base.py#L606-L631
def _initialize_sufficient_statistics(self): """Initializes sufficient statistics required for M-step. The method is *pure*, meaning that it doesn't change the state of the instance. For extensibility computed statistics are stored in a dictionary. Returns ------- nobs : int Number of samples in the data. start : array, shape (n_components, ) An array where the i-th element corresponds to the posterior probability of the first sample being generated by the i-th state. trans : array, shape (n_components, n_components) An array where the (i, j)-th element corresponds to the posterior probability of transitioning between the i-th to j-th states. """ stats = {'nobs': 0, 'start': np.zeros(self.n_components), 'trans': np.zeros((self.n_components, self.n_components))} return stats
[ "def", "_initialize_sufficient_statistics", "(", "self", ")", ":", "stats", "=", "{", "'nobs'", ":", "0", ",", "'start'", ":", "np", ".", "zeros", "(", "self", ".", "n_components", ")", ",", "'trans'", ":", "np", ".", "zeros", "(", "(", "self", ".", ...
Initializes sufficient statistics required for M-step. The method is *pure*, meaning that it doesn't change the state of the instance. For extensibility computed statistics are stored in a dictionary. Returns ------- nobs : int Number of samples in the data. start : array, shape (n_components, ) An array where the i-th element corresponds to the posterior probability of the first sample being generated by the i-th state. trans : array, shape (n_components, n_components) An array where the (i, j)-th element corresponds to the posterior probability of transitioning between the i-th to j-th states.
[ "Initializes", "sufficient", "statistics", "required", "for", "M", "-", "step", "." ]
python
train
spotify/luigi
luigi/contrib/redshift.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/redshift.py#L489-L495
def post_copy(self, cursor): """ Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc. """ logger.info('Executing post copy queries') for query in self.queries: cursor.execute(query)
[ "def", "post_copy", "(", "self", ",", "cursor", ")", ":", "logger", ".", "info", "(", "'Executing post copy queries'", ")", "for", "query", "in", "self", ".", "queries", ":", "cursor", ".", "execute", "(", "query", ")" ]
Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc.
[ "Performs", "post", "-", "copy", "sql", "-", "such", "as", "cleansing", "data", "inserting", "into", "production", "table", "(", "if", "copied", "to", "temp", "table", ")", "etc", "." ]
python
train
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L233-L240
def intern( ns: sym.Symbol, name: sym.Symbol, val, dynamic: bool = False, meta=None ) -> "Var": """Intern the value bound to the symbol `name` in namespace `ns`.""" var_ns = Namespace.get_or_create(ns) var = var_ns.intern(name, Var(var_ns, name, dynamic=dynamic, meta=meta)) var.root = val return var
[ "def", "intern", "(", "ns", ":", "sym", ".", "Symbol", ",", "name", ":", "sym", ".", "Symbol", ",", "val", ",", "dynamic", ":", "bool", "=", "False", ",", "meta", "=", "None", ")", "->", "\"Var\"", ":", "var_ns", "=", "Namespace", ".", "get_or_crea...
Intern the value bound to the symbol `name` in namespace `ns`.
[ "Intern", "the", "value", "bound", "to", "the", "symbol", "name", "in", "namespace", "ns", "." ]
python
test
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L2058-L2077
def register_for_new_assets_by_genus_type(self, asset_genus_type): """Registers for notification of new assets of the given asset genus type. ``AssetReceiver.newAssets()`` is invoked when an asset is appears in this repository. arg: asset_genus_type (osid.type.Type): the genus type of the ``Asset`` to monitor raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceNotificationSession.register_for_changed_resource if not MONGO_LISTENER.receivers[self._ns][self._receiver]['u']: MONGO_LISTENER.receivers[self._ns][self._receiver]['u'] = [] if isinstance(MONGO_LISTENER.receivers[self._ns][self._receiver]['u'], list): MONGO_LISTENER.receivers[self._ns][self._receiver]['u'].append(asset_genus_type.get_identifier())
[ "def", "register_for_new_assets_by_genus_type", "(", "self", ",", "asset_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceNotificationSession.register_for_changed_resource", "if", "not", "MONGO_LISTENER", ".", "receivers", "[", "self", ".", "_ns"...
Registers for notification of new assets of the given asset genus type. ``AssetReceiver.newAssets()`` is invoked when an asset is appears in this repository. arg: asset_genus_type (osid.type.Type): the genus type of the ``Asset`` to monitor raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Registers", "for", "notification", "of", "new", "assets", "of", "the", "given", "asset", "genus", "type", "." ]
python
train
log2timeline/dfvfs
dfvfs/vfs/cpio_file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/cpio_file_system.py#L76-L92
def FileEntryExistsByPathSpec(self, path_spec): """Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): a path specification. Returns: bool: True if the file entry exists. """ location = getattr(path_spec, 'location', None) if location is None or not location.startswith(self.LOCATION_ROOT): return False if len(location) == 1: return True return self._cpio_archive_file.FileEntryExistsByPath(location[1:])
[ "def", "FileEntryExistsByPathSpec", "(", "self", ",", "path_spec", ")", ":", "location", "=", "getattr", "(", "path_spec", ",", "'location'", ",", "None", ")", "if", "location", "is", "None", "or", "not", "location", ".", "startswith", "(", "self", ".", "L...
Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): a path specification. Returns: bool: True if the file entry exists.
[ "Determines", "if", "a", "file", "entry", "for", "a", "path", "specification", "exists", "." ]
python
train
AoiKuiyuyou/AoikLiveReload
tools/waf/aoikwafutil.py
https://github.com/AoiKuiyuyou/AoikLiveReload/blob/0d5adb12118a33749e6690a8165fdb769cff7d5c/tools/waf/aoikwafutil.py#L69-L95
def print_title(title, is_end=False): """ Print title like ``----- {title} -----`` or ``===== {title} =====``. :param title: Title. :param is_end: Whether is end title. End title use ``=`` instead of ``-``. :return: None. """ # If is end title if is_end: # Use `=` sep = '=====' # If is not end title else: # Use `-` sep = '-----' # If is not end title if not is_end: # Print an empty line for visual comfort print_text() # Print the title, e.g. `----- {title} -----` print_text('# {sep} {title} {sep}'.format(title=title, sep=sep))
[ "def", "print_title", "(", "title", ",", "is_end", "=", "False", ")", ":", "# If is end title", "if", "is_end", ":", "# Use `=`", "sep", "=", "'====='", "# If is not end title", "else", ":", "# Use `-`", "sep", "=", "'-----'", "# If is not end title", "if", "not...
Print title like ``----- {title} -----`` or ``===== {title} =====``. :param title: Title. :param is_end: Whether is end title. End title use ``=`` instead of ``-``. :return: None.
[ "Print", "title", "like", "-----", "{", "title", "}", "-----", "or", "=====", "{", "title", "}", "=====", "." ]
python
train
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2107-L2120
def complete_offer(self, offer_id, complete_dict): """ Completes an offer :param complete_dict: the complete dict with the template id :param offer_id: the offer id :return: Response """ return self._create_put_request( resource=OFFERS, billomat_id=offer_id, command=COMPLETE, send_data=complete_dict )
[ "def", "complete_offer", "(", "self", ",", "offer_id", ",", "complete_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "OFFERS", ",", "billomat_id", "=", "offer_id", ",", "command", "=", "COMPLETE", ",", "send_data", "=", ...
Completes an offer :param complete_dict: the complete dict with the template id :param offer_id: the offer id :return: Response
[ "Completes", "an", "offer" ]
python
train
Locu/chronology
kronos/kronos/core/validator.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/core/validator.py#L65-L70
def validate_stream(stream): """ Check that the stream name is well-formed. """ if not STREAM_REGEX.match(stream) or len(stream) > MAX_STREAM_LENGTH: raise InvalidStreamName(stream)
[ "def", "validate_stream", "(", "stream", ")", ":", "if", "not", "STREAM_REGEX", ".", "match", "(", "stream", ")", "or", "len", "(", "stream", ")", ">", "MAX_STREAM_LENGTH", ":", "raise", "InvalidStreamName", "(", "stream", ")" ]
Check that the stream name is well-formed.
[ "Check", "that", "the", "stream", "name", "is", "well", "-", "formed", "." ]
python
train
assemblerflow/flowcraft
flowcraft/templates/trimmomatic.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/trimmomatic.py#L265-L283
def merge_default_adapters(): """Merges the default adapters file in the trimmomatic adapters directory Returns ------- str Path with the merged adapters file. """ default_adapters = [os.path.join(ADAPTERS_PATH, x) for x in os.listdir(ADAPTERS_PATH)] filepath = os.path.join(os.getcwd(), "default_adapters.fasta") with open(filepath, "w") as fh, \ fileinput.input(default_adapters) as in_fh: for line in in_fh: fh.write("{}{}".format(line, "\\n")) return filepath
[ "def", "merge_default_adapters", "(", ")", ":", "default_adapters", "=", "[", "os", ".", "path", ".", "join", "(", "ADAPTERS_PATH", ",", "x", ")", "for", "x", "in", "os", ".", "listdir", "(", "ADAPTERS_PATH", ")", "]", "filepath", "=", "os", ".", "path...
Merges the default adapters file in the trimmomatic adapters directory Returns ------- str Path with the merged adapters file.
[ "Merges", "the", "default", "adapters", "file", "in", "the", "trimmomatic", "adapters", "directory" ]
python
test
twilio/twilio-python
twilio/rest/ip_messaging/v2/service/channel/invite.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/channel/invite.py#L152-L166
def get(self, sid): """ Constructs a InviteContext :param sid: The unique string that identifies the resource :returns: twilio.rest.chat.v2.service.channel.invite.InviteContext :rtype: twilio.rest.chat.v2.service.channel.invite.InviteContext """ return InviteContext( self._version, service_sid=self._solution['service_sid'], channel_sid=self._solution['channel_sid'], sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "InviteContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "channel_sid", "=", "self", ".", "_solution", "[", "'channel_sid'",...
Constructs a InviteContext :param sid: The unique string that identifies the resource :returns: twilio.rest.chat.v2.service.channel.invite.InviteContext :rtype: twilio.rest.chat.v2.service.channel.invite.InviteContext
[ "Constructs", "a", "InviteContext" ]
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L2676-L2732
def memory_read(self, addr, num_units, zone=None, nbits=None): """Reads memory from a target system or specific memory zone. The optional ``zone`` specifies a memory zone to access to read from, e.g. ``IDATA``, ``DDATA``, or ``CODE``. The given number of bits, if provided, must be either ``8``, ``16``, or ``32``. If not provided, always reads ``num_units`` bytes. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_units (int): number of units to read zone (str): optional memory zone name to access nbits (int): number of bits to use for each unit Returns: List of units read from the target system. Raises: JLinkException: if memory could not be read. ValueError: if ``nbits`` is not ``None``, and not in ``8``, ``16``, or ``32``. """ buf_size = num_units buf = None access = 0 if nbits is None: buf = (ctypes.c_uint8 * buf_size)() access = 0 elif nbits == 8: buf = (ctypes.c_uint8 * buf_size)() access = 1 elif nbits == 16: buf = (ctypes.c_uint16 * buf_size)() access = 2 buf_size = buf_size * access elif nbits == 32: buf = (ctypes.c_uint32 * buf_size)() access = 4 buf_size = buf_size * access else: raise ValueError('Given bit size is invalid: %s' % nbits) args = [addr, buf_size, buf, access] method = self._dll.JLINKARM_ReadMemEx if zone is not None: method = self._dll.JLINKARM_ReadMemZonedEx args.append(zone.encode()) units_read = method(*args) if units_read < 0: raise errors.JLinkReadException(units_read) return buf[:units_read]
[ "def", "memory_read", "(", "self", ",", "addr", ",", "num_units", ",", "zone", "=", "None", ",", "nbits", "=", "None", ")", ":", "buf_size", "=", "num_units", "buf", "=", "None", "access", "=", "0", "if", "nbits", "is", "None", ":", "buf", "=", "("...
Reads memory from a target system or specific memory zone. The optional ``zone`` specifies a memory zone to access to read from, e.g. ``IDATA``, ``DDATA``, or ``CODE``. The given number of bits, if provided, must be either ``8``, ``16``, or ``32``. If not provided, always reads ``num_units`` bytes. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_units (int): number of units to read zone (str): optional memory zone name to access nbits (int): number of bits to use for each unit Returns: List of units read from the target system. Raises: JLinkException: if memory could not be read. ValueError: if ``nbits`` is not ``None``, and not in ``8``, ``16``, or ``32``.
[ "Reads", "memory", "from", "a", "target", "system", "or", "specific", "memory", "zone", "." ]
python
train
XuShaohua/bcloud
bcloud/DownloadPage.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/DownloadPage.py#L372-L376
def remove_task_db(self, fs_id): '''将任务从数据库中删除''' sql = 'DELETE FROM tasks WHERE fsid=?' self.cursor.execute(sql, [fs_id, ]) self.check_commit()
[ "def", "remove_task_db", "(", "self", ",", "fs_id", ")", ":", "sql", "=", "'DELETE FROM tasks WHERE fsid=?'", "self", ".", "cursor", ".", "execute", "(", "sql", ",", "[", "fs_id", ",", "]", ")", "self", ".", "check_commit", "(", ")" ]
将任务从数据库中删除
[ "将任务从数据库中删除" ]
python
train
mongodb/mongo-python-driver
pymongo/mongo_client.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/mongo_client.py#L1175-L1206
def _select_server(self, server_selector, session, address=None): """Select a server to run an operation on this client. :Parameters: - `server_selector`: The server selector to use if the session is not pinned and no address is given. - `session`: The ClientSession for the next operation, or None. May be pinned to a mongos server address. - `address` (optional): Address when sending a message to a specific server, used for getMore. """ try: topology = self._get_topology() address = address or (session and session._pinned_address) if address: # We're running a getMore or this session is pinned to a mongos. server = topology.select_server_by_address(address) if not server: raise AutoReconnect('server %s:%d no longer available' % address) else: server = topology.select_server(server_selector) # Pin this session to the selected server if it's performing a # sharded transaction. if server.description.mongos and (session and session._in_transaction): session._pin_mongos(server) return server except PyMongoError as exc: if session and exc.has_error_label("TransientTransactionError"): session._unpin_mongos() raise
[ "def", "_select_server", "(", "self", ",", "server_selector", ",", "session", ",", "address", "=", "None", ")", ":", "try", ":", "topology", "=", "self", ".", "_get_topology", "(", ")", "address", "=", "address", "or", "(", "session", "and", "session", "...
Select a server to run an operation on this client. :Parameters: - `server_selector`: The server selector to use if the session is not pinned and no address is given. - `session`: The ClientSession for the next operation, or None. May be pinned to a mongos server address. - `address` (optional): Address when sending a message to a specific server, used for getMore.
[ "Select", "a", "server", "to", "run", "an", "operation", "on", "this", "client", "." ]
python
train
AmesCornish/buttersink
buttersink/btrfs.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/btrfs.py#L24-L30
def bytes2uuid(b): """ Return standard human-friendly UUID. """ if b.strip(chr(0)) == '': return None s = b.encode('hex') return "%s-%s-%s-%s-%s" % (s[0:8], s[8:12], s[12:16], s[16:20], s[20:])
[ "def", "bytes2uuid", "(", "b", ")", ":", "if", "b", ".", "strip", "(", "chr", "(", "0", ")", ")", "==", "''", ":", "return", "None", "s", "=", "b", ".", "encode", "(", "'hex'", ")", "return", "\"%s-%s-%s-%s-%s\"", "%", "(", "s", "[", "0", ":", ...
Return standard human-friendly UUID.
[ "Return", "standard", "human", "-", "friendly", "UUID", "." ]
python
train
bububa/pyTOP
pyTOP/simba.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/simba.py#L414-L425
def update(self, campaign_id, search_channels, nonsearch_channels, outside_discount, nick=None): '''xxxxx.xxxxx.campaign.platform.update =================================== 取得一个推广计划的投放平台设置''' request = TOPRequest('xxxxx.xxxxx.campaign.platform.update') request['campaign_id'] = campaign_id request['search_channels'] = search_channels request['nonsearch_channels'] = nonsearch_channels request['outside_discount'] = outside_discount if nick!=None: request['nick'] = nick self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignPlatform}) return self.result
[ "def", "update", "(", "self", ",", "campaign_id", ",", "search_channels", ",", "nonsearch_channels", ",", "outside_discount", ",", "nick", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'xxxxx.xxxxx.campaign.platform.update'", ")", "request", "[", "'ca...
xxxxx.xxxxx.campaign.platform.update =================================== 取得一个推广计划的投放平台设置
[ "xxxxx", ".", "xxxxx", ".", "campaign", ".", "platform", ".", "update", "===================================", "取得一个推广计划的投放平台设置" ]
python
train
ornlneutronimaging/ImagingReso
ImagingReso/_utilities.py
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L74-L141
def get_list_element_from_database(database='ENDF_VII'): """return a string array of all the element from the database Parameters: ========== database: string. Name of database Raises: ====== ValueError if database can not be found """ _file_path = os.path.abspath(os.path.dirname(__file__)) _ref_data_folder = os.path.join(_file_path, 'reference_data') _database_folder = os.path.join(_ref_data_folder, database) if not os.path.exists(_ref_data_folder): os.makedirs(_ref_data_folder) print("Folder to store database files has been created: '{}'".format(_ref_data_folder)) if not os.path.exists(_database_folder): print("First time using database '{}'? ".format(database)) print("I will retrieve and store a local copy of database'{}': ".format(database)) download_from_github(fname=database + '.zip', path=_ref_data_folder) # if '/_elements_list.csv' NOT exist if not os.path.exists(_database_folder + '/_elements_list.csv'): # glob all .csv files _list_files = glob.glob(_database_folder + '/*.csv') # glob all .h5 files if NO .csv file exist if not _list_files: _list_files = glob.glob(_database_folder + '/*.h5') # test if files globed _empty_list_boo = not _list_files if _empty_list_boo is True: raise ValueError("'{}' does not contain any '*.csv' or '*.h5' file.".format(_database_folder)) # convert path/to/file to filename only _list_short_filename_without_extension = [os.path.splitext(os.path.basename(_file))[0] for _file in _list_files] # isolate element names and output as list if '-' in _list_short_filename_without_extension[0]: _list_element = list(set([_name.split('-')[0] for _name in _list_short_filename_without_extension])) else: _list_letter_part = list( set([re.split(r'(\d+)', _name)[0] for _name in _list_short_filename_without_extension])) _list_element = [] for each_letter_part in _list_letter_part: if len(each_letter_part) <= 2: _list_element.append(each_letter_part) # save to current dir _list_element.sort() df_to_save = pd.DataFrame() df_to_save['elements'] = 
_list_element df_to_save.to_csv(_database_folder + '/_elements_list.csv') # print("NOT FOUND '{}'".format(_database_folder + '/_elements_list.csv')) # print("SAVED '{}'".format(_database_folder + '/_elements_list.csv')) # '/_elements_list.csv' exist else: df_to_read = pd.read_csv(_database_folder + '/_elements_list.csv') _list_element = list(df_to_read['elements']) # print("FOUND '{}'".format(_database_folder + '/_elements_list.csv')) # print("READ '{}'".format(_database_folder + '/_elements_list.csv')) return _list_element
[ "def", "get_list_element_from_database", "(", "database", "=", "'ENDF_VII'", ")", ":", "_file_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "_ref_data_folder", "=", "os", ".", "path", ".",...
return a string array of all the element from the database Parameters: ========== database: string. Name of database Raises: ====== ValueError if database can not be found
[ "return", "a", "string", "array", "of", "all", "the", "element", "from", "the", "database" ]
python
train
mixmastamyk/console
console/windows.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/windows.py#L160-L185
def get_color(name, stream=STD_OUTPUT_HANDLE): ''' Returns current colors of console. https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo Arguments: name: one of ('background', 'bg', 'foreground', 'fg') stream: Handle to stdout, stderr, etc. Returns: int: a color id from the conhost palette. Ids under 0x8 (8) are dark colors, above light. ''' stream = kernel32.GetStdHandle(stream) csbi = CONSOLE_SCREEN_BUFFER_INFO() kernel32.GetConsoleScreenBufferInfo(stream, byref(csbi)) color_id = csbi.wAttributes & _mask_map.get(name, name) log.debug('color_id from conhost: %d', color_id) if name in ('background', 'bg'): color_id /= 16 # divide by 16 log.debug('color_id divided: %d', color_id) # convert to ansi order color_id = _win_to_ansi_offset_map.get(color_id, color_id) log.debug('ansi color_id: %d', color_id) return color_id
[ "def", "get_color", "(", "name", ",", "stream", "=", "STD_OUTPUT_HANDLE", ")", ":", "stream", "=", "kernel32", ".", "GetStdHandle", "(", "stream", ")", "csbi", "=", "CONSOLE_SCREEN_BUFFER_INFO", "(", ")", "kernel32", ".", "GetConsoleScreenBufferInfo", "(", "stre...
Returns current colors of console. https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo Arguments: name: one of ('background', 'bg', 'foreground', 'fg') stream: Handle to stdout, stderr, etc. Returns: int: a color id from the conhost palette. Ids under 0x8 (8) are dark colors, above light.
[ "Returns", "current", "colors", "of", "console", "." ]
python
train
LonamiWebs/Telethon
telethon/network/mtprotostate.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/network/mtprotostate.py#L170-L189
def update_time_offset(self, correct_msg_id): """ Updates the time offset to the correct one given a known valid message ID. """ bad = self._get_new_msg_id() old = self.time_offset now = int(time.time()) correct = correct_msg_id >> 32 self.time_offset = correct - now if self.time_offset != old: self._last_msg_id = 0 self._log.debug( 'Updated time offset (old offset %d, bad %d, good %d, new %d)', old, bad, correct_msg_id, self.time_offset ) return self.time_offset
[ "def", "update_time_offset", "(", "self", ",", "correct_msg_id", ")", ":", "bad", "=", "self", ".", "_get_new_msg_id", "(", ")", "old", "=", "self", ".", "time_offset", "now", "=", "int", "(", "time", ".", "time", "(", ")", ")", "correct", "=", "correc...
Updates the time offset to the correct one given a known valid message ID.
[ "Updates", "the", "time", "offset", "to", "the", "correct", "one", "given", "a", "known", "valid", "message", "ID", "." ]
python
train
pip-services3-python/pip-services3-components-python
pip_services3_components/count/CachedCounters.py
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/count/CachedCounters.py#L243-L253
def timestamp(self, name, value): """ Records the given timestamp. :param name: a counter name of Timestamp type. :param value: a timestamp to record. """ counter = self.get(name, CounterType.Timestamp) counter.time = value if value != None else datetime.datetime.utcnow() self._update()
[ "def", "timestamp", "(", "self", ",", "name", ",", "value", ")", ":", "counter", "=", "self", ".", "get", "(", "name", ",", "CounterType", ".", "Timestamp", ")", "counter", ".", "time", "=", "value", "if", "value", "!=", "None", "else", "datetime", "...
Records the given timestamp. :param name: a counter name of Timestamp type. :param value: a timestamp to record.
[ "Records", "the", "given", "timestamp", "." ]
python
train
django-parler/django-parler
parler/templatetags/parler_tags.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/templatetags/parler_tags.py#L67-L164
def get_translated_url(context, lang_code, object=None): """ Get the proper URL for this page in a different language. Note that this algorithm performs a "best effect" approach to give a proper URL. To make sure the proper view URL is returned, add the :class:`~parler.views.ViewUrlMixin` to your view. Example, to build a language menu:: <ul> {% for lang_code, title in LANGUAGES %} {% get_language_info for lang_code as lang %} {% get_translated_url lang_code as tr_url %} {% if tr_url %}<li{% if lang_code == LANGUAGE_CODE %} class="is-selected"{% endif %}><a href="{{ tr_url }}" hreflang="{{ lang_code }}">{{ lang.name_local|capfirst }}</a></li>{% endif %} {% endfor %} </ul> Or to inform search engines about the translated pages:: {% for lang_code, title in LANGUAGES %} {% get_translated_url lang_code as tr_url %} {% if tr_url %}<link rel="alternate" hreflang="{{ lang_code }}" href="{{ tr_url }}" />{% endif %} {% endfor %} Note that using this tag is not thread-safe if the object is shared between threads. It temporary changes the current language of the view object. The query string of the current page is preserved in the translated URL. When the ``object`` variable is explicitly provided however, the query string will not be added. In such situation, *django-parler* assumes that the object may point to a completely different page, hence to query string is added. """ view = context.get('view', None) request = context['request'] if object is not None: # Cannot reliable determine whether the current page is being translated, # or the template code provides a custom object to translate. # Hence, not passing the querystring of the current page qs = '' else: # Try a few common object variables, the SingleObjectMixin object, # The Django CMS "current_page" variable, or the "page" from django-fluent-pages and Mezzanine. # This makes this tag work with most CMSes out of the box. 
object = context.get('object', None) \ or context.get('current_page', None) \ or context.get('page', None) # Assuming current page, preserve query string filters. qs = request.META.get('QUERY_STRING', '') try: if view is not None: # Allow a view to specify what the URL should be. # This handles situations where the slug might be translated, # and gives you complete control over the results of this template tag. get_view_url = getattr(view, 'get_view_url', None) if get_view_url: with smart_override(lang_code): return _url_qs(view.get_view_url(), qs) # Now, the "best effort" part starts. # See if it's a DetailView that exposes the object. if object is None: object = getattr(view, 'object', None) if object is not None and hasattr(object, 'get_absolute_url'): # There is an object, get the URL in the different language. # NOTE: this *assumes* that there is a detail view, not some edit view. # In such case, a language menu would redirect a user from the edit page # to a detail page; which is still way better a 404 or homepage. if isinstance(object, TranslatableModel): # Need to handle object URL translations. # Just using smart_override() should be enough, as a translated object # should use `switch_language(self)` internally before returning an URL. # However, it doesn't hurt to help a bit here. with switch_language(object, lang_code): return _url_qs(object.get_absolute_url(), qs) else: # Always switch the language before resolving, so i18n_patterns() are supported. with smart_override(lang_code): return _url_qs(object.get_absolute_url(), qs) except TranslationDoesNotExist: # Typically projects have a fallback language, so even unknown languages will return something. # This either means fallbacks are disabled, or the fallback language is not found! return '' # Just reverse the current URL again in a new language, and see where we end up. # This doesn't handle translated slugs, but will resolve to the proper view name. 
resolver_match = request.resolver_match if resolver_match is None: # Can't resolve the page itself, the page is apparently a 404. # This can also happen for the homepage in an i18n_patterns situation. return '' with smart_override(lang_code): clean_kwargs = _cleanup_urlpattern_kwargs(resolver_match.kwargs) return _url_qs(reverse(resolver_match.view_name, args=resolver_match.args, kwargs=clean_kwargs, current_app=resolver_match.app_name), qs)
[ "def", "get_translated_url", "(", "context", ",", "lang_code", ",", "object", "=", "None", ")", ":", "view", "=", "context", ".", "get", "(", "'view'", ",", "None", ")", "request", "=", "context", "[", "'request'", "]", "if", "object", "is", "not", "No...
Get the proper URL for this page in a different language. Note that this algorithm performs a "best effect" approach to give a proper URL. To make sure the proper view URL is returned, add the :class:`~parler.views.ViewUrlMixin` to your view. Example, to build a language menu:: <ul> {% for lang_code, title in LANGUAGES %} {% get_language_info for lang_code as lang %} {% get_translated_url lang_code as tr_url %} {% if tr_url %}<li{% if lang_code == LANGUAGE_CODE %} class="is-selected"{% endif %}><a href="{{ tr_url }}" hreflang="{{ lang_code }}">{{ lang.name_local|capfirst }}</a></li>{% endif %} {% endfor %} </ul> Or to inform search engines about the translated pages:: {% for lang_code, title in LANGUAGES %} {% get_translated_url lang_code as tr_url %} {% if tr_url %}<link rel="alternate" hreflang="{{ lang_code }}" href="{{ tr_url }}" />{% endif %} {% endfor %} Note that using this tag is not thread-safe if the object is shared between threads. It temporary changes the current language of the view object. The query string of the current page is preserved in the translated URL. When the ``object`` variable is explicitly provided however, the query string will not be added. In such situation, *django-parler* assumes that the object may point to a completely different page, hence to query string is added.
[ "Get", "the", "proper", "URL", "for", "this", "page", "in", "a", "different", "language", "." ]
python
train
cisco-sas/kitty
kitty/remote/rpc.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/remote/rpc.py#L55-L66
def decode_string(data, encoding='hex'): ''' Decode string :param data: string to decode :param encoding: encoding to use (default: 'hex') :return: decoded string ''' if six.PY2: return data.decode(encoding) else: return codecs.decode(data.encode('ascii'), encoding)
[ "def", "decode_string", "(", "data", ",", "encoding", "=", "'hex'", ")", ":", "if", "six", ".", "PY2", ":", "return", "data", ".", "decode", "(", "encoding", ")", "else", ":", "return", "codecs", ".", "decode", "(", "data", ".", "encode", "(", "'asci...
Decode string :param data: string to decode :param encoding: encoding to use (default: 'hex') :return: decoded string
[ "Decode", "string" ]
python
train
gitpython-developers/smmap
smmap/util.py
https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L78-L83
def align(self): """Assures the previous window area is contained in the new one""" nofs = align_to_mmap(self.ofs, 0) self.size += self.ofs - nofs # keep size constant self.ofs = nofs self.size = align_to_mmap(self.size, 1)
[ "def", "align", "(", "self", ")", ":", "nofs", "=", "align_to_mmap", "(", "self", ".", "ofs", ",", "0", ")", "self", ".", "size", "+=", "self", ".", "ofs", "-", "nofs", "# keep size constant", "self", ".", "ofs", "=", "nofs", "self", ".", "size", "...
Assures the previous window area is contained in the new one
[ "Assures", "the", "previous", "window", "area", "is", "contained", "in", "the", "new", "one" ]
python
train
pgmpy/pgmpy
pgmpy/sampling/HMC.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/sampling/HMC.py#L88-L102
def _acceptance_prob(self, position, position_bar, momentum, momentum_bar): """ Returns the acceptance probability for given new position(position) and momentum """ # Parameters to help in evaluating Joint distribution P(position, momentum) _, logp = self.grad_log_pdf(position, self.model).get_gradient_log_pdf() _, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf() # acceptance_prob = P(position_bar, momentum_bar)/ P(position, momentum) potential_change = logp_bar - logp # Negative change kinetic_change = 0.5 * np.float(np.dot(momentum_bar.T, momentum_bar) - np.dot(momentum.T, momentum)) # acceptance probability return np.exp(potential_change - kinetic_change)
[ "def", "_acceptance_prob", "(", "self", ",", "position", ",", "position_bar", ",", "momentum", ",", "momentum_bar", ")", ":", "# Parameters to help in evaluating Joint distribution P(position, momentum)", "_", ",", "logp", "=", "self", ".", "grad_log_pdf", "(", "positio...
Returns the acceptance probability for given new position(position) and momentum
[ "Returns", "the", "acceptance", "probability", "for", "given", "new", "position", "(", "position", ")", "and", "momentum" ]
python
train
bfontaine/p7magma
magma/session.py
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/session.py#L85-L93
def get_results_soup(self, year=None): """ ``get_soup`` on the results page. The page URL depends on the year. """ if year is None: year = self.year year = YEARS.get(year, year) return self.get_soup(URLS['results'][year])
[ "def", "get_results_soup", "(", "self", ",", "year", "=", "None", ")", ":", "if", "year", "is", "None", ":", "year", "=", "self", ".", "year", "year", "=", "YEARS", ".", "get", "(", "year", ",", "year", ")", "return", "self", ".", "get_soup", "(", ...
``get_soup`` on the results page. The page URL depends on the year.
[ "get_soup", "on", "the", "results", "page", ".", "The", "page", "URL", "depends", "on", "the", "year", "." ]
python
train
mozilla/treeherder
treeherder/seta/high_value_jobs.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/high_value_jobs.py#L34-L64
def build_removals(active_jobs, failures, target): """ active_jobs - all possible desktop & android jobs on Treeherder (no PGO) failures - list of all failures target - percentage of failures we're going to process Return list of jobs to remove and list of revisions that are regressed """ # Determine the number of failures we're going to process # A failure is a revision + all of the jobs that were fixed by it number_of_failures = int((target / 100) * len(failures)) low_value_jobs = [] for jobtype in active_jobs: # Determine if removing an active job will reduce the number of failures we would catch # or stay the same remaining_failures = check_removal(failures, [jobtype]) if len(remaining_failures) >= number_of_failures: low_value_jobs.append(jobtype) failures = remaining_failures else: failed_revisions = [] for revision in failures: if revision not in remaining_failures: failed_revisions.append(revision) logger.info("jobtype: %s is the root failure(s) of these %s revisions", jobtype, failed_revisions) return low_value_jobs
[ "def", "build_removals", "(", "active_jobs", ",", "failures", ",", "target", ")", ":", "# Determine the number of failures we're going to process", "# A failure is a revision + all of the jobs that were fixed by it", "number_of_failures", "=", "int", "(", "(", "target", "/", "1...
active_jobs - all possible desktop & android jobs on Treeherder (no PGO) failures - list of all failures target - percentage of failures we're going to process Return list of jobs to remove and list of revisions that are regressed
[ "active_jobs", "-", "all", "possible", "desktop", "&", "android", "jobs", "on", "Treeherder", "(", "no", "PGO", ")", "failures", "-", "list", "of", "all", "failures", "target", "-", "percentage", "of", "failures", "we", "re", "going", "to", "process" ]
python
train
openstack/swauth
swauth/middleware.py
https://github.com/openstack/swauth/blob/0c8eaf50a9e2b3317f3eba62f205546904bc6d74/swauth/middleware.py#L525-L577
def handle_request(self, req): """Entry point for auth requests (ones that match the self.auth_prefix). Should return a WSGI-style callable (such as swob.Response). :param req: swob.Request object """ req.start_time = time() handler = None try: version, account, user, _junk = split_path(req.path_info, minsegs=0, maxsegs=4, rest_with_last=True) except ValueError: return HTTPNotFound(request=req) if version in ('v1', 'v1.0', 'auth'): if req.method == 'GET': handler = self.handle_get_token elif version == 'v2': if not self.super_admin_key: return HTTPNotFound(request=req) req.path_info_pop() if req.method == 'GET': if not account and not user: handler = self.handle_get_reseller elif account: if not user: handler = self.handle_get_account elif account == '.token': req.path_info_pop() handler = self.handle_validate_token else: handler = self.handle_get_user elif req.method == 'PUT': if not user: handler = self.handle_put_account else: handler = self.handle_put_user elif req.method == 'DELETE': if not user: handler = self.handle_delete_account else: handler = self.handle_delete_user elif req.method == 'POST': if account == '.prep': handler = self.handle_prep elif user == '.services': handler = self.handle_set_services else: handler = self.handle_webadmin if not handler: req.response = HTTPBadRequest(request=req) else: req.response = handler(req) return req.response
[ "def", "handle_request", "(", "self", ",", "req", ")", ":", "req", ".", "start_time", "=", "time", "(", ")", "handler", "=", "None", "try", ":", "version", ",", "account", ",", "user", ",", "_junk", "=", "split_path", "(", "req", ".", "path_info", ",...
Entry point for auth requests (ones that match the self.auth_prefix). Should return a WSGI-style callable (such as swob.Response). :param req: swob.Request object
[ "Entry", "point", "for", "auth", "requests", "(", "ones", "that", "match", "the", "self", ".", "auth_prefix", ")", ".", "Should", "return", "a", "WSGI", "-", "style", "callable", "(", "such", "as", "swob", ".", "Response", ")", "." ]
python
train
splunk/splunk-sdk-python
splunklib/client.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/client.py#L2940-L2984
def export(self, query, **params): """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to this job's events as an XML document from the server. To parse this stream into usable Python objects, pass the handle to :class:`splunklib.results.ResultsReader`:: import splunklib.client as client import splunklib.results as results service = client.connect(...) rr = results.ResultsReader(service.jobs.export("search * | head 5")) for result in rr: if isinstance(result, results.Message): # Diagnostic messages may be returned in the results print '%s: %s' % (result.type, result.message) elif isinstance(result, dict): # Normal events are returned as dicts print result assert rr.is_preview == False Running an export search is more efficient as it streams the results directly to you, rather than having to write them out to disk and make them available later. As soon as results are ready, you will receive them. The ``export`` method makes a single roundtrip to the server (as opposed to two for :meth:`create` followed by :meth:`preview`), plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. :raises `ValueError`: Raised for invalid queries. :param query: The search query. :type query: ``string`` :param params: Additional arguments (optional). For a list of valid parameters, see `GET search/jobs/export <http://docs/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_ in the REST API documentation. :type params: ``dict`` :return: The ``InputStream`` IO handle to raw XML returned from the server. """ if "exec_mode" in params: raise TypeError("Cannot specify an exec_mode to export.") params['segmentation'] = params.get('segmentation', 'none') return self.post(path_segment="export", search=query, **params).body
[ "def", "export", "(", "self", ",", "query", ",", "*", "*", "params", ")", ":", "if", "\"exec_mode\"", "in", "params", ":", "raise", "TypeError", "(", "\"Cannot specify an exec_mode to export.\"", ")", "params", "[", "'segmentation'", "]", "=", "params", ".", ...
Runs a search and immediately starts streaming preview events. This method returns a streaming handle to this job's events as an XML document from the server. To parse this stream into usable Python objects, pass the handle to :class:`splunklib.results.ResultsReader`:: import splunklib.client as client import splunklib.results as results service = client.connect(...) rr = results.ResultsReader(service.jobs.export("search * | head 5")) for result in rr: if isinstance(result, results.Message): # Diagnostic messages may be returned in the results print '%s: %s' % (result.type, result.message) elif isinstance(result, dict): # Normal events are returned as dicts print result assert rr.is_preview == False Running an export search is more efficient as it streams the results directly to you, rather than having to write them out to disk and make them available later. As soon as results are ready, you will receive them. The ``export`` method makes a single roundtrip to the server (as opposed to two for :meth:`create` followed by :meth:`preview`), plus at most two more if the ``autologin`` field of :func:`connect` is set to ``True``. :raises `ValueError`: Raised for invalid queries. :param query: The search query. :type query: ``string`` :param params: Additional arguments (optional). For a list of valid parameters, see `GET search/jobs/export <http://docs/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_ in the REST API documentation. :type params: ``dict`` :return: The ``InputStream`` IO handle to raw XML returned from the server.
[ "Runs", "a", "search", "and", "immediately", "starts", "streaming", "preview", "events", ".", "This", "method", "returns", "a", "streaming", "handle", "to", "this", "job", "s", "events", "as", "an", "XML", "document", "from", "the", "server", ".", "To", "p...
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/process.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/process.py#L2430-L2467
def peek_pointers_in_data(self, data, peekSize = 16, peekStep = 1): """ Tries to guess which values in the given data are valid pointers, and reads some data from them. @see: L{peek} @type data: str @param data: Binary data to find pointers in. @type peekSize: int @param peekSize: Number of bytes to read from each pointer found. @type peekStep: int @param peekStep: Expected data alignment. Tipically you specify 1 when data alignment is unknown, or 4 when you expect data to be DWORD aligned. Any other value may be specified. @rtype: dict( str S{->} str ) @return: Dictionary mapping stack offsets to the data they point to. """ result = dict() ptrSize = win32.sizeof(win32.LPVOID) if ptrSize == 4: ptrFmt = '<L' else: ptrFmt = '<Q' if len(data) > 0: for i in compat.xrange(0, len(data), peekStep): packed = data[i:i+ptrSize] if len(packed) == ptrSize: address = struct.unpack(ptrFmt, packed)[0] ## if not address & (~0xFFFF): continue peek_data = self.peek(address, peekSize) if peek_data: result[i] = peek_data return result
[ "def", "peek_pointers_in_data", "(", "self", ",", "data", ",", "peekSize", "=", "16", ",", "peekStep", "=", "1", ")", ":", "result", "=", "dict", "(", ")", "ptrSize", "=", "win32", ".", "sizeof", "(", "win32", ".", "LPVOID", ")", "if", "ptrSize", "==...
Tries to guess which values in the given data are valid pointers, and reads some data from them. @see: L{peek} @type data: str @param data: Binary data to find pointers in. @type peekSize: int @param peekSize: Number of bytes to read from each pointer found. @type peekStep: int @param peekStep: Expected data alignment. Tipically you specify 1 when data alignment is unknown, or 4 when you expect data to be DWORD aligned. Any other value may be specified. @rtype: dict( str S{->} str ) @return: Dictionary mapping stack offsets to the data they point to.
[ "Tries", "to", "guess", "which", "values", "in", "the", "given", "data", "are", "valid", "pointers", "and", "reads", "some", "data", "from", "them", "." ]
python
train
yyuu/botornado
boto/route53/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/route53/connection.py#L297-L320
def get_change(self, change_id): """ Get information about a proposed set of changes, as submitted by the change_rrsets method. Returns a Python data structure with status information about the changes. :type change_id: str :param change_id: The unique identifier for the set of changes. This ID is returned in the response to the change_rrsets method. """ uri = '/%s/change/%s' % (self.Version, change_id) response = self.make_request('GET', uri) body = response.read() boto.log.debug(body) if response.status >= 300: raise exception.DNSServerError(response.status, response.reason, body) e = boto.jsonresponse.Element() h = boto.jsonresponse.XmlHandler(e, None) h.parse(body) return e
[ "def", "get_change", "(", "self", ",", "change_id", ")", ":", "uri", "=", "'/%s/change/%s'", "%", "(", "self", ".", "Version", ",", "change_id", ")", "response", "=", "self", ".", "make_request", "(", "'GET'", ",", "uri", ")", "body", "=", "response", ...
Get information about a proposed set of changes, as submitted by the change_rrsets method. Returns a Python data structure with status information about the changes. :type change_id: str :param change_id: The unique identifier for the set of changes. This ID is returned in the response to the change_rrsets method.
[ "Get", "information", "about", "a", "proposed", "set", "of", "changes", "as", "submitted", "by", "the", "change_rrsets", "method", ".", "Returns", "a", "Python", "data", "structure", "with", "status", "information", "about", "the", "changes", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/wrappers.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/wrappers.py#L68-L103
def _execute(self, workdir, with_mpirun=False, exec_args=None): """ Execute the executable in a subprocess inside workdir. Some executables fail if we try to launch them with mpirun. Use with_mpirun=False to run the binary without it. """ qadapter = self.manager.qadapter if not with_mpirun: qadapter.name = None if self.verbose: print("Working in:", workdir) script = qadapter.get_script_str( job_name=self.name, launch_dir=workdir, executable=self.executable, qout_path="qout_file.path", qerr_path="qerr_file.path", stdin=self.stdin_fname, stdout=self.stdout_fname, stderr=self.stderr_fname, exec_args=exec_args ) # Write the script. script_file = os.path.join(workdir, "run" + self.name + ".sh") with open(script_file, "w") as fh: fh.write(script) os.chmod(script_file, 0o740) qjob, process = qadapter.submit_to_queue(script_file) self.stdout_data, self.stderr_data = process.communicate() self.returncode = process.returncode #raise self.Error("%s returned %s\n cmd_str: %s" % (self, self.returncode, self.cmd_str)) return self.returncode
[ "def", "_execute", "(", "self", ",", "workdir", ",", "with_mpirun", "=", "False", ",", "exec_args", "=", "None", ")", ":", "qadapter", "=", "self", ".", "manager", ".", "qadapter", "if", "not", "with_mpirun", ":", "qadapter", ".", "name", "=", "None", ...
Execute the executable in a subprocess inside workdir. Some executables fail if we try to launch them with mpirun. Use with_mpirun=False to run the binary without it.
[ "Execute", "the", "executable", "in", "a", "subprocess", "inside", "workdir", "." ]
python
train
DataDog/integrations-core
sqlserver/datadog_checks/sqlserver/sqlserver.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/sqlserver/datadog_checks/sqlserver/sqlserver.py#L766-L818
def fetch_metric(self, cursor, results, tags): ''' Because we need to query the metrics by matching pairs, we can't query all of them together without having to perform some matching based on the name afterwards so instead we query instance by instance. We cache the list of instance so that we don't have to look it up every time ''' if self.sql_name not in results: self.log.warning("Couldn't find {} in results".format(self.sql_name)) return tags = tags + self.tags results_list = results[self.sql_name] done_instances = [] for ndx, row in enumerate(results_list): ctype = row[0] cval = row[1] inst = row[2] object_name = row[3] if inst in done_instances: continue if (self.instance != ALL_INSTANCES and inst != self.instance) or ( self.object_name and object_name != self.object_name ): done_instances.append(inst) continue # find the next row which has the same instance cval2 = None ctype2 = None for second_row in results_list[: ndx + 1]: if inst == second_row[2]: cval2 = second_row[1] ctype2 = second_row[0] if cval2 is None: self.log.warning("Couldn't find second value for {}".format(self.sql_name)) continue done_instances.append(inst) if ctype < ctype2: value = cval base = cval2 else: value = cval2 base = cval metric_tags = list(tags) if self.instance == ALL_INSTANCES: metric_tags.append('{}:{}'.format(self.tag_by, inst.strip())) self.report_fraction(value, base, metric_tags)
[ "def", "fetch_metric", "(", "self", ",", "cursor", ",", "results", ",", "tags", ")", ":", "if", "self", ".", "sql_name", "not", "in", "results", ":", "self", ".", "log", ".", "warning", "(", "\"Couldn't find {} in results\"", ".", "format", "(", "self", ...
Because we need to query the metrics by matching pairs, we can't query all of them together without having to perform some matching based on the name afterwards so instead we query instance by instance. We cache the list of instance so that we don't have to look it up every time
[ "Because", "we", "need", "to", "query", "the", "metrics", "by", "matching", "pairs", "we", "can", "t", "query", "all", "of", "them", "together", "without", "having", "to", "perform", "some", "matching", "based", "on", "the", "name", "afterwards", "so", "in...
python
train
rycus86/ghost-client
ghost_client/api.py
https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L346-L389
def execute_get(self, resource, **kwargs): """ Execute an HTTP GET request against the API endpoints. This method is meant for internal use. :param resource: The last part of the URI :param kwargs: Additional query parameters (and optionally headers) :return: The HTTP response as JSON or `GhostException` if unsuccessful """ url = '%s/%s' % (self.base_url, resource) headers = kwargs.pop('headers', dict()) headers['Accept'] = 'application/json' headers['Content-Type'] = 'application/json' if kwargs: separator = '&' if '?' in url else '?' for key, value in kwargs.items(): if hasattr(value, '__iter__') and type(value) not in six.string_types: url = '%s%s%s=%s' % (url, separator, key, ','.join(value)) else: url = '%s%s%s=%s' % (url, separator, key, value) separator = '&' if self._access_token: headers['Authorization'] = 'Bearer %s' % self._access_token else: separator = '&' if '?' in url else '?' url = '%s%sclient_id=%s&client_secret=%s' % ( url, separator, self._client_id, self._client_secret ) response = requests.get(url, headers=headers) if response.status_code // 100 != 2: raise GhostException(response.status_code, response.json().get('errors', [])) return response.json()
[ "def", "execute_get", "(", "self", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'%s/%s'", "%", "(", "self", ".", "base_url", ",", "resource", ")", "headers", "=", "kwargs", ".", "pop", "(", "'headers'", ",", "dict", "(", ")", ")...
Execute an HTTP GET request against the API endpoints. This method is meant for internal use. :param resource: The last part of the URI :param kwargs: Additional query parameters (and optionally headers) :return: The HTTP response as JSON or `GhostException` if unsuccessful
[ "Execute", "an", "HTTP", "GET", "request", "against", "the", "API", "endpoints", ".", "This", "method", "is", "meant", "for", "internal", "use", "." ]
python
train
todbot/blink1-python
blink1/kelvin.py
https://github.com/todbot/blink1-python/blob/7a5183becd9662f88da3c29afd3447403f4ef82f/blink1/kelvin.py#L24-L35
def correct_output(luminosity): """ :param luminosity: Input luminosity :return: Luminosity limited to the 0 <= l <= 255 range. """ if luminosity < 0: val = 0 elif luminosity > 255: val = 255 else: val = luminosity return round(val)
[ "def", "correct_output", "(", "luminosity", ")", ":", "if", "luminosity", "<", "0", ":", "val", "=", "0", "elif", "luminosity", ">", "255", ":", "val", "=", "255", "else", ":", "val", "=", "luminosity", "return", "round", "(", "val", ")" ]
:param luminosity: Input luminosity :return: Luminosity limited to the 0 <= l <= 255 range.
[ ":", "param", "luminosity", ":", "Input", "luminosity", ":", "return", ":", "Luminosity", "limited", "to", "the", "0", "<", "=", "l", "<", "=", "255", "range", "." ]
python
train
markchil/gptools
gptools/kernel/core.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/core.py#L709-L732
def _compute_dk_dtau_on_partition(self, tau, p): """Evaluate the term inside the sum of Faa di Bruno's formula for the given partition. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. p : list of :py:class:`Array` Each element is a block of the partition representing the derivative orders to use. Returns ------- dk_dtau : :py:class:`Array`, (`M`,) The specified derivatives over the given partition at the specified locations. """ y, r2l2 = self._compute_y(tau, return_r2l2=True) # Compute the d^(|pi|)f/dy term: dk_dtau = self._compute_dk_dy(y, len(p)) # Multiply in each of the block terms: for b in p: dk_dtau *= self._compute_dy_dtau(tau, b, r2l2) return dk_dtau
[ "def", "_compute_dk_dtau_on_partition", "(", "self", ",", "tau", ",", "p", ")", ":", "y", ",", "r2l2", "=", "self", ".", "_compute_y", "(", "tau", ",", "return_r2l2", "=", "True", ")", "# Compute the d^(|pi|)f/dy term:", "dk_dtau", "=", "self", ".", "_comput...
Evaluate the term inside the sum of Faa di Bruno's formula for the given partition. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. p : list of :py:class:`Array` Each element is a block of the partition representing the derivative orders to use. Returns ------- dk_dtau : :py:class:`Array`, (`M`,) The specified derivatives over the given partition at the specified locations.
[ "Evaluate", "the", "term", "inside", "the", "sum", "of", "Faa", "di", "Bruno", "s", "formula", "for", "the", "given", "partition", ".", "Parameters", "----------", "tau", ":", ":", "py", ":", "class", ":", "Matrix", "(", "M", "D", ")", "M", "inputs", ...
python
train
gwpy/gwpy
gwpy/plot/gps.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L69-L78
def _truncate(f, n): """Truncates/pads a float `f` to `n` decimal places without rounding From https://stackoverflow.com/a/783927/1307974 (CC-BY-SA) """ s = "{}".format(f) if "e" in s or "E" in s: return "{0:.{1}f}".format(f, n) i, p, d = s.partition(".") return ".".join([i, (d+"0"*n)[:n]])
[ "def", "_truncate", "(", "f", ",", "n", ")", ":", "s", "=", "\"{}\"", ".", "format", "(", "f", ")", "if", "\"e\"", "in", "s", "or", "\"E\"", "in", "s", ":", "return", "\"{0:.{1}f}\"", ".", "format", "(", "f", ",", "n", ")", "i", ",", "p", ","...
Truncates/pads a float `f` to `n` decimal places without rounding From https://stackoverflow.com/a/783927/1307974 (CC-BY-SA)
[ "Truncates", "/", "pads", "a", "float", "f", "to", "n", "decimal", "places", "without", "rounding" ]
python
train
Bachmann1234/diff-cover
diff_cover/report_generator.py
https://github.com/Bachmann1234/diff-cover/blob/901cb3fc986982961785e841658085ead453c6c9/diff_cover/report_generator.py#L309-L333
def _src_path_stats(self, src_path): """ Return a dict of statistics for the source file at `src_path`. """ # Find violation lines violation_lines = self.violation_lines(src_path) violations = sorted(self._diff_violations()[src_path].violations) # Load source snippets (if the report will display them) # If we cannot load the file, then fail gracefully if self.INCLUDE_SNIPPETS: try: snippets = Snippet.load_snippets_html(src_path, violation_lines) except IOError: snippets = [] else: snippets = [] return { 'percent_covered': self.percent_covered(src_path), 'violation_lines': TemplateReportGenerator.combine_adjacent_lines(violation_lines), 'violations': violations, 'snippets_html': snippets }
[ "def", "_src_path_stats", "(", "self", ",", "src_path", ")", ":", "# Find violation lines", "violation_lines", "=", "self", ".", "violation_lines", "(", "src_path", ")", "violations", "=", "sorted", "(", "self", ".", "_diff_violations", "(", ")", "[", "src_path"...
Return a dict of statistics for the source file at `src_path`.
[ "Return", "a", "dict", "of", "statistics", "for", "the", "source", "file", "at", "src_path", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/lib/mp_image.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/mp_image.py#L188-L196
def events(self): '''check for events a list of events''' ret = [] while True: e = self.poll() if e is None: break ret.append(e) return ret
[ "def", "events", "(", "self", ")", ":", "ret", "=", "[", "]", "while", "True", ":", "e", "=", "self", ".", "poll", "(", ")", "if", "e", "is", "None", ":", "break", "ret", ".", "append", "(", "e", ")", "return", "ret" ]
check for events a list of events
[ "check", "for", "events", "a", "list", "of", "events" ]
python
train
joowani/quadriga
quadriga/client.py
https://github.com/joowani/quadriga/blob/412f88f414ef0cb53efa6d5841b9674eb9718359/quadriga/client.py#L130-L151
def book(self, name): """Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS """ self._validate_order_book(name) return OrderBook(name, self._rest_client, self._logger)
[ "def", "book", "(", "self", ",", "name", ")", ":", "self", ".", "_validate_order_book", "(", "name", ")", "return", "OrderBook", "(", "name", ",", "self", ".", "_rest_client", ",", "self", ".", "_logger", ")" ]
Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS
[ "Return", "an", "API", "wrapper", "for", "the", "given", "order", "book", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py#L377-L392
def generate_new_bracket(self): """generate a new bracket""" logger.debug( 'start to create a new SuccessiveHalving iteration, self.curr_s=%d', self.curr_s) if self.curr_s < 0: logger.info("s < 0, Finish this round of Hyperband in BOHB. Generate new round") self.curr_s = self.s_max self.brackets[self.curr_s] = Bracket(s=self.curr_s, s_max=self.s_max, eta=self.eta, max_budget=self.max_budget, optimize_mode=self.optimize_mode) next_n, next_r = self.brackets[self.curr_s].get_n_r() logger.debug( 'new SuccessiveHalving iteration, next_n=%d, next_r=%d', next_n, next_r) # rewrite with TPE generated_hyper_configs = self.brackets[self.curr_s].get_hyperparameter_configurations( next_n, next_r, self.cg) self.generated_hyper_configs = generated_hyper_configs.copy()
[ "def", "generate_new_bracket", "(", "self", ")", ":", "logger", ".", "debug", "(", "'start to create a new SuccessiveHalving iteration, self.curr_s=%d'", ",", "self", ".", "curr_s", ")", "if", "self", ".", "curr_s", "<", "0", ":", "logger", ".", "info", "(", "\"...
generate a new bracket
[ "generate", "a", "new", "bracket" ]
python
train
lsst-sqre/sqre-codekit
codekit/cli/github_tag_release.py
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_release.py#L32-L229
def parse_args(): """Parse command-line arguments""" prog = 'github-tag-release' parser = argparse.ArgumentParser( prog=prog, formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(""" Tag git repositories, in a GitHub org, that correspond to the products in a published eups distrib tag. Examples: # eups tag is derived from git tag {prog} \\ --dry-run \\ --debug \\ --limit 10 \\ --org lsst \\ --allow-team 'Data Management' \\ --allow-team 'DM Externals' \\ 'w.2018.18' 'b3595' # explicit eups tag and git tag {prog} \\ --dry-run \\ --debug \\ --limit 10 \\ --org lsst \\ --allow-team 'Data Management' \\ --allow-team 'DM Externals' \\ --external-team 'DM Externals' \\ --eups-tag v11_0_rc2 \\ 11.0.rc2 b1679 # verify a past eups tag + git tag release {prog} \\ --verify \\ --debug \\ --limit 10 \\ --org 'lsst' \\ --allow-team 'Data Management' \\ --allow-team 'DM Externals' \\ --external-team 'DM Externals' \\ --deny-team 'DM Auxilliaries' \\ --email 'sqre-admin@lists.lsst.org' \\ --user 'sqreadmin' \\ --token "$GITHUB_TOKEN" \\ --ignore-git-tagger \\ --ignore-git-message \\ --manifest 'b3595' \\ 'w.2018.18' # tag a git release from a manifest *without* a pre-existing # eups tag. {prog} \\ --dry-run \\ --debug \\ --limit 10 \\ --org 'lsst' \\ --allow-team 'Data Management' \\ --allow-team 'DM Externals' \\ --external-team 'DM Externals' \\ --deny-team 'DM Auxilliaries' \\ --email 'sqre-admin@lists.lsst.org' \\ --user 'sqreadmin' \\ --token "$GITHUB_TOKEN" \\ --ignore-git-tagger \\ --ignore-git-message \\ --manifest 'b3595' \\ --manifest-only \\ 'w.2018.18' Note that the access token must have access to these oauth scopes: * read:org * repo The token generated by `github-auth --user` should have sufficient permissions. 
""").format(prog=prog), epilog='Part of codekit: https://github.com/lsst-sqre/sqre-codekit' ) parser.add_argument( '--manifest', required=True, help='Name of versiondb manifest for git repo sha resolution' ' AKA bNNNN') parser.add_argument( '--org', required=True, help='Github organization') parser.add_argument( '--allow-team', action='append', required=True, help='git repos to be tagged MUST be a member of ONE or more of' ' these teams (can specify several times)') parser.add_argument( '--external-team', action='append', help='git repos in this team MUST not have tags that start with a' ' number. Any requested tag that violates this policy will be' ' prefixed with \'v\' (can specify several times)') parser.add_argument( '--deny-team', action='append', help='git repos to be tagged MUST NOT be a member of ANY of' ' these teams (can specify several times)') parser.add_argument( '--user', help='Name of person making the tag - defaults to gitconfig value') parser.add_argument( '--email', help='Email address of tagger - defaults to gitconfig value') parser.add_argument( '--token-path', default='~/.sq_github_token_delete', help='Use a token (made with github-auth) in a non-standard location') parser.add_argument( '--token', default=None, help='Literal github personal access token string') parser.add_argument( '--versiondb-base-url', default=os.getenv('LSST_VERSIONDB_BASE_URL'), help='Override the default versiondb base url') parser.add_argument( '--eupstag-base-url', default=os.getenv('LSST_EUPSTAG_BASE_URL'), help='Override the default eupstag base url') parser.add_argument( '--force-tag', action='store_true', help='Force moving pre-existing annotated git tags.') parser.add_argument( '--ignore-manifest-versions', action='store_true', help='Ignore manifest version strings' ' when cross referencing eups tag and manifest data.') parser.add_argument( '--ignore-git-message', action='store_true', help='Ignore git tag message when verifying an existing tag.') 
parser.add_argument( '--ignore-git-tagger', action='store_true', help='Ignore git tag "tagger"/author when verifying an existing tag.') parser.add_argument( '--limit', default=None, type=int, help='Maximum number of products/repos to tags. (useful for testing)') parser.add_argument( '--fail-fast', action='store_true', help='Fail immediately on github API error.') parser.add_argument( '--no-fail-fast', action='store_const', const=False, dest='fail_fast', help='DO NOT Fail immediately on github API error(s). (default)') parser.add_argument( '-d', '--debug', action='count', default=codetools.debug_lvl_from_env(), help='Debug mode (can specify several times)') parser.add_argument('-v', '--version', action=codetools.ScmVersionAction) parser.add_argument('tag') manifest_group = parser.add_mutually_exclusive_group() manifest_group.add_argument( '--eups-tag', help='(mutually exclusive with --manifest-only)') manifest_group.add_argument( '--manifest-only', action='store_true', help='Do not cross reference a published eups tag with the manifest' ' -- use only the metadata from the manifest to determine' ' git tag location.' ' This allows a git tag to be created without a prior eups tag.' ' (mutually exclusive with --eups-tag)') dryrun_group = parser.add_mutually_exclusive_group() dryrun_group.add_argument( '--dry-run', action='store_true', help='Do not create/update tag(s) or modify any state.' ' (mutually exclusive with --verify)') dryrun_group.add_argument( '--verify', action='store_true', help='Verify that all git tags for a release are present and correct.' ' will not create/update tag(s) or modify any state.' ' (mutually exclusive with --dry-run)') return parser.parse_args()
[ "def", "parse_args", "(", ")", ":", "prog", "=", "'github-tag-release'", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "prog", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ",", "description", "=", "textwrap", ...
Parse command-line arguments
[ "Parse", "command", "-", "line", "arguments" ]
python
train
Kortemme-Lab/klab
klab/general/strutil.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L15-L19
def parse_range(s, range_separator = '-'): ''' Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s. Written by Laurens Kraal 2014. ''' return reduce(lambda x,y: x+y, (map(lambda r: (range(int(r.split(range_separator)[0]), int(r.split(range_separator)[1])+1)) if range_separator in r else [int(r)], s.split(','))))
[ "def", "parse_range", "(", "s", ",", "range_separator", "=", "'-'", ")", ":", "return", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "(", "map", "(", "lambda", "r", ":", "(", "range", "(", "int", "(", "r", ".", "split", "(",...
Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s. Written by Laurens Kraal 2014.
[ "Parses", "the", "string", "s", "which", "contains", "indices", "and", "ranges", "and", "returns", "the", "explicit", "list", "of", "integers", "defined", "by", "s", ".", "Written", "by", "Laurens", "Kraal", "2014", "." ]
python
train
saltstack/salt
salt/modules/pf.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pf.py#L99-L135
def loglevel(level): ''' Set the debug level which limits the severity of log messages printed by ``pf(4)``. level: Log level. Should be one of the following: emerg, alert, crit, err, warning, notice, info or debug (OpenBSD); or none, urgent, misc, loud (FreeBSD). CLI example: .. code-block:: bash salt '*' pf.loglevel emerg ''' # There's no way to getting the previous loglevel so imply we've # always made a change. ret = {'changes': True} myos = __grains__['os'] if myos == 'FreeBSD': all_levels = ['none', 'urgent', 'misc', 'loud'] else: all_levels = ['emerg', 'alert', 'crit', 'err', 'warning', 'notice', 'info', 'debug'] if level not in all_levels: raise SaltInvocationError('Unknown loglevel: {0}'.format(level)) result = __salt__['cmd.run_all']('pfctl -x {0}'.format(level), output_loglevel='trace', python_shell=False) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered setting loglevel', info={'errors': [result['stderr']], 'changes': False} ) return ret
[ "def", "loglevel", "(", "level", ")", ":", "# There's no way to getting the previous loglevel so imply we've", "# always made a change.", "ret", "=", "{", "'changes'", ":", "True", "}", "myos", "=", "__grains__", "[", "'os'", "]", "if", "myos", "==", "'FreeBSD'", ":...
Set the debug level which limits the severity of log messages printed by ``pf(4)``. level: Log level. Should be one of the following: emerg, alert, crit, err, warning, notice, info or debug (OpenBSD); or none, urgent, misc, loud (FreeBSD). CLI example: .. code-block:: bash salt '*' pf.loglevel emerg
[ "Set", "the", "debug", "level", "which", "limits", "the", "severity", "of", "log", "messages", "printed", "by", "pf", "(", "4", ")", "." ]
python
train
pypa/pipenv
pipenv/vendor/pathlib2/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pathlib2/__init__.py#L1230-L1237
def _raw_open(self, flags, mode=0o777): """ Open the file pointed by this path and return a file descriptor, as os.open() does. """ if self._closed: self._raise_closed() return self._accessor.open(self, flags, mode)
[ "def", "_raw_open", "(", "self", ",", "flags", ",", "mode", "=", "0o777", ")", ":", "if", "self", ".", "_closed", ":", "self", ".", "_raise_closed", "(", ")", "return", "self", ".", "_accessor", ".", "open", "(", "self", ",", "flags", ",", "mode", ...
Open the file pointed by this path and return a file descriptor, as os.open() does.
[ "Open", "the", "file", "pointed", "by", "this", "path", "and", "return", "a", "file", "descriptor", "as", "os", ".", "open", "()", "does", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/hw/transport/server/standard.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/server/standard.py#L288-L309
async def open_interface(self, client_id, conn_string, interface): """Open a device interface on behalf of a client. See :meth:`AbstractDeviceAdapter.open_interface`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter. interface (str): The name of the interface to open. Raises: DeviceServerError: There is an issue with your client_id such as not being connected to the device. DeviceAdapterError: The adapter had an issue opening the interface. """ conn_id = self._client_connection(client_id, conn_string) # Hook first so there is no race on getting the first event self._hook_open_interface(conn_string, interface, client_id) await self.adapter.open_interface(conn_id, interface)
[ "async", "def", "open_interface", "(", "self", ",", "client_id", ",", "conn_string", ",", "interface", ")", ":", "conn_id", "=", "self", ".", "_client_connection", "(", "client_id", ",", "conn_string", ")", "# Hook first so there is no race on getting the first event", ...
Open a device interface on behalf of a client. See :meth:`AbstractDeviceAdapter.open_interface`. Args: client_id (str): The client we are working for. conn_string (str): A connection string that will be passed to the underlying device adapter. interface (str): The name of the interface to open. Raises: DeviceServerError: There is an issue with your client_id such as not being connected to the device. DeviceAdapterError: The adapter had an issue opening the interface.
[ "Open", "a", "device", "interface", "on", "behalf", "of", "a", "client", "." ]
python
train
rodluger/everest
everest/missions/k2/pbs.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L322-L350
def _Publish(campaign, subcampaign, strkwargs): ''' The actual function that publishes a given campaign; this must be called from ``missions/k2/publish.pbs``. ''' # Get kwargs from string kwargs = pickle.loads(strkwargs.replace('%%%', '\n').encode('utf-8')) # Check the cadence cadence = kwargs.get('cadence', 'lc') # Model wrapper m = FunctionWrapper(EverestModel, season=campaign, publish=True, **kwargs) # Set up our custom exception handler sys.excepthook = ExceptionHook # Initialize our multiprocessing pool with Pool() as pool: # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all the stars stars = GetK2Campaign(campaign, epics_only=True, cadence=cadence) # Run pool.map(m, stars)
[ "def", "_Publish", "(", "campaign", ",", "subcampaign", ",", "strkwargs", ")", ":", "# Get kwargs from string", "kwargs", "=", "pickle", ".", "loads", "(", "strkwargs", ".", "replace", "(", "'%%%'", ",", "'\\n'", ")", ".", "encode", "(", "'utf-8'", ")", ")...
The actual function that publishes a given campaign; this must be called from ``missions/k2/publish.pbs``.
[ "The", "actual", "function", "that", "publishes", "a", "given", "campaign", ";", "this", "must", "be", "called", "from", "missions", "/", "k2", "/", "publish", ".", "pbs", "." ]
python
train
area4lib/area4
area4/util.py
https://github.com/area4lib/area4/blob/7f71b58d6b44b1a61284a8a01f26afd3138b9b17/area4/util.py#L42-L62
def reduce_to_unit(divider): """ Reduce a repeating divider to the smallest repeating unit possible. Note: this function is used by make-div :param divider: the divider :return: smallest repeating unit possible :rtype: str :Example: 'XxXxXxX' -> 'Xx' """ for unit_size in range(1, len(divider) // 2 + 1): length = len(divider) unit = divider[:unit_size] # Ignores mismatches in final characters: divider_item = divider[:unit_size * (length // unit_size)] if unit * (length // unit_size) == divider_item: return unit return divider
[ "def", "reduce_to_unit", "(", "divider", ")", ":", "for", "unit_size", "in", "range", "(", "1", ",", "len", "(", "divider", ")", "//", "2", "+", "1", ")", ":", "length", "=", "len", "(", "divider", ")", "unit", "=", "divider", "[", ":", "unit_size"...
Reduce a repeating divider to the smallest repeating unit possible. Note: this function is used by make-div :param divider: the divider :return: smallest repeating unit possible :rtype: str :Example: 'XxXxXxX' -> 'Xx'
[ "Reduce", "a", "repeating", "divider", "to", "the", "smallest", "repeating", "unit", "possible", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/cases/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L582-L595
def status(institute_id, case_name): """Update status of a specific case.""" institute_obj, case_obj = institute_and_case(store, institute_id, case_name) user_obj = store.user(current_user.email) status = request.form.get('status', case_obj['status']) link = url_for('.case', institute_id=institute_id, case_name=case_name) if status == 'archive': store.archive_case(institute_obj, case_obj, user_obj, status, link) else: store.update_status(institute_obj, case_obj, user_obj, status, link) return redirect(request.referrer)
[ "def", "status", "(", "institute_id", ",", "case_name", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "user_obj", "=", "store", ".", "user", "(", "current_user", ".", "email", ...
Update status of a specific case.
[ "Update", "status", "of", "a", "specific", "case", "." ]
python
test
zhanglab/psamm
psamm/massconsistency.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/massconsistency.py#L141-L186
def check_compound_consistency(database, solver, exchange=set(), zeromass=set()): """Yield each compound in the database with assigned mass Each compound will be assigned a mass and the number of compounds having a positive mass will be approximately maximized. This is an implementation of the solution originally proposed by [Gevorgyan08]_ but using the new method proposed by [Thiele14]_ to avoid MILP constraints. This is similar to the way Fastcore avoids MILP contraints. """ # Create mass balance problem prob = solver.create_problem() compound_set = _non_localized_compounds(database) mass_compounds = compound_set.difference(zeromass) # Define mass variables m = prob.namespace(mass_compounds, lower=0) # Define z variables z = prob.namespace(mass_compounds, lower=0, upper=1) prob.set_objective(z.sum(mass_compounds)) prob.add_linear_constraints(m.set(mass_compounds) >= z.set(mass_compounds)) massbalance_lhs = {reaction_id: 0 for reaction_id in database.reactions} for (compound, reaction_id), value in iteritems(database.matrix): if compound not in zeromass: mass_var = m(compound.in_compartment(None)) massbalance_lhs[reaction_id] += mass_var * value for reaction_id, lhs in iteritems(massbalance_lhs): if reaction_id not in exchange: prob.add_linear_constraints(lhs == 0) # Solve try: prob.solve(lp.ObjectiveSense.Maximize) except lp.SolverError as e: raise_from( MassConsistencyError('Failed to solve mass consistency: {}'.format( e)), e) for compound in mass_compounds: yield compound, m.value(compound)
[ "def", "check_compound_consistency", "(", "database", ",", "solver", ",", "exchange", "=", "set", "(", ")", ",", "zeromass", "=", "set", "(", ")", ")", ":", "# Create mass balance problem", "prob", "=", "solver", ".", "create_problem", "(", ")", "compound_set"...
Yield each compound in the database with assigned mass Each compound will be assigned a mass and the number of compounds having a positive mass will be approximately maximized. This is an implementation of the solution originally proposed by [Gevorgyan08]_ but using the new method proposed by [Thiele14]_ to avoid MILP constraints. This is similar to the way Fastcore avoids MILP contraints.
[ "Yield", "each", "compound", "in", "the", "database", "with", "assigned", "mass" ]
python
train
polysquare/polysquare-setuptools-lint
polysquare_setuptools_lint/__init__.py
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L479-L490
def _file_lines(self, filename): """Get lines for filename, caching opened files.""" try: return self._file_lines_cache[filename] except KeyError: if os.path.isfile(filename): with open(filename) as python_file: self._file_lines_cache[filename] = python_file.readlines() else: self._file_lines_cache[filename] = "" return self._file_lines_cache[filename]
[ "def", "_file_lines", "(", "self", ",", "filename", ")", ":", "try", ":", "return", "self", ".", "_file_lines_cache", "[", "filename", "]", "except", "KeyError", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(...
Get lines for filename, caching opened files.
[ "Get", "lines", "for", "filename", "caching", "opened", "files", "." ]
python
train
voidpp/python-tools
voidpp_tools/daemon.py
https://github.com/voidpp/python-tools/blob/0fc7460c827b02d8914411cedddadc23ccb3cc73/voidpp_tools/daemon.py#L99-L116
def start(self): """ Start the daemon """ if self.is_running(): msg = "Daemon already running (pidfile:%s)" % self.pidfile self.logger.error(msg) return msg initres = self.init() if not initres[0]: return initres[1] # Start the daemon self.daemonize() return self.run()
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "is_running", "(", ")", ":", "msg", "=", "\"Daemon already running (pidfile:%s)\"", "%", "self", ".", "pidfile", "self", ".", "logger", ".", "error", "(", "msg", ")", "return", "msg", "initres", "...
Start the daemon
[ "Start", "the", "daemon" ]
python
train
proteanhq/protean
src/protean/impl/email/local_mem.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/impl/email/local_mem.py#L19-L27
def send_messages(self, messages): """Redirect messages to the dummy outbox""" msg_count = 0 for message in messages: # .message() triggers header validation message.message() email.outbox.append(message) msg_count += 1 return msg_count
[ "def", "send_messages", "(", "self", ",", "messages", ")", ":", "msg_count", "=", "0", "for", "message", "in", "messages", ":", "# .message() triggers header validation", "message", ".", "message", "(", ")", "email", ".", "outbox", ".", "append", "(", "message...
Redirect messages to the dummy outbox
[ "Redirect", "messages", "to", "the", "dummy", "outbox" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L595-L598
def create_serv_obj(self, tenant_id): """Creates and stores the service object associated with a tenant. """ self.service_attr[tenant_id] = ServiceIpSegTenantMap() self.store_tenant_obj(tenant_id, self.service_attr[tenant_id])
[ "def", "create_serv_obj", "(", "self", ",", "tenant_id", ")", ":", "self", ".", "service_attr", "[", "tenant_id", "]", "=", "ServiceIpSegTenantMap", "(", ")", "self", ".", "store_tenant_obj", "(", "tenant_id", ",", "self", ".", "service_attr", "[", "tenant_id"...
Creates and stores the service object associated with a tenant.
[ "Creates", "and", "stores", "the", "service", "object", "associated", "with", "a", "tenant", "." ]
python
train
junzis/pyModeS
pyModeS/decoder/bds/bds20.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds20.py#L50-L73
def cs20(msg): """Aircraft callsign Args: msg (String): 28 bytes hexadecimal message (BDS40) string Returns: string: callsign, max. 8 chars """ chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######' d = hex2bin(data(msg)) cs = '' cs += chars[bin2int(d[8:14])] cs += chars[bin2int(d[14:20])] cs += chars[bin2int(d[20:26])] cs += chars[bin2int(d[26:32])] cs += chars[bin2int(d[32:38])] cs += chars[bin2int(d[38:44])] cs += chars[bin2int(d[44:50])] cs += chars[bin2int(d[50:56])] return cs
[ "def", "cs20", "(", "msg", ")", ":", "chars", "=", "'#ABCDEFGHIJKLMNOPQRSTUVWXYZ#####_###############0123456789######'", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "cs", "=", "''", "cs", "+=", "chars", "[", "bin2int", "(", "d", "[", "8", ":", ...
Aircraft callsign Args: msg (String): 28 bytes hexadecimal message (BDS40) string Returns: string: callsign, max. 8 chars
[ "Aircraft", "callsign" ]
python
train
Komnomnomnom/swigibpy
swigibpy.py
https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L2441-L2443
def tickString(self, tickerId, tickType, value): """tickString(EWrapper self, TickerId tickerId, TickType tickType, IBString const & value)""" return _swigibpy.EWrapper_tickString(self, tickerId, tickType, value)
[ "def", "tickString", "(", "self", ",", "tickerId", ",", "tickType", ",", "value", ")", ":", "return", "_swigibpy", ".", "EWrapper_tickString", "(", "self", ",", "tickerId", ",", "tickType", ",", "value", ")" ]
tickString(EWrapper self, TickerId tickerId, TickType tickType, IBString const & value)
[ "tickString", "(", "EWrapper", "self", "TickerId", "tickerId", "TickType", "tickType", "IBString", "const", "&", "value", ")" ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L6089-L6096
def saveFormatFileTo(self, cur, encoding, format): """Dump an XML document to an I/O buffer. Warning ! This call xmlOutputBufferClose() on buf which is not available after this call. """ if cur is None: cur__o = None else: cur__o = cur._o ret = libxml2mod.xmlSaveFormatFileTo(self._o, cur__o, encoding, format) return ret
[ "def", "saveFormatFileTo", "(", "self", ",", "cur", ",", "encoding", ",", "format", ")", ":", "if", "cur", "is", "None", ":", "cur__o", "=", "None", "else", ":", "cur__o", "=", "cur", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlSaveFormatFileTo", "("...
Dump an XML document to an I/O buffer. Warning ! This call xmlOutputBufferClose() on buf which is not available after this call.
[ "Dump", "an", "XML", "document", "to", "an", "I", "/", "O", "buffer", ".", "Warning", "!", "This", "call", "xmlOutputBufferClose", "()", "on", "buf", "which", "is", "not", "available", "after", "this", "call", "." ]
python
train
wonambi-python/wonambi
wonambi/attr/annotations.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/annotations.py#L1315-L1366
def get_cycles(self): """Return the cycle start and end times. Returns ------- list of tuple of float start and end times for each cycle, in seconds from recording start and the cycle index starting at 1 """ cycles = self.rater.find('cycles') if not cycles: return None starts = sorted( [float(mrkr.text) for mrkr in cycles.findall('cyc_start')]) ends = sorted( [float(mrkr.text) for mrkr in cycles.findall('cyc_end')]) cyc_list = [] if not starts or not ends: return None if all(i < starts[0] for i in ends): raise ValueError('First cycle has no start.') for (this_start, next_start) in zip(starts, starts[1:] + [inf]): # if an end is smaller than the next start, make it the end # otherwise, the next_start is the end end_between_starts = [end for end in ends \ if this_start < end <= next_start] if len(end_between_starts) > 1: raise ValueError('Found more than one cycle end for same ' 'cycle') if end_between_starts: one_cycle = (this_start, end_between_starts[0]) else: one_cycle = (this_start, next_start) if one_cycle[1] == inf: raise ValueError('Last cycle has no end.') cyc_list.append(one_cycle) output = [] for i, j in enumerate(cyc_list): cyc = j[0], j[1], i + 1 output.append(cyc) return output
[ "def", "get_cycles", "(", "self", ")", ":", "cycles", "=", "self", ".", "rater", ".", "find", "(", "'cycles'", ")", "if", "not", "cycles", ":", "return", "None", "starts", "=", "sorted", "(", "[", "float", "(", "mrkr", ".", "text", ")", "for", "mrk...
Return the cycle start and end times. Returns ------- list of tuple of float start and end times for each cycle, in seconds from recording start and the cycle index starting at 1
[ "Return", "the", "cycle", "start", "and", "end", "times", "." ]
python
train
noahbenson/neuropythy
neuropythy/geometry/mesh.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/mesh.py#L2648-L2706
def to_map_projection(arg, hemi=Ellipsis, chirality=Ellipsis, center=Ellipsis, center_right=Ellipsis, radius=Ellipsis, method=Ellipsis, registration=Ellipsis, sphere_radius=Ellipsis, pre_affine=Ellipsis, post_affine=Ellipsis, meta_data=Ellipsis): ''' to_map_projection(mp) yields mp if mp is a map projection object. to_map_projection((name, hemi)) is equivalent to map_projection(name, chirality=hemi). to_map_projection((name, opts)) uses the given options dictionary as options to map_projection; (name, hemi, opts) is also allowed as input. to_map_projection(filename) yields the map projection loaded from the given filename. to_map_projection('<name>:<hemi>') is equivalent to to_map_projection(('<name>', '<hemi>')). to_map_projection('<name>') is equivalent to to_map_projection(('<name>', 'lr')). to_map_projection((affine, hemi)) converts the given affine transformation, which must be a transformation from spherical coordinates to 2D map coordinates (once the transformed z-value is dropped), to a map projection. The hemi argument may alternately be an options mapping. The to_map_projection() function may also be called with the the elements of the above tuples passed directly; i.e. to_map_projection(name, hemi) is equivalent to to_map_projection((name,hemi)). Additionaly, all optional arguments to the map_projection function may be given and will be copied into the map_projection that is returned. Note that the named chirality argument is used to set the chirality of the returned map projection but never to specify the chirality of a map projection that is being looked up or loaded; for that use the second argument, second tuple entry, or hemi keyword. 
''' kw = dict(center=center, center_right=center_right, radius=radius, chirality=chirality, method=method, registration=registration, sphere_radius=sphere_radius, pre_affine=pre_affine, post_affine=post_affine, meta_data=meta_data) kw = {k:v for (k,v) in six.iteritems(kw) if v is not Ellipsis} if pimms.is_vector(arg): if len(arg) == 1: arg = arg[0] elif len(arg) == 2: (arg, tmp) = arg if pimms.is_map(tmp): kw = {k:v for (k,v) in six.iteritems(pimms.merge(tmp, kw)) if v is not Ellipsis} elif hemi is Ellipsis: hemi = arg elif len(arg) == 3: (arg, h, opts) = arg kw = {k:v for (k,v) in six.iteritems(pimms.merge(opts, kw)) if v is not Ellipsis} if hemi is Ellipsis: hemi = h else: raise ValueError('Invalid vector argument given to to_map_projection()') hemi = deduce_chirality(hemi) mp = None if is_map_projection(arg): mp = arg elif pimms.is_str(arg): # first see if there's a hemi appended if ':' in arg: spl = arg.split(':') (a,h) = (':'.join(spl[:-1]), spl[-1]) try: (hemtmp, arg) = (to_hemi_str(h), a) if hemi is None: hemi = hemtmp except Exception: pass # otherwise, strings alone might be map projection names or filenames mp = map_projection(arg, hemi) else: raise ValueError('Cannot interpret argument to to_map_projection') if len(kw) == 0: return mp else: return mp.copy(**kw)
[ "def", "to_map_projection", "(", "arg", ",", "hemi", "=", "Ellipsis", ",", "chirality", "=", "Ellipsis", ",", "center", "=", "Ellipsis", ",", "center_right", "=", "Ellipsis", ",", "radius", "=", "Ellipsis", ",", "method", "=", "Ellipsis", ",", "registration"...
to_map_projection(mp) yields mp if mp is a map projection object. to_map_projection((name, hemi)) is equivalent to map_projection(name, chirality=hemi). to_map_projection((name, opts)) uses the given options dictionary as options to map_projection; (name, hemi, opts) is also allowed as input. to_map_projection(filename) yields the map projection loaded from the given filename. to_map_projection('<name>:<hemi>') is equivalent to to_map_projection(('<name>', '<hemi>')). to_map_projection('<name>') is equivalent to to_map_projection(('<name>', 'lr')). to_map_projection((affine, hemi)) converts the given affine transformation, which must be a transformation from spherical coordinates to 2D map coordinates (once the transformed z-value is dropped), to a map projection. The hemi argument may alternately be an options mapping. The to_map_projection() function may also be called with the the elements of the above tuples passed directly; i.e. to_map_projection(name, hemi) is equivalent to to_map_projection((name,hemi)). Additionaly, all optional arguments to the map_projection function may be given and will be copied into the map_projection that is returned. Note that the named chirality argument is used to set the chirality of the returned map projection but never to specify the chirality of a map projection that is being looked up or loaded; for that use the second argument, second tuple entry, or hemi keyword.
[ "to_map_projection", "(", "mp", ")", "yields", "mp", "if", "mp", "is", "a", "map", "projection", "object", ".", "to_map_projection", "((", "name", "hemi", "))", "is", "equivalent", "to", "map_projection", "(", "name", "chirality", "=", "hemi", ")", ".", "t...
python
train
moonlitesolutions/SolrClient
SolrClient/indexq.py
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/indexq.py#L240-L279
def complete(self, filepath): ''' Marks the item as complete by moving it to the done directory and optionally gzipping it. ''' if not os.path.exists(filepath): raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath)) if self._devel: self.logger.debug("Completing - {} ".format(filepath)) if self.rotate_complete: try: complete_dir = str(self.rotate_complete()) except Exception as e: self.logger.error("rotate_complete function failed with the following exception.") self.logger.exception(e) raise newdir = os.path.join(self._done_dir, complete_dir) newpath = os.path.join(newdir, os.path.split(filepath)[-1] ) if not os.path.isdir(newdir): self.logger.debug("Making new directory: {}".format(newdir)) os.makedirs(newdir) else: newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] ) try: if self._compress_complete: if not filepath.endswith('.gz'): # Compressing complete, but existing file not compressed # Compress and move it and kick out newpath += '.gz' self._compress_and_move(filepath, newpath) return newpath # else the file is already compressed and can just be moved #if not compressing completed file, just move it shutil.move(filepath, newpath) self.logger.info(" Completed - {}".format(filepath)) except Exception as e: self.logger.error("Couldn't Complete {}".format(filepath)) self.logger.exception(e) raise return newpath
[ "def", "complete", "(", "self", ",", "filepath", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "raise", "FileNotFoundError", "(", "\"Can't Complete {}, it doesn't exist\"", ".", "format", "(", "filepath", ")", ")", "if",...
Marks the item as complete by moving it to the done directory and optionally gzipping it.
[ "Marks", "the", "item", "as", "complete", "by", "moving", "it", "to", "the", "done", "directory", "and", "optionally", "gzipping", "it", "." ]
python
train
Accelize/pycosio
pycosio/storage/oss.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/oss.py#L238-L250
def _list_locators(self): """ Lists locators. Returns: generator of tuple: locator name str, locator header dict """ with _handle_oss_error(): response = _oss.Service( self.client, endpoint=self._endpoint).list_buckets() for bucket in response.buckets: yield bucket.name, self._model_to_dict(bucket, ('name',))
[ "def", "_list_locators", "(", "self", ")", ":", "with", "_handle_oss_error", "(", ")", ":", "response", "=", "_oss", ".", "Service", "(", "self", ".", "client", ",", "endpoint", "=", "self", ".", "_endpoint", ")", ".", "list_buckets", "(", ")", "for", ...
Lists locators. Returns: generator of tuple: locator name str, locator header dict
[ "Lists", "locators", "." ]
python
train
cloudmesh/cloudmesh-common
cloudmesh/common/ssh/authorized_keys.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/ssh/authorized_keys.py#L17-L40
def get_fingerprint_from_public_key(pubkey): """Generate the fingerprint of a public key :param str pubkey: the value of the public key :returns: fingerprint :rtype: str """ # TODO: why is there a tmpdir? with tempdir() as workdir: key = os.path.join(workdir, 'key.pub') with open(key, 'w') as fd: fd.write(pubkey) cmd = [ 'ssh-keygen', '-l', '-f', key, ] p = Subprocess(cmd) output = p.stdout.strip() bits, fingerprint, _ = output.split(' ', 2) return fingerprint
[ "def", "get_fingerprint_from_public_key", "(", "pubkey", ")", ":", "# TODO: why is there a tmpdir?", "with", "tempdir", "(", ")", "as", "workdir", ":", "key", "=", "os", ".", "path", ".", "join", "(", "workdir", ",", "'key.pub'", ")", "with", "open", "(", "k...
Generate the fingerprint of a public key :param str pubkey: the value of the public key :returns: fingerprint :rtype: str
[ "Generate", "the", "fingerprint", "of", "a", "public", "key" ]
python
train
nilp0inter/cpe
cpe/comp/cpecomp_simple.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp_simple.py#L367-L390
def set_value(self, comp_str, comp_att): """ Set the value of component. By default, the component has a simple value. :param string comp_str: new value of component :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component """ old_value = self._encoded_value self._encoded_value = comp_str # Check the value of component try: self._parse(comp_att) except ValueError: # Restore old value of component self._encoded_value = old_value raise # Convert encoding value to standard value (WFN) self._decode()
[ "def", "set_value", "(", "self", ",", "comp_str", ",", "comp_att", ")", ":", "old_value", "=", "self", ".", "_encoded_value", "self", ".", "_encoded_value", "=", "comp_str", "# Check the value of component", "try", ":", "self", ".", "_parse", "(", "comp_att", ...
Set the value of component. By default, the component has a simple value. :param string comp_str: new value of component :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component
[ "Set", "the", "value", "of", "component", ".", "By", "default", "the", "component", "has", "a", "simple", "value", "." ]
python
train
sassoftware/saspy
saspy/sasets.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasets.py#L153-L207
def ucm(self, data: ['SASdata', str] = None, autoreg: str = None, blockseason: str = None, by: str = None, cycle: str = None, deplag: str = None, estimate: [str, bool] = None, forecast: str = None, id: str = None, irregular: [str, bool] = None, level: [str, bool] = None, model: str = None, nloptions: str = None, out: [str, 'SASdata'] = None, outlier: str = None, performance: str = None, randomreg: str = None, season: str = None, slope: [str, bool] = None, splinereg: str = None, splineseason: str = None, procopts: str = None, stmtpassthrough: str = None, **kwargs: dict) -> 'SASresults': """ Python method to call the UCM procedure Documentation link: http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_ucm_syntax.htm :param data: SASdata object or string. This parameter is required. :parm autoreg: The autoreg variable can only be a string type. :parm blockseason: The blockseason variable can only be a string type. :parm by: The by variable can only be a string type. :parm cycle: The cycle variable can only be a string type. :parm deplag: The deplag variable can only be a string type. :parm estimate: The estimate variable can be a string or boolean type. :parm forecast: The forecast variable can only be a string type. :parm id: The id variable can only be a string type. :parm irregular: The irregular variable can be a string or boolean type. :parm level: The level variable can be a string or boolean type. :parm model: The model variable can only be a string type. :parm nloptions: The nloptions variable can only be a string type. :parm out: The out variable can be a string or SASdata type. :parm outlier: The outlier variable can only be a string type. :parm performance: The performance variable can only be a string type. :parm randomreg: The randomreg variable can only be a string type. :parm season: The season variable can only be a string type. :parm slope: The slope variable can be a string or boolean type. 
:parm splinereg: The splinereg variable can only be a string type. :parm splineseason: The splineseason variable can only be a string type. :parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object """
[ "def", "ucm", "(", "self", ",", "data", ":", "[", "'SASdata'", ",", "str", "]", "=", "None", ",", "autoreg", ":", "str", "=", "None", ",", "blockseason", ":", "str", "=", "None", ",", "by", ":", "str", "=", "None", ",", "cycle", ":", "str", "="...
Python method to call the UCM procedure Documentation link: http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_ucm_syntax.htm :param data: SASdata object or string. This parameter is required. :parm autoreg: The autoreg variable can only be a string type. :parm blockseason: The blockseason variable can only be a string type. :parm by: The by variable can only be a string type. :parm cycle: The cycle variable can only be a string type. :parm deplag: The deplag variable can only be a string type. :parm estimate: The estimate variable can be a string or boolean type. :parm forecast: The forecast variable can only be a string type. :parm id: The id variable can only be a string type. :parm irregular: The irregular variable can be a string or boolean type. :parm level: The level variable can be a string or boolean type. :parm model: The model variable can only be a string type. :parm nloptions: The nloptions variable can only be a string type. :parm out: The out variable can be a string or SASdata type. :parm outlier: The outlier variable can only be a string type. :parm performance: The performance variable can only be a string type. :parm randomreg: The randomreg variable can only be a string type. :parm season: The season variable can only be a string type. :parm slope: The slope variable can be a string or boolean type. :parm splinereg: The splinereg variable can only be a string type. :parm splineseason: The splineseason variable can only be a string type. :parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type. :parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type. :return: SAS Result Object
[ "Python", "method", "to", "call", "the", "UCM", "procedure" ]
python
train
Becksteinlab/GromacsWrapper
gromacs/utilities.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/utilities.py#L411-L435
def in_dir(directory, create=True): """Context manager to execute a code block in a directory. * The directory is created if it does not exist (unless create=False is set) * At the end or after an exception code always returns to the directory that was the current directory before entering the block. """ startdir = os.getcwd() try: try: os.chdir(directory) logger.debug("Working in {directory!r}...".format(**vars())) except OSError as err: if create and err.errno == errno.ENOENT: os.makedirs(directory) os.chdir(directory) logger.info("Working in {directory!r} (newly created)...".format(**vars())) else: logger.exception("Failed to start working in {directory!r}.".format(**vars())) raise yield os.getcwd() finally: os.chdir(startdir)
[ "def", "in_dir", "(", "directory", ",", "create", "=", "True", ")", ":", "startdir", "=", "os", ".", "getcwd", "(", ")", "try", ":", "try", ":", "os", ".", "chdir", "(", "directory", ")", "logger", ".", "debug", "(", "\"Working in {directory!r}...\"", ...
Context manager to execute a code block in a directory. * The directory is created if it does not exist (unless create=False is set) * At the end or after an exception code always returns to the directory that was the current directory before entering the block.
[ "Context", "manager", "to", "execute", "a", "code", "block", "in", "a", "directory", "." ]
python
valid
pypa/setuptools
setuptools/dist.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/dist.py#L878-L898
def include(self, **attrs): """Add items to distribution that are named in keyword arguments For example, 'dist.include(py_modules=["x"])' would add 'x' to the distribution's 'py_modules' attribute, if it was not already there. Currently, this method only supports inclusion for attributes that are lists or tuples. If you need to add support for adding to other attributes in this or a subclass, you can add an '_include_X' method, where 'X' is the name of the attribute. The method will be called with the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' will try to call 'dist._include_foo({"bar":"baz"})', which can then handle whatever special inclusion logic is needed. """ for k, v in attrs.items(): include = getattr(self, '_include_' + k, None) if include: include(v) else: self._include_misc(k, v)
[ "def", "include", "(", "self", ",", "*", "*", "attrs", ")", ":", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", ":", "include", "=", "getattr", "(", "self", ",", "'_include_'", "+", "k", ",", "None", ")", "if", "include", ":", "in...
Add items to distribution that are named in keyword arguments For example, 'dist.include(py_modules=["x"])' would add 'x' to the distribution's 'py_modules' attribute, if it was not already there. Currently, this method only supports inclusion for attributes that are lists or tuples. If you need to add support for adding to other attributes in this or a subclass, you can add an '_include_X' method, where 'X' is the name of the attribute. The method will be called with the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' will try to call 'dist._include_foo({"bar":"baz"})', which can then handle whatever special inclusion logic is needed.
[ "Add", "items", "to", "distribution", "that", "are", "named", "in", "keyword", "arguments" ]
python
train
inasafe/inasafe
safe/common/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/utilities.py#L488-L520
def get_utm_epsg(longitude, latitude, crs=None): """Return epsg code of the utm zone according to X, Y coordinates. By default, the CRS is EPSG:4326. If the CRS is provided, first X,Y will be reprojected from the input CRS to WGS84. The code is based on the code: http://gis.stackexchange.com/questions/34401 :param longitude: The longitude. :type longitude: float :param latitude: The latitude. :type latitude: float :param crs: The coordinate reference system of the latitude, longitude. :type crs: QgsCoordinateReferenceSystem """ if crs is None or crs.authid() == 'EPSG:4326': epsg = 32600 if latitude < 0.0: epsg += 100 epsg += get_utm_zone(longitude) return epsg else: epsg_4326 = QgsCoordinateReferenceSystem('EPSG:4326') transform = QgsCoordinateTransform( crs, epsg_4326, QgsProject.instance()) geom = QgsGeometry.fromPointXY(QgsPointXY(longitude, latitude)) geom.transform(transform) point = geom.asPoint() # The point is now in 4326, we can call the function again. return get_utm_epsg(point.x(), point.y())
[ "def", "get_utm_epsg", "(", "longitude", ",", "latitude", ",", "crs", "=", "None", ")", ":", "if", "crs", "is", "None", "or", "crs", ".", "authid", "(", ")", "==", "'EPSG:4326'", ":", "epsg", "=", "32600", "if", "latitude", "<", "0.0", ":", "epsg", ...
Return epsg code of the utm zone according to X, Y coordinates. By default, the CRS is EPSG:4326. If the CRS is provided, first X,Y will be reprojected from the input CRS to WGS84. The code is based on the code: http://gis.stackexchange.com/questions/34401 :param longitude: The longitude. :type longitude: float :param latitude: The latitude. :type latitude: float :param crs: The coordinate reference system of the latitude, longitude. :type crs: QgsCoordinateReferenceSystem
[ "Return", "epsg", "code", "of", "the", "utm", "zone", "according", "to", "X", "Y", "coordinates", "." ]
python
train
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L5060-L5082
def pool_start(name, **kwargs): ''' Start a defined libvirt storage pool. :param name: libvirt storage pool name :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.pool_start default ''' conn = __get_conn(**kwargs) try: pool = conn.storagePoolLookupByName(name) return not bool(pool.create()) finally: conn.close()
[ "def", "pool_start", "(", "name", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "try", ":", "pool", "=", "conn", ".", "storagePoolLookupByName", "(", "name", ")", "return", "not", "bool", "(", "pool", "."...
Start a defined libvirt storage pool. :param name: libvirt storage pool name :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.pool_start default
[ "Start", "a", "defined", "libvirt", "storage", "pool", "." ]
python
train
loli/medpy
medpy/features/texture.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/texture.py#L33-L126
def coarseness(image, voxelspacing = None, mask = slice(None)): r""" Takes a simple or multi-spectral image and returns the coarseness of the texture. Step1 At each pixel, compute six averages for the windows of size 2**k x 2**k, k=0,1,...,5, around the pixel. Step2 At each pixel, compute absolute differences E between the pairs of non overlapping averages in every directions. step3 At each pixel, find the value of k that maximises the difference Ek in either direction and set the best size Sbest=2**k step4 Compute the coarseness feature Fcrs by averaging Sbest over the entire image. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image or a slice object Returns ------- coarseness : float The size of coarseness of the given texture. It is basically the size of repeating elements in the image. See Also -------- """ # Step1: At each pixel (x,y), compute six averages for the windows # of size 2**k x 2**k, k=0,1,...,5, around the pixel. image = numpy.asarray(image, dtype=numpy.float32) # set default mask or apply given mask if not type(mask) is slice: if not type(mask[0] is slice): mask = numpy.array(mask, copy=False, dtype = numpy.bool) image = image[mask] # set default voxel spacing if not suppliec if None == voxelspacing: voxelspacing = tuple([1.] 
* image.ndim) if len(voxelspacing) != image.ndim: print("Voxel spacing and image dimensions do not fit.") return None # set padding for image border control padSize = numpy.asarray([(numpy.rint((2**5.0) * voxelspacing[jj]),0) for jj in range(image.ndim)]).astype(numpy.int) Apad = numpy.pad(image,pad_width=padSize, mode='reflect') # Allocate memory E = numpy.empty((6,image.ndim)+image.shape) # prepare some slicer rawSlicer = [slice(None)] * image.ndim slicerForImageInPad = [slice(padSize[d][0],None)for d in range(image.ndim)] for k in range(6): size_vs = tuple(numpy.rint((2**k) * voxelspacing[jj]) for jj in range(image.ndim)) A = uniform_filter(Apad, size = size_vs, mode = 'mirror') # Step2: At each pixel, compute absolute differences E(x,y) between # the pairs of non overlapping averages in the horizontal and vertical directions. for d in range(image.ndim): borders = numpy.rint((2**k) * voxelspacing[d]) slicerPad_k_d = slicerForImageInPad[:] slicerPad_k_d[d]= slice((padSize[d][0]-borders if borders < padSize[d][0] else 0),None) A_k_d = A[slicerPad_k_d] AslicerL = rawSlicer[:] AslicerL[d] = slice(0, -borders) AslicerR = rawSlicer[:] AslicerR[d] = slice(borders, None) E[k,d,...] = numpy.abs(A_k_d[AslicerL] - A_k_d[AslicerR]) # step3: At each pixel, find the value of k that maximises the difference Ek(x,y) # in either direction and set the best size Sbest(x,y)=2**k k_max = E.max(1).argmax(0) dim = E.argmax(1) dim_vox_space = numpy.asarray([voxelspacing[dim[k_max.flat[i]].flat[i]] for i in range(k_max.size)]).reshape(k_max.shape) S = (2**k_max) * dim_vox_space # step4: Compute the coarseness feature Fcrs by averaging Sbest(x,y) over the entire image. return S.mean()
[ "def", "coarseness", "(", "image", ",", "voxelspacing", "=", "None", ",", "mask", "=", "slice", "(", "None", ")", ")", ":", "# Step1: At each pixel (x,y), compute six averages for the windows", "# of size 2**k x 2**k, k=0,1,...,5, around the pixel.", "image", "=", "numpy",...
r""" Takes a simple or multi-spectral image and returns the coarseness of the texture. Step1 At each pixel, compute six averages for the windows of size 2**k x 2**k, k=0,1,...,5, around the pixel. Step2 At each pixel, compute absolute differences E between the pairs of non overlapping averages in every directions. step3 At each pixel, find the value of k that maximises the difference Ek in either direction and set the best size Sbest=2**k step4 Compute the coarseness feature Fcrs by averaging Sbest over the entire image. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image or a slice object Returns ------- coarseness : float The size of coarseness of the given texture. It is basically the size of repeating elements in the image. See Also --------
[ "r", "Takes", "a", "simple", "or", "multi", "-", "spectral", "image", "and", "returns", "the", "coarseness", "of", "the", "texture", ".", "Step1", "At", "each", "pixel", "compute", "six", "averages", "for", "the", "windows", "of", "size", "2", "**", "k",...
python
train
ronaldguillen/wave
wave/validators.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/validators.py#L95-L109
def enforce_required_fields(self, attrs): """ The `UniqueTogetherValidator` always forces an implied 'required' state on the fields it applies to. """ if self.instance is not None: return missing = { field_name: self.missing_message for field_name in self.fields if field_name not in attrs } if missing: raise ValidationError(missing)
[ "def", "enforce_required_fields", "(", "self", ",", "attrs", ")", ":", "if", "self", ".", "instance", "is", "not", "None", ":", "return", "missing", "=", "{", "field_name", ":", "self", ".", "missing_message", "for", "field_name", "in", "self", ".", "field...
The `UniqueTogetherValidator` always forces an implied 'required' state on the fields it applies to.
[ "The", "UniqueTogetherValidator", "always", "forces", "an", "implied", "required", "state", "on", "the", "fields", "it", "applies", "to", "." ]
python
train
redcap-tools/PyCap
redcap/project.py
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L354-L382
def backfill_fields(self, fields, forms): """ Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatiblity for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields, forms """ if forms and not fields: new_fields = [self.def_field] elif fields and self.def_field not in fields: new_fields = list(fields) if self.def_field not in fields: new_fields.append(self.def_field) elif not fields: new_fields = self.field_names else: new_fields = list(fields) return new_fields
[ "def", "backfill_fields", "(", "self", ",", "fields", ",", "forms", ")", ":", "if", "forms", "and", "not", "fields", ":", "new_fields", "=", "[", "self", ".", "def_field", "]", "elif", "fields", "and", "self", ".", "def_field", "not", "in", "fields", "...
Properly backfill fields to explicitly request specific keys. The issue is that >6.X servers *only* return requested fields so to improve backwards compatiblity for PyCap clients, add specific fields when required. Parameters ---------- fields: list requested fields forms: list requested forms Returns ------- new fields, forms
[ "Properly", "backfill", "fields", "to", "explicitly", "request", "specific", "keys", ".", "The", "issue", "is", "that", ">", "6", ".", "X", "servers", "*", "only", "*", "return", "requested", "fields", "so", "to", "improve", "backwards", "compatiblity", "for...
python
train
TheGhouls/oct
oct/tools/results_to_csv.py
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/tools/results_to_csv.py#L8-L49
def to_csv(args): """Take a sqlite filled database of results and return a csv file :param str result_file: the path of the sqlite database :param str output_file: the path of the csv output file :param str delimiter: the desired delimiter for the output csv file """ result_file = args.result_file output_file = args.output_file delimiter = args.delimiter if not os.path.isfile(result_file): raise OSError("Results file does not exists") headers = ['elapsed', 'epoch', 'turret_name', 'scriptrun_time', 'error'] headers_row = {} set_database(result_file, db, {}) results = Result.select() for item in results: result_item = item.to_dict() for k in result_item['custom_timers'].keys(): if k not in headers: headers.append(k) headers_row[k] = k with open(output_file, "w+") as f: writer = csv.DictWriter(f, fieldnames=headers, delimiter=delimiter) headers_row.update({ 'elapsed': 'elapsed time', 'epoch': 'epoch (in seconds)', 'turret_name': 'turret name', 'scriptrun_time': 'transaction time', 'error': 'error' }) writer.writerow(headers_row) for result_item in results: line = result_item.to_dict() for key, value in line['custom_timers'].items(): line[key] = value del line['custom_timers'] writer.writerow(line)
[ "def", "to_csv", "(", "args", ")", ":", "result_file", "=", "args", ".", "result_file", "output_file", "=", "args", ".", "output_file", "delimiter", "=", "args", ".", "delimiter", "if", "not", "os", ".", "path", ".", "isfile", "(", "result_file", ")", ":...
Take a sqlite filled database of results and return a csv file :param str result_file: the path of the sqlite database :param str output_file: the path of the csv output file :param str delimiter: the desired delimiter for the output csv file
[ "Take", "a", "sqlite", "filled", "database", "of", "results", "and", "return", "a", "csv", "file" ]
python
train
NYUCCL/psiTurk
psiturk/experiment.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/experiment.py#L564-L593
def update(uid=None): """ Save experiment data, which should be a JSON object and will be stored after converting to string. """ app.logger.info("PUT /sync route with id: %s" % uid) try: user = Participant.query.\ filter(Participant.uniqueid == uid).\ one() except exc.SQLAlchemyError: app.logger.error("DB error: Unique user not found.") if hasattr(request, 'json'): user.datastring = request.data.decode('utf-8').encode( 'ascii', 'xmlcharrefreplace' ) db_session.add(user) db_session.commit() try: data = json.loads(user.datastring) except: data = {} trial = data.get("currenttrial", None) app.logger.info("saved data for %s (current trial: %s)", uid, trial) resp = {"status": "user data saved"} return jsonify(**resp)
[ "def", "update", "(", "uid", "=", "None", ")", ":", "app", ".", "logger", ".", "info", "(", "\"PUT /sync route with id: %s\"", "%", "uid", ")", "try", ":", "user", "=", "Participant", ".", "query", ".", "filter", "(", "Participant", ".", "uniqueid", "=="...
Save experiment data, which should be a JSON object and will be stored after converting to string.
[ "Save", "experiment", "data", "which", "should", "be", "a", "JSON", "object", "and", "will", "be", "stored", "after", "converting", "to", "string", "." ]
python
train
JelleAalbers/multihist
multihist.py
https://github.com/JelleAalbers/multihist/blob/072288277f807e7e388fdf424c3921c80576f3ab/multihist.py#L482-L486
def central_likelihood(self, axis): """Returns new histogram with all values replaced by their central likelihoods along axis.""" result = self.cumulative_density(axis) result.histogram = 1 - 2 * np.abs(result.histogram - 0.5) return result
[ "def", "central_likelihood", "(", "self", ",", "axis", ")", ":", "result", "=", "self", ".", "cumulative_density", "(", "axis", ")", "result", ".", "histogram", "=", "1", "-", "2", "*", "np", ".", "abs", "(", "result", ".", "histogram", "-", "0.5", "...
Returns new histogram with all values replaced by their central likelihoods along axis.
[ "Returns", "new", "histogram", "with", "all", "values", "replaced", "by", "their", "central", "likelihoods", "along", "axis", "." ]
python
train
django-extensions/django-extensions
django_extensions/management/commands/pipchecker.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/pipchecker.py#L116-L119
def _urlopen_as_json(self, url, headers=None): """Shorcut for return contents as json""" req = Request(url, headers=headers) return json.loads(urlopen(req).read())
[ "def", "_urlopen_as_json", "(", "self", ",", "url", ",", "headers", "=", "None", ")", ":", "req", "=", "Request", "(", "url", ",", "headers", "=", "headers", ")", "return", "json", ".", "loads", "(", "urlopen", "(", "req", ")", ".", "read", "(", ")...
Shorcut for return contents as json
[ "Shorcut", "for", "return", "contents", "as", "json" ]
python
train
oanda/v20-python
src/v20/instrument.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/instrument.py#L712-L816
def price( self, instrument, **kwargs ): """ Fetch a price for an instrument. Accounts are not associated in any way with this endpoint. Args: instrument: Name of the Instrument time: The time at which the desired price is in effect. The current price is returned if no time is provided. Returns: v20.response.Response containing the results from submitting the request """ request = Request( 'GET', '/v3/instruments/{instrument}/price' ) request.set_path_param( 'instrument', instrument ) request.set_param( 'time', kwargs.get('time') ) response = self.ctx.request(request) if response.content_type is None: return response if not response.content_type.startswith("application/json"): return response jbody = json.loads(response.raw_body) parsed_body = {} # # Parse responses as defined by the API specification # if str(response.status) == "200": if jbody.get('price') is not None: parsed_body['price'] = \ self.ctx.pricing_common.Price.from_dict( jbody['price'], self.ctx ) elif str(response.status) == "400": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "401": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "404": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') elif str(response.status) == "405": if jbody.get('errorCode') is not None: parsed_body['errorCode'] = \ jbody.get('errorCode') if jbody.get('errorMessage') is not None: parsed_body['errorMessage'] = \ jbody.get('errorMessage') # # Unexpected response status # else: parsed_body = jbody response.body = parsed_body return response
[ "def", "price", "(", "self", ",", "instrument", ",", "*", "*", "kwargs", ")", ":", "request", "=", "Request", "(", "'GET'", ",", "'/v3/instruments/{instrument}/price'", ")", "request", ".", "set_path_param", "(", "'instrument'", ",", "instrument", ")", "reques...
Fetch a price for an instrument. Accounts are not associated in any way with this endpoint. Args: instrument: Name of the Instrument time: The time at which the desired price is in effect. The current price is returned if no time is provided. Returns: v20.response.Response containing the results from submitting the request
[ "Fetch", "a", "price", "for", "an", "instrument", ".", "Accounts", "are", "not", "associated", "in", "any", "way", "with", "this", "endpoint", "." ]
python
train
klen/muffin-debugtoolbar
muffin_debugtoolbar/panels.py
https://github.com/klen/muffin-debugtoolbar/blob/b650b35fbe2035888f6bba5dac3073ef01c94dc6/muffin_debugtoolbar/panels.py#L57-L66
def render_content(self): """Render the panel's content.""" if not self.has_content: return "" template = self.template if isinstance(self.template, str): template = self.app.ps.jinja2.env.get_template(self.template) context = self.render_vars() content = template.render(app=self.app, request=self.request, **context) return content
[ "def", "render_content", "(", "self", ")", ":", "if", "not", "self", ".", "has_content", ":", "return", "\"\"", "template", "=", "self", ".", "template", "if", "isinstance", "(", "self", ".", "template", ",", "str", ")", ":", "template", "=", "self", "...
Render the panel's content.
[ "Render", "the", "panel", "s", "content", "." ]
python
train
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L2291-L2309
def get(method, hmc, uri, uri_parms, logon_required): """Operation: List Logical Partitions of CPC (empty result in DPM mode.""" cpc_oid = uri_parms[0] query_str = uri_parms[1] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) result_lpars = [] if not cpc.dpm_enabled: filter_args = parse_query_parms(method, uri, query_str) for lpar in cpc.lpars.list(filter_args): result_lpar = {} for prop in lpar.properties: if prop in ('object-uri', 'name', 'status'): result_lpar[prop] = lpar.properties[prop] result_lpars.append(result_lpar) return {'logical-partitions': result_lpars}
[ "def", "get", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "logon_required", ")", ":", "cpc_oid", "=", "uri_parms", "[", "0", "]", "query_str", "=", "uri_parms", "[", "1", "]", "try", ":", "cpc", "=", "hmc", ".", "cpcs", ".", "look...
Operation: List Logical Partitions of CPC (empty result in DPM mode.
[ "Operation", ":", "List", "Logical", "Partitions", "of", "CPC", "(", "empty", "result", "in", "DPM", "mode", "." ]
python
train
deepmipt/DeepPavlov
deeppavlov/models/classifiers/utils.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/classifiers/utils.py#L24-L49
def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray: """ Convert labels to one-hot vectors for multi-class multi-label classification Args: labels: list of samples where each sample is a class or a list of classes which sample belongs with classes: array of classes' names Returns: 2d array with one-hot representation of given samples """ n_classes = len(classes) y = [] for sample in labels: curr = np.zeros(n_classes) if isinstance(sample, list): for intent in sample: if intent not in classes: log.warning('Unknown intent {} detected. Assigning no class'.format(intent)) else: curr[np.where(np.array(classes) == intent)[0]] = 1 else: curr[np.where(np.array(classes) == sample)[0]] = 1 y.append(curr) y = np.asarray(y) return y
[ "def", "labels2onehot", "(", "labels", ":", "[", "List", "[", "str", "]", ",", "List", "[", "List", "[", "str", "]", "]", ",", "np", ".", "ndarray", "]", ",", "classes", ":", "[", "list", ",", "np", ".", "ndarray", "]", ")", "->", "np", ".", ...
Convert labels to one-hot vectors for multi-class multi-label classification Args: labels: list of samples where each sample is a class or a list of classes which sample belongs with classes: array of classes' names Returns: 2d array with one-hot representation of given samples
[ "Convert", "labels", "to", "one", "-", "hot", "vectors", "for", "multi", "-", "class", "multi", "-", "label", "classification" ]
python
test
addisonlynch/iexfinance
iexfinance/__init__.py
https://github.com/addisonlynch/iexfinance/blob/40f0bdcc51b329031d06178020fd774494250456/iexfinance/__init__.py#L202-L208
def get_stats_monthly(start=None, end=None, **kwargs): """ MOVED to iexfinance.iexdata.get_stats_summary """ import warnings warnings.warn(WNG_MSG % ("get_stats_monthly", "iexdata.get_stats_summary")) return MonthlySummaryReader(start=start, end=end, **kwargs).fetch()
[ "def", "get_stats_monthly", "(", "start", "=", "None", ",", "end", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "WNG_MSG", "%", "(", "\"get_stats_monthly\"", ",", "\"iexdata.get_stats_summary\"", ")", ")...
MOVED to iexfinance.iexdata.get_stats_summary
[ "MOVED", "to", "iexfinance", ".", "iexdata", ".", "get_stats_summary" ]
python
train
secdev/scapy
scapy/arch/windows/__init__.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/windows/__init__.py#L686-L694
def dev_from_name(self, name): """Return the first pcap device name for a given Windows device name. """ try: return next(iface for iface in six.itervalues(self) if (iface.name == name or iface.description == name)) except (StopIteration, RuntimeError): raise ValueError("Unknown network interface %r" % name)
[ "def", "dev_from_name", "(", "self", ",", "name", ")", ":", "try", ":", "return", "next", "(", "iface", "for", "iface", "in", "six", ".", "itervalues", "(", "self", ")", "if", "(", "iface", ".", "name", "==", "name", "or", "iface", ".", "description"...
Return the first pcap device name for a given Windows device name.
[ "Return", "the", "first", "pcap", "device", "name", "for", "a", "given", "Windows", "device", "name", "." ]
python
train
BYU-PCCL/holodeck
holodeck/agents.py
https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/holodeck/agents.py#L79-L98
def teleport(self, location=None, rotation=None): """Teleports the agent to a specific location, with a specific rotation. Args: location (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters. If None, keeps the current location. Defaults to None. rotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent. If None, keeps the current rotation. Defaults to None. Returns: None """ val = 0 if location is not None: val += 1 np.copyto(self._teleport_buffer, location) if rotation is not None: np.copyto(self._rotation_buffer, rotation) val += 2 self._teleport_bool_buffer[0] = val
[ "def", "teleport", "(", "self", ",", "location", "=", "None", ",", "rotation", "=", "None", ")", ":", "val", "=", "0", "if", "location", "is", "not", "None", ":", "val", "+=", "1", "np", ".", "copyto", "(", "self", ".", "_teleport_buffer", ",", "lo...
Teleports the agent to a specific location, with a specific rotation. Args: location (np.ndarray, optional): An array with three elements specifying the target world coordinate in meters. If None, keeps the current location. Defaults to None. rotation (np.ndarray, optional): An array with three elements specifying the target rotation of the agent. If None, keeps the current rotation. Defaults to None. Returns: None
[ "Teleports", "the", "agent", "to", "a", "specific", "location", "with", "a", "specific", "rotation", "." ]
python
train
h2oai/h2o-3
h2o-py/h2o/estimators/estimator_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/estimators/estimator_base.py#L325-L350
def fit(self, X, y=None, **params): """ Fit an H2O model as part of a scikit-learn pipeline or grid search. A warning will be issued if a caller other than sklearn attempts to use this method. :param H2OFrame X: An H2OFrame consisting of the predictor variables. :param H2OFrame y: An H2OFrame consisting of the response variable. :param params: Extra arguments. :returns: The current instance of H2OEstimator for method chaining. """ stk = inspect.stack()[1:] warn = True for s in stk: mod = inspect.getmodule(s[0]) if mod: warn = "sklearn" not in mod.__name__ if not warn: break if warn: warnings.warn("\n\n\t`fit` is not recommended outside of the sklearn framework. Use `train` instead.", UserWarning, stacklevel=2) training_frame = X.cbind(y) if y is not None else X x = X.names y = y.names[0] if y is not None else None self.train(x, y, training_frame, **params) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "params", ")", ":", "stk", "=", "inspect", ".", "stack", "(", ")", "[", "1", ":", "]", "warn", "=", "True", "for", "s", "in", "stk", ":", "mod", "=", "inspect", ".",...
Fit an H2O model as part of a scikit-learn pipeline or grid search. A warning will be issued if a caller other than sklearn attempts to use this method. :param H2OFrame X: An H2OFrame consisting of the predictor variables. :param H2OFrame y: An H2OFrame consisting of the response variable. :param params: Extra arguments. :returns: The current instance of H2OEstimator for method chaining.
[ "Fit", "an", "H2O", "model", "as", "part", "of", "a", "scikit", "-", "learn", "pipeline", "or", "grid", "search", "." ]
python
test
bram85/topydo
topydo/lib/ChangeSet.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/ChangeSet.py#L121-L134
def delete(self, p_timestamp=None, p_write=True): """ Removes backup from the backup file. """ timestamp = p_timestamp or self.timestamp index = self._get_index() try: del self.backup_dict[timestamp] index.remove(index[[change[0] for change in index].index(timestamp)]) self._save_index(index) if p_write: self._write() except KeyError: pass
[ "def", "delete", "(", "self", ",", "p_timestamp", "=", "None", ",", "p_write", "=", "True", ")", ":", "timestamp", "=", "p_timestamp", "or", "self", ".", "timestamp", "index", "=", "self", ".", "_get_index", "(", ")", "try", ":", "del", "self", ".", ...
Removes backup from the backup file.
[ "Removes", "backup", "from", "the", "backup", "file", "." ]
python
train
Autodesk/aomi
aomi/cli.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/cli.py#L403-L411
def do_thaw(client, args): """Execute the thaw operation, pulling in an actual Vault client if neccesary""" vault_client = None if args.gpg_pass_path: vault_client = client.connect(args) aomi.filez.thaw(vault_client, args.icefile, args) sys.exit(0)
[ "def", "do_thaw", "(", "client", ",", "args", ")", ":", "vault_client", "=", "None", "if", "args", ".", "gpg_pass_path", ":", "vault_client", "=", "client", ".", "connect", "(", "args", ")", "aomi", ".", "filez", ".", "thaw", "(", "vault_client", ",", ...
Execute the thaw operation, pulling in an actual Vault client if neccesary
[ "Execute", "the", "thaw", "operation", "pulling", "in", "an", "actual", "Vault", "client", "if", "neccesary" ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/interface.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/interface.py#L574-L593
def abort(self): """ Set abort. When Control-C has been pressed. """ on_abort = self.application.on_abort self._abort_flag = True self._redraw() if on_abort == AbortAction.RAISE_EXCEPTION: def keyboard_interrupt(): raise KeyboardInterrupt() self._set_return_callable(keyboard_interrupt) elif on_abort == AbortAction.RETRY: self.reset() self.renderer.request_absolute_cursor_position() self.current_buffer.reset() elif on_abort == AbortAction.RETURN_NONE: self.set_return_value(None)
[ "def", "abort", "(", "self", ")", ":", "on_abort", "=", "self", ".", "application", ".", "on_abort", "self", ".", "_abort_flag", "=", "True", "self", ".", "_redraw", "(", ")", "if", "on_abort", "==", "AbortAction", ".", "RAISE_EXCEPTION", ":", "def", "ke...
Set abort. When Control-C has been pressed.
[ "Set", "abort", ".", "When", "Control", "-", "C", "has", "been", "pressed", "." ]
python
train
sendwithus/sendwithus_python
sendwithus/__init__.py
https://github.com/sendwithus/sendwithus_python/blob/8ae50d514febd44f7d9be3c838b4d92f99412832/sendwithus/__init__.py#L470-L566
def send( self, email_id, recipient, email_data=None, sender=None, cc=None, bcc=None, tags=[], headers={}, esp_account=None, locale=None, email_version_name=None, inline=None, files=[], timeout=None ): """ API call to send an email """ if not email_data: email_data = {} # for backwards compatibility, will be removed if isinstance(recipient, string_types): warnings.warn( "Passing email directly for recipient is deprecated", DeprecationWarning) recipient = {'address': recipient} payload = { 'email_id': email_id, 'recipient': recipient, 'email_data': email_data } if sender: payload['sender'] = sender if cc: if not type(cc) == list: logger.error( 'kwarg cc must be type(list), got %s' % type(cc)) payload['cc'] = cc if bcc: if not type(bcc) == list: logger.error( 'kwarg bcc must be type(list), got %s' % type(bcc)) payload['bcc'] = bcc if tags: if not type(tags) == list: logger.error( 'kwarg tags must be type(list), got %s' % (type(tags))) payload['tags'] = tags if headers: if not type(headers) == dict: logger.error( 'kwarg headers must be type(dict), got %s' % ( type(headers) ) ) payload['headers'] = headers if esp_account: if not isinstance(esp_account, string_types): logger.error( 'kwarg esp_account must be a string, got %s' % ( type(esp_account) ) ) payload['esp_account'] = esp_account if locale: if not isinstance(locale, string_types): logger.error( 'kwarg locale must be a string, got %s' % (type(locale)) ) payload['locale'] = locale if email_version_name: if not isinstance(email_version_name, string_types): logger.error( 'kwarg email_version_name must be a string, got %s' % ( type(email_version_name))) payload['version_name'] = email_version_name if inline: payload['inline'] = self._make_file_dict(inline) if files: payload['files'] = [self._make_file_dict(f) for f in files] return self._api_request( self.SEND_ENDPOINT, self.HTTP_POST, payload=payload, timeout=timeout )
[ "def", "send", "(", "self", ",", "email_id", ",", "recipient", ",", "email_data", "=", "None", ",", "sender", "=", "None", ",", "cc", "=", "None", ",", "bcc", "=", "None", ",", "tags", "=", "[", "]", ",", "headers", "=", "{", "}", ",", "esp_accou...
API call to send an email
[ "API", "call", "to", "send", "an", "email" ]
python
valid
mikedh/trimesh
trimesh/base.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L2485-L2501
def intersection(self, other, engine=None):
    """
    Compute the boolean intersection of this mesh with one or more
    other meshes.

    Parameters
    ---------
    other : trimesh.Trimesh, or list of trimesh.Trimesh objects
      Meshes to intersect with the current mesh
    engine : str or None
      Which boolean backend to use (passed through unchanged)

    Returns
    ---------
    intersection : trimesh.Trimesh
      Mesh of the volume shared by every input mesh
    """
    # Collect self plus the other mesh(es) into a single flat array,
    # then delegate the actual boolean operation to the backend.
    combined = np.append(self, other)
    return boolean.intersection(meshes=combined, engine=engine)
[ "def", "intersection", "(", "self", ",", "other", ",", "engine", "=", "None", ")", ":", "result", "=", "boolean", ".", "intersection", "(", "meshes", "=", "np", ".", "append", "(", "self", ",", "other", ")", ",", "engine", "=", "engine", ")", "return...
Boolean intersection between this mesh and n other meshes Parameters --------- other : trimesh.Trimesh, or list of trimesh.Trimesh objects Meshes to calculate intersections with Returns --------- intersection : trimesh.Trimesh Mesh of the volume contained by all passed meshes
[ "Boolean", "intersection", "between", "this", "mesh", "and", "n", "other", "meshes" ]
python
train
elliterate/capybara.py
capybara/result.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/result.py#L54-L89
def compare_count(self):
    """
    Report how the number of found results compares to the query options.

    Returns:
        int: -1 when too few results were found, 0 when the count is
            acceptable, and 1 when too many were found.
    """
    opts = self.query.options

    # An explicit "count" option wins over every other bound: fetch one
    # more than requested and compare exact lengths.
    if opts["count"] is not None:
        expected = int(opts["count"])
        self._cache_at_least(expected + 1)
        return cmp(len(self._result_cache), expected)

    if opts["minimum"] is not None:
        lower = int(opts["minimum"])
        if not self._cache_at_least(lower):
            return -1

    if opts["maximum"] is not None:
        upper = int(opts["maximum"])
        if self._cache_at_least(upper + 1):
            return 1

    if opts["between"] is not None:
        lower, upper = opts["between"][0], opts["between"][-1]
        if not self._cache_at_least(lower):
            return -1
        if self._cache_at_least(upper + 1):
            return 1

    return 0
[ "def", "compare_count", "(", "self", ")", ":", "if", "self", ".", "query", ".", "options", "[", "\"count\"", "]", "is", "not", "None", ":", "count_opt", "=", "int", "(", "self", ".", "query", ".", "options", "[", "\"count\"", "]", ")", "self", ".", ...
Returns how the result count compares to the query options. The return value is negative if too few results were found, zero if enough were found, and positive if too many were found. Returns: int: -1, 0, or 1.
[ "Returns", "how", "the", "result", "count", "compares", "to", "the", "query", "options", "." ]
python
test
saltstack/salt
salt/netapi/rest_tornado/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/__init__.py#L87-L133
def start():
    '''
    Start the saltnado!
    '''
    mod_opts = __opts__.get(__virtualname__, {})

    # Default to a single worker process when unconfigured.
    mod_opts.setdefault('num_processes', 1)

    if mod_opts['num_processes'] > 1 and mod_opts.get('debug', False) is True:
        raise Exception(
            'Tornado\'s debug implementation is not compatible with multiprocess. '
            'Either disable debug, or set num_processes to 1.'
        )

    # Keyword arguments handed to the HTTPServer constructor.
    server_kwargs = {}
    if not mod_opts.get('disable_ssl', False):
        if 'ssl_crt' not in mod_opts:
            log.error("Not starting '%s'. Options 'ssl_crt' and "
                      "'ssl_key' are required if SSL is not disabled.",
                      __name__)
            return None
        # cert is required, key may be optional
        # https://docs.python.org/2/library/ssl.html#ssl.wrap_socket
        ssl_opts = {'certfile': mod_opts['ssl_crt']}
        if mod_opts.get('ssl_key', False):
            ssl_opts['keyfile'] = mod_opts['ssl_key']
        server_kwargs['ssl_options'] = ssl_opts

    import tornado.httpserver
    http_server = tornado.httpserver.HTTPServer(
        get_application(__opts__), **server_kwargs)

    try:
        http_server.bind(mod_opts['port'],
                         address=mod_opts.get('address'),
                         backlog=mod_opts.get('backlog', 128))
        http_server.start(mod_opts['num_processes'])
    except Exception:
        log.error('Rest_tornado unable to bind to port %s',
                  mod_opts['port'], exc_info=True)
        raise SystemExit(1)

    try:
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        raise SystemExit(0)
[ "def", "start", "(", ")", ":", "mod_opts", "=", "__opts__", ".", "get", "(", "__virtualname__", ",", "{", "}", ")", "if", "'num_processes'", "not", "in", "mod_opts", ":", "mod_opts", "[", "'num_processes'", "]", "=", "1", "if", "mod_opts", "[", "'num_pro...
Start the saltnado!
[ "Start", "the", "saltnado!" ]
python
train
samastur/pyimagediet
pyimagediet/process.py
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L171-L192
def backup_file(filename, backup_ext):
    '''
    Make a file backup if:
    - backup extension is defined
    - backup file does not exist yet

    If backup file exists:
    - do nothing if it is same
    - complain loudly otherwise

    Return file name of the back up if successful. None otherwise.
    '''
    ext = backup_ext.strip(" .")
    if not ext:
        # No usable extension means backups are disabled.
        return None

    backup_filename = ".".join([filename, ext])
    if not exists(backup_filename):
        shutil.copyfile(filename, backup_filename)
    elif not filecmp.cmp(filename, backup_filename):
        # A different file already occupies the backup name; refuse to
        # clobber it.  (Bug fix: the original message concatenated
        # 'different' + 'file' with no space -> "differentfile".)
        raise DietException('Cannot make backup because a different '
                            'file with that name already exists.')
    # Backup either freshly created or already present with identical
    # content -- both count as success.
    return backup_filename
[ "def", "backup_file", "(", "filename", ",", "backup_ext", ")", ":", "ext", "=", "backup_ext", ".", "strip", "(", "\" .\"", ")", "if", "ext", ":", "backup_filename", "=", "\".\"", ".", "join", "(", "[", "filename", ",", "ext", "]", ")", "if", "not", "...
Make a file backup if: - backup extension is defined - backup file does not exist yet If backup file exists: - do nothing if it is same - complain loudly otherwise Return file name of the back up if successful. None otherwise.
[ "Make", "a", "file", "backup", "if", ":", "-", "backup", "extension", "is", "defined", "-", "backup", "file", "does", "not", "exist", "yet", "If", "backup", "file", "exists", ":", "-", "do", "nothing", "if", "it", "is", "same", "-", "complain", "loudly...
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/completerlib.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/completerlib.py#L192-L221
def module_completion(line):
    """
    Return the list of completion possibilities for an import line.

    The line looks like this :
    'import xml.d'
    'from xml.dom import'
    """
    words = line.split(' ')
    nwords = len(words)

    # 'from whatever <tab>' -> suggest the 'import ' keyword
    if nwords == 3 and words[0] == 'from':
        return ['import ']

    # 'from xy<tab>' or 'import xy<tab>' -> complete module paths
    if nwords < 3 and words[0] in ('import', 'from'):
        if nwords == 1:
            return get_root_modules()
        dotted = words[1].split('.')
        if len(dotted) < 2:
            return get_root_modules()
        prefix = '.'.join(dotted[:-1])
        return ['.'.join(dotted[:-1] + [name])
                for name in try_import(prefix, True)]

    # 'from xyz import abc<tab>' -> complete names inside the module
    if nwords >= 3 and words[0] == 'from':
        return try_import(words[1])
[ "def", "module_completion", "(", "line", ")", ":", "words", "=", "line", ".", "split", "(", "' '", ")", "nwords", "=", "len", "(", "words", ")", "# from whatever <tab> -> 'import '", "if", "nwords", "==", "3", "and", "words", "[", "0", "]", "==", "'from'...
Returns a list containing the completion possibilities for an import line. The line looks like this : 'import xml.d' 'from xml.dom import'
[ "Returns", "a", "list", "containing", "the", "completion", "possibilities", "for", "an", "import", "line", "." ]
python
test
smarie/python-parsyfiles
parsyfiles/type_inspection_tools.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/type_inspection_tools.py#L395-L409
def is_pep484_nonable(typ):
    """
    Tell whether a given type is nonable, i.e. whether it explicitly or
    implicitly declares a Union with NoneType. Nested TypeVars and Unions
    are supported.

    :param typ:
    :return:
    """
    # TODO rely on typing_inspect if there is an answer to https://github.com/ilevkivskyi/typing_inspect/issues/14
    if typ is type(None):
        return True
    if is_typevar(typ) or is_union_type(typ):
        # Recurse into every alternative of the Union / bound of the TypeVar.
        alternatives = get_alternate_types_resolving_forwardref_union_and_typevar(typ)
        return any(is_pep484_nonable(candidate) for candidate in alternatives)
    return False
[ "def", "is_pep484_nonable", "(", "typ", ")", ":", "# TODO rely on typing_inspect if there is an answer to https://github.com/ilevkivskyi/typing_inspect/issues/14", "if", "typ", "is", "type", "(", "None", ")", ":", "return", "True", "elif", "is_typevar", "(", "typ", ")", "...
Checks if a given type is nonable, meaning that it explicitly or implicitly declares a Union with NoneType. Nested TypeVars and Unions are supported. :param typ: :return:
[ "Checks", "if", "a", "given", "type", "is", "nonable", "meaning", "that", "it", "explicitly", "or", "implicitly", "declares", "a", "Union", "with", "NoneType", ".", "Nested", "TypeVars", "and", "Unions", "are", "supported", "." ]
python
train
mbi/django-simple-captcha
captcha/fields.py
https://github.com/mbi/django-simple-captcha/blob/e96cd8f63e41e658d103d12d6486b34195aee555/captcha/fields.py#L127-L132
def get_context(self, name, value, attrs):
    """Add captcha specific variables to context."""
    ctx = super(CaptchaTextInput, self).get_context(name, value, attrs)
    # Expose the captcha image and audio challenge URLs to the template.
    ctx.update({
        'image': self.image_url(),
        'audio': self.audio_url(),
    })
    return ctx
[ "def", "get_context", "(", "self", ",", "name", ",", "value", ",", "attrs", ")", ":", "context", "=", "super", "(", "CaptchaTextInput", ",", "self", ")", ".", "get_context", "(", "name", ",", "value", ",", "attrs", ")", "context", "[", "'image'", "]", ...
Add captcha specific variables to context.
[ "Add", "captcha", "specific", "variables", "to", "context", "." ]
python
train
saltstack/salt
salt/modules/xbpspkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xbpspkg.py#L533-L575
def _locate_repo_files(repo, rewrite=False):
    '''
    Find what file a repo is called in.

    Helper function for add_repo() and del_repo()

    repo
        url of the repo to locate (persistent).

    rewrite
        Whether to remove matching repository settings during this process.

    Returns a list of absolute paths.
    '''
    ret_val = []
    files = []
    conf_dirs = ['/etc/xbps.d/', '/usr/share/xbps.d/']
    name_glob = '*.conf'
    # Matches a line where first printing is "repository" and there is an equals
    # sign before the repo, an optional forwardslash at the end of the repo name,
    # and it's possible for there to be a comment after repository=repo.
    # Bug fix: the repo URL is now re.escape()d so regex metacharacters it
    # contains (e.g. '.', '+') are matched literally instead of as patterns.
    regex = re.compile(r'\s*repository\s*=\s*' + re.escape(repo) + r'/?\s*(#.*)?$')

    for cur_dir in conf_dirs:
        files.extend(glob.glob(cur_dir + name_glob))

    for filename in files:
        write_buff = []
        with salt.utils.files.fopen(filename, 'r') as cur_file:
            for line in cur_file:
                if regex.match(salt.utils.stringutils.to_unicode(line)):
                    # Repo found in this file; keep the file name and drop
                    # the matching line from the rewrite buffer.
                    ret_val.append(filename)
                else:
                    write_buff.append(line)
        if rewrite and filename in ret_val:
            if write_buff:
                # Rewrite the file without the removed repository lines.
                with salt.utils.files.fopen(filename, 'w') as rewrite_file:
                    rewrite_file.writelines(write_buff)
            else:
                # Prune empty files
                os.remove(filename)
    return ret_val
[ "def", "_locate_repo_files", "(", "repo", ",", "rewrite", "=", "False", ")", ":", "ret_val", "=", "[", "]", "files", "=", "[", "]", "conf_dirs", "=", "[", "'/etc/xbps.d/'", ",", "'/usr/share/xbps.d/'", "]", "name_glob", "=", "'*.conf'", "# Matches a line where...
Find what file a repo is called in. Helper function for add_repo() and del_repo() repo url of the repo to locate (persistent). rewrite Whether to remove matching repository settings during this process. Returns a list of absolute paths.
[ "Find", "what", "file", "a", "repo", "is", "called", "in", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L1036-L1059
def get_seh_chain(self):
    """
    @rtype:  list of tuple( int, int )
    @return: List of structured exception handlers.
        Each SEH is represented as a tuple of two addresses:
         - Address of this SEH block
         - Address of the SEH callback function
        Do not confuse this with the contents of the SEH block itself,
        where the first member is a pointer to the B{next} block instead.

    @raise NotImplementedError:
        This method is only supported in 32 bits versions of Windows.
    """
    seh_chain = list()
    try:
        process = self.get_process()
        seh = self.get_seh_chain_pointer()
        # 0xFFFFFFFF is the sentinel that terminates the SEH linked list.
        while seh != 0xFFFFFFFF:
            # The callback pointer lives at offset +4 of the record; the
            # first pointer (at the record itself) links to the next record.
            seh_func = process.read_pointer( seh + 4 )
            seh_chain.append( (seh, seh_func) )
            seh = process.read_pointer( seh )
    except WindowsError:
        # A failed memory read ends the walk; record the entry whose
        # callback could not be resolved.
        # NOTE(review): if get_process() itself raised WindowsError,
        # 'seh' would be unbound here -- confirm that path cannot occur.
        seh_chain.append( (seh, None) )
    return seh_chain
[ "def", "get_seh_chain", "(", "self", ")", ":", "seh_chain", "=", "list", "(", ")", "try", ":", "process", "=", "self", ".", "get_process", "(", ")", "seh", "=", "self", ".", "get_seh_chain_pointer", "(", ")", "while", "seh", "!=", "0xFFFFFFFF", ":", "s...
@rtype: list of tuple( int, int ) @return: List of structured exception handlers. Each SEH is represented as a tuple of two addresses: - Address of this SEH block - Address of the SEH callback function Do not confuse this with the contents of the SEH block itself, where the first member is a pointer to the B{next} block instead. @raise NotImplementedError: This method is only supported in 32 bits versions of Windows.
[ "@rtype", ":", "list", "of", "tuple", "(", "int", "int", ")", "@return", ":", "List", "of", "structured", "exception", "handlers", ".", "Each", "SEH", "is", "represented", "as", "a", "tuple", "of", "two", "addresses", ":", "-", "Address", "of", "this", ...
python
train