repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/policy/policy_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/policy/policy_client.py#L127-L143
def get_policy_evaluation(self, project, evaluation_id):
    """GetPolicyEvaluation.
    [Preview API] Gets the present evaluation state of a policy.
    :param str project: Project ID or project name
    :param str evaluation_id: ID of the policy evaluation to be retrieved.
    :rtype: :class:`<PolicyEvaluationRecord> <azure.devops.v5_0.policy.models.PolicyEvaluationRecord>`
    """
    # Build the route values table; a value is only serialized into the
    # route when it was actually supplied by the caller.
    route_values = {}
    for route_key, param_name, value in (
            ('project', 'project', project),
            ('evaluationId', 'evaluation_id', evaluation_id)):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    response = self._send(http_method='GET',
                          location_id='46aecb7a-5d2c-4647-897b-0209505a9fe4',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('PolicyEvaluationRecord', response)
[ "def", "get_policy_evaluation", "(", "self", ",", "project", ",", "evaluation_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", ...
GetPolicyEvaluation. [Preview API] Gets the present evaluation state of a policy. :param str project: Project ID or project name :param str evaluation_id: ID of the policy evaluation to be retrieved. :rtype: :class:`<PolicyEvaluationRecord> <azure.devops.v5_0.policy.models.PolicyEvaluationRecord>`
[ "GetPolicyEvaluation", ".", "[", "Preview", "API", "]", "Gets", "the", "present", "evaluation", "state", "of", "a", "policy", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "evaluation_id", ":", "ID...
python
train
numenta/htmresearch
htmresearch/frameworks/pytorch/duty_cycle_metrics.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/pytorch/duty_cycle_metrics.py#L55-L69
def plotDutyCycles(dutyCycle, filePath):
    """
    Create plot showing histogram of duty cycles

    :param dutyCycle: (torch tensor) the duty cycle of each unit
    :param filePath: (str) Full filename of image file
    """
    # Entropy is only used for the plot title; binaryEntropy returns a pair
    # and the aggregate entropy is its second element.
    _, aggregate_entropy = binaryEntropy(dutyCycle)
    histogram_bins = np.linspace(0.0, 0.3, 200)
    plt.hist(dutyCycle, histogram_bins, alpha=0.5, label='All cols')
    plt.title(f"Histogram of duty cycles, entropy={float(aggregate_entropy)}")
    plt.xlabel("Duty cycle")
    plt.ylabel("Number of units")
    plt.savefig(filePath)
    plt.close()
[ "def", "plotDutyCycles", "(", "dutyCycle", ",", "filePath", ")", ":", "_", ",", "entropy", "=", "binaryEntropy", "(", "dutyCycle", ")", "bins", "=", "np", ".", "linspace", "(", "0.0", ",", "0.3", ",", "200", ")", "plt", ".", "hist", "(", "dutyCycle", ...
Create plot showing histogram of duty cycles :param dutyCycle: (torch tensor) the duty cycle of each unit :param filePath: (str) Full filename of image file
[ "Create", "plot", "showing", "histogram", "of", "duty", "cycles" ]
python
train
quantumlib/Cirq
cirq/google/sim/xmon_simulator.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/xmon_simulator.py#L144-L159
def _run(
        self,
        circuit: circuits.Circuit,
        param_resolver: study.ParamResolver,
        repetitions: int,
) -> Dict[str, List[np.ndarray]]:
    """See definition in `cirq.SimulatesSamples`."""
    resolved = protocols.resolve_parameters(circuit, param_resolver)
    _verify_xmon_circuit(resolved)

    # Pick the sampling strategy based on circuit contents: when every
    # measurement is terminal a single wavefunction can be sampled many
    # times; otherwise the circuit must be repeated per run.
    if resolved.are_all_measurements_terminal():
        runner = self._run_sweep_sample
    else:
        runner = self._run_sweep_repeat
    return runner(resolved, repetitions)
[ "def", "_run", "(", "self", ",", "circuit", ":", "circuits", ".", "Circuit", ",", "param_resolver", ":", "study", ".", "ParamResolver", ",", "repetitions", ":", "int", ",", ")", "->", "Dict", "[", "str", ",", "List", "[", "np", ".", "ndarray", "]", "...
See definition in `cirq.SimulatesSamples`.
[ "See", "definition", "in", "cirq", ".", "SimulatesSamples", "." ]
python
train
twisted/mantissa
xmantissa/people.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/people.py#L1122-L1160
def editPerson(self, person, nickname, edits):
    """
    Change the name and contact information associated with the given
    L{Person}.

    @type person: L{Person}
    @param person: The person which will be modified.

    @type nickname: C{unicode}
    @param nickname: The new value for L{Person.name}

    @type edits: C{list}
    @param edits: list of tuples of L{IContactType} providers and
        corresponding L{ListChanges} objects or dictionaries of parameter
        values.
    """
    # Reject the rename when another person already holds this nickname.
    for match in self.store.query(Person, Person.name == nickname):
        if match is not person:
            raise ValueError(
                "A person with the name %r exists already." % (nickname,))

    previousName = person.name
    person.name = nickname
    self._callOnOrganizerPlugins('personNameChanged', person, previousName)

    for contactType, submission in edits:
        if not contactType.allowMultipleContactItems:
            # Single-item contact types carry a plain dict of values and
            # exactly one existing contact item to update.
            (contactItem,) = contactType.getContactItems(person)
            self.editContactItem(contactType, contactItem, submission)
            continue

        # Multi-item contact types carry a ListChanges object describing
        # edits, creations and deletions separately.
        for editChange in submission.edit:
            self.editContactItem(
                contactType, editChange.object, editChange.values)
        for creation in submission.create:
            creation.setter(
                self.createContactItem(
                    contactType, person, creation.values))
        for deletion in submission.delete:
            deletion.deleteFromStore()
[ "def", "editPerson", "(", "self", ",", "person", ",", "nickname", ",", "edits", ")", ":", "for", "existing", "in", "self", ".", "store", ".", "query", "(", "Person", ",", "Person", ".", "name", "==", "nickname", ")", ":", "if", "existing", "is", "per...
Change the name and contact information associated with the given L{Person}. @type person: L{Person} @param person: The person which will be modified. @type nickname: C{unicode} @param nickname: The new value for L{Person.name} @type edits: C{list} @param edits: list of tuples of L{IContactType} providers and corresponding L{ListChanges} objects or dictionaries of parameter values.
[ "Change", "the", "name", "and", "contact", "information", "associated", "with", "the", "given", "L", "{", "Person", "}", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L1250-L1278
def validate_rmq_cluster_running_nodes(self, sentry_units):
    """Check that all rmq unit hostnames are represented in the
    cluster_status output of all units.

    :param sentry_units: list of sentry unit pointers (all rmq units)
    :returns: None if successful, otherwise a string concatenating all
        error messages (one per missing cluster member)
    """
    host_names = self.get_unit_hostnames(sentry_units)
    errors = []

    # Query every unit for cluster_status running nodes
    for query_unit in sentry_units:
        query_unit_name = query_unit.info['unit_name']
        running_nodes = self.get_rmq_cluster_running_nodes(query_unit)

        # Confirm that every unit is represented in the queried unit's
        # cluster_status running nodes output.
        for validate_unit in sentry_units:
            val_host_name = host_names[validate_unit.info['unit_name']]
            # rabbitmq node names follow the rabbit@<hostname> convention.
            val_node_name = 'rabbit@{}'.format(val_host_name)

            if val_node_name not in running_nodes:
                errors.append('Cluster member check failed on {}: {} not '
                              'in {}\n'.format(query_unit_name,
                                               val_node_name,
                                               running_nodes))
    # Implicitly returns None when no errors were collected (success).
    if errors:
        return ''.join(errors)
[ "def", "validate_rmq_cluster_running_nodes", "(", "self", ",", "sentry_units", ")", ":", "host_names", "=", "self", ".", "get_unit_hostnames", "(", "sentry_units", ")", "errors", "=", "[", "]", "# Query every unit for cluster_status running nodes", "for", "query_unit", ...
Check that all rmq unit hostnames are represented in the cluster_status output of all units. :param host_names: dict of juju unit names to host names :param units: list of sentry unit pointers (all rmq units) :returns: None if successful, otherwise return error message
[ "Check", "that", "all", "rmq", "unit", "hostnames", "are", "represented", "in", "the", "cluster_status", "output", "of", "all", "units", "." ]
python
train
pandas-dev/pandas
pandas/core/nanops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L443-L491
def nanmean(values, axis=None, skipna=True, mask=None):
    """
    Compute the mean of the element along an axis ignoring NaNs

    Parameters
    ----------
    values : ndarray
    axis: int, optional
    skipna : bool, default True
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case use the same
        precision as the input array.

    Examples
    --------
    >>> import pandas.core.nanops as nanops
    >>> s = pd.Series([1, 2, np.nan])
    >>> nanops.nanmean(s)
    1.5
    """
    # _get_values applies the mask / skipna handling and reports the dtype
    # to use for intermediate computation (dtype_max) and wrapping.
    values, mask, dtype, dtype_max, _ = _get_values(
        values, skipna, 0, mask=mask)
    dtype_sum = dtype_max
    dtype_count = np.float64
    # Integer and datetime-like inputs are summed as float64 to avoid
    # overflow / invalid integer division; float inputs keep their own
    # precision for both the sum and the count.
    if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or
            is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)):
        dtype_sum = np.float64
    elif is_float_dtype(dtype):
        dtype_sum = dtype
        dtype_count = dtype
    count = _get_counts(mask, axis, dtype=dtype_count)
    the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))

    # Array result (reduction along an axis): divide elementwise, then
    # patch positions where no valid values existed with NaN.
    if axis is not None and getattr(the_sum, 'ndim', False):
        with np.errstate(all="ignore"):
            # suppress division by zero warnings
            the_mean = the_sum / count
        ct_mask = count == 0
        if ct_mask.any():
            the_mean[ct_mask] = np.nan
    else:
        # Scalar result: guard against an all-NaN / empty input.
        the_mean = the_sum / count if count > 0 else np.nan

    return _wrap_results(the_mean, dtype)
[ "def", "nanmean", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "mask", "=", "None", ")", ":", "values", ",", "mask", ",", "dtype", ",", "dtype_max", ",", "_", "=", "_get_values", "(", "values", ",", "skipna", ",", "0", ...
Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5
[ "Compute", "the", "mean", "of", "the", "element", "along", "an", "axis", "ignoring", "NaNs" ]
python
train
libtcod/python-tcod
tdl/event.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tdl/event.py#L291-L303
def run(self):
    """Delegate control over to this App instance.

    This function will process all events and send them to the
    special methods ev_* and key_*.

    A call to :any:`App.suspend` will return the control flow back
    to where this function is called.  And then the App can be run
    again.  But a single App instance can not be run multiple times
    simultaneously.
    """
    # '_App__running' is the name-mangled form of the private
    # __running flag; getattr with a default tolerates the attribute
    # not existing yet on a fresh instance.
    already_running = getattr(self, '_App__running', False)
    if already_running:
        raise _tdl.TDLError('An App can not be run multiple times simultaneously')
    self.__running = True
    # suspend() clears the flag, which ends this dispatch loop.
    while self.__running:
        self.runOnce()
[ "def", "run", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'_App__running'", ",", "False", ")", ":", "raise", "_tdl", ".", "TDLError", "(", "'An App can not be run multiple times simultaneously'", ")", "self", ".", "__running", "=", "True", "whil...
Delegate control over to this App instance. This function will process all events and send them to the special methods ev_* and key_*. A call to :any:`App.suspend` will return the control flow back to where this function is called. And then the App can be run again. But a single App instance can not be run multiple times simultaneously.
[ "Delegate", "control", "over", "to", "this", "App", "instance", ".", "This", "function", "will", "process", "all", "events", "and", "send", "them", "to", "the", "special", "methods", "ev_", "*", "and", "key_", "*", "." ]
python
train
SuperCowPowers/workbench
workbench/workers/rekall_adapter/rekall_adapter.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/rekall_adapter/rekall_adapter.py#L147-L150
def open(self, directory=None, filename=None, mode="rb"):
    """Open the file at directory/filename and return the file object.

    :param directory: directory containing the file
    :param filename: name of the file within *directory*
    :param mode: mode passed straight through to the builtin open
        (binary read by default)
    """
    target = os.path.join(directory, filename)
    return open(target, mode)
[ "def", "open", "(", "self", ",", "directory", "=", "None", ",", "filename", "=", "None", ",", "mode", "=", "\"rb\"", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", "return", "open", "(", "path", ",", ...
Opens a file for writing or reading.
[ "Opens", "a", "file", "for", "writing", "or", "reading", "." ]
python
train
wbond/oscrypto
oscrypto/_win/tls.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_win/tls.py#L1258-L1318
def write(self, data):
    """
    Writes data to the TLS-wrapped socket

    :param data:
        A byte string to write to the socket

    :raises:
        socket.socket - when a non-TLS socket error occurs
        oscrypto.errors.TLSError - when a TLS-related error occurs
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library
    """

    if self._context_handle_pointer is None:
        self._raise_closed()

    # Lazily allocate one reusable encryption buffer sized for a full
    # SChannel record: stream header + max payload + stream trailer.
    if not self._encrypt_data_buffer:
        self._encrypt_data_buffer = buffer_from_bytes(self._header_size + self._message_size + self._trailer_size)
        self._encrypt_desc, self._encrypt_buffers = self._create_buffers(4)

        self._encrypt_buffers[0].BufferType = Secur32Const.SECBUFFER_STREAM_HEADER
        self._encrypt_buffers[0].cbBuffer = self._header_size
        self._encrypt_buffers[0].pvBuffer = cast(secur32, 'BYTE *', self._encrypt_data_buffer)

        self._encrypt_buffers[1].BufferType = Secur32Const.SECBUFFER_DATA
        self._encrypt_buffers[1].pvBuffer = ref(self._encrypt_data_buffer, self._header_size)

        self._encrypt_buffers[2].BufferType = Secur32Const.SECBUFFER_STREAM_TRAILER
        self._encrypt_buffers[2].cbBuffer = self._trailer_size
        self._encrypt_buffers[2].pvBuffer = ref(self._encrypt_data_buffer, self._header_size + self._message_size)

    while len(data) > 0:
        # Only up to _message_size plaintext bytes fit in one record.
        to_write = min(len(data), self._message_size)
        write_to_buffer(self._encrypt_data_buffer, data[0:to_write], self._header_size)

        self._encrypt_buffers[1].cbBuffer = to_write
        self._encrypt_buffers[2].pvBuffer = ref(self._encrypt_data_buffer, self._header_size + to_write)

        result = secur32.EncryptMessage(
            self._context_handle_pointer,
            0,
            self._encrypt_desc,
            0
        )

        if result != Secur32Const.SEC_E_OK:
            handle_error(result, TLSError)

        # The ciphertext on the wire is header + payload + trailer.
        to_send = native(int, self._encrypt_buffers[0].cbBuffer)
        to_send += native(int, self._encrypt_buffers[1].cbBuffer)
        to_send += native(int, self._encrypt_buffers[2].cbBuffer)

        try:
            self._socket.send(bytes_from_buffer(self._encrypt_data_buffer, to_send))
        except (socket_.error) as e:
            # 10053 is WSAECONNABORTED - the peer dropped the connection.
            if e.errno == 10053:
                raise_disconnection()
            raise

        # BUGFIX: only to_write plaintext bytes were consumed this
        # iteration. Advancing by to_send (which includes the record
        # header and trailer sizes) would silently skip unsent plaintext.
        data = data[to_write:]
[ "def", "write", "(", "self", ",", "data", ")", ":", "if", "self", ".", "_context_handle_pointer", "is", "None", ":", "self", ".", "_raise_closed", "(", ")", "if", "not", "self", ".", "_encrypt_data_buffer", ":", "self", ".", "_encrypt_data_buffer", "=", "b...
Writes data to the TLS-wrapped socket :param data: A byte string to write to the socket :raises: socket.socket - when a non-TLS socket error occurs oscrypto.errors.TLSError - when a TLS-related error occurs ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library
[ "Writes", "data", "to", "the", "TLS", "-", "wrapped", "socket" ]
python
valid
inasafe/inasafe
safe/gui/tools/wizard/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/utilities.py#L251-L265
def get_image_path(definition):
    """Helper to get path of image from a definition in resource directory.

    :param definition: A definition (hazard, exposure).
    :type definition: dict

    :returns: The definition's image path, or the "not set" placeholder
        image when no image exists for this definition key.
    :rtype: str
    """
    candidate = resources_path(
        'img', 'wizard', 'keyword-subcategory-%s.svg' % definition['key'])
    # Fall back to the placeholder image when the SVG is missing.
    return candidate if os.path.exists(candidate) else not_set_image_path
[ "def", "get_image_path", "(", "definition", ")", ":", "path", "=", "resources_path", "(", "'img'", ",", "'wizard'", ",", "'keyword-subcategory-%s.svg'", "%", "definition", "[", "'key'", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":...
Helper to get path of image from a definition in resource directory. :param definition: A definition (hazard, exposure). :type definition: dict :returns: The definition's image path. :rtype: str
[ "Helper", "to", "get", "path", "of", "image", "from", "a", "definition", "in", "resource", "directory", "." ]
python
train
fedora-python/pyp2rpm
pyp2rpm/metadata_extractors.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/metadata_extractors.py#L498-L515
def sphinx_dir(self):
    """Returns directory with sphinx documentation, if there is such.

    Returns:
        Full path to sphinx documentation dir inside the archive, or
        None if there is no such.
    """
    # search for sphinx dir doc/ or docs/ under the first directory in
    # archive (e.g. spam-1.0.0/doc)
    candidates = self.archive.get_directories_re(
        settings.SPHINX_DIR_RE, full_path=True)

    # A sphinx doc dir is identified by containing a conf.py; directories
    # under a tests/ tree are skipped. (TODO: what if more are found?)
    for candidate in candidates:
        has_conf_py = self.archive.get_files_re(
            r'{0}/conf.py$'.format(re.escape(candidate)), full_path=True)
        if has_conf_py and 'tests' not in candidate.split(os.sep):
            return candidate
[ "def", "sphinx_dir", "(", "self", ")", ":", "# search for sphinx dir doc/ or docs/ under the first directory in", "# archive (e.g. spam-1.0.0/doc)", "candidate_dirs", "=", "self", ".", "archive", ".", "get_directories_re", "(", "settings", ".", "SPHINX_DIR_RE", ",", "full_pat...
Returns directory with sphinx documentation, if there is such. Returns: Full path to sphinx documentation dir inside the archive, or None if there is no such.
[ "Returns", "directory", "with", "sphinx", "documentation", "if", "there", "is", "such", ".", "Returns", ":", "Full", "path", "to", "sphinx", "documentation", "dir", "inside", "the", "archive", "or", "None", "if", "there", "is", "no", "such", "." ]
python
train
mozilla/funfactory
funfactory/utils.py
https://github.com/mozilla/funfactory/blob/c9bbf1c534eaa15641265bc75fa87afca52b7dd6/funfactory/utils.py#L9-L23
def absolutify(url):
    """Takes a URL and prepends the SITE_URL"""
    site_url = getattr(settings, 'SITE_URL', False)

    # If we don't define it explicitly
    if not site_url:
        protocol = settings.PROTOCOL
        hostname = settings.DOMAIN
        port = settings.PORT
        # Omit the port when it is the scheme's default.
        uses_default_port = (protocol, port) in (('https://', 443),
                                                 ('http://', 80))
        if uses_default_port:
            site_url = '{0}{1}'.format(protocol, hostname)
        else:
            site_url = '{0}{1}:{2}'.format(protocol, hostname, port)

    return site_url + url
[ "def", "absolutify", "(", "url", ")", ":", "site_url", "=", "getattr", "(", "settings", ",", "'SITE_URL'", ",", "False", ")", "# If we don't define it explicitly", "if", "not", "site_url", ":", "protocol", "=", "settings", ".", "PROTOCOL", "hostname", "=", "se...
Takes a URL and prepends the SITE_URL
[ "Takes", "a", "URL", "and", "prepends", "the", "SITE_URL" ]
python
train
Gorialis/jishaku
jishaku/paginators.py
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/paginators.py#L72-L84
def pages(self):
    """
    Returns the paginator's pages without prematurely closing the active page.
    """
    # protected access has to be permitted here to not close the paginator's pages

    # pylint: disable=protected-access
    collected = list(self.paginator._pages)
    active = self.paginator._current_page
    if len(active) > 1:
        closing_suffix = self.paginator.suffix or ''
        collected.append('\n'.join(active) + '\n' + closing_suffix)
    # pylint: enable=protected-access

    return collected
[ "def", "pages", "(", "self", ")", ":", "# protected access has to be permitted here to not close the paginator's pages", "# pylint: disable=protected-access", "paginator_pages", "=", "list", "(", "self", ".", "paginator", ".", "_pages", ")", "if", "len", "(", "self", ".",...
Returns the paginator's pages without prematurely closing the active page.
[ "Returns", "the", "paginator", "s", "pages", "without", "prematurely", "closing", "the", "active", "page", "." ]
python
train
walchko/update
update/update.py
https://github.com/walchko/update/blob/1619593571a091893a66fbd2d34f87382d9a03d5/update/update.py#L132-L143
def kernel():
    """
    Handle linux kernel update
    """
    banner = '================================'
    print(banner)
    print(' WARNING: upgrading the kernel')
    print(banner)
    # Give the user a moment to abort before the upgrade starts.
    time.sleep(5)
    print('-[kernel]----------')
    # rpi-update fetches and installs the latest Raspberry Pi firmware/kernel.
    cmd('rpi-update', True)
    print(' >> You MUST reboot to load the new kernel <<')
[ "def", "kernel", "(", ")", ":", "print", "(", "'================================'", ")", "print", "(", "' WARNING: upgrading the kernel'", ")", "print", "(", "'================================'", ")", "time", ".", "sleep", "(", "5", ")", "print", "(", "'-[kernel]--...
Handle linux kernel update
[ "Handle", "linux", "kernel", "update" ]
python
train
kristianfoerster/melodist
melodist/temperature.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/temperature.py#L215-L235
def get_shift_by_data(temp_hourly, lon, lat, time_zone):
    '''function to get max temp shift (monthly) by hourly data

    Parameters
    ----
    temp_hourly : observed hourly temperature data (pandas Series with a
        DatetimeIndex)
    lon : longitude in DezDeg
    lat : latitude in DezDeg
    time_zone: timezone

    Returns
    ----
    mean monthly difference (in hours) between the observed hour of the
    daily maximum temperature and the potential hour of solar noon,
    indexed by month (1-12)
    '''
    daily_index = temp_hourly.resample('D').mean().index
    sun_times = melodist.util.get_sun_times(daily_index, lon, lat, time_zone)

    # Hour of day at which the observed daily maximum occurs; days that are
    # entirely NaN produce NaN idxmax values and are dropped.
    idxmax = temp_hourly.groupby(temp_hourly.index.date).idxmax()
    idxmax.index = pd.to_datetime(idxmax.index)
    max_temp_hour_obs = idxmax.dropna().apply(lambda d: d.hour)

    # Potential hour of maximum temperature approximated by solar noon.
    max_temp_hour_pot = sun_times.sunnoon

    # Average the observed-vs-potential shift per calendar month.
    max_delta = max_temp_hour_obs - max_temp_hour_pot
    mean_monthly_delta = max_delta.groupby(max_delta.index.month).mean()
    return mean_monthly_delta
[ "def", "get_shift_by_data", "(", "temp_hourly", ",", "lon", ",", "lat", ",", "time_zone", ")", ":", "daily_index", "=", "temp_hourly", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", ".", "index", "sun_times", "=", "melodist", ".", "util", ".", ...
function to get max temp shift (monthly) by hourly data Parameters ---- hourly_data_obs : observed hourly data lat : latitude in DezDeg lon : longitude in DezDeg time_zone: timezone
[ "function", "to", "get", "max", "temp", "shift", "(", "monthly", ")", "by", "hourly", "data", "Parameters", "----", "hourly_data_obs", ":", "observed", "hourly", "data", "lat", ":", "latitude", "in", "DezDeg", "lon", ":", "longitude", "in", "DezDeg", "time_z...
python
train
hotdoc/hotdoc
hotdoc/core/symbols.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/symbols.py#L94-L114
def update_children_comments(self):
    """Propagate pieces of this symbol's comment onto its child symbols.

    For each child, the relevant fragment of ``self.comment`` (a named
    parameter description, the ``returns`` tag, ...) is assigned as the
    child's own comment. Children that already carry a comment with a
    description keep it (fields and enum members only). No-op when this
    symbol has no comment.
    """
    if self.comment is None:
        return

    for sym in self.get_children_symbols():
        if isinstance(sym, ParameterSymbol):
            sym.comment = self.comment.params.get(sym.argname)
        elif isinstance(sym, FieldSymbol):
            # Only fill in a comment when the field doesn't already
            # have a described one of its own.
            if not sym.comment or not sym.comment.description:
                sym.comment = self.comment.params.get(sym.member_name)
        elif isinstance(sym, EnumMemberSymbol):
            if not sym.comment or not sym.comment.description:
                sym.comment = self.comment.params.get(sym.unique_name)
        elif isinstance(sym, ReturnItemSymbol):
            tag = self.comment.tags.get('returns')
            sym.comment = comment_from_tag(tag)
        # Deliberate exact-type check (not isinstance): only plain Symbol
        # instances are matched here, not the subclasses handled above.
        elif type(sym) is Symbol:
            sym.comment = self.comment.params.get(sym.display_name)
[ "def", "update_children_comments", "(", "self", ")", ":", "if", "self", ".", "comment", "is", "None", ":", "return", "for", "sym", "in", "self", ".", "get_children_symbols", "(", ")", ":", "if", "isinstance", "(", "sym", ",", "ParameterSymbol", ")", ":", ...
Banana banana
[ "Banana", "banana" ]
python
train
mrname/haralyzer
haralyzer/assets.py
https://github.com/mrname/haralyzer/blob/5ef38b8cfc044d2dfeacf2dd4d1efb810228309d/haralyzer/assets.py#L266-L274
def _get_asset_size_trans(self, asset_type): """ Helper function to dynamically create *_size properties. """ if asset_type == 'page': assets = self.entries else: assets = getattr(self, '{0}_files'.format(asset_type), None) return self.get_total_size_trans(assets)
[ "def", "_get_asset_size_trans", "(", "self", ",", "asset_type", ")", ":", "if", "asset_type", "==", "'page'", ":", "assets", "=", "self", ".", "entries", "else", ":", "assets", "=", "getattr", "(", "self", ",", "'{0}_files'", ".", "format", "(", "asset_typ...
Helper function to dynamically create *_size properties.
[ "Helper", "function", "to", "dynamically", "create", "*", "_size", "properties", "." ]
python
train
inveniosoftware/invenio-oauth2server
invenio_oauth2server/ext.py
https://github.com/inveniosoftware/invenio-oauth2server/blob/7033d3495c1a2b830e101e43918e92a37bbb49f2/invenio_oauth2server/ext.py#L202-L214
def init_app(self, app, **kwargs):
    """Flask application initialization.

    :param app: An instance of :class:`flask.Flask`.
    """
    self.init_config(app)

    # Optionally widen the set of characters oauthlib accepts in
    # urlencoded requests (monkeypatch applied once, globally).
    extra_chars = app.config.get(
        'OAUTH2SERVER_ALLOWED_URLENCODE_CHARACTERS')
    if extra_chars:
        InvenioOAuth2ServerREST.monkeypatch_oauthlib_urlencode_chars(
            extra_chars)

    # Resolve the OAuth token (if any) before every request.
    app.before_request(verify_oauth_token_and_set_current_user)
[ "def", "init_app", "(", "self", ",", "app", ",", "*", "*", "kwargs", ")", ":", "self", ".", "init_config", "(", "app", ")", "allowed_urlencode_chars", "=", "app", ".", "config", ".", "get", "(", "'OAUTH2SERVER_ALLOWED_URLENCODE_CHARACTERS'", ")", "if", "allo...
Flask application initialization. :param app: An instance of :class:`flask.Flask`.
[ "Flask", "application", "initialization", "." ]
python
train
Crunch-io/crunch-cube
src/cr/cube/dimension.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/dimension.py#L753-L763
def addend_ids(self):
    """tuple of int ids of elements contributing to this subtotal.

    Any element id not present in the dimension or present but
    representing missing data is excluded.
    """
    valid_ids = self.valid_elements.element_ids
    requested = self._subtotal_dict.get("args", [])
    return tuple(element_id for element_id in requested
                 if element_id in valid_ids)
[ "def", "addend_ids", "(", "self", ")", ":", "return", "tuple", "(", "arg", "for", "arg", "in", "self", ".", "_subtotal_dict", ".", "get", "(", "\"args\"", ",", "[", "]", ")", "if", "arg", "in", "self", ".", "valid_elements", ".", "element_ids", ")" ]
tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded.
[ "tuple", "of", "int", "ids", "of", "elements", "contributing", "to", "this", "subtotal", "." ]
python
train
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/ssh.py#L197-L206
def remove(self, path, recursive=True):
    """
    Remove file or directory at location `path`.
    """
    # Recursive removal needs rm's -r flag; plain files do not.
    command = ["rm", "-r", path] if recursive else ["rm", path]
    self.remote_context.check_output(command)
[ "def", "remove", "(", "self", ",", "path", ",", "recursive", "=", "True", ")", ":", "if", "recursive", ":", "cmd", "=", "[", "\"rm\"", ",", "\"-r\"", ",", "path", "]", "else", ":", "cmd", "=", "[", "\"rm\"", ",", "path", "]", "self", ".", "remote...
Remove file or directory at location `path`.
[ "Remove", "file", "or", "directory", "at", "location", "path", "." ]
python
train
joealcorn/xbox
xbox/vendor/requests/packages/urllib3/_collections.py
https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/_collections.py#L151-L160
def add(self, key, value):
    """Adds a (name, value) pair, doesn't overwrite the value if it already
    exists.

    >>> headers = HTTPHeaderDict(foo='bar')
    >>> headers.add('Foo', 'baz')
    >>> headers['foo']
    'bar, baz'
    """
    # Keys are bucketed case-insensitively; the original casing is kept
    # alongside the value so it can be reproduced later.
    bucket = self._data.setdefault(key.lower(), [])
    bucket.append((key, value))
[ "def", "add", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_data", ".", "setdefault", "(", "key", ".", "lower", "(", ")", ",", "[", "]", ")", ".", "append", "(", "(", "key", ",", "value", ")", ")" ]
Adds a (name, value) pair, doesn't overwrite the value if it already exists. >>> headers = HTTPHeaderDict(foo='bar') >>> headers.add('Foo', 'baz') >>> headers['foo'] 'bar, baz'
[ "Adds", "a", "(", "name", "value", ")", "pair", "doesn", "t", "overwrite", "the", "value", "if", "it", "already", "exists", "." ]
python
train
bitlabstudio/python-server-metrics
server_metrics/memcached.py
https://github.com/bitlabstudio/python-server-metrics/blob/5f61e02db549a727d3d1d8e11ad2672728da3c4a/server_metrics/memcached.py#L8-L27
def get_memcached_usage(socket=None):
    """
    Returns memcached statistics.

    :param socket: Path to memcached's socket file.
    :returns: Tuple ``(bytes_, curr_items)`` parsed from the ``stats``
        output; either element is ``None`` if its stat was not found.
    """
    cmd = 'echo \'stats\' | nc -U {0}'.format(socket)
    output = getoutput(cmd)

    curr_items = None
    bytes_ = None

    # Drop the trailing END/blank line from the stats output.
    rows = output.split('\n')[:-1]
    for row in rows:
        fields = row.split()
        # Guard against blank or malformed lines ("STAT <name> <value>"
        # is expected); previously a short line raised IndexError.
        if len(fields) < 3:
            continue
        if fields[1] == 'curr_items':
            curr_items = int(fields[2])
        elif fields[1] == 'bytes':
            bytes_ = int(fields[2])

    return (bytes_, curr_items)
[ "def", "get_memcached_usage", "(", "socket", "=", "None", ")", ":", "cmd", "=", "'echo \\'stats\\' | nc -U {0}'", ".", "format", "(", "socket", ")", "output", "=", "getoutput", "(", "cmd", ")", "curr_items", "=", "None", "bytes_", "=", "None", "rows", "=", ...
Returns memcached statistics. :param socket: Path to memcached's socket file.
[ "Returns", "memcached", "statistics", "." ]
python
train
xenon-middleware/pyxenon
xenon/server.py
https://github.com/xenon-middleware/pyxenon/blob/d61109ad339ee9bb9f0723471d532727b0f235ad/xenon/server.py#L116-L151
def init(port=None, do_not_exit=False, disable_tls=False, log_level='WARNING'): """Start the Xenon GRPC server on the specified port, or, if a service is already running on that port, connect to that. If no port is given, a random port is selected. This means that, by default, every python instance will start its own instance of a xenon-grpc process. :param port: the port number :param do_not_exit: by default the GRPC server is shut down after Python exits (through the `atexit` module), setting this value to `True` will prevent that from happening.""" logger = logging.getLogger('xenon') logger.setLevel(logging.INFO) logger_handler = logging.StreamHandler() logger_handler.setFormatter(logging.Formatter(style='{')) logger_handler.setLevel(getattr(logging, log_level)) logger.addHandler(logger_handler) if port is None: port = find_free_port() if __server__.process is not None: logger.warning( "You tried to run init(), but the server is already running.") return __server__ __server__.port = port __server__.disable_tls = disable_tls __server__.__enter__() if not do_not_exit: atexit.register(__server__.__exit__, None, None, None) return __server__
[ "def", "init", "(", "port", "=", "None", ",", "do_not_exit", "=", "False", ",", "disable_tls", "=", "False", ",", "log_level", "=", "'WARNING'", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'xenon'", ")", "logger", ".", "setLevel", "(", ...
Start the Xenon GRPC server on the specified port, or, if a service is already running on that port, connect to that. If no port is given, a random port is selected. This means that, by default, every python instance will start its own instance of a xenon-grpc process. :param port: the port number :param do_not_exit: by default the GRPC server is shut down after Python exits (through the `atexit` module), setting this value to `True` will prevent that from happening.
[ "Start", "the", "Xenon", "GRPC", "server", "on", "the", "specified", "port", "or", "if", "a", "service", "is", "already", "running", "on", "that", "port", "connect", "to", "that", "." ]
python
train
robotools/fontParts
Lib/fontParts/base/glyph.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/glyph.py#L650-L671
def clear(self, contours=True, components=True, anchors=True, guidelines=True, image=True): """ Clear the glyph. >>> glyph.clear() This clears: - contours - components - anchors - guidelines - image It's possible to turn off the clearing of portions of the glyph with the listed arguments. >>> glyph.clear(guidelines=False) """ self._clear(contours=contours, components=components, anchors=anchors, guidelines=guidelines, image=image)
[ "def", "clear", "(", "self", ",", "contours", "=", "True", ",", "components", "=", "True", ",", "anchors", "=", "True", ",", "guidelines", "=", "True", ",", "image", "=", "True", ")", ":", "self", ".", "_clear", "(", "contours", "=", "contours", ",",...
Clear the glyph. >>> glyph.clear() This clears: - contours - components - anchors - guidelines - image It's possible to turn off the clearing of portions of the glyph with the listed arguments. >>> glyph.clear(guidelines=False)
[ "Clear", "the", "glyph", "." ]
python
train
bitesofcode/projexui
projexui/views/xscriptview.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/views/xscriptview.py#L67-L74
def saveXml(self, xml): """ Saves this view's content to XML. :param xml | <str> """ xscript = ElementTree.SubElement(xml, 'script') xscript.text = escape(self._edit.toPlainText())
[ "def", "saveXml", "(", "self", ",", "xml", ")", ":", "xscript", "=", "ElementTree", ".", "SubElement", "(", "xml", ",", "'script'", ")", "xscript", ".", "text", "=", "escape", "(", "self", ".", "_edit", ".", "toPlainText", "(", ")", ")" ]
Saves this view's content to XML. :param xml | <str>
[ "Saves", "this", "view", "s", "content", "to", "XML", ".", ":", "param", "xml", "|", "<str", ">" ]
python
train
DinoTools/python-overpy
overpy/__init__.py
https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1537-L1542
def _handle_end_area(self): """ Handle closing area element """ self._result.append(Area(result=self._result, **self._curr)) self._curr = {}
[ "def", "_handle_end_area", "(", "self", ")", ":", "self", ".", "_result", ".", "append", "(", "Area", "(", "result", "=", "self", ".", "_result", ",", "*", "*", "self", ".", "_curr", ")", ")", "self", ".", "_curr", "=", "{", "}" ]
Handle closing area element
[ "Handle", "closing", "area", "element" ]
python
train
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L3197-L3328
def default_privileges_grant(name, object_name, object_type, defprivileges=None, grant_option=None, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2019.0.0 Grant default privileges on a postgres object CLI Example: .. code-block:: bash salt '*' postgres.default_privileges_grant user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role to which default privileges should be granted object_name Name of the object on which the grant is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to grant, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL grant_option If grant_option is set to True, the recipient of the default privilege can in turn grant it to others prepend Table and Sequence object types live under a schema so this should be provided if the object is not under the default `public` schema maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' object_type, pdefrivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges) _validate_default_privileges(object_type, _defprivs, defprivileges) if has_default_privileges(name, object_name, object_type, defprivileges, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas): log.info('The object: %s of type: %s already has default privileges: %s set', object_name, object_type, defprivileges) return False _grants = ','.join(_defprivs) if object_type in ['table', 'sequence']: on_part 
= '{0}."{1}"'.format(prepend, object_name) elif object_type == 'function': on_part = '{0}'.format(object_name) else: on_part = '"{0}"'.format(object_name) if grant_option: if object_type == 'group': query = ' ALTER DEFAULT PRIVILEGES GRANT {0} TO "{1}" WITH ADMIN OPTION'.format( object_name, name) elif (object_type in ('table', 'sequence', 'function') and object_name.upper() == 'ALL'): query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO ' \ '"{3}" WITH GRANT OPTION'.format( _grants, object_type.upper(), prepend, name) else: query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO "{3}" WITH GRANT OPTION'.format( _grants, object_type.upper(), on_part, name) else: if object_type == 'group': query = 'ALTER DEFAULT PRIVILEGES GRANT {0} TO "{1}"'.format(object_name, name) elif (object_type in ('table', 'sequence') and object_name.upper() == 'ALL'): query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO "{3}"'.format( _grants, object_type.upper(), prepend, name) else: query = ' ALTER DEFAULT PRIVILEGES IN SCHEMA {2} GRANT {0} ON {1}S TO "{3}"'.format( _grants, object_type.upper(), prepend, name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
[ "def", "default_privileges_grant", "(", "name", ",", "object_name", ",", "object_type", ",", "defprivileges", "=", "None", ",", "grant_option", "=", "None", ",", "prepend", "=", "'public'", ",", "maintenance_db", "=", "None", ",", "user", "=", "None", ",", "...
.. versionadded:: 2019.0.0 Grant default privileges on a postgres object CLI Example: .. code-block:: bash salt '*' postgres.default_privileges_grant user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role to which default privileges should be granted object_name Name of the object on which the grant is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to grant, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL grant_option If grant_option is set to True, the recipient of the default privilege can in turn grant it to others prepend Table and Sequence object types live under a schema so this should be provided if the object is not under the default `public` schema maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
[ "..", "versionadded", "::", "2019", ".", "0", ".", "0" ]
python
train
ic-labs/django-icekit
icekit/publishing/managers.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/managers.py#L315-L330
def only(self, *args, **kwargs): """ Override default implementation to ensure that we *always* include the `publishing_is_draft` field when `only` is invoked, to avoid eternal recursion errors if `only` is called then we check for this item attribute in our custom `iterator`. Discovered the need for this by tracking down an eternal recursion error in the `only` query performed in fluent_pages.urlresolvers._get_pages_of_type """ field_names = args if 'publishing_is_draft' not in field_names: field_names += ('publishing_is_draft',) return super(PublishingQuerySet, self) \ .only(*field_names, **kwargs)
[ "def", "only", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "field_names", "=", "args", "if", "'publishing_is_draft'", "not", "in", "field_names", ":", "field_names", "+=", "(", "'publishing_is_draft'", ",", ")", "return", "super", "("...
Override default implementation to ensure that we *always* include the `publishing_is_draft` field when `only` is invoked, to avoid eternal recursion errors if `only` is called then we check for this item attribute in our custom `iterator`. Discovered the need for this by tracking down an eternal recursion error in the `only` query performed in fluent_pages.urlresolvers._get_pages_of_type
[ "Override", "default", "implementation", "to", "ensure", "that", "we", "*", "always", "*", "include", "the", "publishing_is_draft", "field", "when", "only", "is", "invoked", "to", "avoid", "eternal", "recursion", "errors", "if", "only", "is", "called", "then", ...
python
train
bpsmith/tia
tia/analysis/model/pos.py
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pos.py#L95-L104
def subset(self, subtxns): """Construct a new Positions object from the new Txns object (which is assumed to be a subset) of current Txns object""" result = Positions(subtxns) if hasattr(self, '_frame'): result._frame = self._frame.ix[subtxns.pids] # passing in array results in index name being removed for some reason??? if result._frame.index.name != self._frame.index.name: result._frame.index.name = self._frame.index.name return result
[ "def", "subset", "(", "self", ",", "subtxns", ")", ":", "result", "=", "Positions", "(", "subtxns", ")", "if", "hasattr", "(", "self", ",", "'_frame'", ")", ":", "result", ".", "_frame", "=", "self", ".", "_frame", ".", "ix", "[", "subtxns", ".", "...
Construct a new Positions object from the new Txns object (which is assumed to be a subset) of current Txns object
[ "Construct", "a", "new", "Positions", "object", "from", "the", "new", "Txns", "object", "(", "which", "is", "assumed", "to", "be", "a", "subset", ")", "of", "current", "Txns", "object" ]
python
train
maweigert/gputools
gputools/denoise/bilateral3.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/denoise/bilateral3.py#L17-L44
def bilateral3(data, size_filter, sigma_p, sigma_x = 10.): """bilateral filter """ dtype = data.dtype.type dtypes_kernels = {np.float32:"bilat3_float",} if not dtype in dtypes_kernels: logger.info("data type %s not supported yet (%s), casting to float:"%(dtype,list(dtypes_kernels.keys()))) data = data.astype(np.float32) dtype = data.dtype.type img = OCLImage.from_array(data) res = OCLArray.empty_like(data) prog = OCLProgram(abspath("kernels/bilateral3.cl")) logger.debug("in bilateral3, image shape: {}".format(img.shape)) prog.run_kernel(dtypes_kernels[dtype], img.shape,None, img,res.data, np.int32(img.shape[0]),np.int32(img.shape[1]), np.int32(size_filter),np.float32(sigma_x),np.float32(sigma_p)) return res.get()
[ "def", "bilateral3", "(", "data", ",", "size_filter", ",", "sigma_p", ",", "sigma_x", "=", "10.", ")", ":", "dtype", "=", "data", ".", "dtype", ".", "type", "dtypes_kernels", "=", "{", "np", ".", "float32", ":", "\"bilat3_float\"", ",", "}", "if", "not...
bilateral filter
[ "bilateral", "filter" ]
python
train
hugapi/hug
hug/introspect.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/introspect.py#L43-L48
def arguments(function, extra_arguments=0): """Returns the name of all arguments a function takes""" if not hasattr(function, '__code__'): return () return function.__code__.co_varnames[:function.__code__.co_argcount + extra_arguments]
[ "def", "arguments", "(", "function", ",", "extra_arguments", "=", "0", ")", ":", "if", "not", "hasattr", "(", "function", ",", "'__code__'", ")", ":", "return", "(", ")", "return", "function", ".", "__code__", ".", "co_varnames", "[", ":", "function", "....
Returns the name of all arguments a function takes
[ "Returns", "the", "name", "of", "all", "arguments", "a", "function", "takes" ]
python
train
spotify/docker_interface
docker_interface/util.py
https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L221-L244
def set_default_from_schema(instance, schema): """ Populate default values on an `instance` given a `schema`. Parameters ---------- instance : dict instance to populate default values for schema : dict JSON schema with default values Returns ------- instance : dict instance with populated default values """ for name, property_ in schema.get('properties', {}).items(): # Set the defaults at this level of the schema if 'default' in property_: instance.setdefault(name, property_['default']) # Descend one level if the property is an object if 'properties' in property_: set_default_from_schema(instance.setdefault(name, {}), property_) return instance
[ "def", "set_default_from_schema", "(", "instance", ",", "schema", ")", ":", "for", "name", ",", "property_", "in", "schema", ".", "get", "(", "'properties'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "# Set the defaults at this level of the schema", "if...
Populate default values on an `instance` given a `schema`. Parameters ---------- instance : dict instance to populate default values for schema : dict JSON schema with default values Returns ------- instance : dict instance with populated default values
[ "Populate", "default", "values", "on", "an", "instance", "given", "a", "schema", "." ]
python
train
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L849-L859
def _supported_types_for_metadata(metadata): """Returns the types we have metadata for based on the PhoneMetadata object passed in, which must be non-None.""" numtypes = set() for numtype in PhoneNumberType.values(): if numtype in (PhoneNumberType.FIXED_LINE_OR_MOBILE, PhoneNumberType.UNKNOWN): # Never return FIXED_LINE_OR_MOBILE (it is a convenience type, and represents that a # particular number type can't be determined) or UNKNOWN (the non-type). continue if _desc_has_data(_number_desc_by_type(metadata, numtype)): numtypes.add(numtype) return numtypes
[ "def", "_supported_types_for_metadata", "(", "metadata", ")", ":", "numtypes", "=", "set", "(", ")", "for", "numtype", "in", "PhoneNumberType", ".", "values", "(", ")", ":", "if", "numtype", "in", "(", "PhoneNumberType", ".", "FIXED_LINE_OR_MOBILE", ",", "Phon...
Returns the types we have metadata for based on the PhoneMetadata object passed in, which must be non-None.
[ "Returns", "the", "types", "we", "have", "metadata", "for", "based", "on", "the", "PhoneMetadata", "object", "passed", "in", "which", "must", "be", "non", "-", "None", "." ]
python
train
nagius/snmp_passpersist
snmp_passpersist.py
https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L197-L204
def add_oid_entry(self, oid, type, value, label=None): """General function to add an oid entry to the MIB subtree.""" if self.debug: print('DEBUG: %s %s %s %s'%(oid,type,value,label)) item={'type': str(type), 'value': str(value)} if label is not None: item['label']=str(label) self.pending[oid]=item
[ "def", "add_oid_entry", "(", "self", ",", "oid", ",", "type", ",", "value", ",", "label", "=", "None", ")", ":", "if", "self", ".", "debug", ":", "print", "(", "'DEBUG: %s %s %s %s'", "%", "(", "oid", ",", "type", ",", "value", ",", "label", ")", "...
General function to add an oid entry to the MIB subtree.
[ "General", "function", "to", "add", "an", "oid", "entry", "to", "the", "MIB", "subtree", "." ]
python
train
decryptus/sonicprobe
sonicprobe/validator/country.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/validator/country.py#L277-L287
def validate(value): """ checks if given value is a valid country codes @param string value @return bool """ if not helpers.has_len(value): return False return COUNTRIES.has_key(str(value).lower())
[ "def", "validate", "(", "value", ")", ":", "if", "not", "helpers", ".", "has_len", "(", "value", ")", ":", "return", "False", "return", "COUNTRIES", ".", "has_key", "(", "str", "(", "value", ")", ".", "lower", "(", ")", ")" ]
checks if given value is a valid country codes @param string value @return bool
[ "checks", "if", "given", "value", "is", "a", "valid", "country", "codes" ]
python
train
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L517-L530
def get_var_shape(self, name): """ Return shape of the array. """ rank = self.get_var_rank(name) name = create_string_buffer(name) arraytype = ndpointer(dtype='int32', ndim=1, shape=(MAXDIMS, ), flags='F') shape = np.empty((MAXDIMS, ), dtype='int32', order='F') self.library.get_var_shape.argtypes = [c_char_p, arraytype] self.library.get_var_shape(name, shape) return tuple(shape[:rank])
[ "def", "get_var_shape", "(", "self", ",", "name", ")", ":", "rank", "=", "self", ".", "get_var_rank", "(", "name", ")", "name", "=", "create_string_buffer", "(", "name", ")", "arraytype", "=", "ndpointer", "(", "dtype", "=", "'int32'", ",", "ndim", "=", ...
Return shape of the array.
[ "Return", "shape", "of", "the", "array", "." ]
python
train
kapot65/python-df-parser
dfparser/rsh_parser.py
https://github.com/kapot65/python-df-parser/blob/bb3eec0fb7ca85d72cb1d9ed7415efe074594f26/dfparser/rsh_parser.py#L275-L328
def dump_to_rsb(params: dict, times: np.ndarray, data: np.ndarray) -> bytes: """Сохранение данных в формате rsb. @params -- параметры набора @times -- абсолютные времена блоков в наносекундах @data -- данные блоков (block_num, block_size) @return -- сериализованные данные """ assert isinstance(times, np.ndarray) assert times.ndim == 1 assert isinstance(data, np.ndarray) assert data.ndim == 2 assert len(data) == len(times) params['b_size'] = data.shape[1] params['events_num'] = data.shape[0] start = int(times.min() * 1e-9) end = int(times.max() * 1e-9) if 'start_time' not in params: params['start_time'] = datetime.fromtimestamp(start).isoformat() if 'end_time' not in params: params['end_time'] = datetime.fromtimestamp(end).isoformat() text_header = bytearray(5120) text = serialise_to_rsh(params).encode('cp1251') text_header[:len(text)] = text params['text_header_size'] = len(text) binary_header = serialize_to_rsb(params) bin_data = b'' ch_num = params['channel_number'] ev_size = params['b_size'] for i, event_data in enumerate(data): event = bytearray( np.zeros(96 + 2 * ch_num * ev_size, np.byte).tostring()) text_hdr = datetime.fromtimestamp(int(times[i] * 10e-9)).isoformat() event[:len(text_hdr)] = text_hdr.encode('cp1251') event[64:68] = struct.pack('I', i) event[72:80] = struct.pack('Q', int(times[i] * 10e-9)) event[80:88] = struct.pack('Q', int(times[i])) event[96:] = event_data.astype(np.int16).tostring() bin_data += event return bytes(text_header + binary_header + bin_data)
[ "def", "dump_to_rsb", "(", "params", ":", "dict", ",", "times", ":", "np", ".", "ndarray", ",", "data", ":", "np", ".", "ndarray", ")", "->", "bytes", ":", "assert", "isinstance", "(", "times", ",", "np", ".", "ndarray", ")", "assert", "times", ".", ...
Сохранение данных в формате rsb. @params -- параметры набора @times -- абсолютные времена блоков в наносекундах @data -- данные блоков (block_num, block_size) @return -- сериализованные данные
[ "Сохранение", "данных", "в", "формате", "rsb", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/fc_sans/managed_sans.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/fc_sans/managed_sans.py#L125-L138
def create_endpoints_csv_file(self, timeout=-1): """ Creates an endpoints CSV file for a SAN. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Endpoint CSV File Response. """ uri = "{}/endpoints/".format(self.data["uri"]) return self._helper.do_post(uri, {}, timeout, None)
[ "def", "create_endpoints_csv_file", "(", "self", ",", "timeout", "=", "-", "1", ")", ":", "uri", "=", "\"{}/endpoints/\"", ".", "format", "(", "self", ".", "data", "[", "\"uri\"", "]", ")", "return", "self", ".", "_helper", ".", "do_post", "(", "uri", ...
Creates an endpoints CSV file for a SAN. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Endpoint CSV File Response.
[ "Creates", "an", "endpoints", "CSV", "file", "for", "a", "SAN", "." ]
python
train
langloisjp/pysvclog
servicelog.py
https://github.com/langloisjp/pysvclog/blob/ab429bb12e13dca63ffce082e633d8879b6e3854/servicelog.py#L66-L74
def send(self, jsonstr): """ Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}') """ udp_sock = socket(AF_INET, SOCK_DGRAM) udp_sock.sendto(jsonstr.encode('utf-8'), self.addr)
[ "def", "send", "(", "self", ",", "jsonstr", ")", ":", "udp_sock", "=", "socket", "(", "AF_INET", ",", "SOCK_DGRAM", ")", "udp_sock", ".", "sendto", "(", "jsonstr", ".", "encode", "(", "'utf-8'", ")", ",", "self", ".", "addr", ")" ]
Send jsonstr to the UDP collector >>> logger = UDPLogger() >>> logger.send('{"key": "value"}')
[ "Send", "jsonstr", "to", "the", "UDP", "collector" ]
python
train
mseclab/PyJFuzz
gramfuzz/gramfuzz/rand.py
https://github.com/mseclab/PyJFuzz/blob/f777067076f62c9ab74ffea6e90fd54402b7a1b4/gramfuzz/gramfuzz/rand.py#L50-L70
def randfloat(a, b=None): """Return a random float :param float a: Either the minimum value (inclusive) if ``b`` is set, or the maximum value if ``b`` is not set (non-inclusive, in which case the minimum is implicitly 0.0) :param float b: The maximum value to generate (non-inclusive) :returns: float """ if b is None: max_ = a min_ = 0.0 else: min_ = a max_ = b diff = max_ - min_ res = _random() res *= diff res += min_ return res
[ "def", "randfloat", "(", "a", ",", "b", "=", "None", ")", ":", "if", "b", "is", "None", ":", "max_", "=", "a", "min_", "=", "0.0", "else", ":", "min_", "=", "a", "max_", "=", "b", "diff", "=", "max_", "-", "min_", "res", "=", "_random", "(", ...
Return a random float :param float a: Either the minimum value (inclusive) if ``b`` is set, or the maximum value if ``b`` is not set (non-inclusive, in which case the minimum is implicitly 0.0) :param float b: The maximum value to generate (non-inclusive) :returns: float
[ "Return", "a", "random", "float" ]
python
test
cackharot/suds-py3
suds/xsd/sxbasic.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/sxbasic.py#L501-L513
def bind(cls, ns, location=None): """ Bind a namespace to a schema location (URI). This is used for imports that don't specify a schemaLocation. @param ns: A namespace-uri. @type ns: str @param location: The (optional) schema location for the namespace. (default=ns). @type location: str """ if location is None: location = ns cls.locations[ns] = location
[ "def", "bind", "(", "cls", ",", "ns", ",", "location", "=", "None", ")", ":", "if", "location", "is", "None", ":", "location", "=", "ns", "cls", ".", "locations", "[", "ns", "]", "=", "location" ]
Bind a namespace to a schema location (URI). This is used for imports that don't specify a schemaLocation. @param ns: A namespace-uri. @type ns: str @param location: The (optional) schema location for the namespace. (default=ns). @type location: str
[ "Bind", "a", "namespace", "to", "a", "schema", "location", "(", "URI", ")", ".", "This", "is", "used", "for", "imports", "that", "don", "t", "specify", "a", "schemaLocation", "." ]
python
train
SeabornGames/RequestClient
seaborn/request_client/repr_wrapper.py
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/repr_wrapper.py#L356-L361
def repr_setup(self, name=None, col_names=None, col_types=None): """ This wasn't safe to pass into init because of the inheritance """ self._name = name or self._name self._col_types = col_types or self._col_types
[ "def", "repr_setup", "(", "self", ",", "name", "=", "None", ",", "col_names", "=", "None", ",", "col_types", "=", "None", ")", ":", "self", ".", "_name", "=", "name", "or", "self", ".", "_name", "self", ".", "_col_types", "=", "col_types", "or", "sel...
This wasn't safe to pass into init because of the inheritance
[ "This", "wasn", "t", "safe", "to", "pass", "into", "init", "because", "of", "the", "inheritance" ]
python
train
tjcsl/cslbot
cslbot/commands/cve.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/cve.py#L29-L56
def cmd(send, msg, args): """Gets info on a CVE id from MITRE's CVE database Syntax: {command} <cveid> """ elements = msg.split('-') if len(elements) > 3 or len(elements) < 2: send("Invalid CVE format") return # If there are three fields, ignore the first (we don't actually need to send CVE- if len(elements) == 3: if elements[0].upper() != 'CVE': send("Invalid CVE format") return elements.pop(0) # The first digit field should be exactly four digits long, the second is 4+ if not re.search(r"^[\d]{4}$", elements[0]) or not re.search(r"^[\d]{4,}$", elements[1]): send("Invalid CVE format") return search = "%s-%s" % (elements[0], elements[1]) url = 'http://cve.mitre.org/cgi-bin/cvename.cgi?name=%s' % search html = fromstring(get(url).text) title = html.find(".//title").text.splitlines()[2] if title.startswith('ERROR'): output = 'Invalid CVE Number' else: key = args['config']['api']['bitlykey'] output = "%s -- %s" % (title, get_short(url, key)) send(output)
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "elements", "=", "msg", ".", "split", "(", "'-'", ")", "if", "len", "(", "elements", ")", ">", "3", "or", "len", "(", "elements", ")", "<", "2", ":", "send", "(", "\"Invalid CVE format...
Gets info on a CVE id from MITRE's CVE database Syntax: {command} <cveid>
[ "Gets", "info", "on", "a", "CVE", "id", "from", "MITRE", "s", "CVE", "database", "Syntax", ":", "{", "command", "}", "<cveid", ">" ]
python
train
etobella/python-xmlsig
src/xmlsig/algorithms/base.py
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/algorithms/base.py#L31-L52
def get_public_key(key_info, ctx): """ Get the public key if its defined in X509Certificate node. Otherwise, take self.public_key element :param sign: Signature node :type sign: lxml.etree.Element :return: Public key to use """ x509_certificate = key_info.find( 'ds:KeyInfo/ds:X509Data/ds:X509Certificate', namespaces={'ds': ns.DSigNs} ) if x509_certificate is not None: return load_der_x509_certificate( base64.b64decode(x509_certificate.text), default_backend() ).public_key() if ctx.public_key is not None: return ctx.public_key if isinstance(ctx.private_key, (str, bytes)): return ctx.private_key return ctx.private_key.public_key()
[ "def", "get_public_key", "(", "key_info", ",", "ctx", ")", ":", "x509_certificate", "=", "key_info", ".", "find", "(", "'ds:KeyInfo/ds:X509Data/ds:X509Certificate'", ",", "namespaces", "=", "{", "'ds'", ":", "ns", ".", "DSigNs", "}", ")", "if", "x509_certificate...
Get the public key if its defined in X509Certificate node. Otherwise, take self.public_key element :param sign: Signature node :type sign: lxml.etree.Element :return: Public key to use
[ "Get", "the", "public", "key", "if", "its", "defined", "in", "X509Certificate", "node", ".", "Otherwise", "take", "self", ".", "public_key", "element", ":", "param", "sign", ":", "Signature", "node", ":", "type", "sign", ":", "lxml", ".", "etree", ".", "...
python
train
prawn-cake/vk-requests
vk_requests/streaming.py
https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L132-L138
def remove_rule(self, tag): """Remove a rule by tag """ resp = requests.delete(url=self.REQUEST_URL.format(**self._params), json={'tag': tag}) return resp.json()
[ "def", "remove_rule", "(", "self", ",", "tag", ")", ":", "resp", "=", "requests", ".", "delete", "(", "url", "=", "self", ".", "REQUEST_URL", ".", "format", "(", "*", "*", "self", ".", "_params", ")", ",", "json", "=", "{", "'tag'", ":", "tag", "...
Remove a rule by tag
[ "Remove", "a", "rule", "by", "tag" ]
python
train
CalebBell/thermo
thermo/vapor_pressure.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/vapor_pressure.py#L140-L183
def TRC_Antoine_extended(T, Tc, to, A, B, C, n, E, F): r'''Calculates vapor pressure of a chemical using the TRC Extended Antoine equation. Parameters are chemical dependent, and said to be from the Thermodynamics Research Center (TRC) at Texas A&M. Coefficients for various chemicals can be found in [1]_. .. math:: \log_{10} P^{sat} = A - \frac{B}{T + C} + 0.43429x^n + Ex^8 + Fx^{12} x = \max \left(\frac{T-t_o-273.15}{T_c}, 0 \right) Parameters ---------- T : float Temperature of fluid, [K] A, B, C, n, E, F : floats Regressed coefficients for the Antoine Extended (TRC) equation, specific for each chemical, [-] Returns ------- Psat : float Vapor pressure calculated with coefficients [Pa] Notes ----- Assumes coefficients are for calculating vapor pressure in Pascal. Coefficients should be consistent with input temperatures in Kelvin; Examples -------- Tetrafluoromethane, coefficients from [1]_, at 180 K: >>> TRC_Antoine_extended(180.0, 227.51, -120., 8.95894, 510.595, -15.95, ... 2.41377, -93.74, 7425.9) 706317.0898414153 References ---------- .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition. New York: McGraw-Hill Professional, 2000. ''' x = max((T - to - 273.15)/Tc, 0.) return 10.**(A - B/(T+C) + 0.43429*x**n + E*x**8 + F*x**12)
[ "def", "TRC_Antoine_extended", "(", "T", ",", "Tc", ",", "to", ",", "A", ",", "B", ",", "C", ",", "n", ",", "E", ",", "F", ")", ":", "x", "=", "max", "(", "(", "T", "-", "to", "-", "273.15", ")", "/", "Tc", ",", "0.", ")", "return", "10."...
r'''Calculates vapor pressure of a chemical using the TRC Extended Antoine equation. Parameters are chemical dependent, and said to be from the Thermodynamics Research Center (TRC) at Texas A&M. Coefficients for various chemicals can be found in [1]_. .. math:: \log_{10} P^{sat} = A - \frac{B}{T + C} + 0.43429x^n + Ex^8 + Fx^{12} x = \max \left(\frac{T-t_o-273.15}{T_c}, 0 \right) Parameters ---------- T : float Temperature of fluid, [K] A, B, C, n, E, F : floats Regressed coefficients for the Antoine Extended (TRC) equation, specific for each chemical, [-] Returns ------- Psat : float Vapor pressure calculated with coefficients [Pa] Notes ----- Assumes coefficients are for calculating vapor pressure in Pascal. Coefficients should be consistent with input temperatures in Kelvin; Examples -------- Tetrafluoromethane, coefficients from [1]_, at 180 K: >>> TRC_Antoine_extended(180.0, 227.51, -120., 8.95894, 510.595, -15.95, ... 2.41377, -93.74, 7425.9) 706317.0898414153 References ---------- .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition. New York: McGraw-Hill Professional, 2000.
[ "r", "Calculates", "vapor", "pressure", "of", "a", "chemical", "using", "the", "TRC", "Extended", "Antoine", "equation", ".", "Parameters", "are", "chemical", "dependent", "and", "said", "to", "be", "from", "the", "Thermodynamics", "Research", "Center", "(", "...
python
valid
DarkEnergySurvey/ugali
ugali/analysis/mcmc.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/mcmc.py#L160-L170
def lnprior(self,theta): """ Logarithm of the prior """ params,priors = self.params,self.priors kwargs = dict(list(zip(params,theta))) err = np.seterr(invalid='raise') try: lnprior = np.sum(np.log([priors[k](v) for k,v in list(kwargs.items())])) except (FloatingPointError,ValueError): lnprior = -np.inf np.seterr(**err) return lnprior
[ "def", "lnprior", "(", "self", ",", "theta", ")", ":", "params", ",", "priors", "=", "self", ".", "params", ",", "self", ".", "priors", "kwargs", "=", "dict", "(", "list", "(", "zip", "(", "params", ",", "theta", ")", ")", ")", "err", "=", "np", ...
Logarithm of the prior
[ "Logarithm", "of", "the", "prior" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/storage_utils.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/storage_utils.py#L171-L191
def find_endurance_tier_iops_per_gb(volume): """Find the tier for the given endurance volume (IOPS per GB) :param volume: The volume for which the tier level is desired :return: Returns a float value indicating the IOPS per GB for the volume """ tier = volume['storageTierLevel'] iops_per_gb = 0.25 if tier == "LOW_INTENSITY_TIER": iops_per_gb = 0.25 elif tier == "READHEAVY_TIER": iops_per_gb = 2 elif tier == "WRITEHEAVY_TIER": iops_per_gb = 4 elif tier == "10_IOPS_PER_GB": iops_per_gb = 10 else: raise ValueError("Could not find tier IOPS per GB for this volume") return iops_per_gb
[ "def", "find_endurance_tier_iops_per_gb", "(", "volume", ")", ":", "tier", "=", "volume", "[", "'storageTierLevel'", "]", "iops_per_gb", "=", "0.25", "if", "tier", "==", "\"LOW_INTENSITY_TIER\"", ":", "iops_per_gb", "=", "0.25", "elif", "tier", "==", "\"READHEAVY_...
Find the tier for the given endurance volume (IOPS per GB) :param volume: The volume for which the tier level is desired :return: Returns a float value indicating the IOPS per GB for the volume
[ "Find", "the", "tier", "for", "the", "given", "endurance", "volume", "(", "IOPS", "per", "GB", ")" ]
python
train
waleedka/hiddenlayer
hiddenlayer/graph.py
https://github.com/waleedka/hiddenlayer/blob/294f8732b271cbdd6310c55bdf5ce855cbf61c75/hiddenlayer/graph.py#L311-L355
def build_dot(self): """Generate a GraphViz Dot graph. Returns a GraphViz Digraph object. """ from graphviz import Digraph # Build GraphViz Digraph dot = Digraph() dot.attr("graph", bgcolor=self.theme["background_color"], color=self.theme["outline_color"], fontsize=self.theme["font_size"], fontcolor=self.theme["font_color"], fontname=self.theme["font_name"], margin=self.theme["margin"], rankdir="LR", pad=self.theme["padding"]) dot.attr("node", shape="box", style="filled", margin="0,0", fillcolor=self.theme["fill_color"], color=self.theme["outline_color"], fontsize=self.theme["font_size"], fontcolor=self.theme["font_color"], fontname=self.theme["font_name"]) dot.attr("edge", style="solid", color=self.theme["outline_color"], fontsize=self.theme["font_size"], fontcolor=self.theme["font_color"], fontname=self.theme["font_name"]) for k, n in self.nodes.items(): label = "<tr><td cellpadding='6'>{}</td></tr>".format(n.title) if n.caption: label += "<tr><td>{}</td></tr>".format(n.caption) if n.repeat > 1: label += "<tr><td align='right' cellpadding='2'>x{}</td></tr>".format(n.repeat) label = "<<table border='0' cellborder='0' cellpadding='0'>" + label + "</table>>" dot.node(str(k), label) for a, b, label in self.edges: if isinstance(label, (list, tuple)): label = "x".join([str(l or "?") for l in label]) dot.edge(str(a), str(b), label) return dot
[ "def", "build_dot", "(", "self", ")", ":", "from", "graphviz", "import", "Digraph", "# Build GraphViz Digraph", "dot", "=", "Digraph", "(", ")", "dot", ".", "attr", "(", "\"graph\"", ",", "bgcolor", "=", "self", ".", "theme", "[", "\"background_color\"", "]"...
Generate a GraphViz Dot graph. Returns a GraphViz Digraph object.
[ "Generate", "a", "GraphViz", "Dot", "graph", "." ]
python
train
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L1369-L1376
def get_value(self, column=0, row=0): """ Returns a the value at column, row. """ x = self._widget.item(row, column) if x==None: return x else: return str(self._widget.item(row,column).text())
[ "def", "get_value", "(", "self", ",", "column", "=", "0", ",", "row", "=", "0", ")", ":", "x", "=", "self", ".", "_widget", ".", "item", "(", "row", ",", "column", ")", "if", "x", "==", "None", ":", "return", "x", "else", ":", "return", "str", ...
Returns a the value at column, row.
[ "Returns", "a", "the", "value", "at", "column", "row", "." ]
python
train
internetarchive/warc
warc/arc.py
https://github.com/internetarchive/warc/blob/8f05a000a23bbd6501217e37cfd862ffdf19da7f/warc/arc.py#L264-L295
def _write_header(self): "Writes out an ARC header" if "org" not in self.file_headers: warnings.warn("Using 'unknown' for Archiving organisation name") self.file_headers['org'] = "Unknown" if "date" not in self.file_headers: now = datetime.datetime.utcnow() warnings.warn("Using '%s' for Archiving time"%now) self.file_headers['date'] = now if "ip_address" not in self.file_headers: warnings.warn("Using '127.0.0.1' as IP address of machine that's archiving") self.file_headers['ip_address'] = "127.0.0.1" if self.version == 1: payload = "1 0 %(org)s\nURL IP-address Archive-date Content-type Archive-length"%dict(org = self.file_headers['org']) elif self.version == 2: payload = "2 0 %(org)s\nURL IP-address Archive-date Content-type Result-code Checksum Location Offset Filename Archive-length" else: raise IOError("Can't write an ARC file with version '\"%s\"'"%self.version) fname = os.path.basename(self.fileobj.name) header = ARCHeader(url = "filedesc://%s"%fname, ip_address = self.file_headers['ip_address'], date = self.file_headers['date'], content_type = "text/plain", length = len(payload), result_code = "200", checksum = "-", location = "-", offset = str(self.fileobj.tell()), filename = fname) arc_file_header_record = ARCRecord(header, payload%self.file_headers) self.write(arc_file_header_record)
[ "def", "_write_header", "(", "self", ")", ":", "if", "\"org\"", "not", "in", "self", ".", "file_headers", ":", "warnings", ".", "warn", "(", "\"Using 'unknown' for Archiving organisation name\"", ")", "self", ".", "file_headers", "[", "'org'", "]", "=", "\"Unkno...
Writes out an ARC header
[ "Writes", "out", "an", "ARC", "header" ]
python
train
calmjs/calmjs.parse
src/calmjs/parse/walkers.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/walkers.py#L137-L198
def walk( self, node, omit=( 'lexpos', 'lineno', 'colno', 'rowno'), indent=0, depth=-1, pos=False, _level=0): """ Accepts the standard node argument, along with an optional omit flag - it should be an iterable that lists out all attributes that should be omitted from the repr output. """ if not depth: return '<%s ...>' % node.__class__.__name__ attrs = [] children = node.children() ids = {id(child) for child in children} indentation = ' ' * (indent * (_level + 1)) header = '\n' + indentation if indent else '' joiner = ',\n' + indentation if indent else ', ' tailer = '\n' + ' ' * (indent * _level) if indent else '' for k, v in vars(node).items(): if k.startswith('_'): continue if id(v) in ids: ids.remove(id(v)) if isinstance(v, Node): attrs.append((k, self.walk( v, omit, indent, depth - 1, pos, _level))) elif isinstance(v, list): items = [] for i in v: if id(i) in ids: ids.remove(id(i)) items.append(self.walk( i, omit, indent, depth - 1, pos, _level + 1)) attrs.append( (k, '[' + header + joiner.join(items) + tailer + ']')) else: attrs.append((k, repr_compat(v))) if ids: # for unnamed child nodes. attrs.append(('?children', '[' + header + joiner.join( self.walk(child, omit, indent, depth - 1, pos, _level + 1) for child in children if id(child) in ids) + tailer + ']')) position = ('@%s:%s ' % ( '?' if node.lineno is None else node.lineno, '?' if node.colno is None else node.colno, ) if pos else '') omit_keys = () if not omit else set(omit) return '<%s %s%s>' % (node.__class__.__name__, position, ', '.join( '%s=%s' % (k, v) for k, v in sorted(attrs) if k not in omit_keys ))
[ "def", "walk", "(", "self", ",", "node", ",", "omit", "=", "(", "'lexpos'", ",", "'lineno'", ",", "'colno'", ",", "'rowno'", ")", ",", "indent", "=", "0", ",", "depth", "=", "-", "1", ",", "pos", "=", "False", ",", "_level", "=", "0", ")", ":",...
Accepts the standard node argument, along with an optional omit flag - it should be an iterable that lists out all attributes that should be omitted from the repr output.
[ "Accepts", "the", "standard", "node", "argument", "along", "with", "an", "optional", "omit", "flag", "-", "it", "should", "be", "an", "iterable", "that", "lists", "out", "all", "attributes", "that", "should", "be", "omitted", "from", "the", "repr", "output",...
python
train
awslabs/aws-sam-cli
samcli/local/docker/lambda_image.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/lambda_image.py#L118-L142
def _generate_docker_image_version(layers, runtime): """ Generate the Docker TAG that will be used to create the image Parameters ---------- layers list(samcli.commands.local.lib.provider.Layer) List of the layers runtime str Runtime of the image to create Returns ------- str String representing the TAG to be attached to the image """ # Docker has a concept of a TAG on an image. This is plus the REPOSITORY is a way to determine # a version of the image. We will produced a TAG for a combination of the runtime with the layers # specified in the template. This will allow reuse of the runtime and layers across different # functions that are defined. If two functions use the same runtime with the same layers (in the # same order), SAM CLI will only produce one image and use this image across both functions for invoke. return runtime + '-' + hashlib.sha256( "-".join([layer.name for layer in layers]).encode('utf-8')).hexdigest()[0:25]
[ "def", "_generate_docker_image_version", "(", "layers", ",", "runtime", ")", ":", "# Docker has a concept of a TAG on an image. This is plus the REPOSITORY is a way to determine", "# a version of the image. We will produced a TAG for a combination of the runtime with the layers", "# specified in...
Generate the Docker TAG that will be used to create the image Parameters ---------- layers list(samcli.commands.local.lib.provider.Layer) List of the layers runtime str Runtime of the image to create Returns ------- str String representing the TAG to be attached to the image
[ "Generate", "the", "Docker", "TAG", "that", "will", "be", "used", "to", "create", "the", "image" ]
python
train
INM-6/hybridLFPy
examples/example_microcircuit.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit.py#L86-L159
def merge_gdf(model_params, raw_label='spikes_', file_type='gdf', fileprefix='spikes'): ''' NEST produces one file per virtual process containing recorder output. This function gathers and combines them into one single file per network population. Parameters ---------- model_params : object network parameters object Returns ------- None ''' def get_raw_gids(model_params): ''' Reads text file containing gids of neuron populations as created within the NEST simulation. These gids are not continuous as in the simulation devices get created in between. Parameters ---------- model_params : object network parameters object Returns ------- gids : list list of neuron ids and value (spike time, voltage etc.) ''' gidfile = open(os.path.join(model_params.raw_nest_output_path, model_params.GID_filename),'r') gids = [] for l in gidfile : a = l.split() gids.append([int(a[0]),int(a[1])]) return gids #some preprocessing raw_gids = get_raw_gids(model_params) pop_sizes = [raw_gids[i][1]-raw_gids[i][0]+1 for i in np.arange(model_params.Npops)] raw_first_gids = [raw_gids[i][0] for i in np.arange(model_params.Npops)] converted_first_gids = [int(1 + np.sum(pop_sizes[:i])) for i in np.arange(model_params.Npops)] for pop_idx in np.arange(model_params.Npops): if pop_idx % SIZE == RANK: files = glob(os.path.join(model_params.raw_nest_output_path, raw_label + str(pop_idx) + '*.' + file_type)) gdf = [] # init for f in files: new_gdf = helpers.read_gdf(f) for line in new_gdf: line[0] = line[0] - raw_first_gids[pop_idx] + \ converted_first_gids[pop_idx] gdf.append(line) print 'writing: %s' % os.path.join(model_params.spike_output_path, fileprefix + '_%s.gdf' % model_params.X[pop_idx]) helpers.write_gdf(gdf, os.path.join(model_params.spike_output_path, fileprefix + '_%s.gdf' % model_params.X[pop_idx])) COMM.Barrier() return
[ "def", "merge_gdf", "(", "model_params", ",", "raw_label", "=", "'spikes_'", ",", "file_type", "=", "'gdf'", ",", "fileprefix", "=", "'spikes'", ")", ":", "def", "get_raw_gids", "(", "model_params", ")", ":", "'''\n Reads text file containing gids of neuron pop...
NEST produces one file per virtual process containing recorder output. This function gathers and combines them into one single file per network population. Parameters ---------- model_params : object network parameters object Returns ------- None
[ "NEST", "produces", "one", "file", "per", "virtual", "process", "containing", "recorder", "output", ".", "This", "function", "gathers", "and", "combines", "them", "into", "one", "single", "file", "per", "network", "population", ".", "Parameters", "----------", "...
python
train
ifduyue/urlfetch
urlfetch.py
https://github.com/ifduyue/urlfetch/blob/e0ea4673367c157eb832ba4ba2635306c81a61be/urlfetch.py#L957-L976
def choose_boundary(): """Generate a multipart boundry. :returns: A boundary string """ global BOUNDARY_PREFIX if BOUNDARY_PREFIX is None: BOUNDARY_PREFIX = "urlfetch" try: uid = repr(os.getuid()) BOUNDARY_PREFIX += "." + uid except AttributeError: pass try: pid = repr(os.getpid()) BOUNDARY_PREFIX += "." + pid except AttributeError: pass return "%s.%s" % (BOUNDARY_PREFIX, uuid.uuid4().hex)
[ "def", "choose_boundary", "(", ")", ":", "global", "BOUNDARY_PREFIX", "if", "BOUNDARY_PREFIX", "is", "None", ":", "BOUNDARY_PREFIX", "=", "\"urlfetch\"", "try", ":", "uid", "=", "repr", "(", "os", ".", "getuid", "(", ")", ")", "BOUNDARY_PREFIX", "+=", "\".\"...
Generate a multipart boundry. :returns: A boundary string
[ "Generate", "a", "multipart", "boundry", "." ]
python
train
amadeus4dev/amadeus-python
amadeus/mixins/http.py
https://github.com/amadeus4dev/amadeus-python/blob/afb93667d2cd486ddc7f4a7f29f222f04453a44a/amadeus/mixins/http.py#L68-L93
def request(self, verb, path, **params): ''' A helper function for making generic POST requests calls. It is used by every namespaced API method. It can be used to make any generic API call that is automatically authenticated using your API credentials: .. code-block:: python amadeus.request('GET', '/foo/bar', airline='1X') :param verb: the HTTP verb to use :paramtype verb: str :param path: path the full path for the API call :paramtype path: str :param params: (optional) params to pass to the API :paramtype params: dict :rtype: amadeus.Response :raises amadeus.ResponseError: when the request fails ''' return self._unauthenticated_request( verb, path, params, self.__access_token()._bearer_token() )
[ "def", "request", "(", "self", ",", "verb", ",", "path", ",", "*", "*", "params", ")", ":", "return", "self", ".", "_unauthenticated_request", "(", "verb", ",", "path", ",", "params", ",", "self", ".", "__access_token", "(", ")", ".", "_bearer_token", ...
A helper function for making generic POST requests calls. It is used by every namespaced API method. It can be used to make any generic API call that is automatically authenticated using your API credentials: .. code-block:: python amadeus.request('GET', '/foo/bar', airline='1X') :param verb: the HTTP verb to use :paramtype verb: str :param path: path the full path for the API call :paramtype path: str :param params: (optional) params to pass to the API :paramtype params: dict :rtype: amadeus.Response :raises amadeus.ResponseError: when the request fails
[ "A", "helper", "function", "for", "making", "generic", "POST", "requests", "calls", ".", "It", "is", "used", "by", "every", "namespaced", "API", "method", ".", "It", "can", "be", "used", "to", "make", "any", "generic", "API", "call", "that", "is", "autom...
python
train
razor-x/dichalcogenides
dichalcogenides/dichalcogenide/energy.py
https://github.com/razor-x/dichalcogenides/blob/0fa1995a3a328b679c9926f73239d0ecdc6e5d3d/dichalcogenides/dichalcogenide/energy.py#L43-L68
def k(self, e, n, τ, σ): """Inverse energy-momentum relation. :param e: Energy :math:`E`. :type e: float :param n: Band index :math:`n = ±1`. :type n: int :param τ: Valley index :math:`τ = ±1`. :type τ: int :param σ: Spin index :math:`σ = ±1`. :type σ: int :return: :math:`k(E)` :rtype: float """ d = self._dichalcogenide at, Δ, λ = d.at, d.Δ, d.λ sqrt = numpy.sqrt x = 2 * e * Δ**-1 y = (λ * Δ**-1) * (1 - x) return (at**-1) * (Δ / 2) * sqrt(x**2 + 2 * τ * σ * y - 1)
[ "def", "k", "(", "self", ",", "e", ",", "n", ",", "τ,", " ", "):", "", "", "d", "=", "self", ".", "_dichalcogenide", "at", ",", "Δ,", " ", " =", "d", "a", "t", ", ", "d", "Δ", ",", " d", ".", "", "", "", "sqrt", "=", "numpy", ".", "sqrt...
Inverse energy-momentum relation. :param e: Energy :math:`E`. :type e: float :param n: Band index :math:`n = ±1`. :type n: int :param τ: Valley index :math:`τ = ±1`. :type τ: int :param σ: Spin index :math:`σ = ±1`. :type σ: int :return: :math:`k(E)` :rtype: float
[ "Inverse", "energy", "-", "momentum", "relation", "." ]
python
train
peopledoc/django-agnocomplete
agnocomplete/core.py
https://github.com/peopledoc/django-agnocomplete/blob/9bf21db2f2036ba5059b843acd32902a09192053/agnocomplete/core.py#L416-L429
def item(self, current_item): """ Return the current item. @param current_item: Current item @type param: django.models @return: Value and label of the current item @rtype : dict """ return { 'value': text(getattr(current_item, self.get_field_name())), 'label': self.label(current_item) }
[ "def", "item", "(", "self", ",", "current_item", ")", ":", "return", "{", "'value'", ":", "text", "(", "getattr", "(", "current_item", ",", "self", ".", "get_field_name", "(", ")", ")", ")", ",", "'label'", ":", "self", ".", "label", "(", "current_item...
Return the current item. @param current_item: Current item @type param: django.models @return: Value and label of the current item @rtype : dict
[ "Return", "the", "current", "item", "." ]
python
train
raiden-network/raiden
raiden/network/rpc/client.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/rpc/client.py#L830-L877
def poll( self, transaction_hash: bytes, ): """ Wait until the `transaction_hash` is applied or rejected. Args: transaction_hash: Transaction hash that we are waiting for. """ if len(transaction_hash) != 32: raise ValueError( 'transaction_hash must be a 32 byte hash', ) transaction_hash = encode_hex(transaction_hash) # used to check if the transaction was removed, this could happen # if gas price is too low: # # > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18 # > Shannon). All sequential txs from this address(7d0eae79) # > will be ignored # last_result = None while True: # Could return None for a short period of time, until the # transaction is added to the pool transaction = self.web3.eth.getTransaction(transaction_hash) # if the transaction was added to the pool and then removed if transaction is None and last_result is not None: raise Exception('invalid transaction, check gas price') # the transaction was added to the pool and mined if transaction and transaction['blockNumber'] is not None: last_result = transaction # this will wait for both APPLIED and REVERTED transactions transaction_block = transaction['blockNumber'] confirmation_block = transaction_block + self.default_block_num_confirmations block_number = self.block_number() if block_number >= confirmation_block: return transaction gevent.sleep(1.0)
[ "def", "poll", "(", "self", ",", "transaction_hash", ":", "bytes", ",", ")", ":", "if", "len", "(", "transaction_hash", ")", "!=", "32", ":", "raise", "ValueError", "(", "'transaction_hash must be a 32 byte hash'", ",", ")", "transaction_hash", "=", "encode_hex"...
Wait until the `transaction_hash` is applied or rejected. Args: transaction_hash: Transaction hash that we are waiting for.
[ "Wait", "until", "the", "transaction_hash", "is", "applied", "or", "rejected", "." ]
python
train
bwhite/hadoopy
hadoopy/_freeze.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_freeze.py#L191-L228
def freeze_to_tar(script_path, freeze_fn, extra_files=None): """Freezes a script to a .tar or .tar.gz file The script contains all of the files at the root of the tar Args: script_path: Path to python script to be frozen. freeze_fn: Tar filename (must end in .tar or .tar.gz) extra_files: List of paths to add to the tar (default is None) Returns: List of freeze commands ran Raises: subprocess.CalledProcessError: freeze error. OSError: freeze not found. NameError: Tar must end in .tar or .tar.gz """ if not extra_files: extra_files = [] freeze_dir = tempfile.mkdtemp() try: cmds = freeze(script_path, target_dir=freeze_dir) if freeze_fn.endswith('.tar.gz'): mode = 'w|gz' elif freeze_fn.endswith('.tar'): mode = 'w' else: raise NameError('[%s] must end in .tar or .tar.gz' % freeze_fn) fp = tarfile.open(freeze_fn, mode) proj_name = os.path.basename(script_path) proj_name = proj_name[:proj_name.rfind('.')] # Remove extension for x in glob.glob('%s/dist/%s/*' % (freeze_dir, proj_name)) + extra_files: fp.add(x, arcname=os.path.basename(x)) fp.close() finally: shutil.rmtree(freeze_dir) return cmds
[ "def", "freeze_to_tar", "(", "script_path", ",", "freeze_fn", ",", "extra_files", "=", "None", ")", ":", "if", "not", "extra_files", ":", "extra_files", "=", "[", "]", "freeze_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "cmds", "=", "fre...
Freezes a script to a .tar or .tar.gz file The script contains all of the files at the root of the tar Args: script_path: Path to python script to be frozen. freeze_fn: Tar filename (must end in .tar or .tar.gz) extra_files: List of paths to add to the tar (default is None) Returns: List of freeze commands ran Raises: subprocess.CalledProcessError: freeze error. OSError: freeze not found. NameError: Tar must end in .tar or .tar.gz
[ "Freezes", "a", "script", "to", "a", ".", "tar", "or", ".", "tar", ".", "gz", "file" ]
python
train
jedie/DragonPy
dragonpy/Dragon32/dragon_charmap.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/Dragon32/dragon_charmap.py#L218-L253
def create_wiki_page(): """ for http://archive.worldofdragon.org/index.php?title=CharMap """ print ( '{| class="wikitable"' ' style="font-family: monospace;' ' background-color:#ffffcc;"' ' cellpadding="10"' ) print("|-") print("! POKE") print("value") print("! ") print("! unicode") print("codepoint") print("! type") print("|-") for no, data in enumerate(DRAGON_CHARS_MAP): item, item_type = data codepoint = ord(item) print("|%i" % no) foreground, background = get_rgb_color(item_type) foreground = "#%02x%02x%02x" % foreground background = "#%02x%02x%02x" % background style = "color: #%s;" print('| style="color:%s; background-color:%s;" | &#x%x;' % ( foreground, background, codepoint )) print("|%i" % codepoint) print("|%s" % item_type) print("|-") print("|}")
[ "def", "create_wiki_page", "(", ")", ":", "print", "(", "'{| class=\"wikitable\"'", "' style=\"font-family: monospace;'", "' background-color:#ffffcc;\"'", "' cellpadding=\"10\"'", ")", "print", "(", "\"|-\"", ")", "print", "(", "\"! POKE\"", ")", "print", "(", "\"value\"...
for http://archive.worldofdragon.org/index.php?title=CharMap
[ "for", "http", ":", "//", "archive", ".", "worldofdragon", ".", "org", "/", "index", ".", "php?title", "=", "CharMap" ]
python
train
bitcraze/crazyflie-lib-python
cflib/crazyflie/log.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/log.py#L325-L330
def get_id_from_cstring(name): """Return variable type id given the C-storage name""" for key in list(LogTocElement.types.keys()): if (LogTocElement.types[key][0] == name): return key raise KeyError('Type [%s] not found in LogTocElement.types!' % name)
[ "def", "get_id_from_cstring", "(", "name", ")", ":", "for", "key", "in", "list", "(", "LogTocElement", ".", "types", ".", "keys", "(", ")", ")", ":", "if", "(", "LogTocElement", ".", "types", "[", "key", "]", "[", "0", "]", "==", "name", ")", ":", ...
Return variable type id given the C-storage name
[ "Return", "variable", "type", "id", "given", "the", "C", "-", "storage", "name" ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/Crtsh/crtshquery.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/Crtsh/crtshquery.py#L10-L47
def search(self, domain, wildcard=True): """ Search crt.sh for the given domain. domain -- Domain to search for wildcard -- Whether or not to prepend a wildcard to the domain (default: True) Return a list of a certificate dict: { "issuer_ca_id": 16418, "issuer_name": "C=US, O=Let's Encrypt, CN=Let's Encrypt Authority X3", "name_value": "hatch.uber.com", "min_cert_id": 325717795, "min_entry_timestamp": "2018-02-08T16:47:39.089", "not_before": "2018-02-08T15:47:39" } XML notation would also include the base64 cert: https://crt.sh/atom?q={} """ base_url = "https://crt.sh/?q={}&output=json" if wildcard: domain = "%25.{}".format(domain) url = base_url.format(domain) ua = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1' req = requests.get(url, headers={'User-Agent': ua}) if req.ok: try: content = req.content.decode('utf-8') data = json.loads(content.replace('}{', '},{')) return data except Exception: self.error("Error retrieving information.") return None
[ "def", "search", "(", "self", ",", "domain", ",", "wildcard", "=", "True", ")", ":", "base_url", "=", "\"https://crt.sh/?q={}&output=json\"", "if", "wildcard", ":", "domain", "=", "\"%25.{}\"", ".", "format", "(", "domain", ")", "url", "=", "base_url", ".", ...
Search crt.sh for the given domain. domain -- Domain to search for wildcard -- Whether or not to prepend a wildcard to the domain (default: True) Return a list of a certificate dict: { "issuer_ca_id": 16418, "issuer_name": "C=US, O=Let's Encrypt, CN=Let's Encrypt Authority X3", "name_value": "hatch.uber.com", "min_cert_id": 325717795, "min_entry_timestamp": "2018-02-08T16:47:39.089", "not_before": "2018-02-08T15:47:39" } XML notation would also include the base64 cert: https://crt.sh/atom?q={}
[ "Search", "crt", ".", "sh", "for", "the", "given", "domain", "." ]
python
train
wakatime/wakatime
wakatime/packages/pygments/formatters/svg.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/svg.py#L97-L136
def format_unencoded(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. For our implementation we put all lines in their own 'line group'. """ x = self.xoffset y = self.yoffset if not self.nowrap: if self.encoding: outfile.write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding) else: outfile.write('<?xml version="1.0"?>\n') outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" ' '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/' 'svg10.dtd">\n') outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n') outfile.write('<g font-family="%s" font-size="%s">\n' % (self.fontfamily, self.fontsize)) outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y)) for ttype, value in tokensource: style = self._get_style(ttype) tspan = style and '<tspan' + style + '>' or '' tspanend = tspan and '</tspan>' or '' value = escape_html(value) if self.spacehack: value = value.expandtabs().replace(' ', '&#160;') parts = value.split('\n') for part in parts[:-1]: outfile.write(tspan + part + tspanend) y += self.ystep outfile.write('</text>\n<text x="%s" y="%s" ' 'xml:space="preserve">' % (x, y)) outfile.write(tspan + parts[-1] + tspanend) outfile.write('</text>') if not self.nowrap: outfile.write('</g></svg>\n')
[ "def", "format_unencoded", "(", "self", ",", "tokensource", ",", "outfile", ")", ":", "x", "=", "self", ".", "xoffset", "y", "=", "self", ".", "yoffset", "if", "not", "self", ".", "nowrap", ":", "if", "self", ".", "encoding", ":", "outfile", ".", "wr...
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. For our implementation we put all lines in their own 'line group'.
[ "Format", "tokensource", "an", "iterable", "of", "(", "tokentype", "tokenstring", ")", "tuples", "and", "write", "it", "into", "outfile", "." ]
python
train
pypa/pipenv
pipenv/vendor/jinja2/sandbox.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/sandbox.py#L323-L330
def is_safe_attribute(self, obj, attr, value): """The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function. """ return not (attr.startswith('_') or is_internal_attribute(obj, attr))
[ "def", "is_safe_attribute", "(", "self", ",", "obj", ",", "attr", ",", "value", ")", ":", "return", "not", "(", "attr", ".", "startswith", "(", "'_'", ")", "or", "is_internal_attribute", "(", "obj", ",", "attr", ")", ")" ]
The sandboxed environment will call this method to check if the attribute of an object is safe to access. Per default all attributes starting with an underscore are considered private as well as the special attributes of internal python objects as returned by the :func:`is_internal_attribute` function.
[ "The", "sandboxed", "environment", "will", "call", "this", "method", "to", "check", "if", "the", "attribute", "of", "an", "object", "is", "safe", "to", "access", ".", "Per", "default", "all", "attributes", "starting", "with", "an", "underscore", "are", "cons...
python
train
deepmind/sonnet
sonnet/python/modules/pondering_rnn.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/pondering_rnn.py#L166-L211
def _build(self, x, prev_state): """Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2. """ x.get_shape().with_rank(2) self._batch_size = x.get_shape().as_list()[0] self._dtype = x.dtype x_zeros = tf.concat( [x, tf.zeros( shape=(self._batch_size, 1), dtype=self._dtype)], 1) x_ones = tf.concat( [x, tf.ones( shape=(self._batch_size, 1), dtype=self._dtype)], 1) # Weights for the halting signal halting_linear = basic.Linear(name="halting_linear", output_size=1) body = functools.partial( self._body, halting_linear=halting_linear, x_ones=x_ones) cumul_halting_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) core_output_size = [x.value for x in self._core.output_size] out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size), dtype=self._dtype) cumul_state_init = _nested_zeros_like(prev_state) remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) (unused_final_x, final_out, unused_final_state, final_cumul_state, unused_final_halting, final_iteration, final_remainder) = tf.while_loop( self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init, cumul_halting_init, iteration_init, remainder_init]) act_output = basic.Linear( name="act_output_linear", output_size=self._output_size)(final_out) return (act_output, (final_iteration, final_remainder)), final_cumul_state
[ "def", "_build", "(", "self", ",", "x", ",", "prev_state", ")", ":", "x", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "self", ".", "_batch_size", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "s...
Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2.
[ "Connects", "the", "core", "to", "the", "graph", "." ]
python
train
davgeo/clear
clear/database.py
https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/database.py#L257-L304
def _AddToSingleColumnTable(self, tableName, columnHeading, newValue): """ Add an entry to a table containing a single column. Checks existing table entries to avoid duplicate entries if the given value already exists in the table. Parameters ---------- tableName : string Name of table to add entry to. columnHeading : string Name of column heading. newValue : string New value to add to table. """ match = None currentTable = self._GetFromSingleColumnTable(tableName) if currentTable is not None: for currentValue in currentTable: if currentValue == newValue: match = True if match is None: goodlogging.Log.Info("DB", "Adding {0} to {1} table".format(newValue, tableName), verbosity=self.logVerbosity) self._ActionDatabase("INSERT INTO {0} VALUES (?)".format(tableName), (newValue, )) else: goodlogging.Log.Info("DB", "{0} already exists in {1} table".format(newValue, tableName), verbosity=self.logVerbosity) ############################################################################ # _GetFromSingleColumnTable ############################################################################ """ Get all entries from a table containing a single column. Parameters ---------- tableName : string Name of table to add entry to. Returns ---------- list or None If either no table or no rows are found this returns None, otherwise a list of all table entries is returned. """
[ "def", "_AddToSingleColumnTable", "(", "self", ",", "tableName", ",", "columnHeading", ",", "newValue", ")", ":", "match", "=", "None", "currentTable", "=", "self", ".", "_GetFromSingleColumnTable", "(", "tableName", ")", "if", "currentTable", "is", "not", "None...
Add an entry to a table containing a single column. Checks existing table entries to avoid duplicate entries if the given value already exists in the table. Parameters ---------- tableName : string Name of table to add entry to. columnHeading : string Name of column heading. newValue : string New value to add to table.
[ "Add", "an", "entry", "to", "a", "table", "containing", "a", "single", "column", ".", "Checks", "existing", "table", "entries", "to", "avoid", "duplicate", "entries", "if", "the", "given", "value", "already", "exists", "in", "the", "table", "." ]
python
train
gboeing/osmnx
osmnx/save_load.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/save_load.py#L468-L522
def update_edge_keys(G): """ Update the keys of edges that share a u, v with another edge but differ in geometry. For example, two one-way streets from u to v that bow away from each other as separate streets, rather than opposite direction edges of a single street. Parameters ---------- G : networkx multidigraph Returns ------- networkx multigraph """ # identify all the edges that are duplicates based on a sorted combination # of their origin, destination, and key. that is, edge uv will match edge vu # as a duplicate, but only if they have the same key edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=False) edges['uvk'] = edges.apply(lambda row: '_'.join(sorted([str(row['u']), str(row['v'])]) + [str(row['key'])]), axis=1) edges['dupe'] = edges['uvk'].duplicated(keep=False) dupes = edges[edges['dupe']==True].dropna(subset=['geometry']) different_streets = [] groups = dupes[['geometry', 'uvk', 'u', 'v', 'key', 'dupe']].groupby('uvk') # for each set of duplicate edges for label, group in groups: # if there are more than 2 edges here, make sure to compare all if len(group['geometry']) > 2: l = group['geometry'].tolist() l.append(l[0]) geom_pairs = list(zip(l[:-1], l[1:])) # otherwise, just compare the first edge to the second edge else: geom_pairs = [(group['geometry'].iloc[0], group['geometry'].iloc[1])] # for each pair of edges to compare for geom1, geom2 in geom_pairs: # if they don't have the same geometry, flag them as different streets if not is_same_geometry(geom1, geom2): # add edge uvk, but not edge vuk, otherwise we'll iterate both their keys # and they'll still duplicate each other at the end of this process different_streets.append((group['u'].iloc[0], group['v'].iloc[0], group['key'].iloc[0])) # for each unique different street, iterate its key + 1 so it's unique for u, v, k in set(different_streets): # filter out key if it appears in data dict as we'll pass it explicitly attributes = {k:v for k, v in G[u][v][k].items() if k != 'key'} 
G.add_edge(u, v, key=k+1, **attributes) G.remove_edge(u, v, key=k) return G
[ "def", "update_edge_keys", "(", "G", ")", ":", "# identify all the edges that are duplicates based on a sorted combination", "# of their origin, destination, and key. that is, edge uv will match edge vu", "# as a duplicate, but only if they have the same key", "edges", "=", "graph_to_gdfs", ...
Update the keys of edges that share a u, v with another edge but differ in geometry. For example, two one-way streets from u to v that bow away from each other as separate streets, rather than opposite direction edges of a single street. Parameters ---------- G : networkx multidigraph Returns ------- networkx multigraph
[ "Update", "the", "keys", "of", "edges", "that", "share", "a", "u", "v", "with", "another", "edge", "but", "differ", "in", "geometry", ".", "For", "example", "two", "one", "-", "way", "streets", "from", "u", "to", "v", "that", "bow", "away", "from", "...
python
train
nikoladimitroff/Adder
adder/logic.py
https://github.com/nikoladimitroff/Adder/blob/034f301d3a850d2cbeb2b17c8973f363f90f390e/adder/logic.py#L418-L428
def is_subsumed_by(x, y): """ Returns true if y subsumes x (for example P(x) subsumes P(A) as it is more abstract) """ varsX = __split_expression(x)[1] theta = unify(x, y) if theta is problem.FAILURE: return False return all(__is_variable(theta[var]) for var in theta.keys() if var in varsX)
[ "def", "is_subsumed_by", "(", "x", ",", "y", ")", ":", "varsX", "=", "__split_expression", "(", "x", ")", "[", "1", "]", "theta", "=", "unify", "(", "x", ",", "y", ")", "if", "theta", "is", "problem", ".", "FAILURE", ":", "return", "False", "return...
Returns true if y subsumes x (for example P(x) subsumes P(A) as it is more abstract)
[ "Returns", "true", "if", "y", "subsumes", "x", "(", "for", "example", "P", "(", "x", ")", "subsumes", "P", "(", "A", ")", "as", "it", "is", "more", "abstract", ")" ]
python
train
hfaran/progressive
progressive/bar.py
https://github.com/hfaran/progressive/blob/e39c7fb17405dbe997c3417a5993b94ef16dab0a/progressive/bar.py#L144-L166
def max_width(self): """Get maximum width of progress bar :rtype: int :returns: Maximum column width of progress bar """ value, unit = float(self._width_str[:-1]), self._width_str[-1] ensure(unit in ["c", "%"], ValueError, "Width unit must be either 'c' or '%'") if unit == "c": ensure(value <= self.columns, ValueError, "Terminal only has {} columns, cannot draw " "bar of size {}.".format(self.columns, value)) retval = value else: # unit == "%" ensure(0 < value <= 100, ValueError, "value=={} does not satisfy 0 < value <= 100".format(value)) dec = value / 100 retval = dec * self.columns return floor(retval)
[ "def", "max_width", "(", "self", ")", ":", "value", ",", "unit", "=", "float", "(", "self", ".", "_width_str", "[", ":", "-", "1", "]", ")", ",", "self", ".", "_width_str", "[", "-", "1", "]", "ensure", "(", "unit", "in", "[", "\"c\"", ",", "\"...
Get maximum width of progress bar :rtype: int :returns: Maximum column width of progress bar
[ "Get", "maximum", "width", "of", "progress", "bar" ]
python
train
zabertech/python-izaber
izaber/zconfig.py
https://github.com/zabertech/python-izaber/blob/729bf9ef637e084c8ab3cc16c34cf659d3a79ee4/izaber/zconfig.py#L427-L448
def initialize(**kwargs): """ Loads the globally shared YAML configuration """ global config config_opts = kwargs.setdefault('config',{}) if isinstance(config_opts,basestring): config_opts = {'config_filename':config_opts} kwargs['config'] = config_opts if 'environment' in kwargs: config_opts['environment'] = kwargs['environment'] config.load_config(**config_opts) # Overlay the subconfig if kwargs.get('name'): subconfig = config.get(kwargs.get('name'),{}) config.overlay_add(subconfig) config.overlay_add(app_config)
[ "def", "initialize", "(", "*", "*", "kwargs", ")", ":", "global", "config", "config_opts", "=", "kwargs", ".", "setdefault", "(", "'config'", ",", "{", "}", ")", "if", "isinstance", "(", "config_opts", ",", "basestring", ")", ":", "config_opts", "=", "{"...
Loads the globally shared YAML configuration
[ "Loads", "the", "globally", "shared", "YAML", "configuration" ]
python
train
urbn/Caesium
caesium/document.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L36-L45
def publish(self): """ Iterate over the scheduler collections and apply any actions found """ try: for collection in self.settings.get("scheduler").get("collections"): yield self.publish_for_collection(collection) except Exception as ex: self.logger.error(ex)
[ "def", "publish", "(", "self", ")", ":", "try", ":", "for", "collection", "in", "self", ".", "settings", ".", "get", "(", "\"scheduler\"", ")", ".", "get", "(", "\"collections\"", ")", ":", "yield", "self", ".", "publish_for_collection", "(", "collection",...
Iterate over the scheduler collections and apply any actions found
[ "Iterate", "over", "the", "scheduler", "collections", "and", "apply", "any", "actions", "found" ]
python
train
erikvw/django-collect-offline
django_collect_offline/offline_model.py
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/offline_model.py#L71-L91
def has_offline_historical_manager_or_raise(self): """Raises an exception if model uses a history manager and historical model history_id is not a UUIDField. Note: expected to use edc_model.HistoricalRecords instead of simple_history.HistoricalRecords. """ try: model = self.instance.__class__.history.model except AttributeError: model = self.instance.__class__ field = [field for field in model._meta.fields if field.name == "history_id"] if field and not isinstance(field[0], UUIDField): raise OfflineHistoricalManagerError( f"Field 'history_id' of historical model " f"'{model._meta.app_label}.{model._meta.model_name}' " "must be an UUIDfield. " "For history = HistoricalRecords() use edc_model.HistoricalRecords instead of " "simple_history.HistoricalRecords(). " f"See '{self.instance._meta.app_label}.{self.instance._meta.model_name}'." )
[ "def", "has_offline_historical_manager_or_raise", "(", "self", ")", ":", "try", ":", "model", "=", "self", ".", "instance", ".", "__class__", ".", "history", ".", "model", "except", "AttributeError", ":", "model", "=", "self", ".", "instance", ".", "__class__"...
Raises an exception if model uses a history manager and historical model history_id is not a UUIDField. Note: expected to use edc_model.HistoricalRecords instead of simple_history.HistoricalRecords.
[ "Raises", "an", "exception", "if", "model", "uses", "a", "history", "manager", "and", "historical", "model", "history_id", "is", "not", "a", "UUIDField", "." ]
python
train
VingtCinq/python-mailchimp
mailchimp3/entities/campaignactions.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/campaignactions.py#L139-L147
def unschedule(self, campaign_id): """ Unschedule a scheduled campaign that hasn’t started sending. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` """ self.campaign_id = campaign_id return self._mc_client._post(url=self._build_path(campaign_id, 'actions/unschedule'))
[ "def", "unschedule", "(", "self", ",", "campaign_id", ")", ":", "self", ".", "campaign_id", "=", "campaign_id", "return", "self", ".", "_mc_client", ".", "_post", "(", "url", "=", "self", ".", "_build_path", "(", "campaign_id", ",", "'actions/unschedule'", "...
Unschedule a scheduled campaign that hasn’t started sending. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str`
[ "Unschedule", "a", "scheduled", "campaign", "that", "hasn’t", "started", "sending", "." ]
python
valid
paperhive/ansible-ec2-inventory
ansible_ec2_inventory/ec2inventory.py
https://github.com/paperhive/ansible-ec2-inventory/blob/6a13f9de61c089a7b13bce494adceb7507971059/ansible_ec2_inventory/ec2inventory.py#L810-L895
def add_elasticache_node(self, node, cluster, region): ''' Adds an ElastiCache node to the inventory and index, as long as it is addressable ''' # Only want available nodes unless all_elasticache_nodes is True if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': return # Select the best destination address dest = node['Endpoint']['Address'] if not dest: # Skip nodes we cannot address (e.g. private VPC subnet) return node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) # Add to index self.index[dest] = [region, node_id] # Inventory: Group by node ID (always a group of 1) if self.group_by_instance_id: self.inventory[node_id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', node_id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. 
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) # Inventory: Group by parameter group (done at cluster level) # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) # Global Tag: all ElastiCache nodes self.push(self.inventory, 'elasticache_nodes', dest) host_info = self.get_host_info_dict_from_describe_dict(node) if dest in self.inventory["_meta"]["hostvars"]: self.inventory["_meta"]["hostvars"][dest].update(host_info) else: self.inventory["_meta"]["hostvars"][dest] = host_info
[ "def", "add_elasticache_node", "(", "self", ",", "node", ",", "cluster", ",", "region", ")", ":", "# Only want available nodes unless all_elasticache_nodes is True", "if", "not", "self", ".", "all_elasticache_nodes", "and", "node", "[", "'CacheNodeStatus'", "]", "!=", ...
Adds an ElastiCache node to the inventory and index, as long as it is addressable
[ "Adds", "an", "ElastiCache", "node", "to", "the", "inventory", "and", "index", "as", "long", "as", "it", "is", "addressable" ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L5737-L5746
def libvlc_video_set_subtitle_file(p_mi, psz_subtitle): '''Set new video subtitle file. @param p_mi: the media player. @param psz_subtitle: new video subtitle file. @return: the success status (boolean). ''' f = _Cfunctions.get('libvlc_video_set_subtitle_file', None) or \ _Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,),), None, ctypes.c_int, MediaPlayer, ctypes.c_char_p) return f(p_mi, psz_subtitle)
[ "def", "libvlc_video_set_subtitle_file", "(", "p_mi", ",", "psz_subtitle", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_set_subtitle_file'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_set_subtitle_file'", ",", "(", "(", "1", ",...
Set new video subtitle file. @param p_mi: the media player. @param psz_subtitle: new video subtitle file. @return: the success status (boolean).
[ "Set", "new", "video", "subtitle", "file", "." ]
python
train
saltstack/salt
salt/modules/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L2195-L2317
def init(cwd, bare=False, template=None, separate_git_dir=None, shared=None, opts='', git_opts='', user=None, password=None, ignore_retcode=False, output_encoding=None): ''' Interface to `git-init(1)`_ cwd The path to the directory to be initialized bare : False If ``True``, init a bare repository .. versionadded:: 2015.8.0 template Set this argument to specify an alternate `template directory`_ .. versionadded:: 2015.8.0 separate_git_dir Set this argument to specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.8.0 opts Any additional options to add to the command line, in a single string .. note:: On the Salt CLI, if the opts are preceded with a dash, it is necessary to precede them with ``opts=`` (as in the CLI examples below) to avoid causing errors with Salt's own argument parsing. git_opts Any additional options to add to git command itself (not the ``init`` subcommand), in a single string. This is useful for passing ``-c`` to run git with temporary changes to the git configuration. .. versionadded:: 2017.7.0 .. note:: This is only supported in git 1.7.2 and newer. user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. 
_`git-init(1)`: http://git-scm.com/docs/git-init .. _`template directory`: http://git-scm.com/docs/git-init#_template_directory CLI Examples: .. code-block:: bash salt myminion git.init /path/to/repo # Init a bare repo (before 2015.8.0) salt myminion git.init /path/to/bare/repo.git opts='--bare' # Init a bare repo (2015.8.0 and later) salt myminion git.init /path/to/bare/repo.git bare=True ''' cwd = _expand_path(cwd, user) command = ['git'] + _format_git_opts(git_opts) command.append('init') if bare: command.append('--bare') if template is not None: command.append('--template={0}'.format(template)) if separate_git_dir is not None: command.append('--separate-git-dir={0}'.format(separate_git_dir)) if shared is not None: if isinstance(shared, six.integer_types) \ and not isinstance(shared, bool): shared = '0' + six.text_type(shared) elif not isinstance(shared, six.string_types): # Using lower here because booleans would be capitalized when # converted to a string. shared = six.text_type(shared).lower() command.append('--shared={0}'.format(shared)) command.extend(_format_opts(opts)) command.append(cwd) return _git_run(command, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding)['stdout']
[ "def", "init", "(", "cwd", ",", "bare", "=", "False", ",", "template", "=", "None", ",", "separate_git_dir", "=", "None", ",", "shared", "=", "None", ",", "opts", "=", "''", ",", "git_opts", "=", "''", ",", "user", "=", "None", ",", "password", "="...
Interface to `git-init(1)`_ cwd The path to the directory to be initialized bare : False If ``True``, init a bare repository .. versionadded:: 2015.8.0 template Set this argument to specify an alternate `template directory`_ .. versionadded:: 2015.8.0 separate_git_dir Set this argument to specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.8.0 opts Any additional options to add to the command line, in a single string .. note:: On the Salt CLI, if the opts are preceded with a dash, it is necessary to precede them with ``opts=`` (as in the CLI examples below) to avoid causing errors with Salt's own argument parsing. git_opts Any additional options to add to git command itself (not the ``init`` subcommand), in a single string. This is useful for passing ``-c`` to run git with temporary changes to the git configuration. .. versionadded:: 2017.7.0 .. note:: This is only supported in git 1.7.2 and newer. user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. _`template directory`: http://git-scm.com/docs/git-init#_template_directory CLI Examples: .. 
code-block:: bash salt myminion git.init /path/to/repo # Init a bare repo (before 2015.8.0) salt myminion git.init /path/to/bare/repo.git opts='--bare' # Init a bare repo (2015.8.0 and later) salt myminion git.init /path/to/bare/repo.git bare=True
[ "Interface", "to", "git", "-", "init", "(", "1", ")", "_" ]
python
train
gplepage/gvar
src/gvar/linalg.py
https://github.com/gplepage/gvar/blob/d6671697319eb6280de3793c9a1c2b616c6f2ae0/src/gvar/linalg.py#L151-L224
def svd(a, compute_uv=True, rcond=None): """ svd decomposition of matrix ``a`` containing |GVar|\s. Args: a: Two-dimensional matrix/array of numbers and/or :class:`gvar.GVar`\s. compute_uv (bool): It ``True`` (default), returns tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT`` where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1`` and ``vT @ vT.T = 1``, and ``s`` is the list of singular values. Only ``s`` is returned if ``compute_uv=False``. rcond (float): Singular values whose difference is smaller than ``rcond`` times their sum are assumed to be degenerate for calculating variances for ``u`` and ``vT``. Default (``rcond=None``) is ``max(M,N)`` times machine precision. Returns: Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT`` where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1`` and ``vT @ vT.T = 1``, and ``s`` is the list of singular values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)`` and ``vT.shape=(K,M)`` where ``K`` is the number of nonzero singular values (``len(s)==K``). If ``compute_uv==False`` only ``s`` is returned. Raises: ValueError: If matrix is not two-dimensional. 
""" a = numpy.asarray(a) if a.dtype != object: return numpy.linalg.svd(a, compute_uv=compute_uv) amean = gvar.mean(a) if amean.ndim != 2: raise ValueError( 'matrix must have dimension 2: actual shape = ' + str(a.shape) ) if rcond is None: rcond = numpy.finfo(float).eps * max(a.shape) da = a - amean u0,s0,v0T = numpy.linalg.svd(amean, compute_uv=True, full_matrices=True) k = min(a.shape) s = s0 + [ u0[:, i].dot(da.dot(v0T[i, :])) for i in range(k) ] if compute_uv: u = numpy.array(u0, dtype=object) vT = numpy.array(v0T, dtype=object) # u first daaT = da.dot(a.T) + a.dot(da.T) s02 = numpy.zeros(daaT.shape[0], float) s02[:len(s0)] = s0 ** 2 for j in range(s02.shape[0]): for i in range(k): if i == j: continue ds2 = s02[i] - s02[j] if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0: continue u[:, i] += u0[:, j] * u0[:, j].dot(daaT.dot(u0[:, i])) / ds2 # v next daTa = da.T.dot(a) + a.T.dot(da) s02 = numpy.zeros(daTa.shape[0], float) s02[:len(s0)] = s0 ** 2 for j in range(s02.shape[0]): for i in range(k): if i == j: continue ds2 = s02[i] - s02[j] if abs(ds2) < rcond * abs(s02[i] + s02[j]) or ds2 == 0: continue vT[i, :] += v0T[j, :] * v0T[j, :].dot(daTa.dot(v0T[i, :])) / ds2 return u[:,:k], s, vT[:k, :] else: return s
[ "def", "svd", "(", "a", ",", "compute_uv", "=", "True", ",", "rcond", "=", "None", ")", ":", "a", "=", "numpy", ".", "asarray", "(", "a", ")", "if", "a", ".", "dtype", "!=", "object", ":", "return", "numpy", ".", "linalg", ".", "svd", "(", "a",...
svd decomposition of matrix ``a`` containing |GVar|\s. Args: a: Two-dimensional matrix/array of numbers and/or :class:`gvar.GVar`\s. compute_uv (bool): It ``True`` (default), returns tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT`` where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1`` and ``vT @ vT.T = 1``, and ``s`` is the list of singular values. Only ``s`` is returned if ``compute_uv=False``. rcond (float): Singular values whose difference is smaller than ``rcond`` times their sum are assumed to be degenerate for calculating variances for ``u`` and ``vT``. Default (``rcond=None``) is ``max(M,N)`` times machine precision. Returns: Tuple ``(u,s,vT)`` where matrix ``a = u @ np.diag(s) @ vT`` where matrices ``u`` and ``vT`` satisfy ``u.T @ u = 1`` and ``vT @ vT.T = 1``, and ``s`` is the list of singular values. If ``a.shape=(N,M)``, then ``u.shape=(N,K)`` and ``vT.shape=(K,M)`` where ``K`` is the number of nonzero singular values (``len(s)==K``). If ``compute_uv==False`` only ``s`` is returned. Raises: ValueError: If matrix is not two-dimensional.
[ "svd", "decomposition", "of", "matrix", "a", "containing", "|GVar|", "\\", "s", "." ]
python
train
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L1746-L1766
def verify_certificate(self): """ Verify a certificate in a context. .. versionadded:: 0.15 :raises X509StoreContextError: If an error occurred when validating a certificate in the context. Sets ``certificate`` attribute to indicate which certificate caused the error. """ # Always re-initialize the store context in case # :meth:`verify_certificate` is called multiple times. # # :meth:`_init` is called in :meth:`__init__` so _cleanup is called # before _init to ensure memory is not leaked. self._cleanup() self._init() ret = _lib.X509_verify_cert(self._store_ctx) self._cleanup() if ret <= 0: raise self._exception_from_context()
[ "def", "verify_certificate", "(", "self", ")", ":", "# Always re-initialize the store context in case", "# :meth:`verify_certificate` is called multiple times.", "#", "# :meth:`_init` is called in :meth:`__init__` so _cleanup is called", "# before _init to ensure memory is not leaked.", "self"...
Verify a certificate in a context. .. versionadded:: 0.15 :raises X509StoreContextError: If an error occurred when validating a certificate in the context. Sets ``certificate`` attribute to indicate which certificate caused the error.
[ "Verify", "a", "certificate", "in", "a", "context", "." ]
python
test
genialis/resolwe
resolwe/permissions/utils.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/utils.py#L79-L87
def fetch_user(query): """Get user by ``pk`` or ``username``. Raise error if it doesn't exist.""" user_filter = {'pk': query} if query.isdigit() else {'username': query} user_model = get_user_model() try: return user_model.objects.get(**user_filter) except user_model.DoesNotExist: raise exceptions.ParseError("Unknown user: {}".format(query))
[ "def", "fetch_user", "(", "query", ")", ":", "user_filter", "=", "{", "'pk'", ":", "query", "}", "if", "query", ".", "isdigit", "(", ")", "else", "{", "'username'", ":", "query", "}", "user_model", "=", "get_user_model", "(", ")", "try", ":", "return",...
Get user by ``pk`` or ``username``. Raise error if it doesn't exist.
[ "Get", "user", "by", "pk", "or", "username", ".", "Raise", "error", "if", "it", "doesn", "t", "exist", "." ]
python
train
RJT1990/pyflux
pyflux/ensembles/mixture_of_experts.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ensembles/mixture_of_experts.py#L248-L281
def run(self, h, recalculate=False): """ Run the aggregating algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on recalculate: boolean Whether to recalculate the predictions or not Returns ---------- - np.ndarray of normalized weights, np.ndarray of losses for each model """ data = self.data[-h:] predictions = self._model_predict_is(h, recalculate=recalculate).values weights = np.zeros((h, len(self.model_list))) normalized_weights = np.zeros((h, len(self.model_list))) ensemble_prediction = np.zeros(h) for t in range(h): if t == 0: weights[t,:] = 100000 ensemble_prediction[t] = np.dot(weights[t,:]/weights[t,:].sum(), predictions[t,:]) weights[t,:] = weights[t,:]*np.exp(-self.learning_rate*self.loss_type(data[t], predictions[t,:])) normalized_weights[t,:] = weights[t,:]/weights[t,:].sum() else: ensemble_prediction[t] = np.dot(weights[t-1,:]/weights[t-1,:].sum(), predictions[t,:]) weights[t,:] = weights[t-1,:]*np.exp(-self.learning_rate*self.loss_type(data[t], predictions[t,:])) normalized_weights[t,:] = weights[t,:]/weights[t,:].sum() return normalized_weights, self._construct_losses(data, predictions, ensemble_prediction), ensemble_prediction
[ "def", "run", "(", "self", ",", "h", ",", "recalculate", "=", "False", ")", ":", "data", "=", "self", ".", "data", "[", "-", "h", ":", "]", "predictions", "=", "self", ".", "_model_predict_is", "(", "h", ",", "recalculate", "=", "recalculate", ")", ...
Run the aggregating algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on recalculate: boolean Whether to recalculate the predictions or not Returns ---------- - np.ndarray of normalized weights, np.ndarray of losses for each model
[ "Run", "the", "aggregating", "algorithm", "Parameters", "----------", "h", ":", "int", "How", "many", "steps", "to", "run", "the", "aggregating", "algorithm", "on" ]
python
train
pkgw/pwkit
pwkit/numutil.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/numutil.py#L147-L186
def data_frame_to_astropy_table (dataframe): """This is a backport of the Astropy method :meth:`astropy.table.table.Table.from_pandas`. It converts a Pandas :class:`pandas.DataFrame` object to an Astropy :class:`astropy.table.Table`. """ from astropy.utils import OrderedDict from astropy.table import Table, Column, MaskedColumn from astropy.extern import six out = OrderedDict() for name in dataframe.columns: column = dataframe[name] mask = np.array (column.isnull ()) data = np.array (column) if data.dtype.kind == 'O': # If all elements of an object array are string-like or np.nan # then coerce back to a native numpy str/unicode array. string_types = six.string_types if six.PY3: string_types += (bytes,) nan = np.nan if all(isinstance(x, string_types) or x is nan for x in data): # Force any missing (null) values to b''. Numpy will # upcast to str/unicode as needed. data[mask] = b'' # When the numpy object array is represented as a list then # numpy initializes to the correct string or unicode type. data = np.array([x for x in data]) if np.any(mask): out[name] = MaskedColumn(data=data, name=name, mask=mask) else: out[name] = Column(data=data, name=name) return Table(out)
[ "def", "data_frame_to_astropy_table", "(", "dataframe", ")", ":", "from", "astropy", ".", "utils", "import", "OrderedDict", "from", "astropy", ".", "table", "import", "Table", ",", "Column", ",", "MaskedColumn", "from", "astropy", ".", "extern", "import", "six",...
This is a backport of the Astropy method :meth:`astropy.table.table.Table.from_pandas`. It converts a Pandas :class:`pandas.DataFrame` object to an Astropy :class:`astropy.table.Table`.
[ "This", "is", "a", "backport", "of", "the", "Astropy", "method", ":", "meth", ":", "astropy", ".", "table", ".", "table", ".", "Table", ".", "from_pandas", ".", "It", "converts", "a", "Pandas", ":", "class", ":", "pandas", ".", "DataFrame", "object", "...
python
train
theiviaxx/python-perforce
perforce/models.py
https://github.com/theiviaxx/python-perforce/blob/01a3b01fe5949126fa0097d9a8ad386887823b5a/perforce/models.py#L630-L633
def save(self): """Saves the state of the changelist""" self._connection.run(['change', '-i'], stdin=format(self), marshal_output=False) self._dirty = False
[ "def", "save", "(", "self", ")", ":", "self", ".", "_connection", ".", "run", "(", "[", "'change'", ",", "'-i'", "]", ",", "stdin", "=", "format", "(", "self", ")", ",", "marshal_output", "=", "False", ")", "self", ".", "_dirty", "=", "False" ]
Saves the state of the changelist
[ "Saves", "the", "state", "of", "the", "changelist" ]
python
train
christophertbrown/bioscripts
ctbBio/name2fasta.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/name2fasta.py#L7-L22
def split_fasta(f, id2f): """ split fasta file into separate fasta files based on list of scaffolds that belong to each separate file """ opened = {} for seq in parse_fasta(f): id = seq[0].split('>')[1].split()[0] if id not in id2f: continue fasta = id2f[id] if fasta not in opened: opened[fasta] = '%s.fa' % fasta seq[1] += '\n' with open(opened[fasta], 'a+') as f_out: f_out.write('\n'.join(seq))
[ "def", "split_fasta", "(", "f", ",", "id2f", ")", ":", "opened", "=", "{", "}", "for", "seq", "in", "parse_fasta", "(", "f", ")", ":", "id", "=", "seq", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", ".", "split", "(", ")", "[...
split fasta file into separate fasta files based on list of scaffolds that belong to each separate file
[ "split", "fasta", "file", "into", "separate", "fasta", "files", "based", "on", "list", "of", "scaffolds", "that", "belong", "to", "each", "separate", "file" ]
python
train
evhub/coconut
coconut/command/command.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/command/command.py#L127-L132
def setup(self, *args, **kwargs): """Set parameters for the compiler.""" if self.comp is None: self.comp = Compiler(*args, **kwargs) else: self.comp.setup(*args, **kwargs)
[ "def", "setup", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "comp", "is", "None", ":", "self", ".", "comp", "=", "Compiler", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "c...
Set parameters for the compiler.
[ "Set", "parameters", "for", "the", "compiler", "." ]
python
train
kata198/ProcessMappingScanner
ProcessMappingScanner/__init__.py
https://github.com/kata198/ProcessMappingScanner/blob/d1735fe6746493c51aaae213b982fa96f5c5b621/ProcessMappingScanner/__init__.py#L131-L194
def scanProcessForCwd(pid, searchPortion, isExactMatch=False): ''' scanProcessForCwd - Searches a given pid's cwd for a given pattern @param pid <int> - A running process ID on this system @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'cwd' : The exact cwd of matched process } ''' try: try: pid = int(pid) except ValueError as e: sys.stderr.write('Expected an integer, got %s for pid.\n' %(str(type(pid)),)) raise e cwd = getProcessCwd(pid) if not cwd: return None isMatch = False if isExactMatch is True: if searchPortion == cwd: isMatch = True else: if searchPortion.endswith('/') and searchPortion[:-1] == cwd: isMatch = True else: if searchPortion in cwd: isMatch = True else: if searchPortion.endswith('/') and searchPortion[:-1] in cwd: isMatch = True if not isMatch: return None cmdline = getProcessCommandLineStr(pid) owner = getProcessOwnerStr(pid) return { 'searchPortion' : searchPortion, 'pid' : pid, 'owner' : owner, 'cmdline' : cmdline, 'cwd' : cwd, } except OSError: return None except IOError: return None except FileNotFoundError: return None except PermissionError: return None
[ "def", "scanProcessForCwd", "(", "pid", ",", "searchPortion", ",", "isExactMatch", "=", "False", ")", ":", "try", ":", "try", ":", "pid", "=", "int", "(", "pid", ")", "except", "ValueError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'...
scanProcessForCwd - Searches a given pid's cwd for a given pattern @param pid <int> - A running process ID on this system @param searchPortion <str> - Any portion of directory to search @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed. @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned. { 'searchPortion' : The passed search pattern 'pid' : The passed pid (as an integer) 'owner' : String of process owner, or uid if no mapping can be found, or "unknown" if neither could be determined. 'cmdline' : Commandline string 'cwd' : The exact cwd of matched process }
[ "scanProcessForCwd", "-", "Searches", "a", "given", "pid", "s", "cwd", "for", "a", "given", "pattern" ]
python
valid
tgsmith61591/pmdarima
pmdarima/arima/auto.py
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/auto.py#L565-L593
def _return_wrapper(fits, return_all, start, trace): """If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all. """ # make sure it's an iterable if not is_iterable(fits): fits = [fits] # whether to print the final runtime if trace: print('Total fit time: %.3f seconds' % (time.time() - start)) # which to return? if not all, then first index (assume sorted) if not return_all: return fits[0] return fits
[ "def", "_return_wrapper", "(", "fits", ",", "return_all", ",", "start", ",", "trace", ")", ":", "# make sure it's an iterable", "if", "not", "is_iterable", "(", "fits", ")", ":", "fits", "=", "[", "fits", "]", "# whether to print the final runtime", "if", "trace...
If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all.
[ "If", "the", "user", "wants", "to", "get", "all", "of", "the", "models", "back", "this", "will", "return", "a", "list", "of", "the", "ARIMA", "models", "otherwise", "it", "will", "just", "return", "the", "model", ".", "If", "this", "is", "called", "fro...
python
train
tadashi-aikawa/owlmixin
owlmixin/__init__.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/__init__.py#L588-L602
def from_yamlf_to_list(cls, fpath: str, encoding: str='utf8', force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> TList[T]: """From yaml file path to list of instance :param fpath: Yaml file path :param encoding: Yaml file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: List of instance """ return cls.from_dicts(util.load_yamlf(fpath, encoding), force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict)
[ "def", "from_yamlf_to_list", "(", "cls", ",", "fpath", ":", "str", ",", "encoding", ":", "str", "=", "'utf8'", ",", "force_snake_case", "=", "True", ",", "force_cast", ":", "bool", "=", "False", ",", "restrict", ":", "bool", "=", "True", ")", "->", "TL...
From yaml file path to list of instance :param fpath: Yaml file path :param encoding: Yaml file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: List of instance
[ "From", "yaml", "file", "path", "to", "list", "of", "instance" ]
python
train
googleapis/google-auth-library-python
google/auth/transport/grpc.py
https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/auth/transport/grpc.py#L80-L135
def secure_authorized_channel( credentials, request, target, ssl_credentials=None, **kwargs): """Creates a secure authorized gRPC channel. This creates a channel with SSL and :class:`AuthMetadataPlugin`. This channel can be used to create a stub that can make authorized requests. Example:: import google.auth import google.auth.transport.grpc import google.auth.transport.requests from google.cloud.speech.v1 import cloud_speech_pb2 # Get credentials. credentials, _ = google.auth.default() # Get an HTTP request function to refresh credentials. request = google.auth.transport.requests.Request() # Create a channel. channel = google.auth.transport.grpc.secure_authorized_channel( credentials, 'speech.googleapis.com:443', request) # Use the channel to create a stub. cloud_speech.create_Speech_stub(channel) Args: credentials (google.auth.credentials.Credentials): The credentials to add to requests. request (google.auth.transport.Request): A HTTP transport request object used to refresh credentials as needed. Even though gRPC is a separate transport, there's no way to refresh the credentials without using a standard http transport. target (str): The host and port of the service. ssl_credentials (grpc.ChannelCredentials): Optional SSL channel credentials. This can be used to specify different certificates. kwargs: Additional arguments to pass to :func:`grpc.secure_channel`. Returns: grpc.Channel: The created gRPC channel. """ # Create the metadata plugin for inserting the authorization header. metadata_plugin = AuthMetadataPlugin(credentials, request) # Create a set of grpc.CallCredentials using the metadata plugin. google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin) if ssl_credentials is None: ssl_credentials = grpc.ssl_channel_credentials() # Combine the ssl credentials and the authorization credentials. 
composite_credentials = grpc.composite_channel_credentials( ssl_credentials, google_auth_credentials) return grpc.secure_channel(target, composite_credentials, **kwargs)
[ "def", "secure_authorized_channel", "(", "credentials", ",", "request", ",", "target", ",", "ssl_credentials", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Create the metadata plugin for inserting the authorization header.", "metadata_plugin", "=", "AuthMetadataPlugin...
Creates a secure authorized gRPC channel. This creates a channel with SSL and :class:`AuthMetadataPlugin`. This channel can be used to create a stub that can make authorized requests. Example:: import google.auth import google.auth.transport.grpc import google.auth.transport.requests from google.cloud.speech.v1 import cloud_speech_pb2 # Get credentials. credentials, _ = google.auth.default() # Get an HTTP request function to refresh credentials. request = google.auth.transport.requests.Request() # Create a channel. channel = google.auth.transport.grpc.secure_authorized_channel( credentials, 'speech.googleapis.com:443', request) # Use the channel to create a stub. cloud_speech.create_Speech_stub(channel) Args: credentials (google.auth.credentials.Credentials): The credentials to add to requests. request (google.auth.transport.Request): A HTTP transport request object used to refresh credentials as needed. Even though gRPC is a separate transport, there's no way to refresh the credentials without using a standard http transport. target (str): The host and port of the service. ssl_credentials (grpc.ChannelCredentials): Optional SSL channel credentials. This can be used to specify different certificates. kwargs: Additional arguments to pass to :func:`grpc.secure_channel`. Returns: grpc.Channel: The created gRPC channel.
[ "Creates", "a", "secure", "authorized", "gRPC", "channel", "." ]
python
train
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L462-L504
def interpolate_linear(self, xi, yi, zdata): """ Piecewise linear interpolation/extrapolation to arbitrary point(s). The method is fast, but has only C^0 continuity. Parameters ---------- xi : float / array of floats, shape (l,) x coordinates on the Cartesian plane yi : float / array of floats, shape (l,) y coordinates on the Cartesian plane zdata : array of floats, shape (n,) value at each point in the triangulation must be the same size of the mesh Returns ------- zi : float / array of floats, shape (l,) interpolated value(s) of (xi,yi) err : int / array of ints, shape (l,) whether interpolation (0), extrapolation (1) or error (other) """ if zdata.size != self.npoints: raise ValueError('zdata should be same size as mesh') xi = np.array(xi) yi = np.array(yi) size = xi.size zi = np.empty(size) zierr = np.empty(size, dtype=np.int) zdata = self._shuffle_field(zdata) # iterate for i in range(0, size): ist = np.abs(self._x - xi[i]).argmin() + 1 zi[i], zierr[i] = _srfpack.intrc0(xi[i], yi[i], self._x, self._y, zdata,\ self.lst, self.lptr, self.lend, ist) return zi, zierr
[ "def", "interpolate_linear", "(", "self", ",", "xi", ",", "yi", ",", "zdata", ")", ":", "if", "zdata", ".", "size", "!=", "self", ".", "npoints", ":", "raise", "ValueError", "(", "'zdata should be same size as mesh'", ")", "xi", "=", "np", ".", "array", ...
Piecewise linear interpolation/extrapolation to arbitrary point(s). The method is fast, but has only C^0 continuity. Parameters ---------- xi : float / array of floats, shape (l,) x coordinates on the Cartesian plane yi : float / array of floats, shape (l,) y coordinates on the Cartesian plane zdata : array of floats, shape (n,) value at each point in the triangulation must be the same size of the mesh Returns ------- zi : float / array of floats, shape (l,) interpolated value(s) of (xi,yi) err : int / array of ints, shape (l,) whether interpolation (0), extrapolation (1) or error (other)
[ "Piecewise", "linear", "interpolation", "/", "extrapolation", "to", "arbitrary", "point", "(", "s", ")", ".", "The", "method", "is", "fast", "but", "has", "only", "C^0", "continuity", "." ]
python
train
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L2816-L2830
def is_valid_tile(self, x, y): """Check to see if the requested tile is part of the tile map.""" x = int(x) y = int(y) if x < 0: return False if y < 0: return False if x > self.size_in_tiles.X: return False if y > self.size_in_tiles.Y: return False return True
[ "def", "is_valid_tile", "(", "self", ",", "x", ",", "y", ")", ":", "x", "=", "int", "(", "x", ")", "y", "=", "int", "(", "y", ")", "if", "x", "<", "0", ":", "return", "False", "if", "y", "<", "0", ":", "return", "False", "if", "x", ">", "...
Check to see if the requested tile is part of the tile map.
[ "Check", "to", "see", "if", "the", "requested", "tile", "is", "part", "of", "the", "tile", "map", "." ]
python
train
kislyuk/ensure
ensure/main.py
https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L626-L632
def raises_regex(self, expected_exception, expected_regexp): """ Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised, and the string representation of *expected_exception* must match regular expression *expected_regexp*. """ return unittest_case.assertRaisesRegexp(expected_exception, expected_regexp, self._orig_subject, *self._args, **self._kwargs)
[ "def", "raises_regex", "(", "self", ",", "expected_exception", ",", "expected_regexp", ")", ":", "return", "unittest_case", ".", "assertRaisesRegexp", "(", "expected_exception", ",", "expected_regexp", ",", "self", ".", "_orig_subject", ",", "*", "self", ".", "_ar...
Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised, and the string representation of *expected_exception* must match regular expression *expected_regexp*.
[ "Ensures", "preceding", "predicates", "(", "specifically", ":", "meth", ":", "called_with", "()", ")", "result", "in", "*", "expected_exception", "*", "being", "raised", "and", "the", "string", "representation", "of", "*", "expected_exception", "*", "must", "mat...
python
train
dshean/pygeotools
pygeotools/lib/malib.py
https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/malib.py#L1688-L1702
def iv(b, **kwargs): """Quick access to imview for interactive sessions """ import matplotlib.pyplot as plt import imview.imviewer as imview b = checkma(b) #if hasattr(kwargs,'imshow_kwargs'): # kwargs['imshow_kwargs']['interpolation'] = 'bicubic' #else: # kwargs['imshow_kwargs'] = {'interpolation': 'bicubic'} #bma_fig(fig, bma, cmap='gist_rainbow_r', clim=None, bg=None, n_subplt=1, subplt=1, label=None, **imshow_kwargs) fig = plt.figure() imview.bma_fig(fig, b, **kwargs) plt.show() return fig
[ "def", "iv", "(", "b", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "imview", ".", "imviewer", "as", "imview", "b", "=", "checkma", "(", "b", ")", "#if hasattr(kwargs,'imshow_kwargs'):", "# kwargs['imsho...
Quick access to imview for interactive sessions
[ "Quick", "access", "to", "imview", "for", "interactive", "sessions" ]
python
train
apache/airflow
airflow/contrib/hooks/gcp_speech_to_text_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_speech_to_text_hook.py#L53-L73
def recognize_speech(self, config, audio, retry=None, timeout=None): """ Recognizes audio input :param config: information to the recognizer that specifies how to process the request. https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig :type config: dict or google.cloud.speech_v1.types.RecognitionConfig :param audio: audio data to be recognized https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio :type audio: dict or google.cloud.speech_v1.types.RecognitionAudio :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float """ client = self.get_conn() response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout) self.log.info("Recognised speech: %s" % response) return response
[ "def", "recognize_speech", "(", "self", ",", "config", ",", "audio", ",", "retry", "=", "None", ",", "timeout", "=", "None", ")", ":", "client", "=", "self", ".", "get_conn", "(", ")", "response", "=", "client", ".", "recognize", "(", "config", "=", ...
Recognizes audio input :param config: information to the recognizer that specifies how to process the request. https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionConfig :type config: dict or google.cloud.speech_v1.types.RecognitionConfig :param audio: audio data to be recognized https://googleapis.github.io/google-cloud-python/latest/speech/gapic/v1/types.html#google.cloud.speech_v1.types.RecognitionAudio :type audio: dict or google.cloud.speech_v1.types.RecognitionAudio :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float
[ "Recognizes", "audio", "input" ]
python
test
bjodah/pycompilation
pycompilation/dist.py
https://github.com/bjodah/pycompilation/blob/43eac8d82f8258d30d4df77fd2ad3f3e4f4dca18/pycompilation/dist.py#L74-L112
def _copy_or_render_source(ext, f, output_dir, render_callback, skip_copy=False):
    """
    Render ``f`` through the first matching ``(pattern, target, subsd)`` rule
    in ``ext.template_regexps``, or copy it verbatim when no rule matches.

    Returns the produced file's path (the rendered target name, or ``f``
    itself for a plain copy).
    """
    dirname = os.path.dirname(f)
    filename = os.path.basename(f)
    for pattern, target, subsd in ext.template_regexps:
        if not re.match(pattern, filename):
            continue
        # Template rule matched: derive destination name and render it.
        tgt = os.path.join(dirname, re.sub(pattern, target, filename))
        rw = MetaReaderWriter('.metadata_subsd')
        try:
            prev_subsd = rw.get_from_metadata_file(output_dir, f)
        except (FileNotFoundError, KeyError):
            # First render, or no recorded substitutions for this file.
            prev_subsd = None
        render_callback(
            get_abspath(f),
            os.path.join(output_dir, tgt),
            subsd,
            only_update=ext.only_update,
            prev_subsd=prev_subsd,
            create_dest_dirs=True,
            logger=ext.logger)
        rw.save_to_metadata_file(output_dir, f, subsd)
        return tgt
    # No template rule matched: fall back to a plain copy (unless suppressed).
    if not skip_copy:
        copy(f,
             os.path.join(output_dir, os.path.dirname(f)),
             only_update=ext.only_update,
             dest_is_dir=True,
             create_dest_dirs=True,
             logger=ext.logger)
    return f
[ "def", "_copy_or_render_source", "(", "ext", ",", "f", ",", "output_dir", ",", "render_callback", ",", "skip_copy", "=", "False", ")", ":", "# Either render a template or copy the source", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "f", ")", "filena...
Tries to do regex match for each (pattern, target, subsd) tuple in ext.template_regexps for file f.
[ "Tries", "to", "do", "regex", "match", "for", "each", "(", "pattern", "target", "subsd", ")", "tuple", "in", "ext", ".", "template_regexps", "for", "file", "f", "." ]
python
train
galaxyproject/gravity
gravity/config_manager.py
https://github.com/galaxyproject/gravity/blob/2f792497fc60874f881c9ef74a5905a286a9ce3e/gravity/config_manager.py#L283-L303
def register_config_changes(self, configs, meta_changes):
    """
    Persist config changes to the JSON state file.

    When a config changes, a process manager may perform certain actions
    based on these changes. Call this method once those actions are
    complete.
    """
    # Drop state for config files scheduled for removal.
    for removed_file in meta_changes['remove_configs'].keys():
        self._purge_config_file(removed_file)
    for config_file, config in configs.items():
        if 'update_attribs' in config:
            config['attribs'] = config.pop('update_attribs')
        if 'update_instance_name' in config:
            config['instance_name'] = config.pop('update_instance_name')
        if 'update_services' in config or 'remove_services' in config:
            removed = config.pop('remove_services', [])
            merged = config.pop('update_services', [])
            # Carry over existing service defs unless they were removed or
            # superseded by an updated definition (new defs take precedence).
            for svc in config['services']:
                if svc not in removed and svc not in merged:
                    merged.append(svc)
            config['services'] = merged
        self._register_config_file(config_file, config)
[ "def", "register_config_changes", "(", "self", ",", "configs", ",", "meta_changes", ")", ":", "for", "config_file", "in", "meta_changes", "[", "'remove_configs'", "]", ".", "keys", "(", ")", ":", "self", ".", "_purge_config_file", "(", "config_file", ")", "for...
Persist config changes to the JSON state file. When a config changes, a process manager may perform certain actions based on these changes. This method can be called once the actions are complete.
[ "Persist", "config", "changes", "to", "the", "JSON", "state", "file", ".", "When", "a", "config", "changes", "a", "process", "manager", "may", "perform", "certain", "actions", "based", "on", "these", "changes", ".", "This", "method", "can", "be", "called", ...
python
train
aewallin/allantools
allantools/plot.py
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/plot.py#L66-L92
def plot(self, atDataset, errorbars=False, grid=False):
    """ use matplotlib methods for plotting

    Parameters
    ----------
    atDataset : allantools.Dataset()
        a dataset with computed data
    errorbars : boolean
        Plot errorbars. Defaults to False
    grid : boolean
        Plot grid. Defaults to False
    """
    taus = atDataset.out["taus"]
    devs = atDataset.out["stat"]
    if errorbars:
        self.ax.errorbar(taus, devs, yerr=atDataset.out["stat_err"])
    else:
        self.ax.plot(taus, devs)
    self.ax.set_xlabel("Tau")
    self.ax.set_ylabel(atDataset.out["stat_id"])
    # Minor grid drawn lighter than the major grid for readability.
    self.ax.grid(grid, which="minor", ls="-", color='0.65')
    self.ax.grid(grid, which="major", ls="-", color='0.25')
[ "def", "plot", "(", "self", ",", "atDataset", ",", "errorbars", "=", "False", ",", "grid", "=", "False", ")", ":", "if", "errorbars", ":", "self", ".", "ax", ".", "errorbar", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", ...
use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False
[ "use", "matplotlib", "methods", "for", "plotting" ]
python
train
tgsmith61591/pmdarima
pmdarima/arima/arima.py
https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/arima/arima.py#L380-L458
def fit(self, y, exogenous=None, **fit_args):
    """Fit an ARIMA to a vector, ``y``, of observations with an
    optional matrix of ``exogenous`` variables.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series to which to fit the ``ARIMA`` estimator. This may
        either be a Pandas ``Series`` object (statsmodels can internally
        use the dates in the index), or a numpy array. This should be a
        one-dimensional array of floats, and should not contain any
        ``np.nan`` or ``np.inf`` values.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these
        variables are used as additional features in the regression
        operation. This should not include a constant or trend. Note that
        if an ``ARIMA`` is fit on exogenous features, it must be provided
        exogenous features for making predictions.

    **fit_args : dict or kwargs
        Any keyword arguments to pass to the statsmodels ARIMA fit.
    """
    # Coerce y to a contiguous 1-d float array (c1d/check_array are
    # project helpers; DTYPE is the project-wide float dtype).
    y = c1d(check_array(y, ensure_2d=False, force_all_finite=False,
                        copy=True, dtype=DTYPE))  # type: np.ndarray
    n_samples = y.shape[0]

    # if exog was included, check the array...
    if exogenous is not None:
        exogenous = check_array(exogenous, ensure_2d=True,
                                force_all_finite=False,
                                copy=False, dtype=DTYPE)

    # determine the CV args, if any
    cv = self.out_of_sample_size
    scoring = get_callable(self.scoring, VALID_SCORING)

    # don't allow negative, don't allow > n_samples
    cv = max(cv, 0)

    # if cv is too big, raise
    if cv >= n_samples:
        raise ValueError("out-of-sample size must be less than number "
                         "of samples!")

    # If we want to get a score on the out-of-sample, we need to trim
    # down the size of our y vec for fitting. Addressed due to Issue #28
    cv_samples = None
    cv_exog = None
    if cv:
        # Hold out the LAST `cv` observations for out-of-sample scoring.
        cv_samples = y[-cv:]
        y = y[:-cv]

        # This also means we have to address the exogenous matrix
        if exogenous is not None:
            cv_exog = exogenous[-cv:, :]
            exogenous = exogenous[:-cv, :]

    # Internal call
    self._fit(y, exogenous, **fit_args)

    # now make a forecast if we're validating to compute the
    # out-of-sample score
    if cv_samples is not None:
        # get the predictions (use self.predict, which calls forecast
        # from statsmodels internally)
        pred = self.predict(n_periods=cv, exogenous=cv_exog)
        self.oob_ = scoring(cv_samples, pred, **self.scoring_args)
        self.oob_preds_ = pred

        # If we compute out of sample scores, we have to now update the
        # observed time points so future forecasts originate from the end
        # of our y vec
        self.update(cv_samples, cv_exog, **fit_args)
    else:
        # No hold-out requested: out-of-bag score is undefined.
        self.oob_ = np.nan
        self.oob_preds_ = None

    return self
[ "def", "fit", "(", "self", ",", "y", ",", "exogenous", "=", "None", ",", "*", "*", "fit_args", ")", ":", "y", "=", "c1d", "(", "check_array", "(", "y", ",", "ensure_2d", "=", "False", ",", "force_all_finite", "=", "False", ",", "copy", "=", "True",...
Fit an ARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables. Parameters ---------- y : array-like or iterable, shape=(n_samples,) The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values. exogenous : array-like, shape=[n_obs, n_vars], optional (default=None) An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions. **fit_args : dict or kwargs Any keyword arguments to pass to the statsmodels ARIMA fit.
[ "Fit", "an", "ARIMA", "to", "a", "vector", "y", "of", "observations", "with", "an", "optional", "matrix", "of", "exogenous", "variables", "." ]
python
train
SmartDeveloperHub/agora-service-provider
agora/provider/jobs/collect.py
https://github.com/SmartDeveloperHub/agora-service-provider/blob/3962207e5701c659c74c8cfffcbc4b0a63eac4b4/agora/provider/jobs/collect.py#L68-L85
def __extract_pattern_nodes(graph):
    """
    Extract and bind the triple patterns contained in the search plan, so as
    to be able to identify to which pattern is associated each triple of the
    fragment.
    :return:
    """
    def last_value(node, prop):
        # The plan graph is expected to carry a single value per property
        # here; take the last one listed.
        return list(graph.objects(node, prop)).pop()

    for tpn in graph.subjects(RDF.type, AGORA.TriplePattern):
        subject = last_value(tpn, AGORA.subject)
        predicate = last_value(tpn, AGORA.predicate)
        obj = last_value(tpn, AGORA.object)
        subject_str = last_value(subject, RDFS.label).toPython()
        predicate_str = graph.qname(predicate)
        # Variables are labelled; ground terms carry an explicit value.
        if (obj, RDF.type, AGORA.Variable) in graph:
            object_str = last_value(obj, RDFS.label).toPython()
        else:
            object_str = last_value(obj, AGORA.value).toPython()
        __plan_patterns[tpn] = '{} {} {}'.format(subject_str, predicate_str,
                                                 object_str)
[ "def", "__extract_pattern_nodes", "(", "graph", ")", ":", "tp_nodes", "=", "graph", ".", "subjects", "(", "RDF", ".", "type", ",", "AGORA", ".", "TriplePattern", ")", "for", "tpn", "in", "tp_nodes", ":", "subject", "=", "list", "(", "graph", ".", "object...
Extract and bind the triple patterns contained in the search plan, so as to be able to identify to which pattern is associated each triple of the fragment. :return:
[ "Extract", "and", "bind", "the", "triple", "patterns", "contained", "in", "the", "search", "plan", "so", "as", "to", "be", "able", "to", "identify", "to", "which", "pattern", "is", "associated", "each", "triple", "of", "the", "fragment", ".", ":", "return"...
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_kvlayer.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L411-L422
def streamitem_to_key_data(si):
    '''
    extract the parts of a StreamItem that go into a kvlayer key,
    convert StreamItem to blob for storage.

    return (kvlayer key tuple), data blob
    '''
    key = key_for_stream_item(si)
    serialized = streamcorpus.serialize(si)
    errors, blob = streamcorpus.compress_and_encrypt(serialized)
    # compress_and_encrypt reports problems in its first return value;
    # treat any of them as fatal.
    assert not errors, errors
    return key, blob
[ "def", "streamitem_to_key_data", "(", "si", ")", ":", "key", "=", "key_for_stream_item", "(", "si", ")", "data", "=", "streamcorpus", ".", "serialize", "(", "si", ")", "errors", ",", "data", "=", "streamcorpus", ".", "compress_and_encrypt", "(", "data", ")",...
extract the parts of a StreamItem that go into a kvlayer key, convert StreamItem to blob for storage. return (kvlayer key tuple), data blob
[ "extract", "the", "parts", "of", "a", "StreamItem", "that", "go", "into", "a", "kvlayer", "key", "convert", "StreamItem", "to", "blob", "for", "storage", "." ]
python
test