repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
tensorflow/tensor2tensor
tensor2tensor/models/distillation.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/distillation.py#L175-L196
def distill_resnet_32_to_15_cifar20x5():
    """Set of hyperparameters."""
    hparams = distill_base()
    # Teacher and student are both resnets, differing only in depth.
    hparams.teacher_model = "resnet"
    hparams.teacher_hparams = "resnet_cifar_32"
    hparams.student_model = "resnet"
    hparams.student_hparams = "resnet_cifar_15"
    hparams.optimizer_momentum_nesterov = True
    # Linear LR scaling: base_lr * (batch_size=128 per core * 8 cores) / 256.
    lr_scale = 128. * 8. / 256.
    hparams.teacher_learning_rate = 0.25 * lr_scale
    hparams.student_learning_rate = 0.2 * lr_scale
    # Piecewise-constant decay at the given global steps.
    hparams.learning_rate_decay_scheme = "piecewise"
    hparams.add_hparam("learning_rate_boundaries", [40000, 60000, 80000])
    hparams.add_hparam("learning_rate_multiples", [0.1, 0.01, 0.001])
    # Weighting between the task loss and the distillation loss.
    hparams.task_balance = 0.28
    hparams.distill_temperature = 2.0
    hparams.num_classes = 20
    return hparams
[ "def", "distill_resnet_32_to_15_cifar20x5", "(", ")", ":", "hparams", "=", "distill_base", "(", ")", "hparams", ".", "teacher_model", "=", "\"resnet\"", "hparams", ".", "teacher_hparams", "=", "\"resnet_cifar_32\"", "hparams", ".", "student_model", "=", "\"resnet\"", ...
Set of hyperparameters.
[ "Set", "of", "hyperparameters", "." ]
python
train
35.363636
abelcarreras/DynaPhoPy
dynaphopy/interface/iofile/__init__.py
https://github.com/abelcarreras/DynaPhoPy/blob/51e99422228e6be84830d659b88a0ca904d9136f/dynaphopy/interface/iofile/__init__.py#L10-L20
def diff_matrix(array_1, array_2, cell_size):
    """Return the element-wise difference of two normalized position arrays.

    Both arrays are divided by ``cell_size`` (per coordinate) before
    subtracting, so the result is expressed in fractions of the cell.

    :param array_1: supercell scaled positions with respect to the unit cell
    :param array_2: supercell scaled positions with respect to the unit cell
    :param cell_size: per-axis cell size used to normalize both arrays
    :return: array_2 / cell_size - array_1 / cell_size (numpy array)
    """
    # Compute the normalizer once instead of twice as in the original.
    norm = np.array(cell_size, dtype=float)[None, :]
    return np.array(array_2) / norm - np.array(array_1) / norm
[ "def", "diff_matrix", "(", "array_1", ",", "array_2", ",", "cell_size", ")", ":", "array_1_norm", "=", "np", ".", "array", "(", "array_1", ")", "/", "np", ".", "array", "(", "cell_size", ",", "dtype", "=", "float", ")", "[", "None", ",", ":", "]", ...
:param array_1: supercell scaled positions respect unit cell :param array_2: supercell scaled positions respect unit cell :param cell_size: diference between arrays accounting for periodicity :return:
[ ":", "param", "array_1", ":", "supercell", "scaled", "positions", "respect", "unit", "cell", ":", "param", "array_2", ":", "supercell", "scaled", "positions", "respect", "unit", "cell", ":", "param", "cell_size", ":", "diference", "between", "arrays", "accountin...
python
train
42.545455
Microsoft/nni
src/sdk/pynni/nni/networkmorphism_tuner/layers.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/layers.py#L613-L661
def layer_description_extractor(layer, node_to_id):
    """Build a serializable description of *layer*.

    Inputs/outputs are translated to node ids via *node_to_id*; the
    remaining fields depend on the concrete layer type.
    """
    layer_input = layer.input
    layer_output = layer.output
    if layer_input is not None:
        if isinstance(layer_input, Iterable):
            layer_input = [node_to_id[node] for node in layer_input]
        else:
            layer_input = node_to_id[layer_input]
    if layer_output is not None:
        layer_output = node_to_id[layer_output]

    # Common prefix shared by every description.
    base = (type(layer).__name__, layer_input, layer_output)
    if isinstance(layer, StubConv):
        return base + (layer.input_channel, layer.filters,
                       layer.kernel_size, layer.stride, layer.padding)
    if isinstance(layer, (StubDense,)):
        # NOTE: deliberately a list (not a tuple), matching the original.
        return list(base) + [layer.input_units, layer.units]
    if isinstance(layer, (StubBatchNormalization,)):
        return base + (layer.num_features,)
    if isinstance(layer, (StubDropout,)):
        return base + (layer.rate,)
    if isinstance(layer, StubPooling):
        return base + (layer.kernel_size, layer.stride, layer.padding)
    return base
[ "def", "layer_description_extractor", "(", "layer", ",", "node_to_id", ")", ":", "layer_input", "=", "layer", ".", "input", "layer_output", "=", "layer", ".", "output", "if", "layer_input", "is", "not", "None", ":", "if", "isinstance", "(", "layer_input", ",",...
get layer description.
[ "get", "layer", "description", "." ]
python
train
30.163265
open511/open511
open511/utils/schedule.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/utils/schedule.py#L53-L56
def intervals(self, range_start=datetime.datetime.min, range_end=datetime.datetime.max):
    """Return a list of (start, end) datetime tuples for when the schedule
    is active during the provided range.

    Abstract: subclasses must provide the implementation.
    """
    raise NotImplementedError
[ "def", "intervals", "(", "self", ",", "range_start", "=", "datetime", ".", "datetime", ".", "min", ",", "range_end", "=", "datetime", ".", "datetime", ".", "max", ")", ":", "raise", "NotImplementedError" ]
Returns a list of tuples of start/end datetimes for when the schedule is active during the provided range.
[ "Returns", "a", "list", "of", "tuples", "of", "start", "/", "end", "datetimes", "for", "when", "the", "schedule", "is", "active", "during", "the", "provided", "range", "." ]
python
valid
62
Parsl/parsl
parsl/providers/aws/aws.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/providers/aws/aws.py#L540-L564
def status(self, job_ids):
    """Get the status of a list of jobs identified by their ids.

    Parameters
    ----------
    job_ids : list of str
        Identifiers for the jobs.

    Returns
    -------
    list
        The status codes of the requested jobs, and updates
        ``self.resources`` with the per-instance status as a side effect.
    """
    states = []
    # Renamed from `status` in the original, which shadowed the method name.
    response = self.client.describe_instances(InstanceIds=job_ids)
    for reservation in response['Reservations']:
        for instance in reservation['Instances']:
            state = translate_table.get(instance['State']['Name'], 'UNKNOWN')
            self.resources[instance['InstanceId']]['status'] = state
            states.append(state)
    return states
[ "def", "status", "(", "self", ",", "job_ids", ")", ":", "all_states", "=", "[", "]", "status", "=", "self", ".", "client", ".", "describe_instances", "(", "InstanceIds", "=", "job_ids", ")", "for", "r", "in", "status", "[", "'Reservations'", "]", ":", ...
Get the status of a list of jobs identified by their ids. Parameters ---------- job_ids : list of str Identifiers for the jobs. Returns ------- list of int The status codes of the requsted jobs.
[ "Get", "the", "status", "of", "a", "list", "of", "jobs", "identified", "by", "their", "ids", "." ]
python
valid
29.88
reingart/pyafipws
iibb.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/iibb.py#L89-L144
def ConsultarContribuyentes(self, fecha_desde, fecha_hasta, cuit_contribuyente):
    "Perform the remote query to ARBA, storing the results on this instance"
    # NOTE(review): Python 2 only — relies on the removed `md5` module,
    # `except Exception, e` syntax and `sys.exc_type`/`sys.exc_value`.
    self.limpiar()  # clear any results/errors from a previous call
    try:
        # Build the request XML from the base template.
        self.xml = SimpleXMLElement(XML_ENTRADA_BASE)
        self.xml.fechaDesde = fecha_desde
        self.xml.fechaHasta = fecha_hasta
        self.xml.contribuyentes.contribuyente.cuitContribuyente = cuit_contribuyente
        xml = self.xml.as_xml()
        # The MD5 of the request doubles as part of the temp file name.
        self.CodigoHash = md5.md5(xml).hexdigest()
        nombre = "DFEServicioConsulta_%s.xml" % self.CodigoHash
        # save the xml to the file to be sent, then re-open it:
        archivo = open(os.path.join(tempfile.gettempdir(), nombre), "w")
        archivo.write(xml)
        archivo.close()
        archivo = open(os.path.join(tempfile.gettempdir(), nombre), "r")
        if not self.testing:
            # Real remote call with the configured credentials.
            response = self.client(user=self.Usuario, password=self.Password, file=archivo)
        else:
            # Testing mode: `self.testing` is a path to a canned response.
            response = open(self.testing).read()
        self.XmlResponse = response
        self.xml = SimpleXMLElement(response)
        if 'tipoError' in self.xml:
            self.TipoError = str(self.xml.tipoError)
            self.CodigoError = str(self.xml.codigoError)
            # latin1 → ascii with replacement to keep the message printable.
            self.MensajeError = str(self.xml.mensajeError).decode('latin1').encode("ascii", "replace")
        if 'numeroComprobante' in self.xml:
            self.NumeroComprobante = str(self.xml.numeroComprobante)
        # NOTE(review): placement inferred from the flattened source —
        # assumed present in every response; confirm against the WS schema.
        self.CantidadContribuyentes = int(self.xml.cantidadContribuyentes)
        if 'contribuyentes' in self.xml:
            for contrib in self.xml.contribuyente:
                c = {
                    'CuitContribuytente': str(contrib.cuitContribuyente),
                    'AlicuotaPercepcion': str(contrib.alicuotaPercepcion),
                    'AlicuotaRetencion': str(contrib.alicuotaRetencion),
                    'GrupoPercepcion': str(contrib.grupoPercepcion),
                    'GrupoRetencion': str(contrib.grupoRetencion),
                    'Errores': [],
                }
                self.contribuyentes.append(c)
            # set the values of the first contributor (without removing it)
            self.LeerContribuyente(pop=False)
        return True
    except Exception, e:
        # Record the full traceback and a one-line summary for callers.
        ex = traceback.format_exception(
            sys.exc_type, sys.exc_value, sys.exc_traceback)
        self.Traceback = ''.join(ex)
        try:
            self.Excepcion = traceback.format_exception_only(
                sys.exc_type, sys.exc_value)[0]
        except:
            self.Excepcion = u"<no disponible>"
        return False
[ "def", "ConsultarContribuyentes", "(", "self", ",", "fecha_desde", ",", "fecha_hasta", ",", "cuit_contribuyente", ")", ":", "self", ".", "limpiar", "(", ")", "try", ":", "self", ".", "xml", "=", "SimpleXMLElement", "(", "XML_ENTRADA_BASE", ")", "self", ".", ...
Realiza la consulta remota a ARBA, estableciendo los resultados
[ "Realiza", "la", "consulta", "remota", "a", "ARBA", "estableciendo", "los", "resultados" ]
python
train
50.642857
bcho/bearychat-py
bearychat/incoming.py
https://github.com/bcho/bearychat-py/blob/d492595d6334dfba511f82770995160ee12b5de1/bearychat/incoming.py#L43-L50
def reset(self):
    '''Restore this message to its pristine state; returns ``self`` so
    calls can be chained.'''
    self._attachments = []
    self._channel = Incoming.DEFAULT_CHANNEL
    self._markdown = False
    self._text = None
    return self
[ "def", "reset", "(", "self", ")", ":", "self", ".", "_text", "=", "None", "self", ".", "_markdown", "=", "False", "self", ".", "_channel", "=", "Incoming", ".", "DEFAULT_CHANNEL", "self", ".", "_attachments", "=", "[", "]", "return", "self" ]
Reset stream.
[ "Reset", "stream", "." ]
python
train
24.375
ska-sa/spead2
spead2/__init__.py
https://github.com/ska-sa/spead2/blob/cac95fd01d8debaa302d2691bd26da64b7828bc6/spead2/__init__.py#L367-L386
def _read_bits(cls, raw_value): """Generator that takes a memory view and provides bitfields from it. After creating the generator, call `send(None)` to initialise it, and thereafter call `send(need_bits)` to obtain that many bits. """ have_bits = 0 bits = 0 byte_source = iter(raw_value) result = 0 while True: need_bits = yield result while have_bits < need_bits: try: bits = (bits << 8) | int(next(byte_source)) have_bits += 8 except StopIteration: return result = int(bits >> (have_bits - need_bits)) bits &= (1 << (have_bits - need_bits)) - 1 have_bits -= need_bits
[ "def", "_read_bits", "(", "cls", ",", "raw_value", ")", ":", "have_bits", "=", "0", "bits", "=", "0", "byte_source", "=", "iter", "(", "raw_value", ")", "result", "=", "0", "while", "True", ":", "need_bits", "=", "yield", "result", "while", "have_bits", ...
Generator that takes a memory view and provides bitfields from it. After creating the generator, call `send(None)` to initialise it, and thereafter call `send(need_bits)` to obtain that many bits.
[ "Generator", "that", "takes", "a", "memory", "view", "and", "provides", "bitfields", "from", "it", ".", "After", "creating", "the", "generator", "call", "send", "(", "None", ")", "to", "initialise", "it", "and", "thereafter", "call", "send", "(", "need_bits"...
python
train
38.75
networks-lab/metaknowledge
metaknowledge/medline/recordMedline.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/medline/recordMedline.py#L66-L88
def writeRecord(self, f):
    """Write this record to *f* in Medline tag format.

    The output is nearly identical to the original input; the author-based
    tags (FAU etc.) are the only tags not written in the same place —
    preserving that would require changing the parser and lots of extra
    logic.
    """
    if self.bad:
        # Malformed input cannot be round-tripped.
        raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
    else:
        # Group author-based tag values by the author name (the part before
        # ' : ') so they can be emitted right after the matching AU line.
        authTags = {}
        for tag in authorBasedTags:
            for val in self._fieldDict.get(tag, []):
                split = val.split(' : ')
                try:
                    # "TAG - value" padded so the dash lands in column 5;
                    # embedded newlines become indented continuation lines.
                    authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), ' : '.join(split[1:]).replace('\n', '\n ')))
                except KeyError:
                    authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), ' : '.join(split[1:]).replace('\n', '\n '))]
        for tag, value in self._fieldDict.items():
            if tag in authorBasedTags:
                continue  # these were grouped above, emitted after AU lines
            else:
                for v in value:
                    f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n ')))
                    if tag == 'AU':
                        # Emit this author's grouped tags immediately after.
                        for authVal in authTags.get(v, []):
                            f.write(authVal)
[ "def", "writeRecord", "(", "self", ",", "f", ")", ":", "if", "self", ".", "bad", ":", "raise", "BadPubmedRecord", "(", "\"This record cannot be converted to a file as the input was malformed.\\nThe original line number (if any) is: {} and the original file is: '{}'\"", ".", "form...
This is nearly identical to the original the FAU tag is the only tag not writen in the same place, doing so would require changing the parser and lots of extra logic.
[ "This", "is", "nearly", "identical", "to", "the", "original", "the", "FAU", "tag", "is", "the", "only", "tag", "not", "writen", "in", "the", "same", "place", "doing", "so", "would", "require", "changing", "the", "parser", "and", "lots", "of", "extra", "l...
python
train
61.913043
rosenbrockc/fortpy
fortpy/interop/ftypes.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/interop/ftypes.py#L683-L690
def _py_outvar(parameter, lparams, tab): """Returns the code to produce a ctypes output variable for interacting with fortran. """ if ("out" in parameter.direction and parameter.D > 0 and ":" in parameter.dimension and ("allocatable" in parameter.modifiers or "pointer" in parameter.modifiers)): lparams.append("byref({}_o)".format(parameter.lname)) blank = True if parameter.direction == "(inout)" else False return ("{0}_o = POINTER({1})()".format(parameter.lname, _py_ctype(parameter)), blank)
[ "def", "_py_outvar", "(", "parameter", ",", "lparams", ",", "tab", ")", ":", "if", "(", "\"out\"", "in", "parameter", ".", "direction", "and", "parameter", ".", "D", ">", "0", "and", "\":\"", "in", "parameter", ".", "dimension", "and", "(", "\"allocatabl...
Returns the code to produce a ctypes output variable for interacting with fortran.
[ "Returns", "the", "code", "to", "produce", "a", "ctypes", "output", "variable", "for", "interacting", "with", "fortran", "." ]
python
train
66.625
tmux-python/libtmux
libtmux/common.py
https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/common.py#L106-L142
def show_environment(self, name=None):
    """Show environment ``$ tmux show-environment -t [session] <name>``.

    Return dict of environment variables for the session or the value of a
    specific variable if the name is specified.

    Parameters
    ----------
    name : str
        the environment variable name. such as 'PATH'.

    Returns
    -------
    str or dict
        environmental variables in dict, if no name, or str if name entered.
    """
    tmux_args = ['show-environment']
    if self._add_option:
        tmux_args += [self._add_option]
    if name:
        tmux_args += [name]
    output = self.cmd(*tmux_args).stdout
    # Each line is either "VAR=value" or a bare "VAR" (no value -> True).
    # `env_vars` replaces the original local `vars`, which shadowed the
    # builtin of the same name.
    env_vars = {}
    for item in output:
        parts = tuple(item.split('=', 1))
        if len(parts) == 2:
            env_vars[parts[0]] = parts[1]
        elif len(parts) == 1:
            env_vars[parts[0]] = True
        else:
            # Defensive guard (str.split never returns an empty list).
            # Bug fix: the original passed the tuple as a second argument to
            # ValueError, so the '%s' placeholder was never formatted.
            raise ValueError('unexpected variable %s' % (parts,))
    if name:
        return env_vars.get(name)
    return env_vars
[ "def", "show_environment", "(", "self", ",", "name", "=", "None", ")", ":", "tmux_args", "=", "[", "'show-environment'", "]", "if", "self", ".", "_add_option", ":", "tmux_args", "+=", "[", "self", ".", "_add_option", "]", "if", "name", ":", "tmux_args", ...
Show environment ``$ tmux show-environment -t [session] <name>``. Return dict of environment variables for the session or the value of a specific variable if the name is specified. Parameters ---------- name : str the environment variable name. such as 'PATH'. Returns ------- str or dict environmental variables in dict, if no name, or str if name entered.
[ "Show", "environment", "$", "tmux", "show", "-", "environment", "-", "t", "[", "session", "]", "<name", ">", "." ]
python
train
29.648649
geopython/geolinks
geolinks/__init__.py
https://github.com/geopython/geolinks/blob/134608c81e6b31323a17d05fb5e62e3119de81da/geolinks/__init__.py#L63-L110
def sniff_link(url):
    """performs basic heuristics to detect what the URL is"""
    link = url.strip()
    # Ordered heuristics: the first matching rule wins, mirroring the
    # original if/elif cascade.  Each entry is (protocol, test-callable).
    rules = [
        ('OGC:CSW', lambda: inurl(['service=CSW', 'request=GetRecords'], link)),
        ('OGC:SOS', lambda: inurl(['service=SOS', 'request=GetObservation'], link)),
        ('OGC:WCS', lambda: inurl(['service=WCS', 'request=GetCoverage'], link)),
        ('OGC:WFS', lambda: inurl(['service=WFS', 'request=GetFeature'], link)),
        ('OGC:WMS', lambda: inurl(['service=WMS', 'request=GetMap'], link)),
        ('OGC:WPS', lambda: inurl(['service=WPS', 'request=Execute'], link)),
        ('ESRI:ArcIMS', lambda: inurl(['arcims'], link)),
        ('ESRI:ArcGIS', lambda: inurl(['arcgis'], link)),
        ('ESRI:MPK', lambda: inurl(['mpk'], link, 'end')),
        ('OPeNDAP:OPeNDAP', lambda: inurl(['opendap'], link)),
        ('UNIDATA:NCSS', lambda: inurl(['ncss'], link)),
        ('UNIDATA:CDM', lambda: inurl(['cdmremote'], link)),
        ('OGC:GML', lambda: inurl(['gml'], link, 'end')),
        ('WWW:LINK', lambda: inurl(['htm', 'html', 'shtml'], link, 'end')),
        # extra tests
        ('ESRI:SHAPEFILE', lambda: all([inurl(['census.gov/geo/tiger'], link),
                                        inurl(['zip'], link, 'end')])),
        ('WWW:DOWNLOAD', lambda: inurl(['7z', 'bz2', 'gz', 'rar', 'tar.gz',
                                        'tgz', 'zip'], link, 'end')),
        ('OGC:KML', lambda: inurl(['kml', 'kmz'], link, 'end')),
    ]
    for protocol, matches in rules:
        if matches():
            return protocol
    LOGGER.info('No link type detected')
    return None
[ "def", "sniff_link", "(", "url", ")", ":", "protocol", "=", "None", "link", "=", "url", ".", "strip", "(", ")", "# heuristics begin", "if", "inurl", "(", "[", "'service=CSW'", ",", "'request=GetRecords'", "]", ",", "link", ")", ":", "protocol", "=", "'OG...
performs basic heuristics to detect what the URL is
[ "performs", "basic", "heuristics", "to", "detect", "what", "the", "URL", "is" ]
python
train
34.020833
uw-it-cte/uw-restclients-wheniwork
uw_wheniwork/shifts.py
https://github.com/uw-it-cte/uw-restclients-wheniwork/blob/0d3ca09d5bbe808fec12e5f943596570d33a1731/uw_wheniwork/shifts.py#L66-L77
def delete_shifts(self, shifts):
    """Delete existing shifts.

    http://dev.wheniwork.com/#delete-shift
    """
    ids = ",".join(str(shift_id) for shift_id in shifts)
    url = "/2/shifts/?%s" % urlencode({'ids': ids})
    return self._delete_resource(url)
[ "def", "delete_shifts", "(", "self", ",", "shifts", ")", ":", "url", "=", "\"/2/shifts/?%s\"", "%", "urlencode", "(", "{", "'ids'", ":", "\",\"", ".", "join", "(", "str", "(", "s", ")", "for", "s", "in", "shifts", ")", "}", ")", "data", "=", "self"...
Delete existing shifts. http://dev.wheniwork.com/#delete-shift
[ "Delete", "existing", "shifts", "." ]
python
valid
23.916667
bitshares/uptick
uptick/markets.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L353-L364
def bidcollateral(
    ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account
):
    """ Bid for collateral in the settlement fund """
    collateral = Amount(collateral_amount, collateral_symbol)
    debt = Amount(debt_amount, debt_symbol)
    # Broadcast the bid and pretty-print the resulting transaction.
    tx = ctx.bitshares.bid_collateral(collateral, debt, account=account)
    print_tx(tx)
[ "def", "bidcollateral", "(", "ctx", ",", "collateral_symbol", ",", "collateral_amount", ",", "debt_symbol", ",", "debt_amount", ",", "account", ")", ":", "print_tx", "(", "ctx", ".", "bitshares", ".", "bid_collateral", "(", "Amount", "(", "collateral_amount", ",...
Bid for collateral in the settlement fund
[ "Bid", "for", "collateral", "in", "the", "settlement", "fund" ]
python
train
29.166667
hvac/hvac
hvac/api/secrets_engines/transit.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/transit.py#L734-L757
def backup_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
    """Return a plaintext backup of a named key.

    The backup contains all the configuration data and keys of all the
    versions along with the HMAC key. The response from this endpoint can
    be used with the /restore endpoint to restore the key.

    Supported methods:
        GET: /{mount_point}/backup/{name}. Produces: 200 application/json

    :param name: Name of the key.
    :type name: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The parsed JSON body of the response.
    :rtype: dict
    """
    api_path = '/v1/{mount_point}/backup/{name}'.format(
        mount_point=mount_point, name=name)
    return self._adapter.get(url=api_path).json()
[ "def", "backup_key", "(", "self", ",", "name", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "api_path", "=", "'/v1/{mount_point}/backup/{name}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "name", "=", "name", ",", ")", "response", ...
Return a plaintext backup of a named key. The backup contains all the configuration data and keys of all the versions along with the HMAC key. The response from this endpoint can be used with the /restore endpoint to restore the key. Supported methods: GET: /{mount_point}/backup/{name}. Produces: 200 application/json :param name: Name of the key. :type name: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response
[ "Return", "a", "plaintext", "backup", "of", "a", "named", "key", "." ]
python
train
38.583333
gem/oq-engine
openquake/hmtk/seismicity/gcmt_catalogue.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/gcmt_catalogue.py#L193-L206
def _to_ned(self): """ Switches the reference frame to NED """ if self.ref_frame is 'USE': # Rotate return utils.use_to_ned(self.tensor), \ utils.use_to_ned(self.tensor_sigma) elif self.ref_frame is 'NED': # Alreadt NED return self.tensor, self.tensor_sigma else: raise ValueError('Reference frame %s not recognised - cannot ' 'transform to NED!' % self.ref_frame)
[ "def", "_to_ned", "(", "self", ")", ":", "if", "self", ".", "ref_frame", "is", "'USE'", ":", "# Rotate", "return", "utils", ".", "use_to_ned", "(", "self", ".", "tensor", ")", ",", "utils", ".", "use_to_ned", "(", "self", ".", "tensor_sigma", ")", "eli...
Switches the reference frame to NED
[ "Switches", "the", "reference", "frame", "to", "NED" ]
python
train
36
rikrd/inspire
inspirespeech/htk.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk.py#L283-L293
def main():
    """Test code called from commandline"""
    model = load_model('../data/hmmdefs')
    hmm = model.hmms['r-We']
    # Dump each state name followed by its mean vector.
    for name in hmm.state_names:
        print(name)
        print(model.states[name].means_)
    print(model)
    print(load_model('../data/prior.hmm1mixSI.rate32'))
[ "def", "main", "(", ")", ":", "model", "=", "load_model", "(", "'../data/hmmdefs'", ")", "hmm", "=", "model", ".", "hmms", "[", "'r-We'", "]", "for", "state_name", "in", "hmm", ".", "state_names", ":", "print", "(", "state_name", ")", "state", "=", "mo...
Test code called from commandline
[ "Test", "code", "called", "from", "commandline" ]
python
train
31.181818
anthok/overwatch-api
overwatch_api/core.py
https://github.com/anthok/overwatch-api/blob/aba976a3c07c4932de13f4236d924b2901b149b9/overwatch_api/core.py#L98-L114
async def get_stats(self, battletag: str,
                    regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
                    platform=None, _session=None, handle_ratelimit=None,
                    max_tries=None, request_timeout=None):
    """Returns the stats for the profiles on the specified regions and
    platform.

    The format for regions without a matching user is the same as in
    get_profile.  The stats are returned in a dictionary with a similar
    format to what
    https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats
    specifies."""
    if platform is None:
        platform = self.default_platform
    try:
        blob_dict = await self._base_request(
            battletag, "stats", _session, platform=platform,
            handle_ratelimit=handle_ratelimit, max_tries=max_tries,
            request_timeout=request_timeout)
    except ProfileNotFoundError:
        # The battletag doesn't exist
        blob_dict = {}
    result = {}
    for region, blob in blob_dict.items():
        # Skip regions without a profile, the request metadata entry, and
        # regions the caller did not ask for.
        if blob is None or region == "_request" or region not in regions:
            continue
        stats_values = [v for k, v in blob.items() if k == "stats"]
        result[region] = stats_values[0]
    return result
[ "async", "def", "get_stats", "(", "self", ",", "battletag", ":", "str", ",", "regions", "=", "(", "EUROPE", ",", "KOREA", ",", "AMERICAS", ",", "CHINA", ",", "JAPAN", ",", "ANY", ")", ",", "platform", "=", "None", ",", "_session", "=", "None", ",", ...
Returns the stats for the profiles on the specified regions and platform. The format for regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.
[ "Returns", "the", "stats", "for", "the", "profiles", "on", "the", "specified", "regions", "and", "platform", ".", "The", "format", "for", "regions", "without", "a", "matching", "user", "the", "format", "is", "the", "same", "as", "get_profile", ".", "The", ...
python
train
77.352941
nfcpy/nfcpy
src/nfc/tag/tt1.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt1.py#L436-L450
def write_block(self, block, data, erase=True):
    """Write an 8-byte data block at address (block * 8). The target
    bytes are zero'd first if *erase* is True.
    """
    if not 0 <= block <= 255:
        raise ValueError("invalid block number")
    log.debug("write block {0}".format(block))
    # Opcode 0x54 erases before writing; 0x1B writes without erasing.
    opcode = "\x54" if erase is True else "\x1B"
    rsp = self.transceive(opcode + chr(block) + data + self.uid)
    if len(rsp) < 9:
        raise Type1TagCommandError(RESPONSE_ERROR)
    # With erase, the tag echoes back the written data for verification.
    if erase is True and rsp[1:9] != data:
        raise Type1TagCommandError(WRITE_ERROR)
[ "def", "write_block", "(", "self", ",", "block", ",", "data", ",", "erase", "=", "True", ")", ":", "if", "block", "<", "0", "or", "block", ">", "255", ":", "raise", "ValueError", "(", "\"invalid block number\"", ")", "log", ".", "debug", "(", "\"write ...
Write an 8-byte data block at address (block * 8). The target bytes are zero'd first if *erase* is True.
[ "Write", "an", "8", "-", "byte", "data", "block", "at", "address", "(", "block", "*", "8", ")", ".", "The", "target", "bytes", "are", "zero", "d", "first", "if", "*", "erase", "*", "is", "True", "." ]
python
train
41.6
dylanaraps/bum
bum/song.py
https://github.com/dylanaraps/bum/blob/004d795a67398e79f2c098d7775e9cd97231646b/bum/song.py#L12-L22
def init(port=6600, server="localhost"):
    """Initialize mpd."""
    client = mpd.MPDClient()
    try:
        client.connect(server, port)
    except ConnectionRefusedError:
        print("error: Connection refused to mpd/mopidy.")
        # Hard exit: no connection means nothing else can work.
        os._exit(1)
    return client
[ "def", "init", "(", "port", "=", "6600", ",", "server", "=", "\"localhost\"", ")", ":", "client", "=", "mpd", ".", "MPDClient", "(", ")", "try", ":", "client", ".", "connect", "(", "server", ",", "port", ")", "return", "client", "except", "ConnectionRe...
Initialize mpd.
[ "Initialize", "mpd", "." ]
python
train
24.363636
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L2224-L2240
def scan_chain_len(self, scan_chain):
    """Retrieves and returns the number of bits in the scan chain.

    Args:
      self (JLink): the ``JLink`` instance
      scan_chain (int): scan chain to be measured

    Returns:
      Number of bits in the specified scan chain.

    Raises:
      JLinkException: on error (negative DLL return code).
    """
    length = self._dll.JLINKARM_MeasureSCLen(scan_chain)
    if length < 0:
        raise errors.JLinkException(length)
    return length
[ "def", "scan_chain_len", "(", "self", ",", "scan_chain", ")", ":", "res", "=", "self", ".", "_dll", ".", "JLINKARM_MeasureSCLen", "(", "scan_chain", ")", "if", "res", "<", "0", ":", "raise", "errors", ".", "JLinkException", "(", "res", ")", "return", "re...
Retrieves and returns the number of bits in the scan chain. Args: self (JLink): the ``JLink`` instance scan_chain (int): scan chain to be measured Returns: Number of bits in the specified scan chain. Raises: JLinkException: on error.
[ "Retrieves", "and", "returns", "the", "number", "of", "bits", "in", "the", "scan", "chain", "." ]
python
train
28.647059
polysquare/cmake-ast
cmakeast/ast.py
https://github.com/polysquare/cmake-ast/blob/431a32d595d76f1f8f993eb6ddcc79effbadff9d/cmakeast/ast.py#L671-L683
def consume_token(self, tokens, index, tokens_len):
    """Consume a token.

    Returns a (tokens, tokens_len, index) tuple once consumption is
    complete and the RST tokens have been merged; returns None otherwise.
    """
    del tokens_len  # unused; kept for interface compatibility
    if tokens[index].type != TokenType.EndInlineRST:
        return None
    return _paste_tokens_line_by_line(tokens, TokenType.RST,
                                      self.begin, index + 1)
[ "def", "consume_token", "(", "self", ",", "tokens", ",", "index", ",", "tokens_len", ")", ":", "del", "tokens_len", "if", "tokens", "[", "index", "]", ".", "type", "==", "TokenType", ".", "EndInlineRST", ":", "return", "_paste_tokens_line_by_line", "(", "tok...
Consume a token. Returns a tuple of (tokens, tokens_len, index) when consumption is completed and tokens have been merged together.
[ "Consume", "a", "token", "." ]
python
train
40.153846
PmagPy/PmagPy
pmagpy/pmagplotlib.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmagplotlib.py#L2872-L2964
def add_borders(Figs, titles, border_color='#000000', text_color='#800080', con_id=""): """ Formatting for generating plots on the server Default border color: black Default text color: purple """ def split_title(s): """ Add '\n's to split of overly long titles """ s_list = s.split(",") lines = [] tot = 0 line = [] for i in s_list: tot += len(i) if tot < 30: line.append(i + ",") else: lines.append(" ".join(line)) line = [i] tot = 0 lines.append(" ".join(line)) return "\n".join(lines).strip(',') # format contribution id if available if con_id: if not str(con_id).startswith("/"): con_id = "/" + str(con_id) import datetime now = datetime.datetime.utcnow() for key in list(Figs.keys()): fig = plt.figure(Figs[key]) plot_title = split_title(titles[key]).strip().strip('\n') fig.set_figheight(5.5) #get returns Bbox with x0, y0, x1, y1 pos = fig.gca().get_position() # tweak some of the default values w = pos.x1 - pos.x0 h = (pos.y1 - pos.y0) / 1.1 x = pos.x0 y = pos.y0 * 1.3 # set takes: left, bottom, width, height fig.gca().set_position([x, y, w, h]) # add an axis covering the entire figure border_ax = fig.add_axes([0, 0, 1, 1]) border_ax.set_frame_on(False) border_ax.set_xticks([]) border_ax.set_yticks([]) # add a border if "\n" in plot_title: y_val = 1.0 # lower border #fig.set_figheight(6.25) else: y_val = 1.04 # higher border #border_ax.text(-0.02, y_val, " # |", # horizontalalignment='left', # verticalalignment='top', # color=text_color, # bbox=dict(edgecolor=border_color, # facecolor='#FFFFFF', linewidth=0.25), # size=50) #border_ax.text(-0.02, 0, "| # |", # horizontalalignment='left', # verticalalignment='bottom', # color=text_color, # bbox=dict(edgecolor=border_color, # facecolor='#FFFFFF', linewidth=0.25), # size=20)#18) # add text border_ax.text((4. 
/ fig.get_figwidth()) * 0.015, 0.03, now.strftime("%Y-%m-%d, %I:%M:%S {}".format('UT')), horizontalalignment='left', verticalalignment='top', color=text_color, size=10) border_ax.text(0.5, 0.98, plot_title, horizontalalignment='center', verticalalignment='top', color=text_color, size=20) border_ax.text(1 - (4. / fig.get_figwidth()) * 0.015, 0.03, 'earthref.org/MagIC{}'.format(con_id), horizontalalignment='right', verticalalignment='top', color=text_color, size=10) return Figs
[ "def", "add_borders", "(", "Figs", ",", "titles", ",", "border_color", "=", "'#000000'", ",", "text_color", "=", "'#800080'", ",", "con_id", "=", "\"\"", ")", ":", "def", "split_title", "(", "s", ")", ":", "\"\"\"\n Add '\\n's to split of overly long titles...
Formatting for generating plots on the server Default border color: black Default text color: purple
[ "Formatting", "for", "generating", "plots", "on", "the", "server", "Default", "border", "color", ":", "black", "Default", "text", "color", ":", "purple" ]
python
train
38.677419
mjg59/python-broadlink
broadlink/__init__.py
https://github.com/mjg59/python-broadlink/blob/1d6d8d2aee6e221aa3383e4078b19b7b95397f43/broadlink/__init__.py#L374-L382
def set_power(self, state): """Sets the power state of the smart plug.""" packet = bytearray(16) packet[0] = 2 if self.check_nightlight(): packet[4] = 3 if state else 2 else: packet[4] = 1 if state else 0 self.send_packet(0x6a, packet)
[ "def", "set_power", "(", "self", ",", "state", ")", ":", "packet", "=", "bytearray", "(", "16", ")", "packet", "[", "0", "]", "=", "2", "if", "self", ".", "check_nightlight", "(", ")", ":", "packet", "[", "4", "]", "=", "3", "if", "state", "else"...
Sets the power state of the smart plug.
[ "Sets", "the", "power", "state", "of", "the", "smart", "plug", "." ]
python
train
29.222222
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py#L68-L139
def order_shares(id_or_ins, amount, price=None, style=None): """ 落指定股数的买/卖单,最常见的落单方式之一。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market order)。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` :param int amount: 下单量, 正数代表买入,负数代表卖出。将会根据一手xx股来向下调整到一手的倍数,比如中国A股就是调整成100股的倍数。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #购买Buy 2000 股的平安银行股票,并以市价单发送: order_shares('000001.XSHE', 2000) #卖出2000股的平安银行股票,并以市价单发送: order_shares('000001.XSHE', -2000) #购买1000股的平安银行股票,并以限价单发送,价格为¥10: order_shares('000001.XSHG', 1000, style=LimitOrder(10)) """ if amount == 0: # 如果下单量为0,则认为其并没有发单,则直接返回None user_system_log.warn(_(u"Order Creation Failed: Order amount is 0.")) return None style = cal_style(price, style) if isinstance(style, LimitOrder): if style.get_limit_price() <= 0: raise RQInvalidArgument(_(u"Limit order price should be positive")) order_book_id = assure_stock_order_book_id(id_or_ins) env = Environment.get_instance() price = env.get_last_price(order_book_id) if not is_valid_price(price): user_system_log.warn( _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=order_book_id)) return if amount > 0: side = SIDE.BUY position_effect = POSITION_EFFECT.OPEN else: amount = abs(amount) side = SIDE.SELL position_effect = POSITION_EFFECT.CLOSE if side == SIDE.BUY: # 卖出不再限制 round_lot, order_shares 不再依赖 portfolio round_lot = int(env.get_instrument(order_book_id).round_lot) try: amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot except ValueError: amount = 0 r_order = Order.__from_create__(order_book_id, amount, side, style, position_effect) if amount == 0: # 如果计算出来的下单量为0, 则不生成Order, 直接返回None # 因为很多策略会直接在handle_bar里面执行order_target_percent之类的函数,经常会出现下一个量为0的订单,如果这些订单都生成是没有意义的。 user_system_log.warn(_(u"Order 
Creation Failed: 0 order quantity")) return if r_order.type == ORDER_TYPE.MARKET: r_order.set_frozen_price(price) if env.can_submit_order(r_order): env.broker.submit_order(r_order) return r_order
[ "def", "order_shares", "(", "id_or_ins", ",", "amount", ",", "price", "=", "None", ",", "style", "=", "None", ")", ":", "if", "amount", "==", "0", ":", "# 如果下单量为0,则认为其并没有发单,则直接返回None", "user_system_log", ".", "warn", "(", "_", "(", "u\"Order Creation Failed: O...
落指定股数的买/卖单,最常见的落单方式之一。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market order)。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` :param int amount: 下单量, 正数代表买入,负数代表卖出。将会根据一手xx股来向下调整到一手的倍数,比如中国A股就是调整成100股的倍数。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #购买Buy 2000 股的平安银行股票,并以市价单发送: order_shares('000001.XSHE', 2000) #卖出2000股的平安银行股票,并以市价单发送: order_shares('000001.XSHE', -2000) #购买1000股的平安银行股票,并以限价单发送,价格为¥10: order_shares('000001.XSHG', 1000, style=LimitOrder(10))
[ "落指定股数的买", "/", "卖单,最常见的落单方式之一。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market", "order)。" ]
python
train
34.833333
Robpol86/libnl
libnl/genl/mngt.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/mngt.py#L136-L148
def lookup_family(family): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L94. Positional arguments: family -- integer. Returns: genl_ops class instance or None. """ for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, 'o_list'): if ops.o_id == family: return ops return None
[ "def", "lookup_family", "(", "family", ")", ":", "for", "ops", "in", "nl_list_for_each_entry", "(", "genl_ops", "(", ")", ",", "genl_ops_list", ",", "'o_list'", ")", ":", "if", "ops", ".", "o_id", "==", "family", ":", "return", "ops", "return", "None" ]
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L94. Positional arguments: family -- integer. Returns: genl_ops class instance or None.
[ "https", ":", "//", "github", ".", "com", "/", "thom311", "/", "libnl", "/", "blob", "/", "libnl3_2_25", "/", "lib", "/", "genl", "/", "mngt", ".", "c#L94", "." ]
python
train
26.692308
cogniteev/docido-python-sdk
docido_sdk/toolbox/ha.py
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/ha.py#L74-L99
def catch(cls, catch_exception, config='default'): """Decorator class method catching exceptions raised by the wrapped member function. When exception is caught, the decorator waits for an amount of time specified in the `ha_config`. :param catch_exception: Exception class or tuple of exception classes. """ def wrap(method): @functools.wraps(method) def wrapped_method(self, *args, **kwargs): assert isinstance(self, HA) delay_policy = self.ha_get_delay_policy(config) max_retries = self.ha_get_config(config).max_retries for retries in itertools.count(): try: return method(self, *args, **kwargs) except catch_exception as e: res = self.ha_on_error(method, e, args, kwargs) if res is not None: args, kwargs = res if max_retries and retries >= max_retries: raise tts = next(delay_policy) time.sleep(tts) return wrapped_method return wrap
[ "def", "catch", "(", "cls", ",", "catch_exception", ",", "config", "=", "'default'", ")", ":", "def", "wrap", "(", "method", ")", ":", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapped_method", "(", "self", ",", "*", "args", ",", "*...
Decorator class method catching exceptions raised by the wrapped member function. When exception is caught, the decorator waits for an amount of time specified in the `ha_config`. :param catch_exception: Exception class or tuple of exception classes.
[ "Decorator", "class", "method", "catching", "exceptions", "raised", "by", "the", "wrapped", "member", "function", ".", "When", "exception", "is", "caught", "the", "decorator", "waits", "for", "an", "amount", "of", "time", "specified", "in", "the", "ha_config", ...
python
train
46.576923
smarie/python-parsyfiles
parsyfiles/plugins_optional/support_for_attrs.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_optional/support_for_attrs.py#L6-L33
def _guess_type_from_validator(validator): """ Utility method to return the declared type of an attribute or None. It handles _OptionalValidator and _AndValidator in order to unpack the validators. :param validator: :return: the type of attribute declared in an inner 'instance_of' validator (if any is found, the first one is used) or None if no inner 'instance_of' validator is found """ if isinstance(validator, _OptionalValidator): # Optional : look inside return _guess_type_from_validator(validator.validator) elif isinstance(validator, _AndValidator): # Sequence : try each of them for v in validator.validators: typ = _guess_type_from_validator(v) if typ is not None: return typ return None elif isinstance(validator, _InstanceOfValidator): # InstanceOf validator : found it ! return validator.type else: # we could not find the type return None
[ "def", "_guess_type_from_validator", "(", "validator", ")", ":", "if", "isinstance", "(", "validator", ",", "_OptionalValidator", ")", ":", "# Optional : look inside", "return", "_guess_type_from_validator", "(", "validator", ".", "validator", ")", "elif", "isinstance",...
Utility method to return the declared type of an attribute or None. It handles _OptionalValidator and _AndValidator in order to unpack the validators. :param validator: :return: the type of attribute declared in an inner 'instance_of' validator (if any is found, the first one is used) or None if no inner 'instance_of' validator is found
[ "Utility", "method", "to", "return", "the", "declared", "type", "of", "an", "attribute", "or", "None", ".", "It", "handles", "_OptionalValidator", "and", "_AndValidator", "in", "order", "to", "unpack", "the", "validators", "." ]
python
train
35.214286
rkhleics/wagtailmodeladmin
wagtailmodeladmin/views.py
https://github.com/rkhleics/wagtailmodeladmin/blob/7fddc853bab2ff3868b8c7a03329308c55f16358/wagtailmodeladmin/views.py#L297-L327
def get_search_results(self, request, queryset, search_term): """ Returns a tuple containing a queryset to implement the search, and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. def construct_search(field_name): if field_name.startswith('^'): return "%s__istartswith" % field_name[1:] elif field_name.startswith('='): return "%s__iexact" % field_name[1:] elif field_name.startswith('@'): return "%s__search" % field_name[1:] else: return "%s__icontains" % field_name use_distinct = False if self.search_fields and search_term: orm_lookups = [construct_search(str(search_field)) for search_field in self.search_fields] for bit in search_term.split(): or_queries = [models.Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups] queryset = queryset.filter(reduce(operator.or_, or_queries)) if not use_distinct: for search_spec in orm_lookups: if lookup_needs_distinct(self.opts, search_spec): use_distinct = True break return queryset, use_distinct
[ "def", "get_search_results", "(", "self", ",", "request", ",", "queryset", ",", "search_term", ")", ":", "# Apply keyword searches.", "def", "construct_search", "(", "field_name", ")", ":", "if", "field_name", ".", "startswith", "(", "'^'", ")", ":", "return", ...
Returns a tuple containing a queryset to implement the search, and a boolean indicating if the results may contain duplicates.
[ "Returns", "a", "tuple", "containing", "a", "queryset", "to", "implement", "the", "search", "and", "a", "boolean", "indicating", "if", "the", "results", "may", "contain", "duplicates", "." ]
python
train
43.677419
ungarj/s2reader
s2reader/s2reader.py
https://github.com/ungarj/s2reader/blob/376fd7ee1d15cce0849709c149d694663a7bc0ef/s2reader/s2reader.py#L321-L339
def footprint(self): """Find and return footprint as Shapely Polygon.""" # Check whether product or granule footprint needs to be calculated. tile_geocoding = self._metadata.iter("Tile_Geocoding").next() resolution = 10 searchstring = ".//*[@resolution='%s']" % resolution size, geoposition = tile_geocoding.findall(searchstring) nrows, ncols = (int(i.text) for i in size) ulx, uly, xdim, ydim = (int(i.text) for i in geoposition) lrx = ulx + nrows * resolution lry = uly - ncols * resolution utm_footprint = box(ulx, lry, lrx, uly) project = partial( pyproj.transform, pyproj.Proj(init=self.srid), pyproj.Proj(init='EPSG:4326') ) footprint = transform(project, utm_footprint).buffer(0) return footprint
[ "def", "footprint", "(", "self", ")", ":", "# Check whether product or granule footprint needs to be calculated.", "tile_geocoding", "=", "self", ".", "_metadata", ".", "iter", "(", "\"Tile_Geocoding\"", ")", ".", "next", "(", ")", "resolution", "=", "10", "searchstri...
Find and return footprint as Shapely Polygon.
[ "Find", "and", "return", "footprint", "as", "Shapely", "Polygon", "." ]
python
train
44.473684
pytroll/satpy
satpy/readers/seviri_l1b_native.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/seviri_l1b_native.py#L159-L236
def _read_header(self): """Read the header info""" data = np.fromfile(self.filename, dtype=native_header, count=1) self.header.update(recarray2dict(data)) data15hd = self.header['15_DATA_HEADER'] sec15hd = self.header['15_SECONDARY_PRODUCT_HEADER'] # Set the list of available channels: self.mda['available_channels'] = get_available_channels(self.header) self.mda['channel_list'] = [i for i in CHANNEL_NAMES.values() if self.mda['available_channels'][i]] self.platform_id = data15hd[ 'SatelliteStatus']['SatelliteDefinition']['SatelliteId'] self.mda['platform_name'] = "Meteosat-" + SATNUM[self.platform_id] equator_radius = data15hd['GeometricProcessing'][ 'EarthModel']['EquatorialRadius'] * 1000. north_polar_radius = data15hd[ 'GeometricProcessing']['EarthModel']['NorthPolarRadius'] * 1000. south_polar_radius = data15hd[ 'GeometricProcessing']['EarthModel']['SouthPolarRadius'] * 1000. polar_radius = (north_polar_radius + south_polar_radius) * 0.5 ssp_lon = data15hd['ImageDescription'][ 'ProjectionDescription']['LongitudeOfSSP'] self.mda['projection_parameters'] = {'a': equator_radius, 'b': polar_radius, 'h': 35785831.00, 'ssp_longitude': ssp_lon} north = int(sec15hd['NorthLineSelectedRectangle']['Value']) east = int(sec15hd['EastColumnSelectedRectangle']['Value']) south = int(sec15hd['SouthLineSelectedRectangle']['Value']) west = int(sec15hd['WestColumnSelectedRectangle']['Value']) ncolumns = west - east + 1 nrows = north - south + 1 # check if the file has less rows or columns than # the maximum, if so it is an area of interest file if (nrows < VISIR_NUM_LINES) or (ncolumns < VISIR_NUM_COLUMNS): self.mda['is_full_disk'] = False # If the number of columns in the file is not divisible by 4, # UMARF will add extra columns to the file modulo = ncolumns % 4 padding = 0 if modulo > 0: padding = 4 - modulo cols_visir = ncolumns + padding # Check the VISIR calculated column dimension against # the header information cols_visir_hdr = int(sec15hd['NumberColumnsVISIR']['Value']) 
if cols_visir_hdr != cols_visir: logger.warning( "Number of VISIR columns from the header is incorrect!") logger.warning("Header: %d", cols_visir_hdr) logger.warning("Calculated: = %d", cols_visir) # HRV Channel - check if the area is reduced in east west # direction as this affects the number of columns in the file cols_hrv_hdr = int(sec15hd['NumberColumnsHRV']['Value']) if ncolumns < VISIR_NUM_COLUMNS: cols_hrv = cols_hrv_hdr else: cols_hrv = int(cols_hrv_hdr / 2) # self.mda represents the 16bit dimensions not 10bit self.mda['number_of_lines'] = int(sec15hd['NumberLinesVISIR']['Value']) self.mda['number_of_columns'] = cols_visir self.mda['hrv_number_of_lines'] = int(sec15hd["NumberLinesHRV"]['Value']) self.mda['hrv_number_of_columns'] = cols_hrv
[ "def", "_read_header", "(", "self", ")", ":", "data", "=", "np", ".", "fromfile", "(", "self", ".", "filename", ",", "dtype", "=", "native_header", ",", "count", "=", "1", ")", "self", ".", "header", ".", "update", "(", "recarray2dict", "(", "data", ...
Read the header info
[ "Read", "the", "header", "info" ]
python
train
43.730769
google/grr
grr/server/grr_response_server/databases/mysql_migration.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_migration.py#L28-L31
def _MigrationFilenameToInt(fname): """Converts migration filename to a migration number.""" base, _ = os.path.splitext(fname) return int(base)
[ "def", "_MigrationFilenameToInt", "(", "fname", ")", ":", "base", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "return", "int", "(", "base", ")" ]
Converts migration filename to a migration number.
[ "Converts", "migration", "filename", "to", "a", "migration", "number", "." ]
python
train
36.5
johnnoone/aioconsul
aioconsul/client/kv_endpoint.py
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/kv_endpoint.py#L525-L540
def delete_cas(self, key, *, index): """Deletes the Key with check-and-set semantics. Parameters: key (str): Key to delete index (ObjectIndex): Index ID The Key will only be deleted if its current modify index matches the supplied Index """ self.append({ "Verb": "delete-cas", "Key": key, "Index": extract_attr(index, keys=["ModifyIndex", "Index"]) }) return self
[ "def", "delete_cas", "(", "self", ",", "key", ",", "*", ",", "index", ")", ":", "self", ".", "append", "(", "{", "\"Verb\"", ":", "\"delete-cas\"", ",", "\"Key\"", ":", "key", ",", "\"Index\"", ":", "extract_attr", "(", "index", ",", "keys", "=", "["...
Deletes the Key with check-and-set semantics. Parameters: key (str): Key to delete index (ObjectIndex): Index ID The Key will only be deleted if its current modify index matches the supplied Index
[ "Deletes", "the", "Key", "with", "check", "-", "and", "-", "set", "semantics", "." ]
python
train
29.625
gr33ndata/dysl
dysl/social.py
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L71-L83
def normalize(self, text): """ Normalizes text. Converts to lowercase, Unicode NFC normalization and removes mentions and links :param text: Text to be normalized. """ #print 'Normalize...\n' text = text.lower() text = unicodedata.normalize('NFC', text) text = self.strip_mentions_links(text) return text
[ "def", "normalize", "(", "self", ",", "text", ")", ":", "#print 'Normalize...\\n'", "text", "=", "text", ".", "lower", "(", ")", "text", "=", "unicodedata", ".", "normalize", "(", "'NFC'", ",", "text", ")", "text", "=", "self", ".", "strip_mentions_links",...
Normalizes text. Converts to lowercase, Unicode NFC normalization and removes mentions and links :param text: Text to be normalized.
[ "Normalizes", "text", ".", "Converts", "to", "lowercase", "Unicode", "NFC", "normalization", "and", "removes", "mentions", "and", "links" ]
python
train
30.538462
lemieuxl/pyGenClean
pyGenClean/PlinkUtils/plot_MDS.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/PlinkUtils/plot_MDS.py#L229-L256
def checkArgs(args): """Checks the arguments and options. :param args: an object containing the options of the program. :type args: argparse.Namespace :returns: ``True`` if everything was OK. If there is a problem with an option, an exception is raised using the :py:class:`ProgramError` class, a message is printed to the :class:`sys.stderr` and the program exists with code 1. """ # Check in input file if not os.path.isfile(args.file): msg = "%s: no such file" % args.file raise ProgramError(msg) # Check the population file if args.population_file is None: msg = "population-file: no population file" raise ProgramError(msg) elif not os.path.isfile(args.population_file): msg = "%s: no such file" % args.population_file raise ProgramError(msg) return True
[ "def", "checkArgs", "(", "args", ")", ":", "# Check in input file", "if", "not", "os", ".", "path", ".", "isfile", "(", "args", ".", "file", ")", ":", "msg", "=", "\"%s: no such file\"", "%", "args", ".", "file", "raise", "ProgramError", "(", "msg", ")",...
Checks the arguments and options. :param args: an object containing the options of the program. :type args: argparse.Namespace :returns: ``True`` if everything was OK. If there is a problem with an option, an exception is raised using the :py:class:`ProgramError` class, a message is printed to the :class:`sys.stderr` and the program exists with code 1.
[ "Checks", "the", "arguments", "and", "options", "." ]
python
train
30.107143
tensorflow/hub
tensorflow_hub/resolver.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L103-L122
def _print_download_progress_msg(self, msg, flush=False): """Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode). """ if self._interactive_mode(): # Print progress message to console overwriting previous progress # message. self._max_prog_str = max(self._max_prog_str, len(msg)) sys.stdout.write("\r%-{}s".format(self._max_prog_str) % msg) sys.stdout.flush() if flush: print("\n") else: # Interactive progress tracking is disabled. Print progress to the # standard TF log. logging.info(msg)
[ "def", "_print_download_progress_msg", "(", "self", ",", "msg", ",", "flush", "=", "False", ")", ":", "if", "self", ".", "_interactive_mode", "(", ")", ":", "# Print progress message to console overwriting previous progress", "# message.", "self", ".", "_max_prog_str", ...
Prints a message about download progress either to the console or TF log. Args: msg: Message to print. flush: Indicates whether to flush the output (only used in interactive mode).
[ "Prints", "a", "message", "about", "download", "progress", "either", "to", "the", "console", "or", "TF", "log", "." ]
python
train
35.35
django-admin-tools/django-admin-tools
admin_tools/utils.py
https://github.com/django-admin-tools/django-admin-tools/blob/ba6f46f51ebd84fcf84f2f79ec9487f45452d79b/admin_tools/utils.py#L166-L173
def _get_admin_change_url(self, model, context): """ Returns the admin change url. """ app_label = model._meta.app_label return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context), app_label, model.__name__.lower()))
[ "def", "_get_admin_change_url", "(", "self", ",", "model", ",", "context", ")", ":", "app_label", "=", "model", ".", "_meta", ".", "app_label", "return", "reverse", "(", "'%s:%s_%s_changelist'", "%", "(", "get_admin_site_name", "(", "context", ")", ",", "app_l...
Returns the admin change url.
[ "Returns", "the", "admin", "change", "url", "." ]
python
train
44.375
phoebe-project/phoebe2
phoebe/atmospheres/passbands.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/atmospheres/passbands.py#L359-L379
def _bb_intensity(self, Teff, photon_weighted=False): """ Computes mean passband intensity using blackbody atmosphere: I_pb^E = \int_\lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda P(\lambda) d\lambda I_pb^P = \int_\lambda \lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda \lambda P(\lambda) d\lambda Superscripts E and P stand for energy and photon, respectively. @Teff: effective temperature in K @photon_weighted: photon/energy switch Returns: mean passband intensity using blackbody atmosphere. """ if photon_weighted: pb = lambda w: w*self._planck(w, Teff)*self.ptf(w) return integrate.quad(pb, self.wl[0], self.wl[-1])[0]/self.ptf_photon_area else: pb = lambda w: self._planck(w, Teff)*self.ptf(w) return integrate.quad(pb, self.wl[0], self.wl[-1])[0]/self.ptf_area
[ "def", "_bb_intensity", "(", "self", ",", "Teff", ",", "photon_weighted", "=", "False", ")", ":", "if", "photon_weighted", ":", "pb", "=", "lambda", "w", ":", "w", "*", "self", ".", "_planck", "(", "w", ",", "Teff", ")", "*", "self", ".", "ptf", "(...
Computes mean passband intensity using blackbody atmosphere: I_pb^E = \int_\lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda P(\lambda) d\lambda I_pb^P = \int_\lambda \lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda \lambda P(\lambda) d\lambda Superscripts E and P stand for energy and photon, respectively. @Teff: effective temperature in K @photon_weighted: photon/energy switch Returns: mean passband intensity using blackbody atmosphere.
[ "Computes", "mean", "passband", "intensity", "using", "blackbody", "atmosphere", ":" ]
python
train
42.952381
saltstack/salt
salt/modules/sysbench.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysbench.py#L59-L85
def cpu(): ''' Tests for the CPU performance of minions. CLI Examples: .. code-block:: bash salt '*' sysbench.cpu ''' # Test data max_primes = [500, 1000, 2500, 5000] # Initializing the test variables test_command = 'sysbench --test=cpu --cpu-max-prime={0} run' result = None ret_val = {} # Test beings! for primes in max_primes: key = 'Prime numbers limit: {0}'.format(primes) run_command = test_command.format(primes) result = __salt__['cmd.run'](run_command) ret_val[key] = _parser(result) return ret_val
[ "def", "cpu", "(", ")", ":", "# Test data", "max_primes", "=", "[", "500", ",", "1000", ",", "2500", ",", "5000", "]", "# Initializing the test variables", "test_command", "=", "'sysbench --test=cpu --cpu-max-prime={0} run'", "result", "=", "None", "ret_val", "=", ...
Tests for the CPU performance of minions. CLI Examples: .. code-block:: bash salt '*' sysbench.cpu
[ "Tests", "for", "the", "CPU", "performance", "of", "minions", "." ]
python
train
21.62963
sijis/sumologic-python
src/sumologic/utils.py
https://github.com/sijis/sumologic-python/blob/b50200907837f0d452d14ead5e647b8e24e2e9e5/src/sumologic/utils.py#L4-L9
def get_logging_level(debug): """Returns logging level based on boolean""" level = logging.INFO if debug: level = logging.DEBUG return level
[ "def", "get_logging_level", "(", "debug", ")", ":", "level", "=", "logging", ".", "INFO", "if", "debug", ":", "level", "=", "logging", ".", "DEBUG", "return", "level" ]
Returns logging level based on boolean
[ "Returns", "logging", "level", "based", "on", "boolean" ]
python
train
26.5
rootpy/rootpy
rootpy/plotting/hist.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/hist.py#L1544-L1669
def quantiles(self, quantiles, axis=0, strict=False, recompute_integral=False): """ Calculate the quantiles of this histogram. Parameters ---------- quantiles : list or int A list of cumulative probabilities or an integer used to determine equally spaced values between 0 and 1 (inclusive). axis : int, optional (default=0) The axis to compute the quantiles along. 2D and 3D histograms are first projected along the desired axis before computing the quantiles. strict : bool, optional (default=False) If True, then return the sorted unique quantiles corresponding exactly to bin edges of this histogram. recompute_integral : bool, optional (default=False) If this histogram was filled with SetBinContent instead of Fill, then the integral must be computed before calculating the quantiles. Returns ------- output : list or numpy array If NumPy is importable then an array of the quantiles is returned, otherwise a list is returned. """ if axis >= self.GetDimension(): raise ValueError( "axis must be less than the dimensionality of the histogram") if recompute_integral: self.ComputeIntegral() if isinstance(self, _Hist2D): newname = '{0}_{1}'.format(self.__class__.__name__, uuid()) if axis == 0: proj = self.ProjectionX(newname, 1, self.nbins(1)) elif axis == 1: proj = self.ProjectionY(newname, 1, self.nbins(0)) else: raise ValueError("axis must be 0 or 1") return asrootpy(proj).quantiles( quantiles, strict=strict, recompute_integral=False) elif isinstance(self, _Hist3D): newname = '{0}_{1}'.format(self.__class__.__name__, uuid()) if axis == 0: proj = self.ProjectionX( newname, 1, self.nbins(1), 1, self.nbins(2)) elif axis == 1: proj = self.ProjectionY( newname, 1, self.nbins(0), 1, self.nbins(2)) elif axis == 2: proj = self.ProjectionZ( newname, 1, self.nbins(0), 1, self.nbins(1)) else: raise ValueError("axis must be 0, 1, or 2") return asrootpy(proj).quantiles( quantiles, strict=strict, recompute_integral=False) try: import numpy as np except ImportError: # use python 
implementation use_numpy = False else: use_numpy = True if isinstance(quantiles, int): num_quantiles = quantiles if use_numpy: qs = np.linspace(0, 1, num_quantiles) output = np.empty(num_quantiles, dtype=float) else: def linspace(start, stop, n): if n == 1: yield start return h = float(stop - start) / (n - 1) for i in range(n): yield start + h * i quantiles = list(linspace(0, 1, num_quantiles)) qs = array('d', quantiles) output = array('d', [0.] * num_quantiles) else: num_quantiles = len(quantiles) if use_numpy: qs = np.array(quantiles, dtype=float) output = np.empty(num_quantiles, dtype=float) else: qs = array('d', quantiles) output = array('d', [0.] * num_quantiles) if strict: integral = self.GetIntegral() nbins = self.nbins(0) if use_numpy: edges = np.empty(nbins + 1, dtype=float) self.GetLowEdge(edges) edges[-1] = edges[-2] + self.GetBinWidth(nbins) integral = np.ndarray((nbins + 1,), dtype=float, buffer=integral) idx = np.searchsorted(integral, qs, side='left') output = np.unique(np.take(edges, idx)) else: quantiles = list(set(qs)) quantiles.sort() output = [] ibin = 0 for quant in quantiles: # find first bin greater than or equal to quant while integral[ibin] < quant and ibin < nbins + 1: ibin += 1 edge = self.GetBinLowEdge(ibin + 1) output.append(edge) if ibin >= nbins + 1: break output = list(set(output)) output.sort() return output self.GetQuantiles(num_quantiles, output, qs) if use_numpy: return output return list(output)
[ "def", "quantiles", "(", "self", ",", "quantiles", ",", "axis", "=", "0", ",", "strict", "=", "False", ",", "recompute_integral", "=", "False", ")", ":", "if", "axis", ">=", "self", ".", "GetDimension", "(", ")", ":", "raise", "ValueError", "(", "\"axi...
Calculate the quantiles of this histogram. Parameters ---------- quantiles : list or int A list of cumulative probabilities or an integer used to determine equally spaced values between 0 and 1 (inclusive). axis : int, optional (default=0) The axis to compute the quantiles along. 2D and 3D histograms are first projected along the desired axis before computing the quantiles. strict : bool, optional (default=False) If True, then return the sorted unique quantiles corresponding exactly to bin edges of this histogram. recompute_integral : bool, optional (default=False) If this histogram was filled with SetBinContent instead of Fill, then the integral must be computed before calculating the quantiles. Returns ------- output : list or numpy array If NumPy is importable then an array of the quantiles is returned, otherwise a list is returned.
[ "Calculate", "the", "quantiles", "of", "this", "histogram", "." ]
python
train
39.055556
pvlib/pvlib-python
pvlib/location.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/location.py#L164-L235
def get_clearsky(self, times, model='ineichen', solar_position=None, dni_extra=None, **kwargs): """ Calculate the clear sky estimates of GHI, DNI, and/or DHI at this location. Parameters ---------- times: DatetimeIndex model: str, default 'ineichen' The clear sky model to use. Must be one of 'ineichen', 'haurwitz', 'simplified_solis'. solar_position : None or DataFrame, default None DataFrame with columns 'apparent_zenith', 'zenith', 'apparent_elevation'. dni_extra: None or numeric, default None If None, will be calculated from times. kwargs Extra parameters passed to the relevant functions. Climatological values are assumed in many cases. See source code for details! Returns ------- clearsky : DataFrame Column names are: ``ghi, dni, dhi``. """ if dni_extra is None: dni_extra = irradiance.get_extra_radiation(times) try: pressure = kwargs.pop('pressure') except KeyError: pressure = atmosphere.alt2pres(self.altitude) if solar_position is None: solar_position = self.get_solarposition(times, pressure=pressure, **kwargs) apparent_zenith = solar_position['apparent_zenith'] apparent_elevation = solar_position['apparent_elevation'] if model == 'ineichen': try: linke_turbidity = kwargs.pop('linke_turbidity') except KeyError: interp_turbidity = kwargs.pop('interp_turbidity', True) linke_turbidity = clearsky.lookup_linke_turbidity( times, self.latitude, self.longitude, interp_turbidity=interp_turbidity) try: airmass_absolute = kwargs.pop('airmass_absolute') except KeyError: airmass_absolute = self.get_airmass( times, solar_position=solar_position)['airmass_absolute'] cs = clearsky.ineichen(apparent_zenith, airmass_absolute, linke_turbidity, altitude=self.altitude, dni_extra=dni_extra, **kwargs) elif model == 'haurwitz': cs = clearsky.haurwitz(apparent_zenith) elif model == 'simplified_solis': cs = clearsky.simplified_solis( apparent_elevation, pressure=pressure, dni_extra=dni_extra, **kwargs) else: raise ValueError('{} is not a valid clear sky model. 
Must be ' 'one of ineichen, simplified_solis, haurwitz' .format(model)) return cs
[ "def", "get_clearsky", "(", "self", ",", "times", ",", "model", "=", "'ineichen'", ",", "solar_position", "=", "None", ",", "dni_extra", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "dni_extra", "is", "None", ":", "dni_extra", "=", "irradiance",...
Calculate the clear sky estimates of GHI, DNI, and/or DHI at this location. Parameters ---------- times: DatetimeIndex model: str, default 'ineichen' The clear sky model to use. Must be one of 'ineichen', 'haurwitz', 'simplified_solis'. solar_position : None or DataFrame, default None DataFrame with columns 'apparent_zenith', 'zenith', 'apparent_elevation'. dni_extra: None or numeric, default None If None, will be calculated from times. kwargs Extra parameters passed to the relevant functions. Climatological values are assumed in many cases. See source code for details! Returns ------- clearsky : DataFrame Column names are: ``ghi, dni, dhi``.
[ "Calculate", "the", "clear", "sky", "estimates", "of", "GHI", "DNI", "and", "/", "or", "DHI", "at", "this", "location", "." ]
python
train
38.986111
ttinies/sc2players
sc2players/playerManagement.py
https://github.com/ttinies/sc2players/blob/fd9b37c268bf1005d9ef73a25e65ed97c8b7895f/sc2players/playerManagement.py#L81-L92
def getKnownPlayers(reset=False): """identify all of the currently defined players""" global playerCache if not playerCache or reset: jsonFiles = os.path.join(c.PLAYERS_FOLDER, "*.json") for playerFilepath in glob.glob(jsonFiles): filename = os.path.basename(playerFilepath) name = re.sub("^player_", "", filename) name = re.sub("\.json$", "", name) player = PlayerRecord(name) playerCache[player.name] = player return playerCache
[ "def", "getKnownPlayers", "(", "reset", "=", "False", ")", ":", "global", "playerCache", "if", "not", "playerCache", "or", "reset", ":", "jsonFiles", "=", "os", ".", "path", ".", "join", "(", "c", ".", "PLAYERS_FOLDER", ",", "\"*.json\"", ")", "for", "pl...
identify all of the currently defined players
[ "identify", "all", "of", "the", "currently", "defined", "players" ]
python
train
42.666667
bethgelab/foolbox
foolbox/models/base.py
https://github.com/bethgelab/foolbox/blob/8ab54248c70e45d8580a7d9ee44c9c0fb5755c4a/foolbox/models/base.py#L104-L117
def _process_gradient(self, backward, dmdp): """ backward: `callable` callable that backpropagates the gradient of the model w.r.t to preprocessed input through the preprocessing to get the gradient of the model's output w.r.t. the input before preprocessing dmdp: gradient of model w.r.t. preprocessed input """ if backward is None: # pragma: no cover raise ValueError('Your preprocessing function does not provide' ' an (approximate) gradient') dmdx = backward(dmdp) assert dmdx.dtype == dmdp.dtype return dmdx
[ "def", "_process_gradient", "(", "self", ",", "backward", ",", "dmdp", ")", ":", "if", "backward", "is", "None", ":", "# pragma: no cover", "raise", "ValueError", "(", "'Your preprocessing function does not provide'", "' an (approximate) gradient'", ")", "dmdx", "=", ...
backward: `callable` callable that backpropagates the gradient of the model w.r.t to preprocessed input through the preprocessing to get the gradient of the model's output w.r.t. the input before preprocessing dmdp: gradient of model w.r.t. preprocessed input
[ "backward", ":", "callable", "callable", "that", "backpropagates", "the", "gradient", "of", "the", "model", "w", ".", "r", ".", "t", "to", "preprocessed", "input", "through", "the", "preprocessing", "to", "get", "the", "gradient", "of", "the", "model", "s", ...
python
valid
45.785714
biolink/biolink-model
metamodel/utils/generator.py
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L159-L211
def neighborhood(self, elements: List[ELEMENT_NAME]) \ -> References: """ Return a list of all slots, classes and types that touch any element in elements, including the element itself @param elements: Elements to do proximity with @return: All slots and classes that touch element """ touches = References() for element in elements: if element in self.schema.classes: touches.classrefs.add(element) if None in touches.classrefs: raise ValueError("1") cls = self.schema.classes[element] if cls.is_a: touches.classrefs.add(cls.is_a) if None in touches.classrefs: raise ValueError("1") # Mixins include apply_to's touches.classrefs.update(set(cls.mixins)) for slotname in cls.slots: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: touches.classrefs.add(slot.range) elif slot.range in self.schema.types: touches.typerefs.add(slot.range) if None in touches.classrefs: raise ValueError("1") if element in self.synopsis.rangerefs: for slotname in self.synopsis.rangerefs[element]: touches.slotrefs.add(slotname) if self.schema.slots[slotname].domain: touches.classrefs.add(self.schema.slots[slotname].domain) elif element in self.schema.slots: touches.slotrefs.add(element) slot = self.schema.slots[element] touches.slotrefs.update(set(slot.mixins)) if slot.is_a: touches.slotrefs.add(slot.is_a) if element in self.synopsis.inverses: touches.slotrefs.update(self.synopsis.inverses[element]) if slot.domain: touches.classrefs.add(slot.domain) if slot.range in self.schema.classes: touches.classrefs.add(slot.range) elif slot.range in self.schema.types: touches.typerefs.add(slot.range) elif element in self.schema.types: if element in self.synopsis.rangerefs: touches.slotrefs.update(self.synopsis.rangerefs[element]) return touches
[ "def", "neighborhood", "(", "self", ",", "elements", ":", "List", "[", "ELEMENT_NAME", "]", ")", "->", "References", ":", "touches", "=", "References", "(", ")", "for", "element", "in", "elements", ":", "if", "element", "in", "self", ".", "schema", ".", ...
Return a list of all slots, classes and types that touch any element in elements, including the element itself @param elements: Elements to do proximity with @return: All slots and classes that touch element
[ "Return", "a", "list", "of", "all", "slots", "classes", "and", "types", "that", "touch", "any", "element", "in", "elements", "including", "the", "element", "itself" ]
python
train
47.698113
glomex/gcdt
gcdt/cloudwatch_logs.py
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/cloudwatch_logs.py#L56-L86
def filter_log_events(awsclient, log_group_name, start_ts, end_ts=None): """ Note: this is used to retrieve logs in ramuda. :param log_group_name: log group name :param start_ts: timestamp :param end_ts: timestamp :return: list of log entries """ client_logs = awsclient.get_client('logs') # TODO use all_pages instead! logs = [] next_token = None while True: request = { 'logGroupName': log_group_name, 'startTime': start_ts } if end_ts: request['endTime'] = end_ts if next_token: request['nextToken'] = next_token response = client_logs.filter_log_events(**request) logs.extend( [{'timestamp': e['timestamp'], 'message': e['message']} for e in response['events']] ) if 'nextToken' not in response: break next_token = response['nextToken'] return logs
[ "def", "filter_log_events", "(", "awsclient", ",", "log_group_name", ",", "start_ts", ",", "end_ts", "=", "None", ")", ":", "client_logs", "=", "awsclient", ".", "get_client", "(", "'logs'", ")", "# TODO use all_pages instead!", "logs", "=", "[", "]", "next_toke...
Note: this is used to retrieve logs in ramuda. :param log_group_name: log group name :param start_ts: timestamp :param end_ts: timestamp :return: list of log entries
[ "Note", ":", "this", "is", "used", "to", "retrieve", "logs", "in", "ramuda", "." ]
python
train
30.064516
svinota/mdns
mdns/zeroconf.py
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L2142-L2158
def close(self): """Ends the background threads, and prevent this instance from servicing further queries.""" if globals()['_GLOBAL_DONE'] == 0: globals()['_GLOBAL_DONE'] = 1 self.notify_all() self.engine.notify() self.unregister_all_services() for i in self.intf.values(): try: # there are cases, when we start mDNS without network i.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + \ socket.inet_aton('0.0.0.0')) except: pass i.close()
[ "def", "close", "(", "self", ")", ":", "if", "globals", "(", ")", "[", "'_GLOBAL_DONE'", "]", "==", "0", ":", "globals", "(", ")", "[", "'_GLOBAL_DONE'", "]", "=", "1", "self", ".", "notify_all", "(", ")", "self", ".", "engine", ".", "notify", "(",...
Ends the background threads, and prevent this instance from servicing further queries.
[ "Ends", "the", "background", "threads", "and", "prevent", "this", "instance", "from", "servicing", "further", "queries", "." ]
python
train
41.882353
scheibler/khard
khard/carddav_object.py
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/carddav_object.py#L1556-L1589
def _parse_type_value(types, value, supported_types): """Parse type value of phone numbers, email and post addresses. :param types: list of type values :type types: list(str) :param value: the corresponding label, required for more verbose exceptions :type value: str :param supported_types: all allowed standard types :type supported_types: list(str) :returns: tuple of standard and custom types and pref integer :rtype: tuple(list(str), list(str), int) """ custom_types = [] standard_types = [] pref = 0 for type in types: type = type.strip() if type: if type.lower() in supported_types: standard_types.append(type) elif type.lower() == "pref": pref += 1 elif re.match(r"^pref=\d{1,2}$", type.lower()): pref += int(type.split("=")[1]) else: if type.lower().startswith("x-"): custom_types.append(type[2:]) standard_types.append(type) else: custom_types.append(type) standard_types.append("X-{}".format(type)) return (standard_types, custom_types, pref)
[ "def", "_parse_type_value", "(", "types", ",", "value", ",", "supported_types", ")", ":", "custom_types", "=", "[", "]", "standard_types", "=", "[", "]", "pref", "=", "0", "for", "type", "in", "types", ":", "type", "=", "type", ".", "strip", "(", ")", ...
Parse type value of phone numbers, email and post addresses. :param types: list of type values :type types: list(str) :param value: the corresponding label, required for more verbose exceptions :type value: str :param supported_types: all allowed standard types :type supported_types: list(str) :returns: tuple of standard and custom types and pref integer :rtype: tuple(list(str), list(str), int)
[ "Parse", "type", "value", "of", "phone", "numbers", "email", "and", "post", "addresses", "." ]
python
test
39.352941
saltstack/salt
salt/minion.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1854-L1971
def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. 
Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc )
[ "def", "_thread_multi_return", "(", "cls", ",", "minion_instance", ",", "opts", ",", "data", ")", ":", "fn_", "=", "os", ".", "path", ".", "join", "(", "minion_instance", ".", "proc_dir", ",", "data", "[", "'jid'", "]", ")", "if", "opts", "[", "'multip...
This method should be used as a threading target, start the actual minion side execution.
[ "This", "method", "should", "be", "used", "as", "a", "threading", "target", "start", "the", "actual", "minion", "side", "execution", "." ]
python
train
44.669492
notifiers/notifiers
notifiers/utils/requests.py
https://github.com/notifiers/notifiers/blob/6dd8aafff86935dbb4763db9c56f9cdd7fc08b65/notifiers/utils/requests.py#L13-L67
def request( self, url: str, method: str, raise_for_status: bool = True, path_to_errors: tuple = None, *args, **kwargs ) -> tuple: """ A wrapper method for :meth:`~requests.Session.request``, which adds some defaults and logging :param url: The URL to send the reply to :param method: The method to use :param raise_for_status: Should an exception be raised for a failed response. Default is **True** :param args: Additional args to be sent to the request :param kwargs: Additional args to be sent to the request :return: Dict of response body or original :class:`requests.Response` """ session = kwargs.get("session", requests.Session()) log.debug( "sending a %s request to %s with args: %s kwargs: %s", method.upper(), url, args, kwargs, ) rsp = session.request(method, url, *args, **kwargs) log.debug("response: %s", rsp.text) errors = None if raise_for_status: try: rsp.raise_for_status() except requests.RequestException as e: if e.response is not None: rsp = e.response if path_to_errors: try: errors = rsp.json() for arg in path_to_errors: if errors.get(arg): errors = errors[arg] except json.decoder.JSONDecodeError: errors = [rsp.text] else: errors = [rsp.text] if not isinstance(errors, list): errors = [errors] else: rsp = None errors = [str(e)] log.debug("errors when trying to access %s: %s", url, errors) log.debug("returning response %s, errors %s", rsp, errors) return rsp, errors
[ "def", "request", "(", "self", ",", "url", ":", "str", ",", "method", ":", "str", ",", "raise_for_status", ":", "bool", "=", "True", ",", "path_to_errors", ":", "tuple", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "tuple", ":...
A wrapper method for :meth:`~requests.Session.request``, which adds some defaults and logging :param url: The URL to send the reply to :param method: The method to use :param raise_for_status: Should an exception be raised for a failed response. Default is **True** :param args: Additional args to be sent to the request :param kwargs: Additional args to be sent to the request :return: Dict of response body or original :class:`requests.Response`
[ "A", "wrapper", "method", "for", ":", "meth", ":", "~requests", ".", "Session", ".", "request", "which", "adds", "some", "defaults", "and", "logging" ]
python
train
37.527273
tsnaomi/finnsyll
finnsyll/prev/v05.py
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v05.py#L97-L119
def _syllabify(word, T4=True): '''Syllabify the given word.''' word = replace_umlauts(word) word, rules = apply_T1(word) if re.search(r'[^ieAyOauo]*([ieAyOauo]{2})[^ieAyOauo]*', word): word, T2 = apply_T2(word) word, T8 = apply_T8(word) word, T9 = apply_T9(word) word, T4 = apply_T4(word) if T4 else (word, '') rules += T2 + T8 + T9 + T4 if re.search(r'[ieAyOauo]{3}', word): word, T6 = apply_T6(word) word, T5 = apply_T5(word) word, T7 = apply_T7(word) word, T2 = apply_T2(word) rules += T5 + T6 + T7 + T2 word = replace_umlauts(word, put_back=True) rules = rules or ' T0' # T0 means no rules have applied return word, rules
[ "def", "_syllabify", "(", "word", ",", "T4", "=", "True", ")", ":", "word", "=", "replace_umlauts", "(", "word", ")", "word", ",", "rules", "=", "apply_T1", "(", "word", ")", "if", "re", ".", "search", "(", "r'[^ieAyOauo]*([ieAyOauo]{2})[^ieAyOauo]*'", ","...
Syllabify the given word.
[ "Syllabify", "the", "given", "word", "." ]
python
train
31.304348
CyberReboot/vent
vent/menus/editor.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/menus/editor.py#L115-L120
def change_screens(self): """ Change to the next tool to edit or back to MAIN form """ if self.settings['next_tool']: self.parentApp.change_form(self.settings['next_tool']) else: self.parentApp.change_form('MAIN')
[ "def", "change_screens", "(", "self", ")", ":", "if", "self", ".", "settings", "[", "'next_tool'", "]", ":", "self", ".", "parentApp", ".", "change_form", "(", "self", ".", "settings", "[", "'next_tool'", "]", ")", "else", ":", "self", ".", "parentApp", ...
Change to the next tool to edit or back to MAIN form
[ "Change", "to", "the", "next", "tool", "to", "edit", "or", "back", "to", "MAIN", "form" ]
python
train
42.666667
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4027-L4039
def get_stp_mst_detail_output_cist_migrate_time(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") migrate_time = ET.SubElement(cist, "migrate-time") migrate_time.text = kwargs.pop('migrate_time') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_migrate_time", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=",...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
41.692308
ejeschke/ginga
ginga/Bindings.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L469-L482
def enable(self, **kwdargs): """ General enable function encompassing all user interface features. Usage (e.g.): viewer.enable(rotate=False, flip=True) """ for feat, value in kwdargs: feat = feat.lower() if feat not in self.features: raise ValueError("'%s' is not a feature. Must be one of %s" % ( feat, str(self.features))) attr = self.features[feat] setattr(self, attr, bool(value))
[ "def", "enable", "(", "self", ",", "*", "*", "kwdargs", ")", ":", "for", "feat", ",", "value", "in", "kwdargs", ":", "feat", "=", "feat", ".", "lower", "(", ")", "if", "feat", "not", "in", "self", ".", "features", ":", "raise", "ValueError", "(", ...
General enable function encompassing all user interface features. Usage (e.g.): viewer.enable(rotate=False, flip=True)
[ "General", "enable", "function", "encompassing", "all", "user", "interface", "features", ".", "Usage", "(", "e", ".", "g", ".", ")", ":", "viewer", ".", "enable", "(", "rotate", "=", "False", "flip", "=", "True", ")" ]
python
train
36.285714
streamlink/streamlink
src/streamlink/stream/segmented.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/stream/segmented.py#L32-L39
def close(self): """Shuts down the thread.""" if not self.closed: log.debug("Closing worker thread") self.closed = True if self._wait: self._wait.set()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "log", ".", "debug", "(", "\"Closing worker thread\"", ")", "self", ".", "closed", "=", "True", "if", "self", ".", "_wait", ":", "self", ".", "_wait", ".", "set", "(", ...
Shuts down the thread.
[ "Shuts", "down", "the", "thread", "." ]
python
test
25.125
EnigmaBridge/client.py
ebclient/eb_utils.py
https://github.com/EnigmaBridge/client.py/blob/0fafe3902da394da88e9f960751d695ca65bbabd/ebclient/eb_utils.py#L47-L60
def build_api_object(uo=None, api_key=None, uo_id=None, uo_type=None): """ Builds API object identifier :return: """ if uo is not None: api_key = uo.resolve_api_key() if uo.resolve_api_key() is not None else api_key uo_id = uo.uo_id if uo.uo_id is not None else uo_id uo_type = uo.uo_type if uo.uo_type is not None else uo_type if uo_type is None or uo_type == EBConsts.INVALID_KEY_TYPE: uo_type = 0 return "%s%010x%010x" % (api_key, uo_id, uo_type)
[ "def", "build_api_object", "(", "uo", "=", "None", ",", "api_key", "=", "None", ",", "uo_id", "=", "None", ",", "uo_type", "=", "None", ")", ":", "if", "uo", "is", "not", "None", ":", "api_key", "=", "uo", ".", "resolve_api_key", "(", ")", "if", "u...
Builds API object identifier :return:
[ "Builds", "API", "object", "identifier", ":", "return", ":" ]
python
train
38.714286
TissueMAPS/TmClient
src/python/tmclient/api.py
https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/api.py#L1796-L1837
def upload_segmentation_image_file(self, mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x, tpoint, zplane, filename): '''Uploads segmentations from a *PNG* image file. Parameters ---------- mapobject_type_name: str name of the segmented objects plate_name: str name of the plate well_name: str name of the well in which the image is located well_pos_y: int y-position of the site relative to the well grid well_pos_x: int x-position of the site relative to the well grid tpoint: int, optional zero-based time point index (default: ``0``) zplane: int, optional zero-based z-plane index (default: ``0``) filename: str path to the file on disk Warning ------- This approach will only works for images with less than 65536 objects, since the *PNG* format is limited to 16-bit grayscale images. See also -------- :meth:`tmclient.api.TmClient.upload_segmentation_image` ''' logger.info('upload segmentation image file "%s"', filename) if not filename.lower().endswith('png'): raise IOError('Filename must have "png" extension.') filename = os.path.expanduser(os.path.expandvars(filename)) image = cv2.imread(filename, cv2.IMREAD_UNCHANGED | cv2.IMREAD_ANYDEPTH) self._upload_segmentation_image( mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x, tpoint, zplane, image.astype(np.int32) )
[ "def", "upload_segmentation_image_file", "(", "self", ",", "mapobject_type_name", ",", "plate_name", ",", "well_name", ",", "well_pos_y", ",", "well_pos_x", ",", "tpoint", ",", "zplane", ",", "filename", ")", ":", "logger", ".", "info", "(", "'upload segmentation ...
Uploads segmentations from a *PNG* image file. Parameters ---------- mapobject_type_name: str name of the segmented objects plate_name: str name of the plate well_name: str name of the well in which the image is located well_pos_y: int y-position of the site relative to the well grid well_pos_x: int x-position of the site relative to the well grid tpoint: int, optional zero-based time point index (default: ``0``) zplane: int, optional zero-based z-plane index (default: ``0``) filename: str path to the file on disk Warning ------- This approach will only works for images with less than 65536 objects, since the *PNG* format is limited to 16-bit grayscale images. See also -------- :meth:`tmclient.api.TmClient.upload_segmentation_image`
[ "Uploads", "segmentations", "from", "a", "*", "PNG", "*", "image", "file", "." ]
python
train
38.880952
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L1846-L1895
def save_copy_as(self, index=None): """Save copy of file as... Args: index: self.data index for the file to save. Returns: False if no file name was selected or if save() was unsuccessful. True is save() was successful. Gets the new file name from select_savename(). If no name is chosen, then the save_copy_as() aborts. Otherwise, the current stack is checked to see if the selected name already exists and, if so, then the tab with that name is closed. Unlike save_as(), this calls write() directly instead of using save(). The current file and tab aren't changed at all. The copied file is opened in a new tab. """ if index is None: # Save the currently edited file index = self.get_stack_index() finfo = self.data[index] original_filename = finfo.filename filename = self.select_savename(original_filename) if filename: ao_index = self.has_filename(filename) # Note: ao_index == index --> saving an untitled file if ao_index is not None and ao_index != index: if not self.close_file(ao_index): return if ao_index < index: index -= 1 try: self._write_to_file(finfo, filename) # open created copy file self.plugin_load.emit(filename) return True except EnvironmentError as error: self.msgbox = QMessageBox( QMessageBox.Critical, _("Save Error"), _("<b>Unable to save file '%s'</b>" "<br><br>Error message:<br>%s" ) % (osp.basename(finfo.filename), str(error)), parent=self) self.msgbox.exec_() else: return False
[ "def", "save_copy_as", "(", "self", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "# Save the currently edited file\r", "index", "=", "self", ".", "get_stack_index", "(", ")", "finfo", "=", "self", ".", "data", "[", "index", "]", ...
Save copy of file as... Args: index: self.data index for the file to save. Returns: False if no file name was selected or if save() was unsuccessful. True is save() was successful. Gets the new file name from select_savename(). If no name is chosen, then the save_copy_as() aborts. Otherwise, the current stack is checked to see if the selected name already exists and, if so, then the tab with that name is closed. Unlike save_as(), this calls write() directly instead of using save(). The current file and tab aren't changed at all. The copied file is opened in a new tab.
[ "Save", "copy", "of", "file", "as", "...", "Args", ":", "index", ":", "self", ".", "data", "index", "for", "the", "file", "to", "save", ".", "Returns", ":", "False", "if", "no", "file", "name", "was", "selected", "or", "if", "save", "()", "was", "u...
python
train
40.42
Rapptz/discord.py
discord/ext/commands/core.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/core.py#L1566-L1582
def is_nsfw(): """A :func:`.check` that checks if the channel is a NSFW channel. This check raises a special exception, :exc:`.NSFWChannelRequired` that is derived from :exc:`.CheckFailure`. .. versionchanged:: 1.1.0 Raise :exc:`.NSFWChannelRequired instead of generic :exc:`.CheckFailure`. DM channels will also now pass this check. """ def pred(ctx): ch = ctx.channel if ctx.guild is None or (isinstance(ch, discord.TextChannel) and ch.is_nsfw()): return True raise NSFWChannelRequired(ch) return check(pred)
[ "def", "is_nsfw", "(", ")", ":", "def", "pred", "(", "ctx", ")", ":", "ch", "=", "ctx", ".", "channel", "if", "ctx", ".", "guild", "is", "None", "or", "(", "isinstance", "(", "ch", ",", "discord", ".", "TextChannel", ")", "and", "ch", ".", "is_ns...
A :func:`.check` that checks if the channel is a NSFW channel. This check raises a special exception, :exc:`.NSFWChannelRequired` that is derived from :exc:`.CheckFailure`. .. versionchanged:: 1.1.0 Raise :exc:`.NSFWChannelRequired instead of generic :exc:`.CheckFailure`. DM channels will also now pass this check.
[ "A", ":", "func", ":", ".", "check", "that", "checks", "if", "the", "channel", "is", "a", "NSFW", "channel", "." ]
python
train
33.941176
trombastic/PyScada
pyscada/hmi/views.py
https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/hmi/views.py#L240-L309
def get_cache_data(request): if 'init' in request.POST: init = bool(float(request.POST['init'])) else: init = False active_variables = [] if 'variables[]' in request.POST: active_variables = request.POST.getlist('variables[]') """ else: active_variables = list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'charts__variables', flat=True)) active_variables += list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'xy_charts__variables', flat=True)) active_variables += list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'control_items__variable', flat=True)) active_variables += list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'custom_html_panels__variables', flat=True)) active_variables = list(set(active_variables)) """ active_variable_properties = [] if 'variable_properties[]' in request.POST: active_variable_properties = request.POST.getlist('variable_properties[]') timestamp_from = time.time() if 'timestamp_from' in request.POST: timestamp_from = float(request.POST['timestamp_from']) / 1000.0 timestamp_to = time.time() if 'timestamp_to' in request.POST: timestamp_to = min(timestamp_to, float(request.POST['timestamp_to']) / 1000.0) if timestamp_to == 0: timestamp_to = time.time() if timestamp_from == 0: timestamp_from == time.time() - 60 if timestamp_to - timestamp_from > 120 * 60: timestamp_from = timestamp_to - 120 * 60 #if not init: #timestamp_to = min(timestamp_from + 30, timestamp_to) if len(active_variables) > 0: data = RecordedData.objects.db_data( variable_ids=active_variables, time_min=timestamp_from, time_max=timestamp_to, time_in_ms=True, query_first_value=init) else: data = None if data is None: data = {} data['variable_properties'] = {} for item in VariableProperty.objects.filter(pk__in=active_variable_properties): data['variable_properties'][item.pk] 
= item.value() data["server_time"] = time.time() * 1000 return HttpResponse(json.dumps(data), content_type='application/json')
[ "def", "get_cache_data", "(", "request", ")", ":", "if", "'init'", "in", "request", ".", "POST", ":", "init", "=", "bool", "(", "float", "(", "request", ".", "POST", "[", "'init'", "]", ")", ")", "else", ":", "init", "=", "False", "active_variables", ...
else: active_variables = list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'charts__variables', flat=True)) active_variables += list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'xy_charts__variables', flat=True)) active_variables += list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'control_items__variable', flat=True)) active_variables += list( GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list( 'custom_html_panels__variables', flat=True)) active_variables = list(set(active_variables))
[ "else", ":", "active_variables", "=", "list", "(", "GroupDisplayPermission", ".", "objects", ".", "filter", "(", "hmi_group__in", "=", "request", ".", "user", ".", "groups", ".", "iterator", "()", ")", ".", "values_list", "(", "charts__variables", "flat", "=",...
python
train
35.557143
dbcli/athenacli
athenacli/sqlexecute.py
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/sqlexecute.py#L84-L98
def get_result(self, cursor): '''Get the current result's data from the cursor.''' title = headers = None # cursor.description is not None for queries that return result sets, # e.g. SELECT or SHOW. if cursor.description is not None: headers = [x[0] for x in cursor.description] rows = cursor.fetchall() status = '%d row%s in set' % (len(rows), '' if len(rows) == 1 else 's') else: logger.debug('No rows in result.') rows = None status = 'Query OK' return (title, rows, headers, status)
[ "def", "get_result", "(", "self", ",", "cursor", ")", ":", "title", "=", "headers", "=", "None", "# cursor.description is not None for queries that return result sets,", "# e.g. SELECT or SHOW.", "if", "cursor", ".", "description", "is", "not", "None", ":", "headers", ...
Get the current result's data from the cursor.
[ "Get", "the", "current", "result", "s", "data", "from", "the", "cursor", "." ]
python
train
40.066667
openstax/cnx-archive
cnxarchive/__init__.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/__init__.py#L137-L159
def main(global_config, **settings): """Main WSGI application factory.""" initialize_sentry_integration() config = Configurator(settings=settings) declare_api_routes(config) declare_type_info(config) # allowing the pyramid templates to render rss and xml config.include('pyramid_jinja2') config.add_jinja2_renderer('.rss') config.add_jinja2_renderer('.xml') mandatory_settings = ['exports-directories', 'exports-allowable-types'] for setting in mandatory_settings: if not settings.get(setting, None): raise ValueError('Missing {} config setting.'.format(setting)) config.scan(ignore='.tests') config.include('cnxarchive.events.main') config.add_tween('cnxarchive.tweens.conditional_http_tween_factory') return config.make_wsgi_app()
[ "def", "main", "(", "global_config", ",", "*", "*", "settings", ")", ":", "initialize_sentry_integration", "(", ")", "config", "=", "Configurator", "(", "settings", "=", "settings", ")", "declare_api_routes", "(", "config", ")", "declare_type_info", "(", "config...
Main WSGI application factory.
[ "Main", "WSGI", "application", "factory", "." ]
python
train
34.565217
StanfordBioinformatics/loom
client/loomengine/playbooks/files/gcloud_utils.py
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/client/loomengine/playbooks/files/gcloud_utils.py#L11-L24
def on_gcloud_vm(): """ Determines if we're running on a GCE instance.""" r = None try: r = requests.get('http://metadata.google.internal') except requests.ConnectionError: return False try: if r.headers['Metadata-Flavor'] == 'Google' and \ r.headers['Server'] == 'Metadata Server for VM': return True except KeyError: return False
[ "def", "on_gcloud_vm", "(", ")", ":", "r", "=", "None", "try", ":", "r", "=", "requests", ".", "get", "(", "'http://metadata.google.internal'", ")", "except", "requests", ".", "ConnectionError", ":", "return", "False", "try", ":", "if", "r", ".", "headers"...
Determines if we're running on a GCE instance.
[ "Determines", "if", "we", "re", "running", "on", "a", "GCE", "instance", "." ]
python
train
28.428571
soravux/scoop
bench/process_debug.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/bench/process_debug.py#L41-L49
def getWorkersName(data): """Returns the list of the names of the workers sorted alphabetically""" names = [fichier for fichier in data.keys()] names.sort() try: names.remove("broker") except ValueError: pass return names
[ "def", "getWorkersName", "(", "data", ")", ":", "names", "=", "[", "fichier", "for", "fichier", "in", "data", ".", "keys", "(", ")", "]", "names", ".", "sort", "(", ")", "try", ":", "names", ".", "remove", "(", "\"broker\"", ")", "except", "ValueErro...
Returns the list of the names of the workers sorted alphabetically
[ "Returns", "the", "list", "of", "the", "names", "of", "the", "workers", "sorted", "alphabetically" ]
python
train
28.111111
raiden-network/raiden
raiden/utils/signer.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/utils/signer.py#L83-L90
def sign(self, data: bytes, v: int = 27) -> Signature: """ Sign data hash with local private key """ assert v in (0, 27), 'Raiden is only signing messages with v in (0, 27)' _hash = eth_sign_sha3(data) signature = self.private_key.sign_msg_hash(message_hash=_hash) sig_bytes = signature.to_bytes() # adjust last byte to v return sig_bytes[:-1] + bytes([sig_bytes[-1] + v])
[ "def", "sign", "(", "self", ",", "data", ":", "bytes", ",", "v", ":", "int", "=", "27", ")", "->", "Signature", ":", "assert", "v", "in", "(", "0", ",", "27", ")", ",", "'Raiden is only signing messages with v in (0, 27)'", "_hash", "=", "eth_sign_sha3", ...
Sign data hash with local private key
[ "Sign", "data", "hash", "with", "local", "private", "key" ]
python
train
52.625
timothydmorton/simpledist
simpledist/kde.py
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/kde.py#L235-L254
def newton(f,c,tol=0.0001,restrict=None): """ newton(f,c) --> float Returns the x closest to c such that f(x) = 0 """ #print(c) if restrict: lo,hi = restrict if c < lo or c > hi: print(c) c = random*(hi-lo)+lo if fuzzyequals(f(c),0,tol): return c else: try: return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict) except: return None
[ "def", "newton", "(", "f", ",", "c", ",", "tol", "=", "0.0001", ",", "restrict", "=", "None", ")", ":", "#print(c)", "if", "restrict", ":", "lo", ",", "hi", "=", "restrict", "if", "c", "<", "lo", "or", "c", ">", "hi", ":", "print", "(", "c", ...
newton(f,c) --> float Returns the x closest to c such that f(x) = 0
[ "newton", "(", "f", "c", ")", "--", ">", "float", "Returns", "the", "x", "closest", "to", "c", "such", "that", "f", "(", "x", ")", "=", "0" ]
python
train
21.8
scheibler/khard
khard/khard.py
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/khard.py#L921-L982
def phone_subcommand(search_terms, vcard_list, parsable): """Print a phone application friendly contact table. :param search_terms: used as search term to filter the contacts before printing :type search_terms: str :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None """ all_phone_numbers_list = [] matching_phone_number_list = [] for vcard in vcard_list: for type, number_list in sorted(vcard.get_phone_numbers().items(), key=lambda k: k[0].lower()): for number in sorted(number_list): if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() # create output lines line_formatted = "\t".join([name, type, number]) line_parsable = "\t".join([number, name, type]) if parsable: # parsable option: start with phone number phone_number_line = line_parsable else: # else: start with name phone_number_line = line_formatted if re.search(search_terms, "%s\n%s" % (line_formatted, line_parsable), re.IGNORECASE | re.DOTALL): matching_phone_number_list.append(phone_number_line) elif len(re.sub("\D", "", search_terms)) >= 3: # The user likely searches for a phone number cause the # search string contains at least three digits. So we # remove all non-digit chars from the phone number field # and match against that. 
if re.search(re.sub("\D", "", search_terms), re.sub("\D", "", number), re.IGNORECASE): matching_phone_number_list.append(phone_number_line) # collect all phone numbers in a different list as fallback all_phone_numbers_list.append(phone_number_line) if matching_phone_number_list: if parsable: print('\n'.join(matching_phone_number_list)) else: list_phone_numbers(matching_phone_number_list) elif all_phone_numbers_list: if parsable: print('\n'.join(all_phone_numbers_list)) else: list_phone_numbers(all_phone_numbers_list) else: if not parsable: print("Found no phone numbers") sys.exit(1)
[ "def", "phone_subcommand", "(", "search_terms", ",", "vcard_list", ",", "parsable", ")", ":", "all_phone_numbers_list", "=", "[", "]", "matching_phone_number_list", "=", "[", "]", "for", "vcard", "in", "vcard_list", ":", "for", "type", ",", "number_list", "in", ...
Print a phone application friendly contact table. :param search_terms: used as search term to filter the contacts before printing :type search_terms: str :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None
[ "Print", "a", "phone", "application", "friendly", "contact", "table", "." ]
python
test
45.274194
coleifer/irc
irc.py
https://github.com/coleifer/irc/blob/f9d2bd6369aafe6cb0916c9406270ca8ecea2080/irc.py#L146-L168
def dispatch_patterns(self): """\ Low-level dispatching of socket data based on regex matching, in general handles * In event a nickname is taken, registers under a different one * Responds to periodic PING messages from server * Dispatches to registered callbacks when - any user leaves or enters a room currently connected to - a channel message is observed - a private message is received """ return ( (self.nick_re, self.new_nick), (self.nick_change_re, self.handle_nick_change), (self.ping_re, self.handle_ping), (self.part_re, self.handle_part), (self.join_re, self.handle_join), (self.quit_re, self.handle_quit), (self.chanmsg_re, self.handle_channel_message), (self.privmsg_re, self.handle_private_message), (self.registered_re, self.handle_registered), )
[ "def", "dispatch_patterns", "(", "self", ")", ":", "return", "(", "(", "self", ".", "nick_re", ",", "self", ".", "new_nick", ")", ",", "(", "self", ".", "nick_change_re", ",", "self", ".", "handle_nick_change", ")", ",", "(", "self", ".", "ping_re", ",...
\ Low-level dispatching of socket data based on regex matching, in general handles * In event a nickname is taken, registers under a different one * Responds to periodic PING messages from server * Dispatches to registered callbacks when - any user leaves or enters a room currently connected to - a channel message is observed - a private message is received
[ "\\", "Low", "-", "level", "dispatching", "of", "socket", "data", "based", "on", "regex", "matching", "in", "general", "handles" ]
python
test
41.652174
numba/llvmlite
llvmlite/binding/targets.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/binding/targets.py#L187-L198
def from_triple(cls, triple): """ Create a Target instance for the given triple (a string). """ with ffi.OutputString() as outerr: target = ffi.lib.LLVMPY_GetTargetFromTriple(triple.encode('utf8'), outerr) if not target: raise RuntimeError(str(outerr)) target = cls(target) target._triple = triple return target
[ "def", "from_triple", "(", "cls", ",", "triple", ")", ":", "with", "ffi", ".", "OutputString", "(", ")", "as", "outerr", ":", "target", "=", "ffi", ".", "lib", ".", "LLVMPY_GetTargetFromTriple", "(", "triple", ".", "encode", "(", "'utf8'", ")", ",", "o...
Create a Target instance for the given triple (a string).
[ "Create", "a", "Target", "instance", "for", "the", "given", "triple", "(", "a", "string", ")", "." ]
python
train
38.666667
aiogram/aiogram
aiogram/utils/executor.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/utils/executor.py#L294-L313
def start_polling(self, reset_webhook=None, timeout=20, fast=True): """ Start bot in long-polling mode :param reset_webhook: :param timeout: """ self._prepare_polling() loop: asyncio.AbstractEventLoop = self.loop try: loop.run_until_complete(self._startup_polling()) loop.create_task(self.dispatcher.start_polling(reset_webhook=reset_webhook, timeout=timeout, fast=fast)) loop.run_forever() except (KeyboardInterrupt, SystemExit): # loop.stop() pass finally: loop.run_until_complete(self._shutdown_polling()) log.warning("Goodbye!")
[ "def", "start_polling", "(", "self", ",", "reset_webhook", "=", "None", ",", "timeout", "=", "20", ",", "fast", "=", "True", ")", ":", "self", ".", "_prepare_polling", "(", ")", "loop", ":", "asyncio", ".", "AbstractEventLoop", "=", "self", ".", "loop", ...
Start bot in long-polling mode :param reset_webhook: :param timeout:
[ "Start", "bot", "in", "long", "-", "polling", "mode" ]
python
train
33.95
secdev/scapy
scapy/contrib/isotp.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/isotp.py#L602-L610
def begin_send(self, p): """Begin the transmission of message p. This method returns after sending the first frame. If multiple frames are necessary to send the message, this socket will unable to send other messages until either the transmission of this frame succeeds or it fails.""" if hasattr(p, "sent_time"): p.sent_time = time.time() return self.outs.begin_send(bytes(p))
[ "def", "begin_send", "(", "self", ",", "p", ")", ":", "if", "hasattr", "(", "p", ",", "\"sent_time\"", ")", ":", "p", ".", "sent_time", "=", "time", ".", "time", "(", ")", "return", "self", ".", "outs", ".", "begin_send", "(", "bytes", "(", "p", ...
Begin the transmission of message p. This method returns after sending the first frame. If multiple frames are necessary to send the message, this socket will unable to send other messages until either the transmission of this frame succeeds or it fails.
[ "Begin", "the", "transmission", "of", "message", "p", ".", "This", "method", "returns", "after", "sending", "the", "first", "frame", ".", "If", "multiple", "frames", "are", "necessary", "to", "send", "the", "message", "this", "socket", "will", "unable", "to"...
python
train
47.777778
sbg/sevenbridges-python
sevenbridges/models/dataset.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/dataset.py#L144-L162
def add_member(self, username, permissions, api=None): """Add member to a dataset :param username: Member username :param permissions: Permissions dict :param api: Api instance :return: New member instance """ api = api or self._API data = { 'username': username, 'permissions': permissions } response = api.post( url=self._URL['members'].format(id=self.id), data=data ) data = response.json() return Member(api=api, **data)
[ "def", "add_member", "(", "self", ",", "username", ",", "permissions", ",", "api", "=", "None", ")", ":", "api", "=", "api", "or", "self", ".", "_API", "data", "=", "{", "'username'", ":", "username", ",", "'permissions'", ":", "permissions", "}", "res...
Add member to a dataset :param username: Member username :param permissions: Permissions dict :param api: Api instance :return: New member instance
[ "Add", "member", "to", "a", "dataset", ":", "param", "username", ":", "Member", "username", ":", "param", "permissions", ":", "Permissions", "dict", ":", "param", "api", ":", "Api", "instance", ":", "return", ":", "New", "member", "instance" ]
python
train
29.421053
secdev/scapy
scapy/arch/windows/__init__.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/windows/__init__.py#L174-L183
def _exec_cmd(command): """Call a CMD command and return the output and returncode""" proc = sp.Popen(command, stdout=sp.PIPE, shell=True) if six.PY2: res = proc.communicate()[0] else: res = proc.communicate(timeout=5)[0] return res, proc.returncode
[ "def", "_exec_cmd", "(", "command", ")", ":", "proc", "=", "sp", ".", "Popen", "(", "command", ",", "stdout", "=", "sp", ".", "PIPE", ",", "shell", "=", "True", ")", "if", "six", ".", "PY2", ":", "res", "=", "proc", ".", "communicate", "(", ")", ...
Call a CMD command and return the output and returncode
[ "Call", "a", "CMD", "command", "and", "return", "the", "output", "and", "returncode" ]
python
train
31.6
noahmorrison/chevron
chevron/tokenizer.py
https://github.com/noahmorrison/chevron/blob/78f1a384eddef16906732d8db66deea6d37049b7/chevron/tokenizer.py#L31-L44
def l_sa_check(template, literal, is_standalone): """Do a preliminary check to see if a tag could be a standalone""" # If there is a newline, or the previous tag was a standalone if literal.find('\n') != -1 or is_standalone: padding = literal.split('\n')[-1] # If all the characters since the last newline are spaces if padding.isspace() or padding == '': # Then the next tag could be a standalone return True else: # Otherwise it can't be return False
[ "def", "l_sa_check", "(", "template", ",", "literal", ",", "is_standalone", ")", ":", "# If there is a newline, or the previous tag was a standalone", "if", "literal", ".", "find", "(", "'\\n'", ")", "!=", "-", "1", "or", "is_standalone", ":", "padding", "=", "lit...
Do a preliminary check to see if a tag could be a standalone
[ "Do", "a", "preliminary", "check", "to", "see", "if", "a", "tag", "could", "be", "a", "standalone" ]
python
train
38.071429
attakei/errcron
errcron/bot.py
https://github.com/attakei/errcron/blob/a3938fc7d051daefb6813588fcbeb9592bd00c9a/errcron/bot.py#L53-L63
def poll_crontab(self): """Check crontab and run target jobs """ polled_time = self._get_current_time() if polled_time.second >= 30: self.log.debug('Skip cronjobs in {}'.format(polled_time)) return for job in self._crontab: if not job.is_runnable(polled_time): continue job.do_action(self, polled_time)
[ "def", "poll_crontab", "(", "self", ")", ":", "polled_time", "=", "self", ".", "_get_current_time", "(", ")", "if", "polled_time", ".", "second", ">=", "30", ":", "self", ".", "log", ".", "debug", "(", "'Skip cronjobs in {}'", ".", "format", "(", "polled_t...
Check crontab and run target jobs
[ "Check", "crontab", "and", "run", "target", "jobs" ]
python
train
36
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3769-L3783
def get_tags_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1): """ Get tags of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=DELIVERY_NOTE_TAGS, per_page=per_page, page=page, params={'delivery_note_id': delivery_note_id}, )
[ "def", "get_tags_of_delivery_note_per_page", "(", "self", ",", "delivery_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "DELIVERY_NOTE_TAGS", ",", "per_page", "=", "...
Get tags of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "tags", "of", "delivery", "note", "per", "page" ]
python
train
35.6
gitpython-developers/GitPython
git/cmd.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/cmd.py#L866-L882
def custom_environment(self, **kwargs): """ A context manager around the above ``update_environment`` method to restore the environment back to its previous state after operation. ``Examples``:: with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'): repo.remotes.origin.fetch() :param kwargs: see update_environment """ old_env = self.update_environment(**kwargs) try: yield finally: self.update_environment(**old_env)
[ "def", "custom_environment", "(", "self", ",", "*", "*", "kwargs", ")", ":", "old_env", "=", "self", ".", "update_environment", "(", "*", "*", "kwargs", ")", "try", ":", "yield", "finally", ":", "self", ".", "update_environment", "(", "*", "*", "old_env"...
A context manager around the above ``update_environment`` method to restore the environment back to its previous state after operation. ``Examples``:: with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'): repo.remotes.origin.fetch() :param kwargs: see update_environment
[ "A", "context", "manager", "around", "the", "above", "update_environment", "method", "to", "restore", "the", "environment", "back", "to", "its", "previous", "state", "after", "operation", "." ]
python
train
31.294118
astropy/photutils
photutils/background/background_2d.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/background/background_2d.py#L412-L453
def _select_meshes(self, data): """ Define the x and y indices with respect to the low-resolution mesh image of the meshes to use for the background interpolation. The ``exclude_percentile`` keyword determines which meshes are not used for the background interpolation. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` A 2D array where the y dimension represents each mesh and the x dimension represents the data in each mesh. Returns ------- mesh_idx : 1D `~numpy.ndarray` The 1D mesh indices. """ # the number of masked pixels in each mesh nmasked = np.ma.count_masked(data, axis=1) # meshes that contain more than ``exclude_percentile`` percent # masked pixels are excluded: # - for exclude_percentile=0, good meshes will be only where # nmasked=0 # - meshes where nmasked=self.box_npixels are *always* excluded # (second conditional needed for exclude_percentile=100) threshold_npixels = self.exclude_percentile / 100. * self.box_npixels mesh_idx = np.where((nmasked <= threshold_npixels) & (nmasked != self.box_npixels))[0] # good meshes if len(mesh_idx) == 0: raise ValueError('All meshes contain > {0} ({1} percent per ' 'mesh) masked pixels. Please check your data ' 'or decrease "exclude_percentile".' .format(threshold_npixels, self.exclude_percentile)) return mesh_idx
[ "def", "_select_meshes", "(", "self", ",", "data", ")", ":", "# the number of masked pixels in each mesh", "nmasked", "=", "np", ".", "ma", ".", "count_masked", "(", "data", ",", "axis", "=", "1", ")", "# meshes that contain more than ``exclude_percentile`` percent", ...
Define the x and y indices with respect to the low-resolution mesh image of the meshes to use for the background interpolation. The ``exclude_percentile`` keyword determines which meshes are not used for the background interpolation. Parameters ---------- data : 2D `~numpy.ma.MaskedArray` A 2D array where the y dimension represents each mesh and the x dimension represents the data in each mesh. Returns ------- mesh_idx : 1D `~numpy.ndarray` The 1D mesh indices.
[ "Define", "the", "x", "and", "y", "indices", "with", "respect", "to", "the", "low", "-", "resolution", "mesh", "image", "of", "the", "meshes", "to", "use", "for", "the", "background", "interpolation", "." ]
python
train
39.714286
spacetelescope/pysynphot
pysynphot/spectrum.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/spectrum.py#L699-L714
def setMagnitude(self, band, value):
    """Make the magnitude of the source in ``band`` equal to ``value``.

    ``band`` is a SpectralElement.
    This method is marked for deletion once the ``.renorm`` method is
    well tested.  The returned object is a CompositeSourceSpectrum.

    .. warning:: DO NOT USE
    """
    # Current magnitude of the source relative to Vega in this band.
    flux_ratio = band.calcTotalFlux(self) / band.calcVegaFlux()
    current_mag = -2.5 * math.log10(flux_ratio)
    # Scale factor that shifts the magnitude to the requested value.
    scale = 10 ** (-0.4 * (value - current_mag))
    return self * scale
[ "def", "setMagnitude", "(", "self", ",", "band", ",", "value", ")", ":", "objectFlux", "=", "band", ".", "calcTotalFlux", "(", "self", ")", "vegaFlux", "=", "band", ".", "calcVegaFlux", "(", ")", "magDiff", "=", "-", "2.5", "*", "math", ".", "log10", ...
Makes the magnitude of the source in the band equal to value. band is a SpectralElement. This method is marked for deletion once the .renorm method is well tested. Object returned is a CompositeSourceSpectrum. .. warning:: DO NOT USED
[ "Makes", "the", "magnitude", "of", "the", "source", "in", "the", "band", "equal", "to", "value", ".", "band", "is", "a", "SpectralElement", ".", "This", "method", "is", "marked", "for", "deletion", "once", "the", ".", "renorm", "method", "is", "well", "t...
python
train
33.5625
welbornprod/colr
examples/walk_dir.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/examples/walk_dir.py#L124-L167
def walk_dir_progress(path, maxdircnt=5000, file=sys.stdout):
    """ Walk a directory, printing status updates along the way. """
    progress = ProgressBar(
        'Walking {}'.format(C(path, 'cyan')),
        bars=Bars.numbers_blue.with_wrapper(('(', ')')),
        show_time=True,
        file=file,
    )
    dir_count = 0
    reached_max = False
    print('\nStarting progress bar...')
    progress.start()
    for root, _dirs, _files in os.walk(path):
        dir_count += 1
        if dir_count % 100 == 0:
            progress.update(
                percent=min((dir_count / maxdircnt) * 100, 100),
                text='Walking {}...'.format(
                    C(os.path.split(root)[-1], 'cyan'),
                )
            )
        if dir_count > maxdircnt:
            reached_max = True
            # Stop the bar first; we are about to print while it would
            # otherwise still be animating.
            progress.stop()
            print(
                '\nFinished walking {} directories.'.format(
                    C(maxdircnt, 'blue', style='bright')
                ),
                file=file,
            )
            break
    if not reached_max:
        # The walk ran out of directories before hitting maxdircnt;
        # stop the animation before printing the error message.
        progress.stop()
        print_err(
            '\nNever made it to {} directories ({}).'.format(
                C(maxdircnt, 'blue', style='bright'),
                C(dir_count, 'red', style='bright'),
            )
        )
    print('\nFinished with progress bar.')
    return 0
[ "def", "walk_dir_progress", "(", "path", ",", "maxdircnt", "=", "5000", ",", "file", "=", "sys", ".", "stdout", ")", ":", "p", "=", "ProgressBar", "(", "'Walking {}'", ".", "format", "(", "C", "(", "path", ",", "'cyan'", ")", ")", ",", "bars", "=", ...
Walk a directory, printing status updates along the way.
[ "Walk", "a", "directory", "printing", "status", "updates", "along", "the", "way", "." ]
python
train
31.636364
gem/oq-engine
openquake/commonlib/oqvalidation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/oqvalidation.py#L468-L472
def lti(self):
    """
    Dictionary extended_loss_type -> extended_loss_type index
    """
    indices = {}
    for idx, (ltype, _dtype) in enumerate(self.loss_dt_list()):
        indices[ltype] = idx
    return indices
[ "def", "lti", "(", "self", ")", ":", "return", "{", "lt", ":", "i", "for", "i", ",", "(", "lt", ",", "dt", ")", "in", "enumerate", "(", "self", ".", "loss_dt_list", "(", ")", ")", "}" ]
Dictionary extended_loss_type -> extended_loss_type index
[ "Dictionary", "extended_loss_type", "-", ">", "extended_loss_type", "index" ]
python
train
34.6
dropbox/stone
stone/frontend/ir_generator.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L552-L577
def _create_type(self, env, item):
    """Create a forward reference for a union or struct."""
    # Reject duplicate symbol definitions up front.
    if item.name in env:
        previous = env[item.name]
        msg = 'Symbol %s already defined (%s:%d).' % (
            quote(item.name), previous._ast_node.path,
            previous._ast_node.lineno)
        raise InvalidSpec(msg, item.lineno, item.path)

    namespace = self.api.ensure_namespace(env.namespace_name)

    if isinstance(item, AstStructDef):
        try:
            data_type = Struct(name=item.name, namespace=namespace,
                               ast_node=item)
        except ParameterError as e:
            raise InvalidSpec(
                'Bad declaration of %s: %s' % (quote(item.name), e.args[0]),
                item.lineno, item.path)
    elif isinstance(item, AstUnionDef):
        data_type = Union(name=item.name, namespace=namespace,
                          ast_node=item, closed=item.closed)
    else:
        raise AssertionError('Unknown type definition %r' % type(item))

    env[item.name] = data_type
    return data_type
[ "def", "_create_type", "(", "self", ",", "env", ",", "item", ")", ":", "if", "item", ".", "name", "in", "env", ":", "existing_dt", "=", "env", "[", "item", ".", "name", "]", "raise", "InvalidSpec", "(", "'Symbol %s already defined (%s:%d).'", "%", "(", "...
Create a forward reference for a union or struct.
[ "Create", "a", "forward", "reference", "for", "a", "union", "or", "struct", "." ]
python
train
43.730769
7sDream/zhihu-py3
zhihu/author.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/author.py#L66-L73
def id(self):
    """Get the user id, i.e. the last path segment of the profile url.

    :return: the user id, or an empty string when the url is unknown
    :rtype: str
    """
    if self.url is None:
        return ''
    return re.match(r'^.*/([^/]+)/$', self.url).group(1)
[ "def", "id", "(", "self", ")", ":", "return", "re", ".", "match", "(", "r'^.*/([^/]+)/$'", ",", "self", ".", "url", ")", ".", "group", "(", "1", ")", "if", "self", ".", "url", "is", "not", "None", "else", "''" ]
获取用户id,就是网址最后那一部分. :return: 用户id :rtype: str
[ "获取用户id,就是网址最后那一部分", "." ]
python
train
24.75
neo4j-contrib/neomodel
neomodel/core.py
https://github.com/neo4j-contrib/neomodel/blob/cca5de4c4e90998293558b871b1b529095c91a38/neomodel/core.py#L50-L65
def remove_all_labels(stdout=None):
    """ Calls functions for dropping constraints and indexes.

    :param stdout: output stream (defaults to ``sys.stdout``)
    :return: None
    """
    if not stdout:
        stdout = sys.stdout
    # Fixed typo in user-facing messages: "Droping" -> "Dropping".
    stdout.write("Dropping constraints...\n")
    drop_constraints(quiet=False, stdout=stdout)

    stdout.write('Dropping indexes...\n')
    drop_indexes(quiet=False, stdout=stdout)
[ "def", "remove_all_labels", "(", "stdout", "=", "None", ")", ":", "if", "not", "stdout", ":", "stdout", "=", "sys", ".", "stdout", "stdout", ".", "write", "(", "\"Droping constraints...\\n\"", ")", "drop_constraints", "(", "quiet", "=", "False", ",", "stdout...
Calls functions for dropping constraints and indexes. :param stdout: output stream :return: None
[ "Calls", "functions", "for", "dropping", "constraints", "and", "indexes", "." ]
python
train
23.5
dtmilano/AndroidViewClient
src/com/dtmilano/android/culebron.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/culebron.py#L305-L386
def takeScreenshotAndShowItOnWindow(self):
    '''
    Takes the current screenshot and shows it on the main window.
    It also:
     - sizes the window
     - create the canvas
     - set the focus
     - enable the events
     - create widgets
     - finds the targets (as explained in L{findTargets})
     - hides the vignette (that could have been showed before)
    '''

    # NOTE: this module uses Python 2 syntax (print >> sys.stderr, ...).
    if PROFILE:
        print >> sys.stderr, "PROFILING: takeScreenshotAndShowItOnWindow()"
        profileStart()
    if DEBUG:
        print >> sys.stderr, "takeScreenshotAndShowItOnWindow()"
    # Grab the screenshot either through the uiAutomatorHelper (when one is
    # attached to the ViewClient) or directly from the device.
    if self.vc and self.vc.uiAutomatorHelper:
        received = self.vc.uiAutomatorHelper.takeScreenshot()
        stream = StringIO.StringIO(received)
        self.unscaledScreenshot = Image.open(stream)
    else:
        self.unscaledScreenshot = self.device.takeSnapshot(reconnect=True)
    self.image = self.unscaledScreenshot
    (width, height) = self.image.size
    # Apply the configured display scale factor, if any.
    if self.scale != 1:
        scaledWidth = int(width * self.scale)
        scaledHeight = int(height * self.scale)
        self.image = self.image.resize((scaledWidth, scaledHeight), PIL.Image.ANTIALIAS)
        (width, height) = self.image.size
    # On OSX with SDK versions 15-22, pre-encode the image as base64 GIF
    # data for the Tkinter workaround applied further below.
    if self.isDarwin and 14 < self.sdkVersion < 23:
        stream = StringIO.StringIO()
        self.image.save(stream, 'GIF')
        import base64
        gif = base64.b64encode(stream.getvalue())
        stream.close()
    # Lazily create the canvas and its companion widgets on first use.
    if self.canvas is None:
        if DEBUG:
            print >> sys.stderr, "Creating canvas", width, 'x', height
        self.placeholder.grid_forget()
        self.canvas = Tkinter.Canvas(self.mainFrame, width=width, height=height)
        self.canvas.focus_set()
        self.enableEvents()
        self.createMessageArea(width, height)
        self.createVignette(width, height)
    if self.isDarwin and self.scale != 1 and 14 < self.sdkVersion < 23:
        # Extremely weird Tkinter bug, I guess
        # If the image was rotated and then resized if ImageTk.PhotoImage(self.image)
        # is used as usual then the result is a completely transparent image and only
        # the "Please wait..." is seen.
        # Converting it to GIF seems to solve the problem
        self.screenshot = Tkinter.PhotoImage(data=gif)
    else:
        self.screenshot = ImageTk.PhotoImage(self.image)
    # Replace any previous screenshot image on the canvas.
    if self.imageId is not None:
        self.canvas.delete(self.imageId)
    self.imageId = self.canvas.create_image(0, 0, anchor=Tkinter.NW, image=self.screenshot)
    if DEBUG:
        try:
            print >> sys.stderr, "Grid info", self.canvas.grid_info()
        except:
            print >> sys.stderr, "Exception getting grid info"
    gridInfo = None
    try:
        gridInfo = self.canvas.grid_info()
    except:
        if DEBUG:
            print >> sys.stderr, "Adding canvas to grid (1,1)"
        self.canvas.grid(row=1, column=1, rowspan=4)
    # Some Tk versions return an empty dict instead of raising when the
    # widget is not gridded yet -- presumably why both paths grid the
    # canvas; TODO confirm.
    if not gridInfo:
        self.canvas.grid(row=1, column=1, rowspan=4)
    self.findTargets()
    self.hideVignette()
    if DEBUG:
        try:
            self.printGridInfo()
        except:
            pass
    if PROFILE:
        profileEnd()
[ "def", "takeScreenshotAndShowItOnWindow", "(", "self", ")", ":", "if", "PROFILE", ":", "print", ">>", "sys", ".", "stderr", ",", "\"PROFILING: takeScreenshotAndShowItOnWindow()\"", "profileStart", "(", ")", "if", "DEBUG", ":", "print", ">>", "sys", ".", "stderr", ...
Takes the current screenshot and shows it on the main window. It also: - sizes the window - create the canvas - set the focus - enable the events - create widgets - finds the targets (as explained in L{findTargets}) - hides the vignette (that could have been showed before)
[ "Takes", "the", "current", "screenshot", "and", "shows", "it", "on", "the", "main", "window", ".", "It", "also", ":", "-", "sizes", "the", "window", "-", "create", "the", "canvas", "-", "set", "the", "focus", "-", "enable", "the", "events", "-", "creat...
python
train
41.219512
googledatalab/pydatalab
datalab/storage/_item.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L130-L137
def exists(self):
    """ Checks if the item exists.

    Returns:
      True if the item's metadata is available, False when the metadata
      request fails.
    """
    # Fetching metadata raises RequestException when the item is missing
    # or inaccessible; anything else is a genuine error and should
    # propagate.  The original ``except Exception as e: raise e`` was a
    # redundant re-raise that only rewrote the traceback, so it was
    # removed.
    try:
        return self.metadata is not None
    except datalab.utils.RequestException:
        return False
[ "def", "exists", "(", "self", ")", ":", "try", ":", "return", "self", ".", "metadata", "is", "not", "None", "except", "datalab", ".", "utils", ".", "RequestException", ":", "return", "False", "except", "Exception", "as", "e", ":", "raise", "e" ]
Checks if the item exists.
[ "Checks", "if", "the", "item", "exists", "." ]
python
train
25
astropy/photutils
photutils/aperture/mask.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/mask.py#L210-L241
def multiply(self, data, fill_value=0.):
    """
    Multiply the aperture mask with the input data, taking any edge
    effects into account.  The result is a mask-weighted cutout from
    the data.

    Parameters
    ----------
    data : array_like or `~astropy.units.Quantity`
        The 2D array to multiply with the aperture mask.

    fill_value : float, optional
        The value used to fill pixels where the aperture mask does not
        overlap with the input ``data``.  The default is 0.

    Returns
    -------
    result : `~numpy.ndarray`
        A 2D mask-weighted cutout from the input ``data``.  Pixels
        outside the data are assigned ``fill_value`` before the
        multiplication.  `None` is returned when the aperture does not
        overlap the input ``data`` at all.
    """
    weighted_cutout = self.cutout(data, fill_value=fill_value)
    if weighted_cutout is None:
        return None
    return weighted_cutout * self.data
[ "def", "multiply", "(", "self", ",", "data", ",", "fill_value", "=", "0.", ")", ":", "cutout", "=", "self", ".", "cutout", "(", "data", ",", "fill_value", "=", "fill_value", ")", "if", "cutout", "is", "None", ":", "return", "None", "else", ":", "retu...
Multiply the aperture mask with the input data, taking any edge effects into account. The result is a mask-weighted cutout from the data. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array to multiply with the aperture mask. fill_value : float, optional The value is used to fill pixels where the aperture mask does not overlap with the input ``data``. The default is 0. Returns ------- result : `~numpy.ndarray` A 2D mask-weighted cutout from the input ``data``. If there is a partial overlap of the aperture mask with the input data, pixels outside of the data will be assigned to ``fill_value`` before being multipled with the mask. `None` is returned if there is no overlap of the aperture with the input ``data``.
[ "Multiply", "the", "aperture", "mask", "with", "the", "input", "data", "taking", "any", "edge", "effects", "into", "account", "." ]
python
train
35.625
uralbash/sqlalchemy_mptt
sqlalchemy_mptt/mixins.py
https://github.com/uralbash/sqlalchemy_mptt/blob/2971c9fa49bbeaa3e2fb96229d933ceae450b319/sqlalchemy_mptt/mixins.py#L153-L164
def move_inside(self, parent_id):
    """ Moving one node of tree inside another

    For example see:

    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
    * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
    """  # noqa
    # Record both the new parent and the move marker consumed by the
    # MPTT event listeners, then register the node with its session.
    self.parent_id = parent_id
    self.mptt_move_inside = parent_id
    db_session = Session.object_session(self)
    db_session.add(self)
[ "def", "move_inside", "(", "self", ",", "parent_id", ")", ":", "# noqa", "session", "=", "Session", ".", "object_session", "(", "self", ")", "self", ".", "parent_id", "=", "parent_id", "self", ".", "mptt_move_inside", "=", "parent_id", "session", ".", "add",...
Moving one node of tree inside another For example see: * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function` * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
[ "Moving", "one", "node", "of", "tree", "inside", "another" ]
python
train
37.583333
LPgenerator/django-db-mailer
dbmail/providers/pubnub/push.py
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/pubnub/push.py#L11-L27
def send(channel, message, **kwargs):
    """
    Site: http://www.pubnub.com/
    API: https://www.mashape.com/pubnub/pubnub-network
    Desc: real-time browser notifications

    Installation and usage:
    pip install -U pubnub

    Tests for browser notification
    http://127.0.0.1:8000/browser_notification/
    """
    use_ssl = kwargs.pop('ssl_on', False)
    client = Pubnub(
        publish_key=settings.PUBNUB_PUB_KEY,
        subscribe_key=settings.PUBNUB_SUB_KEY,
        secret_key=settings.PUBNUB_SEC_KEY,
        ssl_on=use_ssl, **kwargs)
    return client.publish(channel=channel, message={"text": message})
[ "def", "send", "(", "channel", ",", "message", ",", "*", "*", "kwargs", ")", ":", "pubnub", "=", "Pubnub", "(", "publish_key", "=", "settings", ".", "PUBNUB_PUB_KEY", ",", "subscribe_key", "=", "settings", ".", "PUBNUB_SUB_KEY", ",", "secret_key", "=", "se...
Site: http://www.pubnub.com/ API: https://www.mashape.com/pubnub/pubnub-network Desc: real-time browser notifications Installation and usage: pip install -U pubnub Tests for browser notification http://127.0.0.1:8000/browser_notification/
[ "Site", ":", "http", ":", "//", "www", ".", "pubnub", ".", "com", "/", "API", ":", "https", ":", "//", "www", ".", "mashape", ".", "com", "/", "pubnub", "/", "pubnub", "-", "network", "Desc", ":", "real", "-", "time", "browser", "notifications" ]
python
train
34.294118
gunthercox/ChatterBot
chatterbot/parsing.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L639-L652
def date_from_adverb(base_date, name):
    """
    Convert day adverbs to dates
    Tomorrow => Date
    Today => Date

    :param base_date: datetime the adverb is interpreted relative to
    :param name: adverb string ('today', 'tonite', 'tonight',
        'yesterday', 'tomorrow', 'tom')
    :return: datetime at midnight of the target day, or None for an
        unrecognized adverb
    """
    # Reset date to start of the day
    adverb_date = datetime(base_date.year, base_date.month, base_date.day)
    if name in ('today', 'tonite', 'tonight'):
        # Bug fix: the original returned ``adverb_date.today()``, which is
        # the classmethod ``datetime.today()`` -- the current wall-clock
        # datetime -- ignoring base_date and the start-of-day reset above.
        return adverb_date
    elif name == 'yesterday':
        return adverb_date - timedelta(days=1)
    elif name in ('tomorrow', 'tom'):
        return adverb_date + timedelta(days=1)
    return None
[ "def", "date_from_adverb", "(", "base_date", ",", "name", ")", ":", "# Reset date to start of the day", "adverb_date", "=", "datetime", "(", "base_date", ".", "year", ",", "base_date", ".", "month", ",", "base_date", ".", "day", ")", "if", "name", "==", "'toda...
Convert Day adverbs to dates Tomorrow => Date Today => Date
[ "Convert", "Day", "adverbs", "to", "dates", "Tomorrow", "=", ">", "Date", "Today", "=", ">", "Date" ]
python
train
35.357143
mezz64/pyEight
pyeight/user.py
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L258-L270
def current_resp_rate(self):
    """Return current respiratory rate for in-progress session.

    Returns None when no interval or respiratory-rate data is
    available yet.
    """
    try:
        rates = self.intervals[0]['timeseries']['respiratoryRate']
    except (KeyError, IndexError):
        # KeyError: 'timeseries'/'respiratoryRate' missing.
        # IndexError: ``intervals`` is empty -- previously uncaught and
        # crashed this method.
        return None
    if not rates:
        return None
    # Samples are (timestamp, value) pairs; the latest is last.
    return rates[-1][1]
[ "def", "current_resp_rate", "(", "self", ")", ":", "try", ":", "rates", "=", "self", ".", "intervals", "[", "0", "]", "[", "'timeseries'", "]", "[", "'respiratoryRate'", "]", "num_rates", "=", "len", "(", "rates", ")", "if", "num_rates", "==", "0", ":"...
Return current respiratory rate for in-progress session.
[ "Return", "current", "respiratory", "rate", "for", "in", "-", "progress", "session", "." ]
python
train
29
Capitains/flask-capitains-nemo
flask_nemo/__init__.py
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L375-L402
def get_reffs(self, objectId, subreference=None, collection=None, export_collection=False):
    """ Retrieve and transform a list of references.

    Returns the inventory collection object with its metadata and a
    callback function taking a level parameter and returning a list of
    strings.

    :param objectId: Collection Identifier
    :type objectId: str
    :param subreference: Subreference from which to retrieve children
    :type subreference: str
    :param collection: Collection object bearing metadata
    :type collection: Collection
    :param export_collection: Return collection metadata
    :type export_collection: bool
    :return: Either the list of references, or a (collection, references)
        tuple when ``export_collection`` is True
    :rtype: (Collection, [str]) or [str]
    """
    if collection is None:
        text = self.get_collection(objectId)
    else:
        text = collection

    def retrieve(level):
        # Callback handed to self.chunk; resolves references at the
        # requested citation level.
        return self.resolver.getReffs(objectId, level=level,
                                      subreference=subreference)

    reffs = self.chunk(text, retrieve)
    if export_collection is True:
        return text, reffs
    return reffs
[ "def", "get_reffs", "(", "self", ",", "objectId", ",", "subreference", "=", "None", ",", "collection", "=", "None", ",", "export_collection", "=", "False", ")", ":", "if", "collection", "is", "not", "None", ":", "text", "=", "collection", "else", ":", "t...
Retrieve and transform a list of references. Returns the inventory collection object with its metadata and a callback function taking a level parameter \ and returning a list of strings. :param objectId: Collection Identifier :type objectId: str :param subreference: Subreference from which to retrieve children :type subreference: str :param collection: Collection object bearing metadata :type collection: Collection :param export_collection: Return collection metadata :type export_collection: bool :return: Returns either the list of references, or the text collection object with its references as tuple :rtype: (Collection, [str]) or [str]
[ "Retrieve", "and", "transform", "a", "list", "of", "references", "." ]
python
valid
43.035714
campaignmonitor/createsend-python
lib/createsend/campaign.py
https://github.com/campaignmonitor/createsend-python/blob/4bfe2fd5cb2fc9d8f12280b23569eea0a6c66426/lib/createsend/campaign.py#L52-L88
def create_from_template(self, client_id, subject, name, from_name,
                         from_email, reply_to, list_ids, segment_ids,
                         template_id, template_content):
    """Creates a new campaign for a client, from a template.

    :param client_id: String representing the ID of the client for whom the
      campaign will be created.
    :param subject: String representing the subject of the campaign.
    :param name: String representing the name of the campaign.
    :param from_name: String representing the from name for the campaign.
    :param from_email: String representing the from address for the campaign.
    :param reply_to: String representing the reply-to address for the campaign.
    :param list_ids: Array of Strings representing the IDs of the lists to
      which the campaign will be sent.
    :param segment_ids: Array of Strings representing the IDs of the segments
      to which the campaign will be sent.
    :param template_id: String representing the ID of the template on which
      the campaign will be based.
    :param template_content: Hash representing the content to be used for the
      editable areas of the template. See documentation at
      campaignmonitor.com/api/campaigns/#creating_a_campaign_from_template
      for full details of template content format.
    :returns String representing the ID of the newly created campaign.
    """
    # Key order preserved so the serialized request body is unchanged.
    payload = json.dumps({
        "Subject": subject,
        "Name": name,
        "FromName": from_name,
        "FromEmail": from_email,
        "ReplyTo": reply_to,
        "ListIDs": list_ids,
        "SegmentIDs": segment_ids,
        "TemplateID": template_id,
        "TemplateContent": template_content})
    response = self._post(
        "/campaigns/%s/fromtemplate.json" % client_id, payload)
    self.campaign_id = json_to_py(response)
    return self.campaign_id
[ "def", "create_from_template", "(", "self", ",", "client_id", ",", "subject", ",", "name", ",", "from_name", ",", "from_email", ",", "reply_to", ",", "list_ids", ",", "segment_ids", ",", "template_id", ",", "template_content", ")", ":", "body", "=", "{", "\"...
Creates a new campaign for a client, from a template. :param client_id: String representing the ID of the client for whom the campaign will be created. :param subject: String representing the subject of the campaign. :param name: String representing the name of the campaign. :param from_name: String representing the from name for the campaign. :param from_email: String representing the from address for the campaign. :param reply_to: String representing the reply-to address for the campaign. :param list_ids: Array of Strings representing the IDs of the lists to which the campaign will be sent. :param segment_ids: Array of Strings representing the IDs of the segments to which the campaign will be sent. :param template_id: String representing the ID of the template on which the campaign will be based. :param template_content: Hash representing the content to be used for the editable areas of the template. See documentation at campaignmonitor.com/api/campaigns/#creating_a_campaign_from_template for full details of template content format. :returns String representing the ID of the newly created campaign.
[ "Creates", "a", "new", "campaign", "for", "a", "client", "from", "a", "template", "." ]
python
train
53.567568
Nekmo/amazon-dash
amazon_dash/config.py
https://github.com/Nekmo/amazon-dash/blob/0e2bdc24ff8ea32cecb2f5f54f5cc1c0f99c197b/amazon_dash/config.py#L227-L240
def read(self):
    """Parse and validate the config file.

    The read data is accessible as a dictionary in this instance.

    :raises InvalidConfig: on a decoding/YAML error or schema violation
    :return: None
    """
    try:
        # Use a context manager so the file handle is closed even when
        # parsing fails (the original left it open).
        with open(self.file) as config_file:
            data = load(config_file, Loader)
    except (UnicodeDecodeError, YAMLError) as e:
        raise InvalidConfig(self.file, '{}'.format(e))
    try:
        validate(data, SCHEMA)
    except ValidationError as e:
        raise InvalidConfig(self.file, e)
    self.update(data)
[ "def", "read", "(", "self", ")", ":", "try", ":", "data", "=", "load", "(", "open", "(", "self", ".", "file", ")", ",", "Loader", ")", "except", "(", "UnicodeDecodeError", ",", "YAMLError", ")", "as", "e", ":", "raise", "InvalidConfig", "(", "self", ...
Parse and validate the config file. The read data is accessible as a dictionary in this instance :return: None
[ "Parse", "and", "validate", "the", "config", "file", ".", "The", "read", "data", "is", "accessible", "as", "a", "dictionary", "in", "this", "instance" ]
python
test
34
DarkEnergySurvey/ugali
ugali/observation/mask.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L406-L421
def plotSolidAngleCMD(self):
    """ Solid angle within the mask as a function of color and magnitude.

    Deprecated (ADW 2018-05-05).
    """
    import warnings
    msg = "'%s.plotSolidAngleCMD': ADW 2018-05-05" % self.__class__.__name__
    # Bug fix: the original did ``DeprecationWarning(msg)``, which merely
    # constructs an exception instance and discards it (a silent no-op).
    # Actually emit the deprecation warning instead.
    warnings.warn(msg, DeprecationWarning)

    import ugali.utils.plotting

    ugali.utils.plotting.twoDimensionalHistogram(
        'mask', 'color', 'magnitude',
        self.solid_angle_cmd,
        self.roi.bins_color,
        self.roi.bins_mag,
        lim_x=[self.roi.bins_color[0], self.roi.bins_color[-1]],
        lim_y=[self.roi.bins_mag[-1], self.roi.bins_mag[0]])
[ "def", "plotSolidAngleCMD", "(", "self", ")", ":", "msg", "=", "\"'%s.plotSolidAngleCMD': ADW 2018-05-05\"", "%", "self", ".", "__class__", ".", "__name__", "DeprecationWarning", "(", "msg", ")", "import", "ugali", ".", "utils", ".", "plotting", "ugali", ".", "u...
Solid angle within the mask as a function of color and magnitude.
[ "Solid", "angle", "within", "the", "mask", "as", "a", "function", "of", "color", "and", "magnitude", "." ]
python
train
57.1875
acorg/dark-matter
dark/reads.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/reads.py#L1323-L1352
def save(self, filename, format_='fasta'):
    """
    Write the reads to C{filename} in the requested format.

    @param filename: Either a C{str} file name to save into (the file will
        be overwritten) or an open file descriptor (e.g., sys.stdout).
    @param format_: A C{str} format to save as, either 'fasta', 'fastq' or
        'fasta-ss'.
    @raise ValueError: if C{format_} is 'fastq' and a read with no quality
        is present, or if an unknown format is requested.
    @return: An C{int} giving the number of reads in C{self}.
    """
    format_ = format_.lower()

    def writeReads(fp):
        # Write every read to fp; returns the number written.
        n = 0
        for read in self:
            fp.write(read.toString(format_))
            n += 1
        return n

    if isinstance(filename, str):
        try:
            with open(filename, 'w') as fp:
                count = writeReads(fp)
        except ValueError:
            # A bad format or missing quality was hit part-way through;
            # don't leave a truncated file behind.
            unlink(filename)
            raise
    else:
        # We have a file-like object.
        count = writeReads(filename)
    return count
[ "def", "save", "(", "self", ",", "filename", ",", "format_", "=", "'fasta'", ")", ":", "format_", "=", "format_", ".", "lower", "(", ")", "count", "=", "0", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "try", ":", "with", "open", "(",...
Write the reads to C{filename} in the requested format. @param filename: Either a C{str} file name to save into (the file will be overwritten) or an open file descriptor (e.g., sys.stdout). @param format_: A C{str} format to save as, either 'fasta', 'fastq' or 'fasta-ss'. @raise ValueError: if C{format_} is 'fastq' and a read with no quality is present, or if an unknown format is requested. @return: An C{int} giving the number of reads in C{self}.
[ "Write", "the", "reads", "to", "C", "{", "filename", "}", "in", "the", "requested", "format", "." ]
python
train
37.566667
Gorialis/jishaku
jishaku/cog.py
https://github.com/Gorialis/jishaku/blob/fc7c479b9d510ede189a929c8aa6f7c8ef7f9a6e/jishaku/cog.py#L217-L238
async def jsk_cancel(self, ctx: commands.Context, *, index: int):
    """
    Cancels a task with the given index.

    If the index passed is -1, will cancel the last task instead.
    """
    if not self.tasks:
        return await ctx.send("No tasks to cancel.")

    if index == -1:
        # Most recently started task; pop() already removes it.
        task = self.tasks.pop()
    else:
        task = discord.utils.get(self.tasks, index=index)
        if not task:
            return await ctx.send("Unknown task.")
        self.tasks.remove(task)

    task.task.cancel()
    return await ctx.send(f"Cancelled task {task.index}: `{task.ctx.command.qualified_name}`,"
                          f" invoked at {task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC")
[ "async", "def", "jsk_cancel", "(", "self", ",", "ctx", ":", "commands", ".", "Context", ",", "*", ",", "index", ":", "int", ")", ":", "if", "not", "self", ".", "tasks", ":", "return", "await", "ctx", ".", "send", "(", "\"No tasks to cancel.\"", ")", ...
Cancels a task with the given index. If the index passed is -1, will cancel the last task instead.
[ "Cancels", "a", "task", "with", "the", "given", "index", "." ]
python
train
35.318182
gebn/wood
wood/comparison.py
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/comparison.py#L224-L236
def is_modified(self) -> bool: """ Find whether the files on the left and right are different. Note, modified implies the contents of the file have changed, which is predicated on the file existing on both the left and right. Therefore this will be false if the file on the left has been deleted, or the file on the right is new. :return: Whether the file has been modified. """ if self.is_new or self.is_deleted: return False return self.left.md5 != self.right.md5
[ "def", "is_modified", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "is_new", "or", "self", ".", "is_deleted", ":", "return", "False", "return", "self", ".", "left", ".", "md5", "!=", "self", ".", "right", ".", "md5" ]
Find whether the files on the left and right are different. Note, modified implies the contents of the file have changed, which is predicated on the file existing on both the left and right. Therefore this will be false if the file on the left has been deleted, or the file on the right is new. :return: Whether the file has been modified.
[ "Find", "whether", "the", "files", "on", "the", "left", "and", "right", "are", "different", ".", "Note", "modified", "implies", "the", "contents", "of", "the", "file", "have", "changed", "which", "is", "predicated", "on", "the", "file", "existing", "on", "...
python
train
42
elehcimd/pynb
notebooks/sum.py
https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/notebooks/sum.py#L1-L11
def cells(a, b): ''' # Sum ''' a, b = int(a), int(b) ''' ''' a + b
[ "def", "cells", "(", "a", ",", "b", ")", ":", "a", ",", "b", "=", "int", "(", "a", ")", ",", "int", "(", "b", ")", "'''\n '''", "a", "+", "b" ]
# Sum
[ "#", "Sum" ]
python
train
7.909091
kgiusti/pyngus
pyngus/connection.py
https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/pyngus/connection.py#L609-L620
def output_data(self): """Get a buffer of data that needs to be written to the network. """ c = self.has_output if c <= 0: return None try: buf = self._pn_transport.peek(c) except Exception as e: self._connection_failed(str(e)) return None return buf
[ "def", "output_data", "(", "self", ")", ":", "c", "=", "self", ".", "has_output", "if", "c", "<=", "0", ":", "return", "None", "try", ":", "buf", "=", "self", ".", "_pn_transport", ".", "peek", "(", "c", ")", "except", "Exception", "as", "e", ":", ...
Get a buffer of data that needs to be written to the network.
[ "Get", "a", "buffer", "of", "data", "that", "needs", "to", "be", "written", "to", "the", "network", "." ]
python
test
28.583333