repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
jobovy/galpy
galpy/util/bovy_coords.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L660-L716
def pmllpmbb_to_pmrapmdec(pmll,pmbb,l,b,degree=False,epoch=2000.0):
    """
    NAME:

       pmllpmbb_to_pmrapmdec

    PURPOSE:

       rotate proper motions in (l,b) into proper motions in (ra,dec)

    INPUT:

       pmll - proper motion in l (multiplied with cos(b)) [mas/yr]

       pmbb - proper motion in b [mas/yr]

       l - Galactic longitude

       b - Galactic latitude

       degree - if True, l and b are given in degrees (default=False)

       epoch - epoch of ra,dec (right now only 2000.0 and 1950.0 are supported when not using astropy's transformations internally; when internally using astropy's coordinate transformations, epoch can be None for ICRS, 'JXXXX' for FK5, and 'BXXXX' for FK4)

    OUTPUT:

       (pmra x cos(dec),pmdec), for vector inputs [:,2]

    HISTORY:

       2010-04-07 - Written - Bovy (NYU)

       2014-06-14 - Re-written w/ numpy functions for speed and w/ decorators for beauty - Bovy (IAS)

    """
    # theta (position angle of the NGP) is not needed below; only the NGP's
    # equatorial coordinates enter the rotation of the proper-motion vector.
    theta,dec_ngp,ra_ngp= get_epoch_angles(epoch)
    #Whether to use degrees and scalar input is handled by decorators
    radec = lb_to_radec(l,b,degree=False,epoch=epoch)
    ra= radec[:,0]
    dec= radec[:,1]
    # Nudge points sitting exactly on the pole so the rotation angle below
    # is well defined (avoids a 0/0 in the phi normalization).
    dec[dec == dec_ngp]+= 10.**-16 #deal w/ pole.
    sindec_ngp= nu.sin(dec_ngp)
    cosdec_ngp= nu.cos(dec_ngp)
    sindec= nu.sin(dec)
    cosdec= nu.cos(dec)
    sinrarangp= nu.sin(ra-ra_ngp)
    cosrarangp= nu.cos(ra-ra_ngp)
    #These were replaced by Poleski (2013)'s equivalent form that is better at the poles
    #cosphi= (sindec_ngp-sindec*sinb)/cosdec/cosb
    #sinphi= sinrarangp*cosdec_ngp/cosb
    cosphi= sindec_ngp*cosdec-cosdec_ngp*sindec*cosrarangp
    sinphi= sinrarangp*cosdec_ngp
    # Normalize (cosphi,sinphi) to a unit rotation, then apply the 2x2
    # rotation matrix to each (pmll,pmbb) pair and sum over the last axis.
    norm= nu.sqrt(cosphi**2.+sinphi**2.)
    cosphi/= norm
    sinphi/= norm
    return (nu.array([[cosphi,sinphi],[-sinphi,cosphi]]).T\
                *nu.array([[pmll,pmll],[pmbb,pmbb]]).T).sum(-1)
[ "def", "pmllpmbb_to_pmrapmdec", "(", "pmll", ",", "pmbb", ",", "l", ",", "b", ",", "degree", "=", "False", ",", "epoch", "=", "2000.0", ")", ":", "theta", ",", "dec_ngp", ",", "ra_ngp", "=", "get_epoch_angles", "(", "epoch", ")", "#Whether to use degrees a...
NAME: pmllpmbb_to_pmrapmdec PURPOSE: rotate proper motions in (l,b) into proper motions in (ra,dec) INPUT: pmll - proper motion in l (multplied with cos(b)) [mas/yr] pmbb - proper motion in b [mas/yr] l - Galactic longitude b - Galactic lattitude degree - if True, l and b are given in degrees (default=False) epoch - epoch of ra,dec (right now only 2000.0 and 1950.0 are supported when not using astropy's transformations internally; when internally using astropy's coordinate transformations, epoch can be None for ICRS, 'JXXXX' for FK5, and 'BXXXX' for FK4) OUTPUT: (pmra x cos(dec),pmdec), for vector inputs [:,2] HISTORY: 2010-04-07 - Written - Bovy (NYU) 2014-06-14 - Re-written w/ numpy functions for speed and w/ decorators for beauty - Bovy (IAS)
[ "NAME", ":" ]
python
train
toomore/goristock
grs/goristock.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/goristock.py#L186-L231
def fetch_data(self, stock_no, nowdatetime, firsttime = 1):
    """ Fetch daily trading data for *stock_no* from twse.com.tw and
        return it as a csv reader (list-like of rows).

        Results are cached in memcache per (stock, year, month); the cache
        expiry is computed from the market's daily data-publication time.

        :param stock_no: stock symbol to fetch.
        :param nowdatetime: datetime whose year/month select the report.
        :param firsttime: 1 for historical months (cache never expires),
                          0 for the current month (cache expires at the
                          next publication time).
    """
    url = 'http://www.twse.com.tw/ch/trading/exchange/STOCK_DAY/STOCK_DAY_print.php?genpage=genpage/Report%(year)d%(mon)02d/%(year)d%(mon)02d_F3_1_8_%(stock)s.php&type=csv&r=%(rand)s' % {'year': nowdatetime.year, 'mon': nowdatetime.month, 'stock': stock_no, 'rand': random.randrange(1,1000000)}
    self.debug_print(url)
    logging.info(url)
    #print cc.info().headers
    # set memcache expire
    # NOTE(review): TWTime().now presumably returns Taiwan local time —
    # confirm; naive datetimes below are compared against it directly.
    now = TWTime().now
    # After 14:45 today's data is assumed published, so the cache window
    # rolls over to tomorrow's 14:00 deadline.
    if now >= datetime(now.year, now.month, now.day, 14, 45):
        addday = 1
    else:
        addday = 0
    endtime = datetime(now.year, now.month, now.day, 14, 00) + timedelta(days = addday) ## change from 13:35 to 14:00
    logging.info('endtime: %s' % str(endtime))
    if firsttime == 0:
        if endtime <= now:
            expire = 'ALUP' ## always update.
        else:
            expire = (endtime - now).seconds
    else:
        expire = 0 ## never expire.
    logging.info('expire: %s' % expire)
    ## get memcache
    memname = '%(stock)s%(year)d%(mon)02d' % {'year': nowdatetime.year, 'mon': nowdatetime.month,'stock': stock_no}
    stkm = memcache.get(memname)
    if stkm:
        csv_read = csv.reader(stkm)
        logging.info('#MemcacheGet: %s' % memname)
    else:
        cc = urllib2.urlopen(url)
        cc_read = cc.readlines()
        csv_read = csv.reader(cc_read)
        # 'ALUP' means always re-fetch: drop any stale entry instead of caching.
        if expire != 'ALUP':
            memcache.add(memname, cc_read, expire)
        else:
            memcache.delete(memname)
        memcache.add('time%s' % memname, '%s %s' % (now, expire))
        logging.info('#MemcacheAdd: %s' % memname)
    return csv_read
[ "def", "fetch_data", "(", "self", ",", "stock_no", ",", "nowdatetime", ",", "firsttime", "=", "1", ")", ":", "url", "=", "'http://www.twse.com.tw/ch/trading/exchange/STOCK_DAY/STOCK_DAY_print.php?genpage=genpage/Report%(year)d%(mon)02d/%(year)d%(mon)02d_F3_1_8_%(stock)s.php&type=csv&...
Fetch data from twse.com.tw return list. 從 twse.com.tw 下載資料,回傳格式為 list
[ "Fetch", "data", "from", "twse", ".", "com", ".", "tw", "return", "list", ".", "從", "twse", ".", "com", ".", "tw", "下載資料,回傳格式為", "list" ]
python
train
havardgulldahl/jottalib
src/jottalib/JFS.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L940-L943
def files(self):
    'iterate over found files'
    # Wrap each raw search-result child in the appropriate ProtoFile subclass.
    children = self.searchresult.files.iterchildren()
    for child in children:
        yield ProtoFile.factory(child, jfs=self.jfs,
                                parentpath=unicode(child.abspath))
[ "def", "files", "(", "self", ")", ":", "for", "_f", "in", "self", ".", "searchresult", ".", "files", ".", "iterchildren", "(", ")", ":", "yield", "ProtoFile", ".", "factory", "(", "_f", ",", "jfs", "=", "self", ".", "jfs", ",", "parentpath", "=", "...
iterate over found files
[ "iterate", "over", "found", "files" ]
python
train
google/grr
grr/server/grr_response_server/notification.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/notification.py#L36-L179
def _MapLegacyArgs(nt, message, ref):
  """Maps UserNotification object to legacy GRRUser.Notify arguments.

  Args:
    nt: An rdf_objects.UserNotification.Type value.
    message: The human-readable notification message string.
    ref: The notification's reference object; which sub-attribute is read
      (ref.client, ref.hunt, ref.flow, ...) depends on the type.

  Returns:
    A 4-element list [notification_type, subject, message, source] matching
    the legacy GRRUser.Notify signature.

  Raises:
    NotImplementedError: If nt is not a recognized notification type.
  """
  unt = rdf_objects.UserNotification.Type
  if nt == unt.TYPE_CLIENT_INTERROGATED:
    return [
        "Discovery",
        aff4.ROOT_URN.Add(ref.client.client_id),
        _HostPrefix(ref.client.client_id) + message,
        "",
    ]
  # Approval requests all map to "GrantAccess" with an ACL URN of the form
  # aff4:/ACL[/<kind>]/<subject>/<requestor>/<approval_id>.
  elif nt == unt.TYPE_CLIENT_APPROVAL_REQUESTED:
    return [
        "GrantAccess",
        aff4.ROOT_URN.Add("ACL").Add(ref.approval_request.subject_id).Add(
            ref.approval_request.requestor_username).Add(
                ref.approval_request.approval_id),
        message,
        "",
    ]
  elif nt == unt.TYPE_HUNT_APPROVAL_REQUESTED:
    return [
        "GrantAccess",
        aff4.ROOT_URN.Add("ACL").Add("hunts").Add(
            ref.approval_request.subject_id).Add(
                ref.approval_request.requestor_username).Add(
                    ref.approval_request.approval_id),
        message,
        "",
    ]
  elif nt == unt.TYPE_CRON_JOB_APPROVAL_REQUESTED:
    return [
        "GrantAccess",
        aff4.ROOT_URN.Add("ACL").Add("cron").Add(
            ref.approval_request.subject_id).Add(
                ref.approval_request.requestor_username).Add(
                    ref.approval_request.approval_id),
        message,
        "",
    ]
  # Granted approvals map to "ViewObject" pointing at the approved entity.
  elif nt == unt.TYPE_CLIENT_APPROVAL_GRANTED:
    return [
        "ViewObject",
        aff4.ROOT_URN.Add(ref.client.client_id),
        message,
        "",
    ]
  elif nt == unt.TYPE_HUNT_APPROVAL_GRANTED:
    return [
        "ViewObject",
        aff4.ROOT_URN.Add("hunts").Add(ref.hunt.hunt_id),
        message,
        "",
    ]
  elif nt == unt.TYPE_CRON_JOB_APPROVAL_GRANTED:
    return [
        "ViewObject",
        aff4.ROOT_URN.Add("cron").Add(ref.cron_job.cron_job_id),
        message,
        "",
    ]
  # Collected and failed-collection VFS notifications intentionally map to
  # the same legacy arguments; the message text carries the distinction.
  elif nt == unt.TYPE_VFS_FILE_COLLECTED:
    return [
        "ViewObject",
        ref.vfs_file.ToURN(),
        _HostPrefix(ref.vfs_file.client_id) + message,
        "",
    ]
  elif nt == unt.TYPE_VFS_FILE_COLLECTION_FAILED:
    return [
        "ViewObject",
        ref.vfs_file.ToURN(),
        _HostPrefix(ref.vfs_file.client_id) + message,
        "",
    ]
  elif nt == unt.TYPE_HUNT_STOPPED:
    urn = aff4.ROOT_URN.Add("hunts").Add(ref.hunt.hunt_id)
    return [
        "ViewObject",
        urn,
        message,
        urn,
    ]
  elif nt == unt.TYPE_FILE_ARCHIVE_GENERATED:
    return [
        "ArchiveGenerationFinished",
        None,
        message,
        "",
    ]
  elif nt == unt.TYPE_FILE_ARCHIVE_GENERATION_FAILED:
    return [
        "Error",
        None,
        message,
        "",
    ]
  elif nt == unt.TYPE_FLOW_RUN_COMPLETED:
    # The flow URN is only built when the reference is fully populated.
    urn = None
    if ref.flow and ref.flow.client_id and ref.flow.flow_id:
      urn = aff4.ROOT_URN.Add(ref.flow.client_id).Add("flows").Add(
          ref.flow.flow_id)
    return [
        "ViewObject",
        urn,
        _HostPrefix(ref.flow.client_id) + message,
        "",
    ]
  elif nt == unt.TYPE_FLOW_RUN_FAILED:
    # Unlike the completed case, each field degrades independently here:
    # missing flow/client/flow_id yields None/empty-prefix rather than a URN.
    client_id = None
    urn = None
    prefix = ""
    if ref.flow is not None:
      client_id = ref.flow.client_id
      if client_id:
        prefix = _HostPrefix(client_id)
      if ref.flow.flow_id:
        urn = aff4.ROOT_URN.Add(ref.flow.client_id).Add("flows").Add(
            ref.flow.flow_id)
    return [
        "FlowStatus",
        client_id,
        prefix + message,
        urn,
    ]
  elif nt == unt.TYPE_VFS_LIST_DIRECTORY_COMPLETED:
    return [
        "ViewObject",
        ref.vfs_file.ToURN(),
        message,
        "",
    ]
  elif nt == unt.TYPE_VFS_RECURSIVE_LIST_DIRECTORY_COMPLETED:
    return [
        "ViewObject",
        ref.vfs_file.ToURN(),
        message,
        "",
    ]
  else:
    raise NotImplementedError()
[ "def", "_MapLegacyArgs", "(", "nt", ",", "message", ",", "ref", ")", ":", "unt", "=", "rdf_objects", ".", "UserNotification", ".", "Type", "if", "nt", "==", "unt", ".", "TYPE_CLIENT_INTERROGATED", ":", "return", "[", "\"Discovery\"", ",", "aff4", ".", "ROO...
Maps UserNotification object to legacy GRRUser.Notify arguments.
[ "Maps", "UserNotification", "object", "to", "legacy", "GRRUser", ".", "Notify", "arguments", "." ]
python
train
moderngl/moderngl
moderngl/context.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L1135-L1169
def create_standalone_context(require=None, **settings) -> 'Context':
    '''
    Create a headless (standalone) ModernGL context.

    The backend can be overridden via the ``MODERNGL_BACKEND``
    environment variable, which takes precedence over any ``backend``
    keyword passed in ``settings``.

    Example::

        # Create a context with highest possible supported version
        ctx = moderngl.create_context()

        # Require at least OpenGL 4.3
        ctx = moderngl.create_context(require=430)

    Keyword Arguments:
        require (int): Minimum acceptable OpenGL version code (e.g. 430).

    Returns:
        :py:class:`Context` object

    Raises:
        ValueError: If the created context's version is below ``require``.
    '''
    env_backend = os.environ.get('MODERNGL_BACKEND')
    if env_backend is not None:
        settings['backend'] = env_backend

    # Bypass Context.__init__ and populate the instance manually.
    ctx = Context.__new__(Context)
    ctx.mglo, ctx.version_code = mgl.create_standalone_context(settings)
    ctx._screen = None
    ctx.fbo = None
    ctx._info = None
    ctx.extra = None

    version_ok = require is None or ctx.version_code >= require
    if not version_ok:
        raise ValueError('Requested OpenGL version {}, got version {}'.format(
            require, ctx.version_code))

    return ctx
[ "def", "create_standalone_context", "(", "require", "=", "None", ",", "*", "*", "settings", ")", "->", "'Context'", ":", "backend", "=", "os", ".", "environ", ".", "get", "(", "'MODERNGL_BACKEND'", ")", "if", "backend", "is", "not", "None", ":", "settings"...
Create a standalone ModernGL context. Example:: # Create a context with highest possible supported version ctx = moderngl.create_context() # Require at least OpenGL 4.3 ctx = moderngl.create_context(require=430) Keyword Arguments: require (int): OpenGL version code. Returns: :py:class:`Context` object
[ "Create", "a", "standalone", "ModernGL", "context", "." ]
python
train
Tanganelli/CoAPthon3
coapthon/forward_proxy/coap.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/forward_proxy/coap.py#L330-L340
def _start_separate_timer(self, transaction):
    """
    Start a thread to handle separate mode.

    :type transaction: Transaction
    :param transaction: the transaction that is in processing
    :rtype : the Timer object
    """
    # Fire an ACK after ACK_TIMEOUT unless the response arrives first
    # (caller is expected to cancel the returned timer in that case).
    ack_timer = threading.Timer(defines.ACK_TIMEOUT,
                                self._send_ack,
                                (transaction,))
    ack_timer.start()
    return ack_timer
[ "def", "_start_separate_timer", "(", "self", ",", "transaction", ")", ":", "t", "=", "threading", ".", "Timer", "(", "defines", ".", "ACK_TIMEOUT", ",", "self", ".", "_send_ack", ",", "(", "transaction", ",", ")", ")", "t", ".", "start", "(", ")", "ret...
Start a thread to handle separate mode. :type transaction: Transaction :param transaction: the transaction that is in processing :rtype : the Timer object
[ "Start", "a", "thread", "to", "handle", "separate", "mode", "." ]
python
train
googlefonts/ufo2ft
Lib/ufo2ft/featureWriters/baseFeatureWriter.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureWriters/baseFeatureWriter.py#L70-L95
def setContext(self, font, feaFile, compiler=None):
    """Populate and return a temporary `self.context` namespace.

    The namespace is reset on every `_write` call. Subclasses can
    override this to add contextual data or temporary attributes.

    The default implementation records:
    - font: the current font;
    - feaFile: the current FeatureFile object;
    - compiler: the current compiler instance (set only when this writer
      was instantiated from a FeatureCompiler);
    - todo: the set of feature tags to generate. In "skip" mode this is
      reduced to the features not already present in the feature file.
    """
    todo = set(self.features)
    if self.mode == "skip":
        # Don't regenerate features the .fea file already defines.
        todo.difference_update(ast.findFeatureTags(feaFile))
    self.context = SimpleNamespace(
        font=font, feaFile=feaFile, compiler=compiler, todo=todo
    )
    return self.context
[ "def", "setContext", "(", "self", ",", "font", ",", "feaFile", ",", "compiler", "=", "None", ")", ":", "todo", "=", "set", "(", "self", ".", "features", ")", "if", "self", ".", "mode", "==", "\"skip\"", ":", "existing", "=", "ast", ".", "findFeatureT...
Populate a temporary `self.context` namespace, which is reset after each new call to `_write` method. Subclasses can override this to provide contextual information which depends on other data, or set any temporary attributes. The default implementation sets: - the current font; - the current FeatureFile object; - the current compiler instance (only present when this writer was instantiated from a FeatureCompiler); - a set of features (tags) to be generated. If self.mode is "skip", these are all the features which are _not_ already present. Returns the context namespace instance.
[ "Populate", "a", "temporary", "self", ".", "context", "namespace", "which", "is", "reset", "after", "each", "new", "call", "to", "_write", "method", ".", "Subclasses", "can", "override", "this", "to", "provide", "contextual", "information", "which", "depends", ...
python
train
fangpenlin/pyramid-handy
pyramid_handy/tweens/basic_auth.py
https://github.com/fangpenlin/pyramid-handy/blob/e3cbc19224ab1f0a14aab556990bceabd2d1f658/pyramid_handy/tweens/basic_auth.py#L31-L41
def basic_auth_tween_factory(handler, registry):
    """Do basic authentication, parse HTTP_AUTHORIZATION and set
    remote_user variable to request
    """
    def basic_auth_tween(request):
        # get_remote_user returns (username, password) or None.
        credentials = get_remote_user(request)
        if credentials is not None:
            request.environ['REMOTE_USER'] = credentials[0]
        return handler(request)

    return basic_auth_tween
[ "def", "basic_auth_tween_factory", "(", "handler", ",", "registry", ")", ":", "def", "basic_auth_tween", "(", "request", ")", ":", "remote_user", "=", "get_remote_user", "(", "request", ")", "if", "remote_user", "is", "not", "None", ":", "request", ".", "envir...
Do basic authentication, parse HTTP_AUTHORIZATION and set remote_user variable to request
[ "Do", "basic", "authentication", "parse", "HTTP_AUTHORIZATION", "and", "set", "remote_user", "variable", "to", "request" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L1114-L1126
def clear_rubric(self):
    """Clears the rubric.

    raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()``
           is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.ResourceForm.clear_avatar_template
    # Short-circuit preserved: is_required() is only consulted when the
    # metadata is not read-only.
    read_only = self.get_rubric_metadata().is_read_only()
    if read_only or self.get_rubric_metadata().is_required():
        raise errors.NoAccess()
    self._my_map['rubricId'] = self._rubric_default
[ "def", "clear_rubric", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.clear_avatar_template", "if", "(", "self", ".", "get_rubric_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_rubric_metadata", "(", ")", ...
Clears the rubric. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Clears", "the", "rubric", "." ]
python
train
obsrvbl/flowlogs-reader
flowlogs_reader/__main__.py
https://github.com/obsrvbl/flowlogs-reader/blob/248d8cb3cc586859b6744d30cebce0f359d9900c/flowlogs_reader/__main__.py#L77-L87
def action_aggregate(reader, *args):
    """Aggregate flow records by 5-tuple and print a tab-separated stream"""
    aggregated = aggregated_records(reader)
    # Pull one row up front so we can derive (and print) the header.
    first = next(aggregated)
    columns = sorted(first.keys())
    print(*columns, sep='\t')

    # Re-attach the consumed first row before streaming the remainder.
    for record in chain([first], aggregated):
        print(*[record[column] for column in columns], sep='\t')
[ "def", "action_aggregate", "(", "reader", ",", "*", "args", ")", ":", "all_aggregated", "=", "aggregated_records", "(", "reader", ")", "first_row", "=", "next", "(", "all_aggregated", ")", "keys", "=", "sorted", "(", "first_row", ".", "keys", "(", ")", ")"...
Aggregate flow records by 5-tuple and print a tab-separated stream
[ "Aggregate", "flow", "records", "by", "5", "-", "tuple", "and", "print", "a", "tab", "-", "separated", "stream" ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/Ambiente.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Ambiente.py#L116-L158
def buscar_por_equipamento(self, nome_equipamento, ip_equipamento):
    """Look up the environment associated with an equipment's name and IP.

    :param nome_equipamento: Equipment name.
    :param ip_equipamento: Equipment IP in the XXX.XXX.XXX.XXX format.

    :return: Dictionary with the following structure:

    ::

      {'ambiente': {'id': < id_ambiente >,
      'link': < link >,
      'id_divisao': < id_divisao >,
      'nome_divisao': < nome_divisao >,
      'id_ambiente_logico': < id_ambiente_logico >,
      'nome_ambiente_logico': < nome_ambiente_logico >,
      'id_grupo_l3': < id_grupo_l3 >,
      'nome_grupo_l3': < nome_grupo_l3 >,
      'id_filter': < id_filter >,
      'filter_name': < filter_name >,
      'ambiente_rede': < ambiente_rede >}}

    :raise IpError: IP not registered for the equipment.
    :raise InvalidParameterError: Equipment name and/or IP is empty or
        None, or the IP is invalid.
    :raise EquipamentoNaoExisteError: Equipment not registered.
    :raise DataBaseError: NetworkAPI failed to access the database.
    :raise XMLError: NetworkAPI failed to build the XML response.
    """
    if nome_equipamento == '' or nome_equipamento is None:
        raise InvalidParameterError(
            u'O nome do equipamento não foi informado.')

    if not is_valid_ip(ip_equipamento):
        raise InvalidParameterError(
            u'O IP do equipamento é inválido ou não foi informado.')

    # The name is URL-quoted; the IP is assumed already URL-safe after
    # is_valid_ip() validation.
    url = 'ambiente/equipamento/' + \
        urllib.quote(nome_equipamento) + '/ip/' + str(ip_equipamento) + '/'

    code, xml = self.submit(None, 'GET', url)

    return self.response(code, xml)
[ "def", "buscar_por_equipamento", "(", "self", ",", "nome_equipamento", ",", "ip_equipamento", ")", ":", "if", "nome_equipamento", "==", "''", "or", "nome_equipamento", "is", "None", ":", "raise", "InvalidParameterError", "(", "u'O nome do equipamento não foi informado.')"...
Obtém um ambiente a partir do ip e nome de um equipamento. :param nome_equipamento: Nome do equipamento. :param ip_equipamento: IP do equipamento no formato XXX.XXX.XXX.XXX. :return: Dicionário com a seguinte estrutura: :: {'ambiente': {'id': < id_ambiente >, 'link': < link >, 'id_divisao': < id_divisao >, 'nome_divisao': < nome_divisao >, 'id_ambiente_logico': < id_ambiente_logico >, 'nome_ambiente_logico': < nome_ambiente_logico >, 'id_grupo_l3': < id_grupo_l3 >, 'nome_grupo_l3': < nome_grupo_l3 >, 'id_filter': < id_filter >, 'filter_name': < filter_name >, 'ambiente_rede': < ambiente_rede >}} :raise IpError: IP não cadastrado para o equipamento. :raise InvalidParameterError: O nome e/ou o IP do equipamento são vazios ou nulos, ou o IP é inválido. :raise EquipamentoNaoExisteError: Equipamento não cadastrado. :raise DataBaseError: Falha na networkapi ao acessar o banco de dados. :raise XMLError: Falha na networkapi ao gerar o XML de resposta.
[ "Obtém", "um", "ambiente", "a", "partir", "do", "ip", "e", "nome", "de", "um", "equipamento", "." ]
python
train
obriencj/python-javatools
javatools/distdiff.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/distdiff.py#L591-L617
def add_distdiff_optgroup(parser):
    """
    Add the distribution-checking options to an argparse parser.

    Option group relating to the use of a DistChange or DistReport.

    :param parser: an ``argparse.ArgumentParser`` (or compatible) to
        extend in place.
    """

    # for the --processes default
    cpus = cpu_count()

    og = parser.add_argument_group("Distribution Checking Options")

    og.add_argument("--processes", type=int, default=cpus,
                    help="Number of child processes to spawn to handle"
                    " sub-reports. Set to 0 to disable multi-processing."
                    " Defaults to the number of CPUs (%r)" % cpus)

    # BUG FIX: the original adjacent string literals concatenated to
    # "...havechanged..." -- a space was missing at the join point.
    og.add_argument("--shallow", action="store_true", default=False,
                    help="Check only that the files of this dist have"
                    " changed, do not infer the meaning")

    og.add_argument("--ignore-filenames", action="append", default=[],
                    help="file glob to ignore. Can be specified multiple"
                    " times")

    og.add_argument("--ignore-trailing-whitespace",
                    action="store_true", default=False,
                    help="ignore trailing whitespace when comparing text"
                    " files")
[ "def", "add_distdiff_optgroup", "(", "parser", ")", ":", "# for the --processes default", "cpus", "=", "cpu_count", "(", ")", "og", "=", "parser", ".", "add_argument_group", "(", "\"Distribution Checking Options\"", ")", "og", ".", "add_argument", "(", "\"--processes\...
Option group relating to the use of a DistChange or DistReport
[ "Option", "group", "relating", "to", "the", "use", "of", "a", "DistChange", "or", "DistReport" ]
python
train
ansible/molecule
molecule/command/init/scenario.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/command/init/scenario.py#L162-L186
def scenario(ctx, dependency_name, driver_name, lint_name, provisioner_name,
             role_name, scenario_name, verifier_name):  # pragma: no cover
    """ Initialize a new scenario for use with Molecule. """
    command_args = {
        'dependency_name': dependency_name,
        'driver_name': driver_name,
        'lint_name': lint_name,
        'provisioner_name': provisioner_name,
        'role_name': role_name,
        'scenario_name': scenario_name,
        'subcommand': __name__,
        'verifier_name': verifier_name,
    }

    # Each verifier implies a specific lint tool.
    lint_for_verifier = {
        'inspec': 'rubocop',
        'goss': 'yamllint',
        'ansible': 'ansible-lint',
    }
    if verifier_name in lint_for_verifier:
        command_args['verifier_lint_name'] = lint_for_verifier[verifier_name]

    Scenario(command_args).execute()
[ "def", "scenario", "(", "ctx", ",", "dependency_name", ",", "driver_name", ",", "lint_name", ",", "provisioner_name", ",", "role_name", ",", "scenario_name", ",", "verifier_name", ")", ":", "# pragma: no cover", "command_args", "=", "{", "'dependency_name'", ":", ...
Initialize a new scenario for use with Molecule.
[ "Initialize", "a", "new", "scenario", "for", "use", "with", "Molecule", "." ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/templite.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/templite.py#L17-L25
def add_line(self, line):
    """Add a line of source to the code.

    Don't include indentations or newlines.

    """
    # Emit indent, line, and newline as three separate list entries,
    # matching how the code buffer is consumed elsewhere.
    indent = " " * self.indent_amount
    self.code.extend((indent, line, "\n"))
[ "def", "add_line", "(", "self", ",", "line", ")", ":", "self", ".", "code", ".", "append", "(", "\" \"", "*", "self", ".", "indent_amount", ")", "self", ".", "code", ".", "append", "(", "line", ")", "self", ".", "code", ".", "append", "(", "\"\\n\"...
Add a line of source to the code. Don't include indentations or newlines.
[ "Add", "a", "line", "of", "source", "to", "the", "code", "." ]
python
test
f3at/feat
src/feat/agencies/net/agency.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/net/agency.py#L522-L524
def reconfigure_database(self, host, port, name='feat'):
    '''force database reconnector to connect to the (host, port, name)'''
    reconnector = self._database
    reconnector.reconfigure(host, port, name)
[ "def", "reconfigure_database", "(", "self", ",", "host", ",", "port", ",", "name", "=", "'feat'", ")", ":", "self", ".", "_database", ".", "reconfigure", "(", "host", ",", "port", ",", "name", ")" ]
force database reconnector to connect to the (host, port, name)
[ "force", "database", "reconnector", "to", "connect", "to", "the", "(", "host", "port", "name", ")" ]
python
train
jcushman/pdfquery
pdfquery/pdfquery.py
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L74-L83
def _comp_bbox(el, el2):
    """ Return 1 if el in el2, -1 if el2 in el, else 0"""
    # Containment is only meaningful when both elements carry the
    # required x/y coordinate keys.
    both_have_coords = (_comp_bbox_keys_required <= set(el.keys())
                        and _comp_bbox_keys_required <= set(el2.keys()))
    if not both_have_coords:
        return 0
    if _box_in_box(el2, el):
        return 1
    if _box_in_box(el, el2):
        return -1
    return 0
[ "def", "_comp_bbox", "(", "el", ",", "el2", ")", ":", "# only compare if both elements have x/y coordinates\r", "if", "_comp_bbox_keys_required", "<=", "set", "(", "el", ".", "keys", "(", ")", ")", "and", "_comp_bbox_keys_required", "<=", "set", "(", "el2", ".", ...
Return 1 if el in el2, -1 if el2 in el, else 0
[ "Return", "1", "if", "el", "in", "el2", "-", "1", "if", "el2", "in", "el", "else", "0" ]
python
train
pymacaron/pymacaron
pymacaron/utils.py
https://github.com/pymacaron/pymacaron/blob/af244f203f8216108b39d374d46bf8e1813f13d5/pymacaron/utils.py#L68-L76
def get_container_version():
    """Return the version of the docker container running the present
    server, or '' if not in a container"""
    # The VERSION file is expected to sit next to the entry-point script.
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    version_path = os.path.join(script_dir, 'VERSION')
    if not os.path.exists(version_path):
        return ''
    with open(version_path) as handle:
        return handle.read()
[ "def", "get_container_version", "(", ")", ":", "root_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "version_file", "=", "os", ".", "path", ".", "join", "(", ...
Return the version of the docker container running the present server, or '' if not in a container
[ "Return", "the", "version", "of", "the", "docker", "container", "running", "the", "present", "server", "or", "if", "not", "in", "a", "container" ]
python
train
clalancette/pycdlib
pycdlib/rockridge.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L1083-L1100
def record(self):
    # type: () -> bytes
    '''
    Generate a string representing the Rock Ridge Symbolic Link record.

    Parameters:
     None.
    Returns:
     String containing the Rock Ridge record.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')

    # SL header: signature, then (length, SU entry version, flags) packed
    # as three unsigned bytes, followed by every symlink component record.
    header = struct.pack('=BBB',
                         self.current_length(),
                         SU_ENTRY_VERSION,
                         self.flags)
    pieces = [b'SL', header]
    pieces.extend(comp.record() for comp in self.symlink_components)

    return b''.join(pieces)
[ "def", "record", "(", "self", ")", ":", "# type: () -> bytes", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'SL record not yet initialized!'", ")", "outlist", "=", "[", "b'SL'", ",", "struct", ".", ...
Generate a string representing the Rock Ridge Symbolic Link record. Parameters: None. Returns: String containing the Rock Ridge record.
[ "Generate", "a", "string", "representing", "the", "Rock", "Ridge", "Symbolic", "Link", "record", "." ]
python
train
iotile/coretools
iotilesensorgraph/iotile/sg/walker.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/walker.py#L252-L267
def notify_rollover(self, stream):
    """Notify that a reading in the given stream was overwritten.

    Args:
        stream (DataStream): The stream that had overwritten data.
    """
    # The overwritten reading shifts our absolute position back by one
    # regardless of whether this walker tracks that stream.
    self.offset -= 1

    if self.matches(stream):
        if self._count == 0:
            raise InternalError("BufferedStreamWalker out of sync with storage engine, count was wrong.")

        self._count -= 1
[ "def", "notify_rollover", "(", "self", ",", "stream", ")", ":", "self", ".", "offset", "-=", "1", "if", "not", "self", ".", "matches", "(", "stream", ")", ":", "return", "if", "self", ".", "_count", "==", "0", ":", "raise", "InternalError", "(", "\"B...
Notify that a reading in the given stream was overwritten. Args: stream (DataStream): The stream that had overwritten data.
[ "Notify", "that", "a", "reading", "in", "the", "given", "stream", "was", "overwritten", "." ]
python
train
dj-stripe/dj-stripe
djstripe/webhooks.py
https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/webhooks.py#L71-L98
def call_handlers(event):
    """
    Invoke all handlers for the provided event type/sub-type.

    The handlers are invoked in the following order:
    1. Global handlers
    2. Event type handlers
    3. Event sub-type handlers

    Handlers within each group are invoked in order of registration.

    :param event: The event model object.
    :type event: ``djstripe.models.Event``
    """
    handler_groups = [registrations_global]

    # Build up a list of handlers with each qualified part of the event
    # category and verb. For example, "customer.subscription.created" creates:
    # 1. "customer"
    # 2. "customer.subscription"
    # 3. "customer.subscription.created"
    prefix_parts = []
    for part in event.parts:
        prefix_parts.append(part)
        handler_groups.append(registrations[".".join(prefix_parts)])

    for handler in itertools.chain(*handler_groups):
        handler(event=event)
[ "def", "call_handlers", "(", "event", ")", ":", "chain", "=", "[", "registrations_global", "]", "# Build up a list of handlers with each qualified part of the event", "# category and verb. For example, \"customer.subscription.created\" creates:", "# 1. \"customer\"", "# 2. \"custome...
Invoke all handlers for the provided event type/sub-type. The handlers are invoked in the following order: 1. Global handlers 2. Event type handlers 3. Event sub-type handlers Handlers within each group are invoked in order of registration. :param event: The event model object. :type event: ``djstripe.models.Event``
[ "Invoke", "all", "handlers", "for", "the", "provided", "event", "type", "/", "sub", "-", "type", "." ]
python
train
hyperledger/indy-sdk
vcx/wrappers/python3/vcx/api/proof.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/proof.py#L116-L138
async def request_proof(self, connection: Connection): """ Example: connection = await Connection.create(source_id) await connection.connect(phone_number) name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) await proof.request_proof(connection) :param connection: Connection :return: """ if not hasattr(Proof.request_proof, "cb"): self.logger.debug("vcx_proof_send_request: Creating callback") Proof.request_proof.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32)) c_proof_handle = c_uint32(self.handle) c_connection_handle = c_uint32(connection.handle) await do_call('vcx_proof_send_request', c_proof_handle, c_connection_handle, Proof.request_proof.cb)
[ "async", "def", "request_proof", "(", "self", ",", "connection", ":", "Connection", ")", ":", "if", "not", "hasattr", "(", "Proof", ".", "request_proof", ",", "\"cb\"", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"vcx_proof_send_request: Creating cal...
Example: connection = await Connection.create(source_id) await connection.connect(phone_number) name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) await proof.request_proof(connection) :param connection: Connection :return:
[ "Example", ":", "connection", "=", "await", "Connection", ".", "create", "(", "source_id", ")", "await", "connection", ".", "connect", "(", "phone_number", ")", "name", "=", "proof", "name", "requested_attrs", "=", "[", "{", "name", ":", "age", "restrictions...
python
train
swift-nav/libsbp
python/sbp/client/util/settingmonitor.py
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/util/settingmonitor.py#L42-L55
def wait_for_setting_value(self, section, setting, value, wait_time=5.0): """Function to wait wait_time seconds to see a SBP_MSG_SETTINGS_READ_RESP message with a user-specified value """ expire = time.time() + wait_time ok = False while not ok and time.time() < expire: settings = [x for x in self.settings if (x[0], x[1]) == (section, setting)] # Check to see if the last setting has the value we want if len(settings) > 0: ok = settings[-1][2] == value time.sleep(0.1) return ok
[ "def", "wait_for_setting_value", "(", "self", ",", "section", ",", "setting", ",", "value", ",", "wait_time", "=", "5.0", ")", ":", "expire", "=", "time", ".", "time", "(", ")", "+", "wait_time", "ok", "=", "False", "while", "not", "ok", "and", "time",...
Function to wait wait_time seconds to see a SBP_MSG_SETTINGS_READ_RESP message with a user-specified value
[ "Function", "to", "wait", "wait_time", "seconds", "to", "see", "a", "SBP_MSG_SETTINGS_READ_RESP", "message", "with", "a", "user", "-", "specified", "value" ]
python
train
ArtoLabs/SimpleSteem
simplesteem/simplesteem.py
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/simplesteem.py#L181-L199
def reward_pool_balances(self): ''' Fetches and returns the 3 values needed to calculate the reward pool and other associated values such as rshares. Returns the reward balance, all recent claims and the current price of steem. ''' if self.reward_balance > 0: return self.reward_balance else: reward_fund = self.steem_instance().get_reward_fund() self.reward_balance = Amount( reward_fund["reward_balance"]).amount self.recent_claims = float(reward_fund["recent_claims"]) self.base = Amount( self.steem_instance( ).get_current_median_history_price()["base"] ).amount return [self.reward_balance, self.recent_claims, self.base]
[ "def", "reward_pool_balances", "(", "self", ")", ":", "if", "self", ".", "reward_balance", ">", "0", ":", "return", "self", ".", "reward_balance", "else", ":", "reward_fund", "=", "self", ".", "steem_instance", "(", ")", ".", "get_reward_fund", "(", ")", "...
Fetches and returns the 3 values needed to calculate the reward pool and other associated values such as rshares. Returns the reward balance, all recent claims and the current price of steem.
[ "Fetches", "and", "returns", "the", "3", "values", "needed", "to", "calculate", "the", "reward", "pool", "and", "other", "associated", "values", "such", "as", "rshares", ".", "Returns", "the", "reward", "balance", "all", "recent", "claims", "and", "the", "cu...
python
train
biolink/ontobio
ontobio/sim/api/owlsim3.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim3.py#L35-L43
def compare(self, query_classes: List, reference_classes: List, method: Optional) -> SimResult: """ Given two lists of entites (classes, individual) return their similarity """ raise NotImplementedError
[ "def", "compare", "(", "self", ",", "query_classes", ":", "List", ",", "reference_classes", ":", "List", ",", "method", ":", "Optional", ")", "->", "SimResult", ":", "raise", "NotImplementedError" ]
Given two lists of entites (classes, individual) return their similarity
[ "Given", "two", "lists", "of", "entites", "(", "classes", "individual", ")", "return", "their", "similarity" ]
python
train
MrMinimal64/timezonefinder
timezonefinder/timezonefinder.py
https://github.com/MrMinimal64/timezonefinder/blob/96cc43afb3bae57ffd002ab4cf104fe15eda2257/timezonefinder/timezonefinder.py#L423-L489
def timezone_at(self, *, lng, lat): """ this function looks up in which polygons the point could be included in to speed things up there are shortcuts being used (stored in a binary file) especially for large polygons it is expensive to check if a point is really included, so certain simplifications are made and even when you get a hit the point might actually not be inside the polygon (for example when there is only one timezone nearby) if you want to make sure a point is really inside a timezone use 'certain_timezone_at' :param lng: longitude of the point in degree (-180 to 180) :param lat: latitude in degree (90 to -90) :return: the timezone name of a matching polygon or None """ lng, lat = rectify_coordinates(lng, lat) # x = longitude y = latitude both converted to 8byte int x = coord2int(lng) y = coord2int(lat) shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat) self.shortcuts_unique_id.seek( (180 * NR_SHORTCUTS_PER_LAT * NR_BYTES_H * shortcut_id_x + NR_BYTES_H * shortcut_id_y)) try: # if there is just one possible zone in this shortcut instantly return its name return timezone_names[unpack(DTYPE_FORMAT_H, self.shortcuts_unique_id.read(NR_BYTES_H))[0]] except IndexError: possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y) nr_possible_polygons = len(possible_polygons) if nr_possible_polygons == 0: return None if nr_possible_polygons == 1: # there is only one polygon in that area. 
return its timezone name without further checks return timezone_names[self.id_of(possible_polygons[0])] # create a list of all the timezone ids of all possible polygons ids = self.id_list(possible_polygons, nr_possible_polygons) # check until the point is included in one of the possible polygons for i in range(nr_possible_polygons): # when including the current polygon only polygons from the same zone remain, same_element = all_the_same(pointer=i, length=nr_possible_polygons, id_list=ids) if same_element != -1: # return the name of that zone return timezone_names[same_element] polygon_nr = possible_polygons[i] # get the boundaries of the polygon = (lng_max, lng_min, lat_max, lat_min) self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr) boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4) # only run the expensive algorithm if the point is withing the boundaries if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]): outside_all_holes = True # when the point is within a hole of the polygon, this timezone must not be returned for hole_coordinates in self._holes_of_line(polygon_nr): if inside_polygon(x, y, hole_coordinates): outside_all_holes = False break if outside_all_holes: if inside_polygon(x, y, self.coords_of(line=polygon_nr)): # the point is included in this polygon. return its timezone name without further checks return timezone_names[ids[i]] # the timezone name of the last polygon should always be returned # if no other polygon has been matched beforehand. raise ValueError('BUG: this statement should never be reached. Please open up an issue on Github!')
[ "def", "timezone_at", "(", "self", ",", "*", ",", "lng", ",", "lat", ")", ":", "lng", ",", "lat", "=", "rectify_coordinates", "(", "lng", ",", "lat", ")", "# x = longitude y = latitude both converted to 8byte int", "x", "=", "coord2int", "(", "lng", ")", "...
this function looks up in which polygons the point could be included in to speed things up there are shortcuts being used (stored in a binary file) especially for large polygons it is expensive to check if a point is really included, so certain simplifications are made and even when you get a hit the point might actually not be inside the polygon (for example when there is only one timezone nearby) if you want to make sure a point is really inside a timezone use 'certain_timezone_at' :param lng: longitude of the point in degree (-180 to 180) :param lat: latitude in degree (90 to -90) :return: the timezone name of a matching polygon or None
[ "this", "function", "looks", "up", "in", "which", "polygons", "the", "point", "could", "be", "included", "in", "to", "speed", "things", "up", "there", "are", "shortcuts", "being", "used", "(", "stored", "in", "a", "binary", "file", ")", "especially", "for"...
python
train
materialsproject/pymatgen
pymatgen/core/lattice.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/lattice.py#L779-L926
def get_niggli_reduced_lattice(self, tol: float = 1e-5) -> "Lattice": """ Get the Niggli reduced lattice using the numerically stable algo proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams, Acta Crystallographica Section A Foundations of Crystallography, 2003, 60(1), 1-6. doi:10.1107/S010876730302186X Args: tol (float): The numerical tolerance. The default of 1e-5 should result in stable behavior for most cases. Returns: Niggli-reduced lattice. """ # lll reduction is more stable for skewed cells matrix = self.lll_matrix a = matrix[0] b = matrix[1] c = matrix[2] e = tol * self.volume ** (1 / 3) # Define metric tensor G = [ [dot(a, a), dot(a, b), dot(a, c)], [dot(a, b), dot(b, b), dot(b, c)], [dot(a, c), dot(b, c), dot(c, c)], ] G = np.array(G) # This sets an upper limit on the number of iterations. for count in range(100): # The steps are labelled as Ax as per the labelling scheme in the # paper. (A, B, C, E, N, Y) = ( G[0, 0], G[1, 1], G[2, 2], 2 * G[1, 2], 2 * G[0, 2], 2 * G[0, 1], ) if A > B + e or (abs(A - B) < e and abs(E) > abs(N) + e): # A1 M = [[0, -1, 0], [-1, 0, 0], [0, 0, -1]] G = dot(transpose(M), dot(G, M)) if (B > C + e) or (abs(B - C) < e and abs(N) > abs(Y) + e): # A2 M = [[-1, 0, 0], [0, 0, -1], [0, -1, 0]] G = dot(transpose(M), dot(G, M)) continue l = 0 if abs(E) < e else E / abs(E) m = 0 if abs(N) < e else N / abs(N) n = 0 if abs(Y) < e else Y / abs(Y) if l * m * n == 1: # A3 i = -1 if l == -1 else 1 j = -1 if m == -1 else 1 k = -1 if n == -1 else 1 M = [[i, 0, 0], [0, j, 0], [0, 0, k]] G = dot(transpose(M), dot(G, M)) elif l * m * n == 0 or l * m * n == -1: # A4 i = -1 if l == 1 else 1 j = -1 if m == 1 else 1 k = -1 if n == 1 else 1 if i * j * k == -1: if n == 0: k = -1 elif m == 0: j = -1 elif l == 0: i = -1 M = [[i, 0, 0], [0, j, 0], [0, 0, k]] G = dot(transpose(M), dot(G, M)) (A, B, C, E, N, Y) = ( G[0, 0], G[1, 1], G[2, 2], 2 * G[1, 2], 2 * G[0, 2], 2 * G[0, 1], ) # A5 if ( abs(E) > B + e or (abs(E - B) < e and 2 * N < Y - e) or 
(abs(E + B) < e and Y < -e) ): M = [[1, 0, 0], [0, 1, -E / abs(E)], [0, 0, 1]] G = dot(transpose(M), dot(G, M)) continue # A6 if ( abs(N) > A + e or (abs(A - N) < e and 2 * E < Y - e) or (abs(A + N) < e and Y < -e) ): M = [[1, 0, -N / abs(N)], [0, 1, 0], [0, 0, 1]] G = dot(transpose(M), dot(G, M)) continue # A7 if ( abs(Y) > A + e or (abs(A - Y) < e and 2 * E < N - e) or (abs(A + Y) < e and N < -e) ): M = [[1, -Y / abs(Y), 0], [0, 1, 0], [0, 0, 1]] G = dot(transpose(M), dot(G, M)) continue # A8 if E + N + Y + A + B < -e or (abs(E + N + Y + A + B) < e < Y + (A + N) * 2): M = [[1, 0, 1], [0, 1, 1], [0, 0, 1]] G = dot(transpose(M), dot(G, M)) continue break A = G[0, 0] B = G[1, 1] C = G[2, 2] E = 2 * G[1, 2] N = 2 * G[0, 2] Y = 2 * G[0, 1] a = math.sqrt(A) b = math.sqrt(B) c = math.sqrt(C) alpha = math.acos(E / 2 / b / c) / math.pi * 180 beta = math.acos(N / 2 / a / c) / math.pi * 180 gamma = math.acos(Y / 2 / a / b) / math.pi * 180 latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma) mapped = self.find_mapping(latt, e, skip_rotation_matrix=True) if mapped is not None: if np.linalg.det(mapped[0].matrix) > 0: return mapped[0] else: return Lattice(-mapped[0].matrix) raise ValueError("can't find niggli")
[ "def", "get_niggli_reduced_lattice", "(", "self", ",", "tol", ":", "float", "=", "1e-5", ")", "->", "\"Lattice\"", ":", "# lll reduction is more stable for skewed cells", "matrix", "=", "self", ".", "lll_matrix", "a", "=", "matrix", "[", "0", "]", "b", "=", "m...
Get the Niggli reduced lattice using the numerically stable algo proposed by R. W. Grosse-Kunstleve, N. K. Sauter, & P. D. Adams, Acta Crystallographica Section A Foundations of Crystallography, 2003, 60(1), 1-6. doi:10.1107/S010876730302186X Args: tol (float): The numerical tolerance. The default of 1e-5 should result in stable behavior for most cases. Returns: Niggli-reduced lattice.
[ "Get", "the", "Niggli", "reduced", "lattice", "using", "the", "numerically", "stable", "algo", "proposed", "by", "R", ".", "W", ".", "Grosse", "-", "Kunstleve", "N", ".", "K", ".", "Sauter", "&", "P", ".", "D", ".", "Adams", "Acta", "Crystallographica", ...
python
train
frictionlessdata/tabulator-py
tabulator/validate.py
https://github.com/frictionlessdata/tabulator-py/blob/06c25845a7139d919326388cc6335f33f909db8c/tabulator/validate.py#L14-L42
def validate(source, scheme=None, format=None): '''Check if tabulator is able to load the source. Args: source (Union[str, IO]): The source path or IO object. scheme (str, optional): The source scheme. Auto-detect by default. format (str, optional): The source file format. Auto-detect by default. Returns: bool: Whether tabulator is able to load the source file. Raises: `tabulator.exceptions.SchemeError`: The file scheme is not supported. `tabulator.exceptions.FormatError`: The file format is not supported. ''' # Get scheme and format detected_scheme, detected_format = helpers.detect_scheme_and_format(source) scheme = scheme or detected_scheme format = format or detected_format # Validate scheme and format if scheme is not None: if scheme not in config.LOADERS: raise exceptions.SchemeError('Scheme "%s" is not supported' % scheme) if format not in config.PARSERS: raise exceptions.FormatError('Format "%s" is not supported' % format) return True
[ "def", "validate", "(", "source", ",", "scheme", "=", "None", ",", "format", "=", "None", ")", ":", "# Get scheme and format", "detected_scheme", ",", "detected_format", "=", "helpers", ".", "detect_scheme_and_format", "(", "source", ")", "scheme", "=", "scheme"...
Check if tabulator is able to load the source. Args: source (Union[str, IO]): The source path or IO object. scheme (str, optional): The source scheme. Auto-detect by default. format (str, optional): The source file format. Auto-detect by default. Returns: bool: Whether tabulator is able to load the source file. Raises: `tabulator.exceptions.SchemeError`: The file scheme is not supported. `tabulator.exceptions.FormatError`: The file format is not supported.
[ "Check", "if", "tabulator", "is", "able", "to", "load", "the", "source", "." ]
python
train
numenta/nupic
src/nupic/frameworks/opf/opf_task_driver.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/opf_task_driver.py#L459-L467
def enterPhase(self): """ [_IterationPhase method implementation] Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase """ super(_IterationPhaseLearnOnly, self).enterPhase() self.__model.enableLearning() self.__model.disableInference() return
[ "def", "enterPhase", "(", "self", ")", ":", "super", "(", "_IterationPhaseLearnOnly", ",", "self", ")", ".", "enterPhase", "(", ")", "self", ".", "__model", ".", "enableLearning", "(", ")", "self", ".", "__model", ".", "disableInference", "(", ")", "return...
[_IterationPhase method implementation] Performs initialization that is necessary upon entry to the phase. Must be called before handleInputRecord() at the beginning of each phase
[ "[", "_IterationPhase", "method", "implementation", "]", "Performs", "initialization", "that", "is", "necessary", "upon", "entry", "to", "the", "phase", ".", "Must", "be", "called", "before", "handleInputRecord", "()", "at", "the", "beginning", "of", "each", "ph...
python
valid
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_dataflash_logger.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_dataflash_logger.py#L125-L144
def status(self): '''returns information about module''' if self.download is None: return "Not started" transferred = self.download - self.prev_download self.prev_download = self.download now = time.time() interval = now - self.last_status_time self.last_status_time = now return("DFLogger: %(state)s Rate(%(interval)ds):%(rate).3fkB/s " "Block:%(block_cnt)d Missing:%(missing)d Fixed:%(fixed)d " "Abandoned:%(abandoned)d" % {"interval": interval, "rate": transferred/(interval*1000), "block_cnt": self.last_seqno, "missing": len(self.missing_blocks), "fixed": self.missing_found, "abandoned": self.abandoned, "state": "Inactive" if self.stopped else "Active"})
[ "def", "status", "(", "self", ")", ":", "if", "self", ".", "download", "is", "None", ":", "return", "\"Not started\"", "transferred", "=", "self", ".", "download", "-", "self", ".", "prev_download", "self", ".", "prev_download", "=", "self", ".", "download...
returns information about module
[ "returns", "information", "about", "module" ]
python
train
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2846-L2857
def download_object(self, container, obj, directory, structure=True): """ Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters. """ return self._manager.download_object(container, obj, directory, structure=structure)
[ "def", "download_object", "(", "self", ",", "container", ",", "obj", ",", "directory", ",", "structure", "=", "True", ")", ":", "return", "self", ".", "_manager", ".", "download_object", "(", "container", ",", "obj", ",", "directory", ",", "structure", "="...
Fetches the object from storage, and writes it to the specified directory. The directory must exist before calling this method. If the object name represents a nested folder structure, such as "foo/bar/baz.txt", that folder structure will be created in the target directory by default. If you do not want the nested folders to be created, pass `structure=False` in the parameters.
[ "Fetches", "the", "object", "from", "storage", "and", "writes", "it", "to", "the", "specified", "directory", ".", "The", "directory", "must", "exist", "before", "calling", "this", "method", "." ]
python
train
joelfrederico/SciSalt
scisalt/facettools/logbookForm.py
https://github.com/joelfrederico/SciSalt/blob/7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f/scisalt/facettools/logbookForm.py#L526-L530
def show(self): '''Display menus and connect even signals.''' self.parent.addLayout(self._logSelectLayout) self.menuCount += 1 self._connectSlots()
[ "def", "show", "(", "self", ")", ":", "self", ".", "parent", ".", "addLayout", "(", "self", ".", "_logSelectLayout", ")", "self", ".", "menuCount", "+=", "1", "self", ".", "_connectSlots", "(", ")" ]
Display menus and connect even signals.
[ "Display", "menus", "and", "connect", "even", "signals", "." ]
python
valid
nutechsoftware/alarmdecoder
alarmdecoder/devices/serial_device.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/devices/serial_device.py#L174-L192
def read(self): """ Reads a single character from the device. :returns: character read from the device :raises: :py:class:`~alarmdecoder.util.CommError` """ data = '' try: read_ready, _, _ = select.select([self._device.fileno()], [], [], 0.5) if len(read_ready) != 0: data = self._device.read(1) except serial.SerialException as err: raise CommError('Error reading from device: {0}'.format(str(err)), err) return data.decode('utf-8')
[ "def", "read", "(", "self", ")", ":", "data", "=", "''", "try", ":", "read_ready", ",", "_", ",", "_", "=", "select", ".", "select", "(", "[", "self", ".", "_device", ".", "fileno", "(", ")", "]", ",", "[", "]", ",", "[", "]", ",", "0.5", "...
Reads a single character from the device. :returns: character read from the device :raises: :py:class:`~alarmdecoder.util.CommError`
[ "Reads", "a", "single", "character", "from", "the", "device", "." ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/blob.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/blob.py#L1592-L1619
def update_storage_class(self, new_class, client=None): """Update blob's storage class via a rewrite-in-place. This helper will wait for the rewrite to complete before returning, so it may take some time for large files. See https://cloud.google.com/storage/docs/per-object-storage-class If :attr:`user_project` is set on the bucket, bills the API request to that project. :type new_class: str :param new_class: new storage class for the object :type client: :class:`~google.cloud.storage.client.Client` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket. """ if new_class not in self._STORAGE_CLASSES: raise ValueError("Invalid storage class: %s" % (new_class,)) # Update current blob's storage class prior to rewrite self._patch_property("storageClass", new_class) # Execute consecutive rewrite operations until operation is done token, _, _ = self.rewrite(self) while token is not None: token, _, _ = self.rewrite(self, token=token)
[ "def", "update_storage_class", "(", "self", ",", "new_class", ",", "client", "=", "None", ")", ":", "if", "new_class", "not", "in", "self", ".", "_STORAGE_CLASSES", ":", "raise", "ValueError", "(", "\"Invalid storage class: %s\"", "%", "(", "new_class", ",", "...
Update blob's storage class via a rewrite-in-place. This helper will wait for the rewrite to complete before returning, so it may take some time for large files. See https://cloud.google.com/storage/docs/per-object-storage-class If :attr:`user_project` is set on the bucket, bills the API request to that project. :type new_class: str :param new_class: new storage class for the object :type client: :class:`~google.cloud.storage.client.Client` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket.
[ "Update", "blob", "s", "storage", "class", "via", "a", "rewrite", "-", "in", "-", "place", ".", "This", "helper", "will", "wait", "for", "the", "rewrite", "to", "complete", "before", "returning", "so", "it", "may", "take", "some", "time", "for", "large",...
python
train
bolt-project/bolt
bolt/spark/array.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/array.py#L125-L191
def map(self, func, axis=(0,), value_shape=None, dtype=None, with_keys=False): """ Apply a function across an axis. Array will be aligned so that the desired set of axes are in the keys, which may incur a swap. Parameters ---------- func : function Function of a single array to apply. If with_keys=True, function should be of a (tuple, array) pair. axis : tuple or int, optional, default=(0,) Axis or multiple axes to apply function along. value_shape : tuple, optional, default=None Known shape of values resulting from operation dtype: numpy.dtype, optional, default=None Known dtype of values resulting from operation with_keys : bool, optional, default=False Include keys as an argument to the function Returns ------- BoltArraySpark """ axis = tupleize(axis) swapped = self._align(axis) if with_keys: test_func = lambda x: func(((0,), x)) else: test_func = func if value_shape is None or dtype is None: # try to compute the size of each mapped element by applying func to a random array try: mapped = test_func(random.randn(*swapped.values.shape).astype(self.dtype)) except Exception: first = swapped._rdd.first() if first: # eval func on the first element mapped = test_func(first[1]) if value_shape is None: value_shape = mapped.shape if dtype is None: dtype = mapped.dtype shape = tuple([swapped._shape[ax] for ax in range(len(axis))]) + tupleize(value_shape) if with_keys: rdd = swapped._rdd.map(lambda kv: (kv[0], func(kv))) else: rdd = swapped._rdd.mapValues(func) # reshaping will fail if the elements aren't uniformly shaped def check(v): if len(v.shape) > 0 and v.shape != tupleize(value_shape): raise Exception("Map operation did not produce values of uniform shape.") return v rdd = rdd.mapValues(lambda v: check(v)) return self._constructor(rdd, shape=shape, dtype=dtype, split=swapped.split).__finalize__(swapped)
[ "def", "map", "(", "self", ",", "func", ",", "axis", "=", "(", "0", ",", ")", ",", "value_shape", "=", "None", ",", "dtype", "=", "None", ",", "with_keys", "=", "False", ")", ":", "axis", "=", "tupleize", "(", "axis", ")", "swapped", "=", "self",...
Apply a function across an axis. Array will be aligned so that the desired set of axes are in the keys, which may incur a swap. Parameters ---------- func : function Function of a single array to apply. If with_keys=True, function should be of a (tuple, array) pair. axis : tuple or int, optional, default=(0,) Axis or multiple axes to apply function along. value_shape : tuple, optional, default=None Known shape of values resulting from operation dtype: numpy.dtype, optional, default=None Known dtype of values resulting from operation with_keys : bool, optional, default=False Include keys as an argument to the function Returns ------- BoltArraySpark
[ "Apply", "a", "function", "across", "an", "axis", "." ]
python
test
payu-org/payu
payu/runlog.py
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/runlog.py#L107-L138
def push(self): """Push the changes to the remote repository. Usage: payu push This command pushes local runlog changes to the remote runlog repository, currently named `payu`, using the SSH key associated with this experiment. For an experiment `test`, it is equivalent to the following command:: ssh-agent bash -c " ssh-add $HOME/.ssh/payu/id_rsa_payu_test git push --all payu " """ expt_name = self.config.get('name', self.expt.name) default_ssh_key = 'id_rsa_payu_' + expt_name ssh_key = self.config.get('sshid', default_ssh_key) ssh_key_path = os.path.join(os.path.expanduser('~'), '.ssh', 'payu', ssh_key) if not os.path.isfile(ssh_key_path): print('payu: error: Github SSH key {key} not found.' ''.format(key=ssh_key_path)) print('payu: error: Run `payu ghsetup` to generate a new key.') sys.exit(-1) cmd = ('ssh-agent bash -c "ssh-add {key}; git push --all payu"' ''.format(key=ssh_key_path)) subprocess.check_call(shlex.split(cmd), cwd=self.expt.control_path)
[ "def", "push", "(", "self", ")", ":", "expt_name", "=", "self", ".", "config", ".", "get", "(", "'name'", ",", "self", ".", "expt", ".", "name", ")", "default_ssh_key", "=", "'id_rsa_payu_'", "+", "expt_name", "ssh_key", "=", "self", ".", "config", "."...
Push the changes to the remote repository. Usage: payu push This command pushes local runlog changes to the remote runlog repository, currently named `payu`, using the SSH key associated with this experiment. For an experiment `test`, it is equivalent to the following command:: ssh-agent bash -c " ssh-add $HOME/.ssh/payu/id_rsa_payu_test git push --all payu "
[ "Push", "the", "changes", "to", "the", "remote", "repository", "." ]
python
train
bachiraoun/pyrep
Repository.py
https://github.com/bachiraoun/pyrep/blob/0449bf2fad3e3e8dda855d4686a8869efeefd433/Repository.py#L1109-L1135
def is_name_allowed(self, path):
    """
    Get whether creating a file or a directory from the basename of the
    given path is allowed.

    :Parameters:
        #. path (str): The absolute or relative path or simply the file
           or directory name.

    :Returns:
        #. allowed (bool): Whether name is allowed.
        #. message (None, str): Reason for the name to be forbidden.
    """
    # NOTE(review): 'basestring' is Python 2 only — presumably aliased to
    # str for Python 3 elsewhere in this module; confirm.
    assert isinstance(path, basestring), "given path must be a string"
    name = os.path.basename(path)
    if not len(name):
        return False, "empty name is not allowed"
    # exact match against pyrep's reserved bookkeeping names
    for em in [self.__repoLock, self.__repoFile, self.__dirInfo, self.__dirLock]:
        if name == em:
            return False, "name '%s' is reserved for pyrep internal usage" % em
    # pattern match: reserved file-info/lock names and dot-prefixed variants
    # that share their suffix (pm[3:] drops the fixed 3-char prefix)
    for pm in [self.__fileInfo, self.__fileLock]:  # ,self.__objectDir]:
        if name == pm or (name.endswith(pm[3:]) and name.startswith('.')):
            return False, "name pattern '%s' is not allowed as result may be reserved for pyrep internal usage" % pm
    # name is ok
    return True, None
[ "def", "is_name_allowed", "(", "self", ",", "path", ")", ":", "assert", "isinstance", "(", "path", ",", "basestring", ")", ",", "\"given path must be a string\"", "name", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "not", "len", "(", ...
Get whether creating a file or a directory from the basenane of the given path is allowed :Parameters: #. path (str): The absolute or relative path or simply the file or directory name. :Returns: #. allowed (bool): Whether name is allowed. #. message (None, str): Reason for the name to be forbidden.
[ "Get", "whether", "creating", "a", "file", "or", "a", "directory", "from", "the", "basenane", "of", "the", "given", "path", "is", "allowed" ]
python
valid
iancmcc/txrestapi
txrestapi/json_resource.py
https://github.com/iancmcc/txrestapi/blob/25c1f2d0087db45ea9edefc3c6fb81f5293458b6/txrestapi/json_resource.py#L34-L44
def _setHeaders(self, request): """ Those headers will allow you to call API methods from web browsers, they require CORS: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing """ request.responseHeaders.addRawHeader(b'content-type', b'application/json') request.responseHeaders.addRawHeader(b'Access-Control-Allow-Origin', b'*') request.responseHeaders.addRawHeader(b'Access-Control-Allow-Methods', b'GET, POST, PUT, DELETE') request.responseHeaders.addRawHeader(b'Access-Control-Allow-Headers', b'x-prototype-version,x-requested-with') request.responseHeaders.addRawHeader(b'Access-Control-Max-Age', 2520) return request
[ "def", "_setHeaders", "(", "self", ",", "request", ")", ":", "request", ".", "responseHeaders", ".", "addRawHeader", "(", "b'content-type'", ",", "b'application/json'", ")", "request", ".", "responseHeaders", ".", "addRawHeader", "(", "b'Access-Control-Allow-Origin'",...
Those headers will allow you to call API methods from web browsers, they require CORS: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing
[ "Those", "headers", "will", "allow", "you", "to", "call", "API", "methods", "from", "web", "browsers", "they", "require", "CORS", ":", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Cross", "-", "origin_resource_sharing" ]
python
train
fishtown-analytics/dbt
core/dbt/main.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/main.py#L116-L134
def initialize_config_values(parsed):
    """Given the parsed args, initialize the dbt tracking code.

    It would be nice to re-use this profile later on instead of parsing it
    twice, but dbt's initialization is not structured in a way that makes
    that easy.
    """
    try:
        cfg = UserConfig.from_directory(parsed.profiles_dir)
    except RuntimeException:
        # Profiles dir missing/unreadable: fall back to an all-defaults config.
        cfg = UserConfig.from_dict(None)

    # Honor the user's opt-out of anonymous usage tracking.
    if cfg.send_anonymous_usage_stats:
        dbt.tracking.initialize_tracking(parsed.profiles_dir)
    else:
        dbt.tracking.do_not_track()

    if cfg.use_colors:
        dbt.ui.printer.use_colors()
[ "def", "initialize_config_values", "(", "parsed", ")", ":", "try", ":", "cfg", "=", "UserConfig", ".", "from_directory", "(", "parsed", ".", "profiles_dir", ")", "except", "RuntimeException", ":", "cfg", "=", "UserConfig", ".", "from_dict", "(", "None", ")", ...
Given the parsed args, initialize the dbt tracking code. It would be nice to re-use this profile later on instead of parsing it twice, but dbt's intialization is not structured in a way that makes that easy.
[ "Given", "the", "parsed", "args", "initialize", "the", "dbt", "tracking", "code", "." ]
python
train
mitsei/dlkit
dlkit/json_/authorization/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/searches.py#L99-L111
def get_authorizations(self):
    """Gets the authorization list resulting from the search.

    return: (osid.authorization.AuthorizationList) - the authorization
            list
    raise:  IllegalState - list has already been retrieved
    *compliance: mandatory -- This method must be implemented.*
    """
    # Results are single-use: a second retrieval violates the OSID contract.
    if self.retrieved:
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    return objects.AuthorizationList(self._results, runtime=self._runtime)
[ "def", "get_authorizations", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "AuthorizationList", ...
Gets the authorization list resulting from the search. return: (osid.authorization.AuthorizationList) - the authorization list raise: IllegalState - list has already been retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "authorization", "list", "resulting", "from", "the", "search", "." ]
python
train
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L188-L193
def _wrap_data(data: Union[str, bytes]):
    """
    Wrap raw payload data in the matching message event.

    Text payloads become ``TextMessage`` events and binary payloads become
    ``BytesMessage`` events; both are marked as complete frames/messages.
    """
    if isinstance(data, str):
        message_cls = TextMessage
    else:
        message_cls = BytesMessage
    return message_cls(data=data, frame_finished=True, message_finished=True)
[ "def", "_wrap_data", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", ":", "MsgType", "=", "TextMessage", "if", "isinstance", "(", "data", ",", "str", ")", "else", "BytesMessage", "return", "MsgType", "(", "data", "=", "data", ",", "frame...
Wraps data into the right event.
[ "Wraps", "data", "into", "the", "right", "event", "." ]
python
train
Azure/blobxfer
blobxfer/operations/progress.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/progress.py#L52-L102
def update_progress_bar(
        go, optext, start, total_files, files_sofar, total_bytes,
        bytes_sofar, stdin_upload=False):
    # type: (blobxfer.models.options.General, str, datetime.datetime, int,
    #        int, int, int, bool) -> None
    """Update the progress bar
    :param blobxfer.models.options.General go: general options
    :param str optext: operation prefix text
    :param datetime.datetime start: start time
    :param int total_files: total number of files
    :param int files_sofar: files transfered so far
    :param int total_bytes: total number of bytes
    :param int bytes_sofar: bytes transferred so far
    :param bool stdin_upload: stdin upload
    """
    # Skip rendering when disabled or not started.  NOTE(review): the bar
    # is only drawn when a log file is set — presumably because stdout is
    # then free for the bar; confirm against caller behavior.
    if (go.quiet or not go.progress_bar or
            blobxfer.util.is_none_or_empty(go.log_file) or
            start is None):
        return
    diff = (blobxfer.util.datetime_now() - start).total_seconds()
    if diff <= 0:
        # arbitrarily give a small delta to avoid division by zero below
        diff = 1e-9
    # fraction complete; clamp to 0 when totals are unknown or inconsistent
    if total_bytes is None or total_bytes == 0 or bytes_sofar > total_bytes:
        done = 0
    else:
        done = float(bytes_sofar) / total_bytes
    rate = bytes_sofar / blobxfer.util.MEGABYTE / diff
    if optext == 'synccopy':
        rtext = 'sync-copied'
    else:
        rtext = optext + 'ed'
    if total_files is None:
        fprog = 'n/a'
    else:
        fprog = '{}/{}'.format(files_sofar, total_files)
    if stdin_upload:
        # stdin size is unknown: show a moving marker instead of a percentage
        sys.stdout.write(
            ('\r{0} progress: [{1:30s}] n/a % {2:12.3f} MiB/sec, '
             '{3} {4}').format(
                 optext, '>' * int(total_bytes % 30), rate, fprog, rtext)
        )
    else:
        sys.stdout.write(
            ('\r{0} progress: [{1:30s}] {2:.2f}% {3:12.3f} MiB/sec, '
             '{4} {5}').format(
                 optext, '>' * int(done * 30), done * 100, rate, fprog,
                 rtext)
        )
    if files_sofar == total_files:
        # final update: terminate the carriage-returned line
        sys.stdout.write('\n')
    sys.stdout.flush()
[ "def", "update_progress_bar", "(", "go", ",", "optext", ",", "start", ",", "total_files", ",", "files_sofar", ",", "total_bytes", ",", "bytes_sofar", ",", "stdin_upload", "=", "False", ")", ":", "# type: (blobxfer.models.options.General, str, datetime.datetime, int,", "...
Update the progress bar :param blobxfer.models.options.General go: general options :param str optext: operation prefix text :param datetime.datetime start: start time :param int total_files: total number of files :param int files_sofar: files transfered so far :param int total_bytes: total number of bytes :param int bytes_sofar: bytes transferred so far :param bool stdin_upload: stdin upload
[ "Update", "the", "progress", "bar", ":", "param", "blobxfer", ".", "models", ".", "options", ".", "General", "go", ":", "general", "options", ":", "param", "str", "optext", ":", "operation", "prefix", "text", ":", "param", "datetime", ".", "datetime", "sta...
python
train
ev3dev/ev3dev-lang-python
ev3dev2/led.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/led.py#L413-L457
def animate_police_lights(self, color1, color2, group1='LEFT', group2='RIGHT',
                          sleeptime=0.5, duration=5, block=True):
    """
    Cycle the ``group1`` and ``group2`` LEDs between ``color1`` and
    ``color2`` to give the effect of police lights.  Alternate the
    ``group1`` and ``group2`` LEDs every ``sleeptime`` seconds.

    Animate for ``duration`` seconds.  If ``duration`` is None animate
    forever.  When ``block`` is False the animation runs in a background
    thread and this call returns immediately.

    Example:

    .. code-block:: python

        from ev3dev2.led import Leds
        leds = Leds()
        leds.animate_police_lights('RED', 'GREEN', sleeptime=0.75, duration=10)
    """

    def _animate_police_lights():
        # Start from a known state, then swap the color/group pairing each
        # iteration until stopped or the duration expires.
        self.all_off()
        even = True
        start_time = dt.datetime.now()

        while True:
            if even:
                self.set_color(group1, color1)
                self.set_color(group2, color2)
            else:
                self.set_color(group1, color2)
                self.set_color(group2, color1)

            if self.animate_thread_stop or duration_expired(start_time, duration):
                break

            even = not even
            sleep(sleeptime)

        # Clear animation bookkeeping once the loop exits.
        self.animate_thread_stop = False
        self.animate_thread_id = None

    # Stop any animation that is already running before starting this one.
    self.animate_stop()

    if block:
        _animate_police_lights()
    else:
        # Run in a background thread so the caller is not blocked.
        self.animate_thread_id = _thread.start_new_thread(_animate_police_lights, ())
[ "def", "animate_police_lights", "(", "self", ",", "color1", ",", "color2", ",", "group1", "=", "'LEFT'", ",", "group2", "=", "'RIGHT'", ",", "sleeptime", "=", "0.5", ",", "duration", "=", "5", ",", "block", "=", "True", ")", ":", "def", "_animate_police_...
Cycle the ``group1`` and ``group2`` LEDs between ``color1`` and ``color2`` to give the effect of police lights. Alternate the ``group1`` and ``group2`` LEDs every ``sleeptime`` seconds. Animate for ``duration`` seconds. If ``duration`` is None animate for forever. Example: .. code-block:: python from ev3dev2.led import Leds leds = Leds() leds.animate_police_lights('RED', 'GREEN', sleeptime=0.75, duration=10)
[ "Cycle", "the", "group1", "and", "group2", "LEDs", "between", "color1", "and", "color2", "to", "give", "the", "effect", "of", "police", "lights", ".", "Alternate", "the", "group1", "and", "group2", "LEDs", "every", "sleeptime", "seconds", "." ]
python
train
project-ncl/pnc-cli
pnc_cli/buildconfigurations.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigurations.py#L21-L30
def get_build_configuration_id_by_name(name):
    """
    Returns the id of the build configuration matching name

    :param name: name of build configuration
    :return: id of the matching build configuration, or None if no match found
    """
    matches = utils.checked_api_call(
        pnc_api.build_configs, 'get_all', q='name==' + name).content
    if matches:
        return matches[0].id
    return None
[ "def", "get_build_configuration_id_by_name", "(", "name", ")", ":", "response", "=", "utils", ".", "checked_api_call", "(", "pnc_api", ".", "build_configs", ",", "'get_all'", ",", "q", "=", "'name=='", "+", "name", ")", ".", "content", "if", "not", "response",...
Returns the id of the build configuration matching name :param name: name of build configuration :return: id of the matching build configuration, or None if no match found
[ "Returns", "the", "id", "of", "the", "build", "configuration", "matching", "name", ":", "param", "name", ":", "name", "of", "build", "configuration", ":", "return", ":", "id", "of", "the", "matching", "build", "configuration", "or", "None", "if", "no", "ma...
python
train
soravux/scoop
scoop/_comm/scoopzmq.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/_comm/scoopzmq.py#L399-L426
def _sendReply(self, destination, fid, *args): """Send a REPLY directly to its destination. If it doesn't work, launch it back to the broker.""" # Try to send the result directly to its parent self.addPeer(destination) try: self.direct_socket.send_multipart([ destination, REPLY, ] + list(args), flags=zmq.NOBLOCK) except zmq.error.ZMQError as e: # Fallback on Broker routing if no direct connection possible scoop.logger.debug( "{0}: Could not send result directly to peer {1}, routing through " "broker.".format(scoop.worker, destination) ) self.socket.send_multipart([ REPLY, ] + list(args) + [ destination, ]) self.socket.send_multipart([ STATUS_DONE, fid, ])
[ "def", "_sendReply", "(", "self", ",", "destination", ",", "fid", ",", "*", "args", ")", ":", "# Try to send the result directly to its parent", "self", ".", "addPeer", "(", "destination", ")", "try", ":", "self", ".", "direct_socket", ".", "send_multipart", "("...
Send a REPLY directly to its destination. If it doesn't work, launch it back to the broker.
[ "Send", "a", "REPLY", "directly", "to", "its", "destination", ".", "If", "it", "doesn", "t", "work", "launch", "it", "back", "to", "the", "broker", "." ]
python
train
apache/incubator-mxnet
python/mxnet/model.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L908-L928
def save(self, prefix, epoch=None):
    """Checkpoint the model checkpoint into file.
    You can also use `pickle` to do the job if you only work on Python.
    The advantage of `load` and `save` (as compared to `pickle`) is that
    the resulting file can be loaded from other MXNet language bindings.
    One can also directly `load`/`save` from/to cloud storage(S3, HDFS)

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int, optional
        Epoch number used in the parameter file name; defaults to
        ``self.num_epoch`` when not given.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    if epoch is None:
        epoch = self.num_epoch
    # Either an explicit epoch or a configured num_epoch is required to
    # name the parameter file.
    assert epoch is not None
    save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
[ "def", "save", "(", "self", ",", "prefix", ",", "epoch", "=", "None", ")", ":", "if", "epoch", "is", "None", ":", "epoch", "=", "self", ".", "num_epoch", "assert", "epoch", "is", "not", "None", "save_checkpoint", "(", "prefix", ",", "epoch", ",", "se...
Checkpoint the model checkpoint into file. You can also use `pickle` to do the job if you only work on Python. The advantage of `load` and `save` (as compared to `pickle`) is that the resulting file can be loaded from other MXNet language bindings. One can also directly `load`/`save` from/to cloud storage(S3, HDFS) Parameters ---------- prefix : str Prefix of model name. Notes ----- - ``prefix-symbol.json`` will be saved for symbol. - ``prefix-epoch.params`` will be saved for parameters.
[ "Checkpoint", "the", "model", "checkpoint", "into", "file", ".", "You", "can", "also", "use", "pickle", "to", "do", "the", "job", "if", "you", "only", "work", "on", "Python", ".", "The", "advantage", "of", "load", "and", "save", "(", "as", "compared", ...
python
train
joke2k/faker
faker/providers/ssn/it_IT/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/ssn/it_IT/__init__.py#L18-L24
def checksum(value):
    """
    Calculates the checksum char used for the 16th char.
    Author: Vincenzo Palazzo
    """
    # Accumulate the per-character weights; odd/even positions use
    # different lookup rows.
    total = 0
    for position, char in enumerate(value):
        total += CHECKSUM_TABLE[position % 2][ALPHANUMERICS_DICT[char]]
    # Map the result onto 'A'..'Z'.
    return chr(65 + total % 26)
[ "def", "checksum", "(", "value", ")", ":", "return", "chr", "(", "65", "+", "sum", "(", "CHECKSUM_TABLE", "[", "index", "%", "2", "]", "[", "ALPHANUMERICS_DICT", "[", "char", "]", "]", "for", "index", ",", "char", "in", "enumerate", "(", "value", ")"...
Calculates the checksum char used for the 16th char. Author: Vincenzo Palazzo
[ "Calculates", "the", "checksum", "char", "used", "for", "the", "16th", "char", ".", "Author", ":", "Vincenzo", "Palazzo" ]
python
train
rytilahti/python-songpal
songpal/method.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/method.py#L107-L115
def asdict(self) -> Dict[str, Union[Dict, Union[str, Dict]]]:
    """Return a dictionary describing the method.

    This can be used to dump the information into a JSON file.
    """
    description = {"service": self.service.name}
    description.update(self.signature.serialize())
    return description
[ "def", "asdict", "(", "self", ")", "->", "Dict", "[", "str", ",", "Union", "[", "Dict", ",", "Union", "[", "str", ",", "Dict", "]", "]", "]", ":", "return", "{", "\"service\"", ":", "self", ".", "service", ".", "name", ",", "*", "*", "self", "....
Return a dictionary describing the method. This can be used to dump the information into a JSON file.
[ "Return", "a", "dictionary", "describing", "the", "method", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L255-L270
def imagetransformer_cifar10_base():
    """Best config for 2.90 bits/dim on CIFAR10 using cross entropy."""
    hparams = image_transformer_base()
    hparams.batch_size = 4
    hparams.num_heads = 4
    hparams.num_decoder_layers = 12
    hparams.block_length = 256
    hparams.hidden_size = 512
    hparams.filter_size = 2048
    hparams.learning_rate = 0.5
    hparams.learning_rate_warmup_steps = 4000
    # Sub-layer wrapping: nothing before, "dan" afterwards — presumably
    # dropout/add/norm per the T2T convention; confirm.
    hparams.layer_preprocess_sequence = "none"
    hparams.layer_postprocess_sequence = "dan"
    hparams.layer_prepostprocess_dropout = 0.3
    # Unconditional generation (no class conditioning).
    hparams.unconditional = True
    return hparams
[ "def", "imagetransformer_cifar10_base", "(", ")", ":", "hparams", "=", "image_transformer_base", "(", ")", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "num_heads", "=", "4", "hparams", ".", "num_decoder_layers", "=", "12", "hparams", ".", "block_leng...
Best config for 2.90 bits/dim on CIFAR10 using cross entropy.
[ "Best", "config", "for", "2", ".", "90", "bits", "/", "dim", "on", "CIFAR10", "using", "cross", "entropy", "." ]
python
train
mpg-age-bioinformatics/AGEpy
AGEpy/cytoscape.py
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/cytoscape.py#L462-L504
def result(filetype="PNG", saveas=None, host=cytoscape_host, port=cytoscape_port):
    """
    Checks the current network.

    Note: works only on localhost

    :param filetype: file type, default="PNG"
    :param saveas: /path/to/non/tmp/file.prefix
    :param host: cytoscape host address, default=cytoscape_host
    :param port: cytoscape port, default=1234

    :returns: an image
    """
    # Give Cytoscape a moment to settle before exporting.
    sleep(1)

    def MAKETMP():
        # Build a unique temp file name, forced under /tmp regardless of
        # the platform's default temp directory.
        (fd, tmp_file) = tempfile.mkstemp()
        tmp_file = "/tmp/" + tmp_file.split("/")[-1]
        return tmp_file

    outfile = MAKETMP()
    extensions = {"PNG": ".png", "PDF": ".pdf", "CYS": ".cys", "CYJS": ".cyjs"}
    ext = extensions[filetype]
    # Fit the view, then export via the Cytoscape REST "command" API.
    response = cytoscape("view", "fit content", host=host, port=port)
    response = cytoscape("view", "export",
                         {"options": filetype,
                          "OutputFile": outfile},
                         host=host, port=port)
    if host != 'localhost':
        # The export lands on the remote host: fetch it over SFTP, then
        # delete the remote copy.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host)
        ftp_client = ssh.open_sftp()
        ftp_client.get(outfile + ext, outfile + ext)
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("rm " + outfile + ext)
    img = WImage(filename=outfile + ext)
    if saveas:
        # Keep a persistent copy before removing the temp file.
        copyfile(outfile + ext, saveas)
    os.remove(outfile + ext)
    return img
[ "def", "result", "(", "filetype", "=", "\"PNG\"", ",", "saveas", "=", "None", ",", "host", "=", "cytoscape_host", ",", "port", "=", "cytoscape_port", ")", ":", "sleep", "(", "1", ")", "def", "MAKETMP", "(", ")", ":", "(", "fd", ",", "tmp_file", ")", ...
Checks the current network. Note: works only on localhost :param filetype: file type, default="PNG" :param saveas: /path/to/non/tmp/file.prefix :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=1234 :returns: an image
[ "Checks", "the", "current", "network", ".", "Note", ":", "works", "only", "on", "localhost", ":", "param", "filetype", ":", "file", "type", "default", "=", "PNG", ":", "param", "saveas", ":", "/", "path", "/", "to", "/", "non", "/", "tmp", "/", "file...
python
train
saltstack/salt
salt/modules/dockermod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockermod.py#L1509-L1548
def diff(name):
    '''
    Get information on changes made to container's filesystem since it was
    created. Equivalent to running the ``docker diff`` Docker CLI command.

    name
        Container name or ID

    **RETURN DATA**

    A dictionary containing any of the following keys:

    - ``Added`` - A list of paths that were added.
    - ``Changed`` - A list of paths that were changed.
    - ``Deleted`` - A list of paths that were deleted.

    These keys will only be present if there were changes, so if the
    container has no differences the return dict will be empty.

    CLI Example:

    .. code-block:: bash

        salt myminion docker.diff mycontainer
    '''
    kind_names = {0: 'Changed', 1: 'Added', 2: 'Deleted'}
    ret = {}
    for entry in _client_wrapper('diff', name):
        bucket = kind_names.get(entry['Kind'], 'Unknown')
        ret.setdefault(bucket, []).append(entry['Path'])
    if 'Unknown' in ret:
        log.error(
            'Unknown changes detected in docker.diff of container %s. '
            'This is probably due to a change in the Docker API. Please '
            'report this to the SaltStack developers', name
        )
    return ret
[ "def", "diff", "(", "name", ")", ":", "changes", "=", "_client_wrapper", "(", "'diff'", ",", "name", ")", "kind_map", "=", "{", "0", ":", "'Changed'", ",", "1", ":", "'Added'", ",", "2", ":", "'Deleted'", "}", "ret", "=", "{", "}", "for", "change",...
Get information on changes made to container's filesystem since it was created. Equivalent to running the ``docker diff`` Docker CLI command. name Container name or ID **RETURN DATA** A dictionary containing any of the following keys: - ``Added`` - A list of paths that were added. - ``Changed`` - A list of paths that were changed. - ``Deleted`` - A list of paths that were deleted. These keys will only be present if there were changes, so if the container has no differences the return dict will be empty. CLI Example: .. code-block:: bash salt myminion docker.diff mycontainer
[ "Get", "information", "on", "changes", "made", "to", "container", "s", "filesystem", "since", "it", "was", "created", ".", "Equivalent", "to", "running", "the", "docker", "diff", "Docker", "CLI", "command", "." ]
python
train
RPi-Distro/python-gpiozero
gpiozero/output_devices.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/output_devices.py#L123-L132
def toggle(self):
    """
    Reverse the state of the device. If it's on, turn it off; if it's
    off, turn it on.
    """
    with self._lock:
        # Pick the opposite action while holding the lock so the state
        # cannot change between the check and the switch.
        action = self.off if self.is_active else self.on
        action()
[ "def", "toggle", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "is_active", ":", "self", ".", "off", "(", ")", "else", ":", "self", ".", "on", "(", ")" ]
Reverse the state of the device. If it's on, turn it off; if it's off, turn it on.
[ "Reverse", "the", "state", "of", "the", "device", ".", "If", "it", "s", "on", "turn", "it", "off", ";", "if", "it", "s", "off", "turn", "it", "on", "." ]
python
train
ejeschke/ginga
ginga/Bindings.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1726-L1732
def ms_cmap_restore(self, viewer, event, data_x, data_y, msg=True):
    """An interactive way to restore the colormap settings after
    a rotate or invert operation.
    """
    restore_requested = self.cancmap and event.state == 'down'
    if restore_requested:
        self.restore_colormap(viewer, msg)
    # Always claim the event as handled.
    return True
[ "def", "ms_cmap_restore", "(", "self", ",", "viewer", ",", "event", ",", "data_x", ",", "data_y", ",", "msg", "=", "True", ")", ":", "if", "self", ".", "cancmap", "and", "(", "event", ".", "state", "==", "'down'", ")", ":", "self", ".", "restore_colo...
An interactive way to restore the colormap settings after a rotate or invert operation.
[ "An", "interactive", "way", "to", "restore", "the", "colormap", "settings", "after", "a", "rotate", "or", "invert", "operation", "." ]
python
train
opentracing-contrib/python-flask
flask_opentracing/tracing.py
https://github.com/opentracing-contrib/python-flask/blob/74bfe8bcd00eee9ce75a15c1634fda4c5d5f26ca/flask_opentracing/tracing.py#L66-L93
def trace(self, *attributes):
    """
    Function decorator that traces functions

    NOTE: Must be placed after the @app.route decorator

    @param attributes any number of flask.Request attributes
    (strings) to be set as tags on the created span
    """
    def decorator(f):
        def wrapper(*args, **kwargs):
            # When every request is already traced by the middleware hooks,
            # the decorator must not start a second span.
            if self._trace_all_requests:
                return f(*args, **kwargs)

            self._before_request_fn(list(attributes))
            try:
                r = f(*args, **kwargs)
            except Exception as e:
                # Finish the span with the error attached, then re-raise.
                self._after_request_fn(error=e)
                raise
            # Bug fix: the original called _after_request_fn() twice on the
            # success path (once inside the try and once after it), closing
            # the span twice.  Finish it exactly once.
            self._after_request_fn()
            return r

        # Preserve the endpoint name Flask derives from the function.
        wrapper.__name__ = f.__name__
        return wrapper
    return decorator
[ "def", "trace", "(", "self", ",", "*", "attributes", ")", ":", "def", "decorator", "(", "f", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_trace_all_requests", ":", "return", "f", "(", "*", ...
Function decorator that traces functions NOTE: Must be placed after the @app.route decorator @param attributes any number of flask.Request attributes (strings) to be set as tags on the created span
[ "Function", "decorator", "that", "traces", "functions" ]
python
train
gouthambs/Flask-Blogging
flask_blogging/storage.py
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/storage.py#L9-L43
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """
    Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
    is invalid, the post must be inserted into the storage. If ``post_id``
    is a valid id, then the data must be updated.

    :param title: The title of the blog post
    :type title: str
    :param text: The text of the blog post
    :type text: str
    :param user_id: The user identifier
    :type user_id: str
    :param tags: A list of tags
    :type tags: list
    :param draft: If the post is a draft, or if it needs to be published.
    :type draft: bool
    :param post_date: (Optional) The date the blog was posted (default
        datetime.datetime.utcnow())
    :type post_date: datetime.datetime
    :param last_modified_date: (Optional) The date when blog was last
        modified (default datetime.datetime.utcnow())
    :type last_modified_date: datetime.datetime
    :param meta_data: The meta data for the blog post
    :type meta_data: dict
    :param post_id: The post identifier. This should be ``None`` for an
        insert call, and a valid value for update.
    :type post_id: int
    :return: The post_id value, in case of a successful insert or update.
        Return ``None`` if there were errors.
    """
    # Abstract storage hook: concrete backends must override this.
    raise NotImplementedError("This method needs to be implemented by "
                              "the inheriting class")
[ "def", "save_post", "(", "self", ",", "title", ",", "text", ",", "user_id", ",", "tags", ",", "draft", "=", "False", ",", "post_date", "=", "None", ",", "last_modified_date", "=", "None", ",", "meta_data", "=", "None", ",", "post_id", "=", "None", ")",...
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id`` is invalid, the post must be inserted into the storage. If ``post_id`` is a valid id, then the data must be updated. :param title: The title of the blog post :type title: str :param text: The text of the blog post :type text: str :param user_id: The user identifier :type user_id: str :param tags: A list of tags :type tags: list :param draft: If the post is a draft of if needs to be published. :type draft: bool :param post_date: (Optional) The date the blog was posted (default datetime.datetime.utcnow()) :type post_date: datetime.datetime :param last_modified_date: (Optional) The date when blog was last modified (default datetime.datetime.utcnow()) :type last_modified_date: datetime.datetime :param meta_data: The meta data for the blog post :type meta_data: dict :param post_id: The post identifier. This should be ``None`` for an insert call, and a valid value for update. :type post_id: int :return: The post_id value, in case of a successful insert or update. Return ``None`` if there were errors.
[ "Persist", "the", "blog", "post", "data", ".", "If", "post_id", "is", "None", "or", "post_id", "is", "invalid", "the", "post", "must", "be", "inserted", "into", "the", "storage", ".", "If", "post_id", "is", "a", "valid", "id", "then", "the", "data", "m...
python
train
all-umass/graphs
graphs/mixins/viz.py
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/viz.py#L13-L55
def plot(self, coordinates, directed=False, weighted=False, fig='current',
         ax=None, edge_style=None, vertex_style=None, title=None, cmap=None):
    '''Plot the graph using matplotlib in 2 or 3 dimensions.

    coordinates : (n,2) or (n,3) array of vertex coordinates
    directed : if True, edges have arrows indicating direction.
    weighted : if True, edges are colored by their weight.
    fig : a matplotlib Figure to use, or one of {'new','current'}.
          Defaults to 'current', which will call gcf(). Only used when ax=None.
    ax : a matplotlib Axes to use. Defaults to gca()
    edge_style : string or dict of styles for edges. Defaults to 'k-'
    vertex_style : string or dict of styles for vertices. Defaults to 'ko'
    title : string to display as the plot title
    cmap : a matplotlib Colormap to use for edge weight coloring
    '''
    coords = np.atleast_2d(coordinates)
    assert 0 < coords.shape[1] <= 3, 'too many dimensions to plot'
    # 1-d input: plot the values against their indices.
    if coords.shape[1] == 1:
        coords = np.column_stack((np.arange(coords.shape[0]), coords))
    is_3d = coords.shape[1] == 3
    if ax is None:
        ax = _get_axis(is_3d, fig)
    # Default styling; caller-supplied styles override these entries.
    edge_kwargs = dict(colors='k', linestyles='-', linewidths=1, zorder=1)
    vertex_kwargs = dict(marker='o', c='k', s=20, edgecolor='none', zorder=2)
    if edge_style is not None:
        edge_kwargs.update(edge_style if isinstance(edge_style, dict)
                           else _parse_fmt(edge_style, color_key='colors'))
    if vertex_style is not None:
        vertex_kwargs.update(vertex_style if isinstance(vertex_style, dict)
                             else _parse_fmt(vertex_style, color_key='c'))
    if weighted and self.is_weighted():
        edge_kwargs['array'] = self.edge_weights()
    if directed and self.is_directed():
        _directed_edges(self, coords, ax, is_3d, edge_kwargs, cmap)
    else:
        _undirected_edges(self, coords, ax, is_3d, edge_kwargs, cmap)
    ax.scatter(*coords.T, **vertex_kwargs)
    ax.autoscale_view()
    if title:
        ax.set_title(title)
    # Return the show function itself (uncalled) so the caller decides
    # when to display the figure.
    return pyplot.show
[ "def", "plot", "(", "self", ",", "coordinates", ",", "directed", "=", "False", ",", "weighted", "=", "False", ",", "fig", "=", "'current'", ",", "ax", "=", "None", ",", "edge_style", "=", "None", ",", "vertex_style", "=", "None", ",", "title", "=", "...
Plot the graph using matplotlib in 2 or 3 dimensions. coordinates : (n,2) or (n,3) array of vertex coordinates directed : if True, edges have arrows indicating direction. weighted : if True, edges are colored by their weight. fig : a matplotlib Figure to use, or one of {'new','current'}. Defaults to 'current', which will call gcf(). Only used when ax=None. ax : a matplotlib Axes to use. Defaults to gca() edge_style : string or dict of styles for edges. Defaults to 'k-' vertex_style : string or dict of styles for vertices. Defaults to 'ko' title : string to display as the plot title cmap : a matplotlib Colormap to use for edge weight coloring
[ "Plot", "the", "graph", "using", "matplotlib", "in", "2", "or", "3", "dimensions", "." ]
python
train
quantopian/zipline
zipline/pipeline/factors/factor.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1393-L1404
def _compute(self, arrays, dates, assets, mask):
    """
    For each row in the input, compute a like-shaped array of per-row
    ranks.
    """
    data = arrays[0]
    missing = self.inputs[0].missing_value
    # Delegate the actual masked ranking to the shared helper.
    return masked_rankdata_2d(data, mask, missing,
                              self._method, self._ascending)
[ "def", "_compute", "(", "self", ",", "arrays", ",", "dates", ",", "assets", ",", "mask", ")", ":", "return", "masked_rankdata_2d", "(", "arrays", "[", "0", "]", ",", "mask", ",", "self", ".", "inputs", "[", "0", "]", ".", "missing_value", ",", "self"...
For each row in the input, compute a like-shaped array of per-row ranks.
[ "For", "each", "row", "in", "the", "input", "compute", "a", "like", "-", "shaped", "array", "of", "per", "-", "row", "ranks", "." ]
python
train
spdx/tools-python
spdx/parsers/tagvaluebuilders.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvaluebuilders.py#L408-L414
def reset_annotations(self):
    """Resets the builder's state to allow building new annotations."""
    # FIXME: this state does not make sense
    for flag in ('annotation_date_set', 'annotation_comment_set',
                 'annotation_type_set', 'annotation_spdx_id_set'):
        setattr(self, flag, False)
[ "def", "reset_annotations", "(", "self", ")", ":", "# FIXME: this state does not make sense", "self", ".", "annotation_date_set", "=", "False", "self", ".", "annotation_comment_set", "=", "False", "self", ".", "annotation_type_set", "=", "False", "self", ".", "annotat...
Resets the builder's state to allow building new annotations.
[ "Resets", "the", "builder", "s", "state", "to", "allow", "building", "new", "annotations", "." ]
python
valid
chaoss/grimoirelab-perceval
perceval/backends/core/gitlab.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/gitlab.py#L481-L492
def notes(self, item_type, item_id):
    """Get the notes from pagination"""
    # Oldest-first ordering so incremental fetches are stable.
    params = {
        'order_by': 'updated_at',
        'sort': 'asc',
        'per_page': PER_PAGE
    }
    # Notes live under <item_type>/<item_id>/notes in the GitLab API.
    endpoint = urijoin(item_type, str(item_id), GitLabClient.NOTES)
    return self.fetch_items(endpoint, params)
[ "def", "notes", "(", "self", ",", "item_type", ",", "item_id", ")", ":", "payload", "=", "{", "'order_by'", ":", "'updated_at'", ",", "'sort'", ":", "'asc'", ",", "'per_page'", ":", "PER_PAGE", "}", "path", "=", "urijoin", "(", "item_type", ",", "str", ...
Get the notes from pagination
[ "Get", "the", "notes", "from", "pagination" ]
python
test
StackStorm/pybind
pybind/nos/v6_0_2f/protocol/spanning_tree/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/protocol/spanning_tree/__init__.py#L166-L187
def _set_pvst(self, v, load=False):
    """
    Setter method for pvst, mapped from YANG variable
    /protocol/spanning_tree/pvst (container)

    If this variable is read-only (config: false) in the source YANG
    file, then _set_pvst is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_pvst() directly.
    """
    # Unwrap values that carry a user-type wrapper before validation
    # (presumably produced by the pyangbind machinery -- confirm).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value into the generated pvst container class;
        # TypeError/ValueError here means the value is incompatible.
        t = YANGDynClass(v,base=pvst.pvst, is_container='container', presence=True, yang_name="pvst", rest_name="pvst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'callpoint': u'pvst-config', u'info': u'PVST spanning-tree', u'display-when': u'not ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/mstp) or (/protocol/spanning-tree/rpvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a descriptive payload including the expected
        # generated type, so callers see exactly what was required.
        raise ValueError({
            'error-string': """pvst must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=pvst.pvst, is_container='container', presence=True, yang_name="pvst", rest_name="pvst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'callpoint': u'pvst-config', u'info': u'PVST spanning-tree', u'display-when': u'not ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/mstp) or (/protocol/spanning-tree/rpvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
        })

    # Store the validated value, then fire the optional _set() hook so
    # listeners/paths are updated.
    self.__pvst = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_pvst", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for pvst, mapped from YANG variable /protocol/spanning_tree/pvst (container) If this variable is read-only (config: false) in the source YANG file, then _set_pvst is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_pvst() directly.
[ "Setter", "method", "for", "pvst", "mapped", "from", "YANG", "variable", "/", "protocol", "/", "spanning_tree", "/", "pvst", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "sourc...
python
train
influxdata/influxdb-python
influxdb/influxdb08/helper.py
https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/influxdb08/helper.py#L136-L148
def _json_body_(cls): """Return JSON body of the datapoints. :return: JSON body of the datapoints. """ json = [] for series_name, data in six.iteritems(cls._datapoints): json.append({'name': series_name, 'columns': cls._fields, 'points': [[getattr(point, k) for k in cls._fields] for point in data] }) return json
[ "def", "_json_body_", "(", "cls", ")", ":", "json", "=", "[", "]", "for", "series_name", ",", "data", "in", "six", ".", "iteritems", "(", "cls", ".", "_datapoints", ")", ":", "json", ".", "append", "(", "{", "'name'", ":", "series_name", ",", "'colum...
Return JSON body of the datapoints. :return: JSON body of the datapoints.
[ "Return", "JSON", "body", "of", "the", "datapoints", "." ]
python
train
MacHu-GWU/angora-project
angora/crawler/simplecrawler.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L185-L192
def html_with_encoding(self, url, timeout=None, encoding="utf-8"):
    """Manually get html with a user-chosen encoding.

    Returns the decoded html text, or ``None`` when no response could
    be obtained.
    """
    response = self.get_response(url, timeout=timeout)
    if not response:
        return None
    # decoder.decode() returns (text, encoding); only the text is needed.
    return self.decoder.decode(response.content, encoding)[0]
[ "def", "html_with_encoding", "(", "self", ",", "url", ",", "timeout", "=", "None", ",", "encoding", "=", "\"utf-8\"", ")", ":", "response", "=", "self", ".", "get_response", "(", "url", ",", "timeout", "=", "timeout", ")", "if", "response", ":", "return"...
Manually get html with user encoding setting.
[ "Manually", "get", "html", "with", "user", "encoding", "setting", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_cmdlong.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_cmdlong.py#L316-L344
def cmd_long(self, args):
    '''execute supplied command long'''
    if len(args) < 1:
        print("Usage: long <command> [arg1] [arg2]...")
        return
    command = None
    if args[0].isdigit():
        # Numeric command id given directly.
        command = int(args[0])
    else:
        # Resolve the symbolic name without eval(): args[0] is
        # user-supplied, so only a plain attribute lookup is safe.
        # First try the exact name, then with the MAV_CMD_ prefix.
        command = getattr(mavutil.mavlink, args[0], None)
        if command is None:
            command = getattr(mavutil.mavlink, "MAV_CMD_" + args[0], None)
    if command is None:
        print("Unknown command long ({0})".format(args[0]))
        return
    # MAVLink COMMAND_LONG always carries 7 float params; pad with zeros.
    floating_args = [float(x) for x in args[1:]]
    while len(floating_args) < 7:
        floating_args.append(float(0))
    self.master.mav.command_long_send(self.settings.target_system,
                                      self.settings.target_component,
                                      command,
                                      0, *floating_args)
[ "def", "cmd_long", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "1", ":", "print", "(", "\"Usage: long <command> [arg1] [arg2]...\"", ")", "return", "command", "=", "None", "if", "args", "[", "0", "]", ".", "isdigit", "(", ")...
execute supplied command long
[ "execute", "supplied", "command", "long" ]
python
train
mitsei/dlkit
dlkit/json_/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/managers.py#L628-L643
def get_objective_admin_session(self):
    """Gets the ``OsidSession`` associated with the objective
    administration service.

    return: (osid.learning.ObjectiveAdminSession) - an
            ``ObjectiveAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_objective_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_objective_admin()`` is ``true``.*
    """
    if self.supports_objective_admin():
        # pylint: disable=no-member
        return sessions.ObjectiveAdminSession(runtime=self._runtime)
    # Capability not supported by this manager.
    raise errors.Unimplemented()
[ "def", "get_objective_admin_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_objective_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ObjectiveAdminSession", "(...
Gets the ``OsidSession`` associated with the objective administration service. return: (osid.learning.ObjectiveAdminSession) - an ``ObjectiveAdminSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_admin()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "objective", "administration", "service", "." ]
python
train
genialis/django-priority-batch
src/django_priority_batch/prioritized_batcher.py
https://github.com/genialis/django-priority-batch/blob/63da74ef7348a67b7e31a131f295f51511495f30/src/django_priority_batch/prioritized_batcher.py#L33-L43
def global_instance(cls):
    """Return a per-thread global batcher instance (created lazily)."""
    # GLOBAL_BATCHER is thread-local storage, so each thread lazily gets
    # its own batcher configured from Django settings.
    if not hasattr(GLOBAL_BATCHER, 'instance'):
        GLOBAL_BATCHER.instance = PrioritizedBatcher(
            **getattr(settings, 'PRIORITIZED_BATCHER', {})
        )
    return GLOBAL_BATCHER.instance
[ "def", "global_instance", "(", "cls", ")", ":", "try", ":", "return", "GLOBAL_BATCHER", ".", "instance", "except", "AttributeError", ":", "instance", "=", "PrioritizedBatcher", "(", "*", "*", "getattr", "(", "settings", ",", "'PRIORITIZED_BATCHER'", ",", "{", ...
Return a per-thread global batcher instance.
[ "Return", "a", "per", "-", "thread", "global", "batcher", "instance", "." ]
python
train
log2timeline/plaso
plaso/multi_processing/task_manager.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/task_manager.py#L511-L519
def SampleTaskStatus(self, task, status):
    """Takes a sample of the status of the task for profiling.

    Args:
      task (Task): a task.
      status (str): status.
    """
    # No-op when profiling is disabled (profiler not set / None).
    profiler = self._tasks_profiler
    if profiler:
        profiler.Sample(task, status)
[ "def", "SampleTaskStatus", "(", "self", ",", "task", ",", "status", ")", ":", "if", "self", ".", "_tasks_profiler", ":", "self", ".", "_tasks_profiler", ".", "Sample", "(", "task", ",", "status", ")" ]
Takes a sample of the status of the task for profiling. Args: task (Task): a task. status (str): status.
[ "Takes", "a", "sample", "of", "the", "status", "of", "the", "task", "for", "profiling", "." ]
python
train
bhmm/bhmm
bhmm/estimators/bayesian_sampling.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/bayesian_sampling.py#L279-L287
def _updateHiddenStateTrajectories(self): """Sample a new set of state trajectories from the conditional distribution P(S | T, E, O) """ self.model.hidden_state_trajectories = list() for trajectory_index in range(self.nobs): hidden_state_trajectory = self._sampleHiddenStateTrajectory(self.observations[trajectory_index]) self.model.hidden_state_trajectories.append(hidden_state_trajectory) return
[ "def", "_updateHiddenStateTrajectories", "(", "self", ")", ":", "self", ".", "model", ".", "hidden_state_trajectories", "=", "list", "(", ")", "for", "trajectory_index", "in", "range", "(", "self", ".", "nobs", ")", ":", "hidden_state_trajectory", "=", "self", ...
Sample a new set of state trajectories from the conditional distribution P(S | T, E, O)
[ "Sample", "a", "new", "set", "of", "state", "trajectories", "from", "the", "conditional", "distribution", "P", "(", "S", "|", "T", "E", "O", ")" ]
python
train
disqus/nydus
nydus/db/routers/base.py
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L175-L184
def check_down_connections(self):
    """
    Iterates through all connections which were previously listed
    as unavailable and marks any that have expired their retry_timeout
    as being up.
    """
    now = time.time()
    # Snapshot the items before iterating: mark_connection_up() may
    # remove entries from _down_connections, and mutating a dict while
    # iterating it raises RuntimeError on Python 3. The snapshot is
    # safe regardless of whether the callback mutates the dict.
    for db_num, marked_down_at in list(self._down_connections.items()):
        if marked_down_at + self.retry_timeout <= now:
            self.mark_connection_up(db_num)
[ "def", "check_down_connections", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "for", "db_num", ",", "marked_down_at", "in", "self", ".", "_down_connections", ".", "items", "(", ")", ":", "if", "marked_down_at", "+", "self", ".", "ret...
Iterates through all connections which were previously listed as unavailable and marks any that have expired their retry_timeout as being up.
[ "Iterates", "through", "all", "connections", "which", "were", "previously", "listed", "as", "unavailable", "and", "marks", "any", "that", "have", "expired", "their", "retry_timeout", "as", "being", "up", "." ]
python
train
nugget/python-insteonplm
insteonplm/messagecallback.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messagecallback.py#L83-L105
def remove(self, msg, callback):
    """Remove a callback from the callback list.

    msg: Message template
    callback: Callback method to remove. If callback is None,
    all callbacks for the message template are removed.
    """
    if callback is None:
        # Drop every callback registered for this template.
        self._dict.pop(msg, None)
        return
    callbacks = self._dict.get(msg, [])
    # Remove the first matching callback, ignoring it when absent.
    if callback in callbacks:
        callbacks.remove(callback)
    if callbacks:
        _LOGGER.debug('%d callbacks for message: %s', len(callbacks), msg)
        # Re-register the remaining callbacks (overwrite mode).
        self.add(msg, callbacks, True)
    else:
        self._dict.pop(msg, None)
        _LOGGER.debug('Removed all callbacks for message: %s', msg)
[ "def", "remove", "(", "self", ",", "msg", ",", "callback", ")", ":", "if", "callback", "is", "None", ":", "self", ".", "_dict", ".", "pop", "(", "msg", ",", "None", ")", "else", ":", "cb", "=", "self", ".", "_dict", ".", "get", "(", "msg", ",",...
Remove a callback from the callback list. msg: Message template callback: Callback method to remove. If callback is None, all callbacks for the message template are removed.
[ "Remove", "a", "callback", "from", "the", "callback", "list", "." ]
python
train
erdc/RAPIDpy
RAPIDpy/dataset.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/dataset.py#L948-L1077
def write_flows_to_gssha_time_series_xys(self,
                                         path_to_output_file,
                                         series_name,
                                         series_id,
                                         river_index=None,
                                         river_id=None,
                                         date_search_start=None,
                                         date_search_end=None,
                                         daily=False,
                                         filter_mode="mean"):
    """
    Write out RAPID output to GSSHA WMS time series xys file.

    Parameters
    ----------
    path_to_output_file: str
        Path to the output xys file.
    series_name: str
        The name for the series.
    series_id: int
        The ID to give the series.
    river_index: int, optional
        Index of the river in the file you want the streamflow for.
        Either this or ``river_id`` must be given.
    river_id: int, optional
        The river ID that you want the streamflow for.
    date_search_start: :obj:`datetime.datetime`, optional
        Minimum date for the start of the time series.
    date_search_end: :obj:`datetime.datetime`, optional
        Maximum date for the end of the time series.
    daily: bool, optional
        If True and the file is CF-Compliant, write out daily flows.
    filter_mode: str, optional
        Daily aggregation to use: the average "mean" (default) or the
        maximum "max".

    Example writing an entire time series to file:

    .. code:: python

        from RAPIDpy import RAPIDDataset

        river_id = 3624735
        with RAPIDDataset('/path/to/Qout.nc') as qout_nc:
            qout_nc.write_flows_to_gssha_time_series_xys(
                '/timeseries/Qout_{0}.xys'.format(river_id),
                series_name="RAPID_TO_GSSHA_{0}".format(river_id),
                series_id=34,
                river_id=river_id)

    ``daily=True`` (with optional ``date_search_start``/``date_search_end``
    and ``filter_mode``) writes daily aggregates of a date-filtered subset
    in the same way.
    """
    # Resolve the river index from the id when an id was supplied;
    # otherwise one of the two must have been given explicitly.
    if river_id is not None:
        river_index = self.get_river_index(river_id)
    elif river_id is None and river_index is None:
        raise ValueError(" Need reach id or reach index ...")

    # Ensure the file has usable time information before extracting.
    self.raise_time_valid()

    # analyze and write
    qout_df = self.get_qout_index(river_index,
                                  date_search_start=date_search_start,
                                  date_search_end=date_search_end,
                                  daily=daily,
                                  filter_mode=filter_mode,
                                  as_dataframe=True)

    with open_csv(path_to_output_file, 'w') as out_ts:
        # NOTE(review): the header line ends with \r\n while the data rows
        # end with \n -- looks inconsistent; confirm against the WMS xys
        # format spec before changing.
        out_ts.write("XYS {0} {1} \"{2}\"\r\n".format(series_id,
                                                      len(qout_df.index),
                                                      series_name))
        for index, pd_row in qout_df.iterrows():
            date_str = index.strftime("%m/%d/%Y %I:%M:%S %p")
            out_ts.write("\"{0}\" {1:.5f}\n".format(date_str, pd_row[0]))
[ "def", "write_flows_to_gssha_time_series_xys", "(", "self", ",", "path_to_output_file", ",", "series_name", ",", "series_id", ",", "river_index", "=", "None", ",", "river_id", "=", "None", ",", "date_search_start", "=", "None", ",", "date_search_end", "=", "None", ...
Write out RAPID output to GSSHA WMS time series xys file. Parameters ---------- path_to_output_file: str Path to the output xys file. series_name: str The name for the series. series_id: int The ID to give the series. river_index: :obj:`datetime.datetime`, optional This is the index of the river in the file you want the streamflow for. river_id: :obj:`datetime.datetime`, optional This is the river ID that you want the streamflow for. date_search_start: :obj:`datetime.datetime`, optional This is a datetime object with the date of the minimum date for starting. date_search_end: :obj:`datetime.datetime`, optional This is a datetime object with the date of the maximum date for ending. daily: bool, optional If True and the file is CF-Compliant, write out daily flows. filter_mode: str, optional You can get the daily average "mean" or the maximum "max". Defauls is "mean". Example writing entire time series to file: .. code:: python from RAPIDpy import RAPIDDataset river_id = 3624735 path_to_rapid_qout = '/path/to/Qout.nc' with RAPIDDataset(path_to_rapid_qout) as qout_nc: qout_nc.write_flows_to_gssha_time_series_xys( '/timeseries/Qout_{0}.xys'.format(river_id), series_name="RAPID_TO_GSSHA_{0}".format(river_id), series_id=34, river_id=river_id) Example writing entire time series as daily average to file: .. code:: python from RAPIDpy import RAPIDDataset river_id = 3624735 path_to_rapid_qout = '/path/to/Qout.nc' with RAPIDDataset(path_to_rapid_qout) as qout_nc: # NOTE: Getting the river index is not necessary # this is just an example of how to use this river_index = qout_nc.get_river_index(river_id) # if file is CF compliant, you can write out daily average qout_nc.write_flows_to_gssha_time_series_xys( '/timeseries/Qout_daily.xys', series_name="RAPID_TO_GSSHA_{0}".format(river_id), series_id=34, river_index=river_index, daily=True) Example writing subset of time series as daily maximum to file: .. 
code:: python from datetime import datetime from RAPIDpy import RAPIDDataset river_id = 3624735 path_to_rapid_qout = '/path/to/Qout.nc' with RAPIDDataset(path_to_rapid_qout) as qout_nc: # NOTE: Getting the river index is not necessary # this is just an example of how to use this river_index = qout_nc.get_river_index(river_id) # if file is CF compliant, you can filter by date and # get daily values qout_nc.write_flows_to_gssha_time_series_xys( '/timeseries/Qout_daily_date_filter.xys', series_name="RAPID_TO_GSSHA_{0}".format(river_id), series_id=34, river_index=river_index, date_search_start=datetime(2002, 8, 31), date_search_end=datetime(2002, 9, 15), daily=True, filter_mode="max")
[ "Write", "out", "RAPID", "output", "to", "GSSHA", "WMS", "time", "series", "xys", "file", "." ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L594-L621
def commonancestors(Class, *args):
    """Generator function to find common ancestors of a particular type
    for any two or more FoLiA element instances.

    The function produces all common ancestors of the type specified,
    starting from the closest one up to the most distant one.

    Parameters:
        Class: The type of ancestor to find, should be the
            :class:`AbstractElement` class or any subclass thereof
            (not an instance!)
        *args: The elements to find the common ancestors of, elements
            are instances derived from :class:`AbstractElement`

    Yields:
        instance derived from :class:`AbstractElement`: A common
        ancestor of the arguments, an instance of the specified
        ``Class``.
    """
    shared = None
    for element in args:
        lineage = list(element.ancestors(Class))
        if shared is None:
            # First element: start from its full ancestor chain.
            shared = lineage
        else:
            # Keep only ancestors also present in this element's chain,
            # preserving closest-to-farthest order.
            shared = [ancestor for ancestor in shared
                      if ancestor in lineage]
    if shared:
        for ancestor in shared:
            yield ancestor
[ "def", "commonancestors", "(", "Class", ",", "*", "args", ")", ":", "commonancestors", "=", "None", "#pylint: disable=redefined-outer-name", "for", "sibling", "in", "args", ":", "ancestors", "=", "list", "(", "sibling", ".", "ancestors", "(", "Class", ")", ")"...
Generator function to find common ancestors of a particular type for any two or more FoLiA element instances. The function produces all common ancestors of the type specified, starting from the closest one up to the most distant one. Parameters: Class: The type of ancestor to find, should be the :class:`AbstractElement` class or any subclass thereof (not an instance!) *args: The elements to find the common ancestors of, elements are instances derived from :class:`AbstractElement` Yields: instance derived from :class:`AbstractElement`: A common ancestor of the arguments, an instance of the specified ``Class``.
[ "Generator", "function", "to", "find", "common", "ancestors", "of", "a", "particular", "type", "for", "any", "two", "or", "more", "FoLiA", "element", "instances", "." ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/groupsio.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/groupsio.py#L271-L294
def setup_cmd_parser(cls):
    """Returns the Groupsio argument parser."""
    parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
                                          from_date=True,
                                          token_auth=True)

    # The API token is mandatory for Groups.io, so flip the shared
    # --api-token option to required.
    parser.parser._option_string_actions['--api-token'].required = True

    # Optional arguments
    group = parser.parser.add_argument_group('Groupsio arguments')
    group.add_argument('--mboxes-path', dest='mboxes_path',
                       help="Path where mbox files will be stored")
    group.add_argument('--no-verify', dest='verify',
                       action='store_false',
                       help="Value 'True' enable SSL verification")

    # Positional argument: the group to fetch
    parser.parser.add_argument('group_name',
                               help="Name of the group on Groups.io")

    return parser
[ "def", "setup_cmd_parser", "(", "cls", ")", ":", "parser", "=", "BackendCommandArgumentParser", "(", "cls", ".", "BACKEND", ".", "CATEGORIES", ",", "from_date", "=", "True", ",", "token_auth", "=", "True", ")", "# Backend token is required", "action", "=", "pars...
Returns the Groupsio argument parser.
[ "Returns", "the", "Groupsio", "argument", "parser", "." ]
python
test
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L665-L672
def _iter_descendants_levelorder(self, is_leaf_fn=None): """ Iterate over all desdecendant nodes.""" tovisit = deque([self]) while len(tovisit) > 0: node = tovisit.popleft() yield node if not is_leaf_fn or not is_leaf_fn(node): tovisit.extend(node.children)
[ "def", "_iter_descendants_levelorder", "(", "self", ",", "is_leaf_fn", "=", "None", ")", ":", "tovisit", "=", "deque", "(", "[", "self", "]", ")", "while", "len", "(", "tovisit", ")", ">", "0", ":", "node", "=", "tovisit", ".", "popleft", "(", ")", "...
Iterate over all desdecendant nodes.
[ "Iterate", "over", "all", "desdecendant", "nodes", "." ]
python
train
LettError/MutatorMath
Lib/mutatorMath/objects/mutator.py
https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/objects/mutator.py#L153-L163
def _collectOffAxisPoints(self):
    """Return a list with all off-axis locations.

    A location is off-axis when ``Location.isOnAxis()`` reports
    ``None`` or ``False`` for it.
    """
    # A dict is used to de-duplicate keys while keeping insertion order.
    seen = {}
    for loc_key, (_value, _deltaName) in self.items():
        axis_name = Location(loc_key).isOnAxis()
        if axis_name is None or axis_name is False:
            seen[loc_key] = 1
    return list(seen.keys())
[ "def", "_collectOffAxisPoints", "(", "self", ")", ":", "offAxis", "=", "{", "}", "for", "l", ",", "(", "value", ",", "deltaName", ")", "in", "self", ".", "items", "(", ")", ":", "location", "=", "Location", "(", "l", ")", "name", "=", "location", "...
Return a dictionary with all off-axis locations.
[ "Return", "a", "dictionary", "with", "all", "off", "-", "axis", "locations", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/_pty.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/_pty.py#L61-L77
def slave_open(tty_name):
    """slave_open(tty_name) -> slave_fd
    Open the pty slave and acquire the controlling terminal, returning
    opened filedescriptor.
    Deprecated, use openpty() instead."""
    fd = os.open(tty_name, os.O_RDWR)
    try:
        from fcntl import ioctl, I_PUSH
    except ImportError:
        # Platform without STREAMS support (e.g. Linux): nothing more to do.
        return fd
    try:
        # Push the STREAMS modules that provide terminal semantics (SysV).
        ioctl(fd, I_PUSH, "ptem")
        ioctl(fd, I_PUSH, "ldterm")
    except OSError:
        # Best-effort: keep the fd usable even if the push fails.
        pass
    return fd
[ "def", "slave_open", "(", "tty_name", ")", ":", "result", "=", "os", ".", "open", "(", "tty_name", ",", "os", ".", "O_RDWR", ")", "try", ":", "from", "fcntl", "import", "ioctl", ",", "I_PUSH", "except", "ImportError", ":", "return", "result", "try", ":...
slave_open(tty_name) -> slave_fd Open the pty slave and acquire the controlling terminal, returning opened filedescriptor. Deprecated, use openpty() instead.
[ "slave_open", "(", "tty_name", ")", "-", ">", "slave_fd", "Open", "the", "pty", "slave", "and", "acquire", "the", "controlling", "terminal", "returning", "opened", "filedescriptor", ".", "Deprecated", "use", "openpty", "()", "instead", "." ]
python
train
rstoneback/pysat
pysat/instruments/pysat_sgp4.py
https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/pysat_sgp4.py#L35-L67
def init(self): """ Adds custom calculations to orbit simulation. This routine is run once, and only once, upon instantiation. Adds quasi-dipole coordiantes, velocity calculation in ECEF coords, adds the attitude vectors of spacecraft assuming x is ram pointing and z is generally nadir, adds ionospheric parameters from the Interational Reference Ionosphere (IRI), as well as simulated winds from the Horiontal Wind Model (HWM). """ self.custom.add(add_quasi_dipole_coordinates, 'modify') self.custom.add(add_aacgm_coordinates, 'modify') self.custom.add(calculate_ecef_velocity, 'modify') self.custom.add(add_sc_attitude_vectors, 'modify') self.custom.add(add_iri_thermal_plasma, 'modify') self.custom.add(add_hwm_winds_and_ecef_vectors, 'modify') self.custom.add(add_igrf, 'modify') # project simulated vectors onto s/c basis # IGRF # create metadata to be added along with vector projection in_meta = {'desc':'IGRF geomagnetic field expressed in the s/c basis.', 'units':'nT'} # project IGRF self.custom.add(project_ecef_vector_onto_sc, 'modify', 'end', 'B_ecef_x', 'B_ecef_y', 'B_ecef_z', 'B_sc_x', 'B_sc_y', 'B_sc_z', meta=[in_meta.copy(), in_meta.copy(), in_meta.copy()]) # project total wind vector self.custom.add(project_hwm_onto_sc, 'modify') # neutral parameters self.custom.add(add_msis, 'modify')
[ "def", "init", "(", "self", ")", ":", "self", ".", "custom", ".", "add", "(", "add_quasi_dipole_coordinates", ",", "'modify'", ")", "self", ".", "custom", ".", "add", "(", "add_aacgm_coordinates", ",", "'modify'", ")", "self", ".", "custom", ".", "add", ...
Adds custom calculations to orbit simulation. This routine is run once, and only once, upon instantiation. Adds quasi-dipole coordiantes, velocity calculation in ECEF coords, adds the attitude vectors of spacecraft assuming x is ram pointing and z is generally nadir, adds ionospheric parameters from the Interational Reference Ionosphere (IRI), as well as simulated winds from the Horiontal Wind Model (HWM).
[ "Adds", "custom", "calculations", "to", "orbit", "simulation", ".", "This", "routine", "is", "run", "once", "and", "only", "once", "upon", "instantiation", ".", "Adds", "quasi", "-", "dipole", "coordiantes", "velocity", "calculation", "in", "ECEF", "coords", "...
python
train
bskinn/opan
opan/vpt2/repo.py
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/vpt2/repo.py#L437-L456
def has_param(self, param): """ .. todo:: has_param docstring """ # Imports from ..error import RepoError # Try to get the param; pass along all errors, except 'data' error # from RepoError retval = True try: self.get_param(param) except RepoError as RErr: if RErr.tc == RErr.DATA: retval = False ## end if ## end try # Should be good to return return retval
[ "def", "has_param", "(", "self", ",", "param", ")", ":", "# Imports", "from", ".", ".", "error", "import", "RepoError", "# Try to get the param; pass along all errors, except 'data' error", "# from RepoError", "retval", "=", "True", "try", ":", "self", ".", "get_para...
.. todo:: has_param docstring
[ "..", "todo", "::", "has_param", "docstring" ]
python
train
ungarj/tilematrix
tilematrix/_funcs.py
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L63-L78
def snap_bounds(bounds=None, tile_pyramid=None, zoom=None, pixelbuffer=0): """ Extend bounds to be aligned with union of tile bboxes. - bounds: (left, bottom, right, top) - tile_pyramid: a TilePyramid object - zoom: target zoom level - pixelbuffer: apply pixelbuffer """ bounds = Bounds(*bounds) validate_zoom(zoom) lb = _tile_from_xy(tile_pyramid, bounds.left, bounds.bottom, zoom, on_edge_use="rt") rt = _tile_from_xy(tile_pyramid, bounds.right, bounds.top, zoom, on_edge_use="lb") left, bottom, _, _ = lb.bounds(pixelbuffer) _, _, right, top = rt.bounds(pixelbuffer) return Bounds(left, bottom, right, top)
[ "def", "snap_bounds", "(", "bounds", "=", "None", ",", "tile_pyramid", "=", "None", ",", "zoom", "=", "None", ",", "pixelbuffer", "=", "0", ")", ":", "bounds", "=", "Bounds", "(", "*", "bounds", ")", "validate_zoom", "(", "zoom", ")", "lb", "=", "_ti...
Extend bounds to be aligned with union of tile bboxes. - bounds: (left, bottom, right, top) - tile_pyramid: a TilePyramid object - zoom: target zoom level - pixelbuffer: apply pixelbuffer
[ "Extend", "bounds", "to", "be", "aligned", "with", "union", "of", "tile", "bboxes", "." ]
python
train
MolSSI-BSE/basis_set_exchange
basis_set_exchange/references.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/references.py#L55-L103
def reference_text(ref): '''Convert a single reference to plain text format Parameters ---------- ref : dict Information about a single reference ''' ref_wrap = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 8) s = '' if ref['type'] == 'unpublished': s += ref_wrap.fill(', '.join(ref['authors'])) + '\n' s += ref_wrap.fill(ref['title']) + '\n' s += ref_wrap.fill(ref['note']) + '\n' elif ref['type'] == 'article': s += ref_wrap.fill(', '.join(ref['authors'])) + '\n' s += ref_wrap.fill(ref['title']) + '\n' s += '{}, {}, {} ({})'.format(ref['journal'], ref['volume'], ref['page'], ref['year']) s += '\n' + ref['doi'] elif ref['type'] == 'incollection': s += ref_wrap.fill(', '.join(ref['authors'])) s += ref_wrap.fill('\n{}'.format(ref['title'])) s += ref_wrap.fill('\nin \'{}\''.format(ref['booktitle'])) if 'editors' in ref: s += ref_wrap.fill('\ned. ' + ', '.join(ref['editors'])) if 'series' in ref: s += '\n{}, {}, {} ({})'.format(ref['series'], ref['volume'], ref['page'], ref['year']) if 'doi' in ref: s += '\n' + ref['doi'] elif ref['type'] == 'techreport': s += ref_wrap.fill(', '.join(ref['authors'])) s += ref_wrap.fill('\n{}'.format(ref['title'])) s += '\n\'{}\''.format(ref['institution']) s += '\nTechnical Report {}'.format(ref['number']) s += '\n{}'.format(ref['year']) if 'doi' in ref: s += '\n' + ref['doi'] elif ref['type'] == 'misc': s += ref_wrap.fill(', '.join(ref['authors'])) + '\n' s += ref_wrap.fill(ref['title']) if 'note' in ref: s += '\n' + ref['note'] if 'doi' in ref: s += '\n' + ref['doi'] else: raise RuntimeError('Cannot handle reference type {}'.format(ref['type'])) return s
[ "def", "reference_text", "(", "ref", ")", ":", "ref_wrap", "=", "textwrap", ".", "TextWrapper", "(", "initial_indent", "=", "''", ",", "subsequent_indent", "=", "' '", "*", "8", ")", "s", "=", "''", "if", "ref", "[", "'type'", "]", "==", "'unpublished'",...
Convert a single reference to plain text format Parameters ---------- ref : dict Information about a single reference
[ "Convert", "a", "single", "reference", "to", "plain", "text", "format" ]
python
train
materialsproject/pymatgen
pymatgen/io/qchem/outputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/qchem/outputs.py#L404-L430
def _read_optimized_geometry(self): """ Parses optimized XYZ coordinates. If not present, parses optimized Z-matrix. """ header_pattern = r"\*+\s+OPTIMIZATION\s+CONVERGED\s+\*+\s+\*+\s+Coordinates \(Angstroms\)\s+ATOM\s+X\s+Y\s+Z" table_pattern = r"\s+\d+\s+\w+\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)" footer_pattern = r"\s+Z-matrix Print:" parsed_optimized_geometry = read_table_pattern( self.text, header_pattern, table_pattern, footer_pattern) if parsed_optimized_geometry == [] or None: self.data["optimized_geometry"] = None header_pattern = r"^\s+\*+\s+OPTIMIZATION CONVERGED\s+\*+\s+\*+\s+Z-matrix\s+Print:\s+\$molecule\s+[\d\-]+\s+[\d\-]+\n" table_pattern = r"\s*(\w+)(?:\s+(\d+)\s+([\d\-\.]+)(?:\s+(\d+)\s+([\d\-\.]+)(?:\s+(\d+)\s+([\d\-\.]+))*)*)*(?:\s+0)*" footer_pattern = r"^\$end\n" self.data["optimized_zmat"] = read_table_pattern( self.text, header_pattern, table_pattern, footer_pattern) else: self.data["optimized_geometry"] = process_parsed_coords( parsed_optimized_geometry[0]) if self.data.get('charge') != None: self.data["molecule_from_optimized_geometry"] = Molecule( species=self.data.get('species'), coords=self.data.get('optimized_geometry'), charge=self.data.get('charge'), spin_multiplicity=self.data.get('multiplicity'))
[ "def", "_read_optimized_geometry", "(", "self", ")", ":", "header_pattern", "=", "r\"\\*+\\s+OPTIMIZATION\\s+CONVERGED\\s+\\*+\\s+\\*+\\s+Coordinates \\(Angstroms\\)\\s+ATOM\\s+X\\s+Y\\s+Z\"", "table_pattern", "=", "r\"\\s+\\d+\\s+\\w+\\s+([\\d\\-\\.]+)\\s+([\\d\\-\\.]+)\\s+([\\d\\-\\.]+)\"", ...
Parses optimized XYZ coordinates. If not present, parses optimized Z-matrix.
[ "Parses", "optimized", "XYZ", "coordinates", ".", "If", "not", "present", "parses", "optimized", "Z", "-", "matrix", "." ]
python
train
datacats/datacats
datacats/docker.py
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/docker.py#L349-L367
def container_logs(name, tail, follow, timestamps): """ Wrapper for docker logs, attach commands. """ if follow: return _get_docker().attach( name, stdout=True, stderr=True, stream=True ) return _docker.logs( name, stdout=True, stderr=True, tail=tail, timestamps=timestamps, )
[ "def", "container_logs", "(", "name", ",", "tail", ",", "follow", ",", "timestamps", ")", ":", "if", "follow", ":", "return", "_get_docker", "(", ")", ".", "attach", "(", "name", ",", "stdout", "=", "True", ",", "stderr", "=", "True", ",", "stream", ...
Wrapper for docker logs, attach commands.
[ "Wrapper", "for", "docker", "logs", "attach", "commands", "." ]
python
train
flo-compbio/genometools
genometools/expression/visualize/heatmap.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/visualize/heatmap.py#L117-L413
def get_figure( self, emin=None, emax=None, width=800, height=400, margin_left=100, margin_bottom=60, margin_top=30, margin_right=0, colorbar_size=0.4, xaxis_label=None, yaxis_label=None, xaxis_nticks=None, yaxis_nticks=None, xtick_angle=30, font='"Droid Serif", "Open Serif", serif', font_size=12, title_font_size=None, show_sample_labels=True, **kwargs): """Generate a plotly figure of the heatmap. Parameters ---------- emin : int, float, or None, optional The expression value corresponding to the lower end of the colorscale. If None, determine, automatically. [None] emax : int, float, or None, optional The expression value corresponding to the upper end of the colorscale. If None, determine automatically. [None] margin_left : int, optional The size of the left margin (in px). [100] margin_right : int, optional The size of the right margin (in px). [0] margin_top : int, optional The size of the top margin (in px). [30] margin_bottom : int, optional The size of the bottom margin (in px). [60] colorbar_size : int or float, optional The sze of the colorbar, relative to the figure size. [0.4] xaxis_label : str or None, optional X-axis label. If None, use `ExpMatrix` default. [None] yaxis_label : str or None, optional y-axis label. If None, use `ExpMatrix` default. [None] xtick_angle : int or float, optional X-axis tick angle (in degrees). [30] font : str, optional Name of font to use. Can be multiple, separated by comma, to specify a prioritized list. [' "Droid Serif", "Open Serif", "serif"'] font_size : int or float, optional Font size to use throughout the figure, in points. [12] title_font_size : int or float or None, optional Font size to use for labels on axes and the colorbar. If None, use `font_size` value. [None] show_sample_labels : bool, optional Whether to show the sample labels. [True] Returns ------- `plotly.graph_objs.Figure` The plotly figure. 
""" # emin and/or emax are unspecified, set to data min/max values if emax is None: emax = self.matrix.X.max() if emin is None: emin = self.matrix.X.min() title = self.title if title_font_size is None: title_font_size = font_size colorbar_label = self.colorbar_label or 'Expression' colorbar = go.ColorBar( lenmode='fraction', len=colorbar_size, title=colorbar_label, titlefont=dict( size=title_font_size, ), titleside='right', xpad=0, ypad=0, outlinewidth=0, # no border thickness=20, # in pixels # outlinecolor = '#000000', ) def fix_plotly_label_bug(labels): """ This fixes a bug whereby plotly treats labels that look like numbers (integers or floats) as numeric instead of categorical, even when they are passed as strings. The fix consists of appending an underscore to any label that looks like a number. """ assert isinstance(labels, Iterable) fixed_labels = [] for l in labels: try: float(l) except (ValueError, TypeError): fixed_labels.append(str(l)) else: fixed_labels.append(str(l) + '_') return fixed_labels x = fix_plotly_label_bug(self.matrix.samples) gene_labels = self.matrix.genes.tolist() if self.gene_aliases: for i, gene in enumerate(gene_labels): try: alias = self.gene_aliases[gene] except KeyError: pass else: gene_labels[i] = '%s/%s' % (gene, alias) gene_labels = fix_plotly_label_bug(gene_labels) data = [ go.Heatmap( z=self.matrix.X, x=x, y=gene_labels, zmin=emin, zmax=emax, colorscale=self.colorscale, colorbar=colorbar, hoverinfo='x+y+z', **kwargs ), ] xticks = 'outside' if not show_sample_labels: xticks = '' if xaxis_label is None: if self.matrix.samples.name is not None: xaxis_label = self.matrix.samples.name else: xaxis_label = 'Samples' xaxis_label = xaxis_label + ' (n = %d)' % self.matrix.n if yaxis_label is None: if self.matrix.genes.name is not None: yaxis_label = self.matrix.genes.name else: yaxis_label = 'Genes' yaxis_label = yaxis_label + ' (p = %d)' % self.matrix.p layout = go.Layout( width=width, height=height, title=title, titlefont=go.Font( 
size=title_font_size ), font=go.Font( size=font_size, family=font ), xaxis=go.XAxis( title=xaxis_label, titlefont=dict(size=title_font_size), showticklabels=show_sample_labels, ticks=xticks, nticks=xaxis_nticks, tickangle=xtick_angle, showline=True ), yaxis=go.YAxis( title=yaxis_label, titlefont=dict(size=title_font_size), nticks=yaxis_nticks, autorange='reversed', showline=True ), margin=go.Margin( l=margin_left, t=margin_top, b=margin_bottom, r=margin_right, pad=0 ), ) # add annotations # we need separate, but overlaying, axes to place the annotations layout['xaxis2'] = go.XAxis( overlaying = 'x', showline = False, tickfont = dict(size=0), autorange=False, range=[-0.5, self.matrix.n-0.5], ticks='', showticklabels=False ) layout['yaxis2'] = go.YAxis( overlaying='y', showline=False, tickfont=dict(size=0), autorange=False, range=[self.matrix.p-0.5, -0.5], ticks='', showticklabels=False ) # gene (row) annotations for ann in self.gene_annotations: i = self.matrix.genes.get_loc(ann.gene) xmn = -0.5 xmx = self.matrix.n-0.5 ymn = i-0.5 ymx = i+0.5 #logger.debug('Transparency is %.1f', ann.transparency) data.append( go.Scatter( x=[xmn, xmx, xmx, xmn, xmn], y=[ymn, ymn, ymx, ymx, ymn], mode='lines', hoverinfo='none', showlegend=False, line=dict(color=ann.color), xaxis='x2', yaxis='y2', #opacity=0.5, opacity=1-ann.transparency, ) ) if ann.label is not None: layout.annotations.append( go.Annotation( text=ann.label, x=0.01, y=i-0.5, #y=i+0.5, xref='paper', yref='y2', xanchor='left', yanchor='bottom', showarrow=False, bgcolor='white', #opacity=1-ann.transparency, opacity=0.8, borderpad=0, #textangle=30, font=dict(color=ann.color) ) ) # sample (column) annotations for ann in self.sample_annotations: j = self.matrix.samples.get_loc(ann.sample) xmn = j-0.5 xmx = j+0.5 ymn = -0.5 ymx = self.matrix.p-0.5 data.append( go.Scatter( x=[xmn, xmx, xmx, xmn, xmn], y=[ymn, ymn, ymx, ymx, ymn], mode='lines', hoverinfo='none', showlegend=False, line=dict(color=ann.color), xaxis='x2', 
yaxis='y2', opacity=1.0) ) if ann.label is not None: layout.annotations.append( go.Annotation( text=ann.label, y=0.99, x=j+0.5, #y=i+0.5, xref='x2', yref='paper', xanchor='left', yanchor='top', showarrow=False, bgcolor='white', opacity=1-ann.transparency, borderpad=0, textangle=90, font=dict(color=ann.color) ) ) fig = go.Figure( data=data, layout=layout ) return fig
[ "def", "get_figure", "(", "self", ",", "emin", "=", "None", ",", "emax", "=", "None", ",", "width", "=", "800", ",", "height", "=", "400", ",", "margin_left", "=", "100", ",", "margin_bottom", "=", "60", ",", "margin_top", "=", "30", ",", "margin_rig...
Generate a plotly figure of the heatmap. Parameters ---------- emin : int, float, or None, optional The expression value corresponding to the lower end of the colorscale. If None, determine, automatically. [None] emax : int, float, or None, optional The expression value corresponding to the upper end of the colorscale. If None, determine automatically. [None] margin_left : int, optional The size of the left margin (in px). [100] margin_right : int, optional The size of the right margin (in px). [0] margin_top : int, optional The size of the top margin (in px). [30] margin_bottom : int, optional The size of the bottom margin (in px). [60] colorbar_size : int or float, optional The sze of the colorbar, relative to the figure size. [0.4] xaxis_label : str or None, optional X-axis label. If None, use `ExpMatrix` default. [None] yaxis_label : str or None, optional y-axis label. If None, use `ExpMatrix` default. [None] xtick_angle : int or float, optional X-axis tick angle (in degrees). [30] font : str, optional Name of font to use. Can be multiple, separated by comma, to specify a prioritized list. [' "Droid Serif", "Open Serif", "serif"'] font_size : int or float, optional Font size to use throughout the figure, in points. [12] title_font_size : int or float or None, optional Font size to use for labels on axes and the colorbar. If None, use `font_size` value. [None] show_sample_labels : bool, optional Whether to show the sample labels. [True] Returns ------- `plotly.graph_objs.Figure` The plotly figure.
[ "Generate", "a", "plotly", "figure", "of", "the", "heatmap", ".", "Parameters", "----------", "emin", ":", "int", "float", "or", "None", "optional", "The", "expression", "value", "corresponding", "to", "the", "lower", "end", "of", "the", "colorscale", ".", "...
python
train
QuantEcon/QuantEcon.py
quantecon/gridtools.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/gridtools.py#L229-L263
def simplex_index(x, m, n): r""" Return the index of the point x in the lexicographic order of the integer points of the (m-1)-dimensional simplex :math:`\{x \mid x_0 + \cdots + x_{m-1} = n\}`. Parameters ---------- x : array_like(int, ndim=1) Integer point in the simplex, i.e., an array of m nonnegative itegers that sum to n. m : scalar(int) Dimension of each point. Must be a positive integer. n : scalar(int) Number which the coordinates of each point sum to. Must be a nonnegative integer. Returns ------- idx : scalar(int) Index of x. """ if m == 1: return 0 decumsum = np.cumsum(x[-1:0:-1])[::-1] idx = num_compositions(m, n) - 1 for i in range(m-1): if decumsum[i] == 0: break idx -= num_compositions(m-i, decumsum[i]-1) return idx
[ "def", "simplex_index", "(", "x", ",", "m", ",", "n", ")", ":", "if", "m", "==", "1", ":", "return", "0", "decumsum", "=", "np", ".", "cumsum", "(", "x", "[", "-", "1", ":", "0", ":", "-", "1", "]", ")", "[", ":", ":", "-", "1", "]", "i...
r""" Return the index of the point x in the lexicographic order of the integer points of the (m-1)-dimensional simplex :math:`\{x \mid x_0 + \cdots + x_{m-1} = n\}`. Parameters ---------- x : array_like(int, ndim=1) Integer point in the simplex, i.e., an array of m nonnegative itegers that sum to n. m : scalar(int) Dimension of each point. Must be a positive integer. n : scalar(int) Number which the coordinates of each point sum to. Must be a nonnegative integer. Returns ------- idx : scalar(int) Index of x.
[ "r", "Return", "the", "index", "of", "the", "point", "x", "in", "the", "lexicographic", "order", "of", "the", "integer", "points", "of", "the", "(", "m", "-", "1", ")", "-", "dimensional", "simplex", ":", "math", ":", "\\", "{", "x", "\\", "mid", "...
python
train
jahuth/litus
spikes.py
https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/spikes.py#L902-L970
def label_by_time(self,time_signals,label_names=[],time_units='ms',time_dimension=0,copy=True,backup_original_spike_times_to=None,**kwargs): """ creates a labeled spike data structure `time_signals` is list of lists (or matrix), containing a timestamp in the first column (or first element of each element) and indizes that are to be applied to the data in the remaining columns / elements. This function will not add or remove spikes, but only shift spikes according to the preceding time signals. If you want to get spikes relative to a time signal with fixed limits, use `label_peri_signals`, which will leave out and duplicate spikes, but can manage overlapping time signals. To get the absolute spike times back, `.absolute_spike_times_from_labels` can be used on the resulting SpikeContainer. However, the order and length of the timing signals might not be correct, if eg. the intervals between time signals vary in length. If `backup_original_spike_times_to` is set to a string, the original spike times will be saved as this dimension as the new (relative) spike times replace the old time dimension. 
""" if self.data_format == 'empty': return SpikeContainer(None,units=self.units,copy_from=self) spike_times = self.spike_times.get_converted(time_dimension, units=time_units)[1].copy() # this is read only time_signals = np.array(time_signals) re_zeroed_spike_times = spike_times.copy() # this will be modified indizes = np.zeros((len(spike_times),time_signals.shape[0] -1 )) maximal_time_gap = np.max(np.diff(time_signals[0])) for t in range(len(time_signals[0])): if t + 1 < len(time_signals[0]): # we are past the last time signal spike_range = (spike_times > time_signals[0][t]) * (spike_times <= time_signals[0][t+1]) indizes[spike_range,:] = [time_signals[_i][t] for _i in range(1,time_signals.shape[0])] re_zeroed_spike_times[spike_range] = ( spike_times[spike_range] - time_signals[0][t] ) else: # we move all spikes in the future back by this time signal # (this will overwrite the spike times multiple times) indizes[spike_times > time_signals[0][t],:] = [time_signals[_i][t] for _i in range(1,time_signals.shape[0])] re_zeroed_spike_times[spike_times > time_signals[0][t]] = ( spike_times[spike_times > time_signals[0][t]] - time_signals[0][t] ) new_spike_times = LabeledMatrix(self.spike_times.matrix,self.spike_times.labels) new_spike_times.add_label_dimension(label_names,indizes) new_spike_times.labels[0].units = time_units new_spike_times.matrix[:,0] = re_zeroed_spike_times new_spike_times.labels[0].min = 0 new_spike_times.labels[0].max = maximal_time_gap if kwargs.get('recalculate_time_extent',False): new_spike_times.labels[0].min = np.min(re_zeroed_spike_times) new_spike_times.labels[0].max = np.max(re_zeroed_spike_times) if backup_original_spike_times_to is not None: new_spike_times.add_label_dimension(backup_original_spike_times_to,self[time_dimension]) if copy: s = SpikeContainer(new_spike_times, copy_from=self) #if backup_original_spike_times_to is not None: # # copying spikes # time_label = self.get_label(time_dimension) # s[backup_original_spike_times_to] = 
{'data':self[time_dimension],'label': time_label} return s else: #if backup_original_spike_times_to is not None: # # copying spikes # time_label = self.get_label(time_dimension) # self[backup_original_spike_times_to] = {'data':self[time_dimension],'label': time_label} self.set_spike_times(new_spike_times) return self
[ "def", "label_by_time", "(", "self", ",", "time_signals", ",", "label_names", "=", "[", "]", ",", "time_units", "=", "'ms'", ",", "time_dimension", "=", "0", ",", "copy", "=", "True", ",", "backup_original_spike_times_to", "=", "None", ",", "*", "*", "kwar...
creates a labeled spike data structure `time_signals` is list of lists (or matrix), containing a timestamp in the first column (or first element of each element) and indizes that are to be applied to the data in the remaining columns / elements. This function will not add or remove spikes, but only shift spikes according to the preceding time signals. If you want to get spikes relative to a time signal with fixed limits, use `label_peri_signals`, which will leave out and duplicate spikes, but can manage overlapping time signals. To get the absolute spike times back, `.absolute_spike_times_from_labels` can be used on the resulting SpikeContainer. However, the order and length of the timing signals might not be correct, if eg. the intervals between time signals vary in length. If `backup_original_spike_times_to` is set to a string, the original spike times will be saved as this dimension as the new (relative) spike times replace the old time dimension.
[ "creates", "a", "labeled", "spike", "data", "structure" ]
python
train
ktdreyer/txkoji
txkoji/connection.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L286-L304
def getTaskDescendents(self, task_id, **kwargs): """ Load all information about a task's descendents into Task classes. Calls "getTaskDescendents" XML-RPC (with request=True to get the full information.) :param task_id: ``int``, for example 12345, parent task ID :returns: deferred that when fired returns a list of Task (Munch, dict-like) objects representing Koji tasks. """ kwargs['request'] = True data = yield self.call('getTaskDescendents', task_id, **kwargs) tasks = [] for tdata in data[str(task_id)]: task = Task.fromDict(tdata) task.connection = self tasks.append(task) defer.returnValue(tasks)
[ "def", "getTaskDescendents", "(", "self", ",", "task_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'request'", "]", "=", "True", "data", "=", "yield", "self", ".", "call", "(", "'getTaskDescendents'", ",", "task_id", ",", "*", "*", "kwargs", ...
Load all information about a task's descendents into Task classes. Calls "getTaskDescendents" XML-RPC (with request=True to get the full information.) :param task_id: ``int``, for example 12345, parent task ID :returns: deferred that when fired returns a list of Task (Munch, dict-like) objects representing Koji tasks.
[ "Load", "all", "information", "about", "a", "task", "s", "descendents", "into", "Task", "classes", "." ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L1098-L1128
def gen_tmp_file(i): """ Input: { (suffix) - temp file suffix (prefix) - temp file prefix (remove_dir) - if 'yes', remove dir } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 file_name - temp file name } """ xs=i.get('suffix','') xp=i.get('prefix','') s=i.get('string','') import tempfile fd, fn=tempfile.mkstemp(suffix=xs, prefix=xp) os.close(fd) os.remove(fn) if i.get('remove_dir','')=='yes': fn=os.path.basename(fn) return {'return':0, 'file_name':fn}
[ "def", "gen_tmp_file", "(", "i", ")", ":", "xs", "=", "i", ".", "get", "(", "'suffix'", ",", "''", ")", "xp", "=", "i", ".", "get", "(", "'prefix'", ",", "''", ")", "s", "=", "i", ".", "get", "(", "'string'", ",", "''", ")", "import", "tempfi...
Input: { (suffix) - temp file suffix (prefix) - temp file prefix (remove_dir) - if 'yes', remove dir } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 file_name - temp file name }
[ "Input", ":", "{", "(", "suffix", ")", "-", "temp", "file", "suffix", "(", "prefix", ")", "-", "temp", "file", "prefix", "(", "remove_dir", ")", "-", "if", "yes", "remove", "dir", "}" ]
python
train
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L2436-L2503
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False): """Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. Note ---- Categorical features are not supported. bins : int, string or None, optional (default=None) The maximum number of bins. If None, or int and > number of unique split values and ``xgboost_style=True``, the number of bins equals number of unique split values. If string, it should be one from the list of the supported values by ``numpy.histogram()`` function. xgboost_style : bool, optional (default=False) Whether the returned result should be in the same form as it is in XGBoost. If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function. If True, the returned value is matrix, in which the first column is the right edges of non-empty bins and the second one is the histogram values. Returns ------- result_tuple : tuple of 2 numpy arrays If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature and the bin edges. result_array_like : numpy array or pandas DataFrame (if pandas is installed) If ``xgboost_style=True``, the histogram of used splitting values for the specified feature. 
""" def add(root): """Recursively add thresholds.""" if 'split_index' in root: # non-leaf if feature_names is not None and isinstance(feature, string_type): split_feature = feature_names[root['split_feature']] else: split_feature = root['split_feature'] if split_feature == feature: if isinstance(root['threshold'], string_type): raise LightGBMError('Cannot compute split value histogram for the categorical feature') else: values.append(root['threshold']) add(root['left_child']) add(root['right_child']) model = self.dump_model() feature_names = model.get('feature_names') tree_infos = model['tree_info'] values = [] for tree_info in tree_infos: add(tree_info['tree_structure']) if bins is None or isinstance(bins, integer_types) and xgboost_style: n_unique = len(np.unique(values)) bins = max(min(n_unique, bins) if bins is not None else n_unique, 1) hist, bin_edges = np.histogram(values, bins=bins) if xgboost_style: ret = np.column_stack((bin_edges[1:], hist)) ret = ret[ret[:, 1] > 0] if PANDAS_INSTALLED: return DataFrame(ret, columns=['SplitValue', 'Count']) else: return ret else: return hist, bin_edges
[ "def", "get_split_value_histogram", "(", "self", ",", "feature", ",", "bins", "=", "None", ",", "xgboost_style", "=", "False", ")", ":", "def", "add", "(", "root", ")", ":", "\"\"\"Recursively add thresholds.\"\"\"", "if", "'split_index'", "in", "root", ":", "...
Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. Note ---- Categorical features are not supported. bins : int, string or None, optional (default=None) The maximum number of bins. If None, or int and > number of unique split values and ``xgboost_style=True``, the number of bins equals number of unique split values. If string, it should be one from the list of the supported values by ``numpy.histogram()`` function. xgboost_style : bool, optional (default=False) Whether the returned result should be in the same form as it is in XGBoost. If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function. If True, the returned value is matrix, in which the first column is the right edges of non-empty bins and the second one is the histogram values. Returns ------- result_tuple : tuple of 2 numpy arrays If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature and the bin edges. result_array_like : numpy array or pandas DataFrame (if pandas is installed) If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
[ "Get", "split", "value", "histogram", "for", "the", "specified", "feature", "." ]
python
train
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L834-L840
def isComment(self, line, column): """Check if text at given position is a comment. Including block comments and here documents. If language is not known, or text is not parsed yet, ``False`` is returned """ return self._highlighter is not None and \ self._highlighter.isComment(self.document().findBlockByNumber(line), column)
[ "def", "isComment", "(", "self", ",", "line", ",", "column", ")", ":", "return", "self", ".", "_highlighter", "is", "not", "None", "and", "self", ".", "_highlighter", ".", "isComment", "(", "self", ".", "document", "(", ")", ".", "findBlockByNumber", "("...
Check if text at given position is a comment. Including block comments and here documents. If language is not known, or text is not parsed yet, ``False`` is returned
[ "Check", "if", "text", "at", "given", "position", "is", "a", "comment", ".", "Including", "block", "comments", "and", "here", "documents", "." ]
python
train
prompt-toolkit/ptpython
ptpython/python_input.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L555-L582
def _create_application(self): """ Create an `Application` instance. """ return Application( input=self.input, output=self.output, layout=self.ptpython_layout.layout, key_bindings=merge_key_bindings([ load_python_bindings(self), load_auto_suggest_bindings(), load_sidebar_bindings(self), load_confirm_exit_bindings(self), ConditionalKeyBindings( load_open_in_editor_bindings(), Condition(lambda: self.enable_open_in_editor)), # Extra key bindings should not be active when the sidebar is visible. ConditionalKeyBindings( self.extra_key_bindings, Condition(lambda: not self.show_sidebar)) ]), color_depth=lambda: self.color_depth, paste_mode=Condition(lambda: self.paste_mode), mouse_support=Condition(lambda: self.enable_mouse_support), style=DynamicStyle(lambda: self._current_style), style_transformation=self.style_transformation, include_default_pygments_style=False, reverse_vi_search_direction=True)
[ "def", "_create_application", "(", "self", ")", ":", "return", "Application", "(", "input", "=", "self", ".", "input", ",", "output", "=", "self", ".", "output", ",", "layout", "=", "self", ".", "ptpython_layout", ".", "layout", ",", "key_bindings", "=", ...
Create an `Application` instance.
[ "Create", "an", "Application", "instance", "." ]
python
train
vtkiorg/vtki
vtki/grid.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/grid.py#L369-L392
def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)): """ Create VTK image data directly from numpy arrays. A uniform grid is defined by the node spacings for each axis (uniform along each individual axis) and the number of nodes on each axis. These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``). Parameters ---------- dims : tuple(int) Length 3 tuple of ints specifying how many nodes along each axis spacing : tuple(float) Length 3 tuple of floats/ints specifying the node spacings for each axis origin : tuple(float) Length 3 tuple of floats/ints specifying minimum value for each axis """ xn, yn, zn = dims[0], dims[1], dims[2] xs, ys, zs = spacing[0], spacing[1], spacing[2] xo, yo, zo = origin[0], origin[1], origin[2] self.SetDimensions(xn, yn, zn) self.SetOrigin(xo, yo, zo) self.SetSpacing(xs, ys, zs)
[ "def", "_from_specs", "(", "self", ",", "dims", ",", "spacing", "=", "(", "1.0", ",", "1.0", ",", "1.0", ")", ",", "origin", "=", "(", "0.0", ",", "0.0", ",", "0.0", ")", ")", ":", "xn", ",", "yn", ",", "zn", "=", "dims", "[", "0", "]", ","...
Create VTK image data directly from numpy arrays. A uniform grid is defined by the node spacings for each axis (uniform along each individual axis) and the number of nodes on each axis. These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``). Parameters ---------- dims : tuple(int) Length 3 tuple of ints specifying how many nodes along each axis spacing : tuple(float) Length 3 tuple of floats/ints specifying the node spacings for each axis origin : tuple(float) Length 3 tuple of floats/ints specifying minimum value for each axis
[ "Create", "VTK", "image", "data", "directly", "from", "numpy", "arrays", ".", "A", "uniform", "grid", "is", "defined", "by", "the", "node", "spacings", "for", "each", "axis", "(", "uniform", "along", "each", "individual", "axis", ")", "and", "the", "number...
python
train
samastur/Impostor
impostor/backend.py
https://github.com/samastur/Impostor/blob/1a9b1cf1568d5d657b069af5fdf882f2d9bfefce/impostor/backend.py#L15-L30
def find_request(): ''' Inspect running environment for request object. There should be one, but don't rely on it. ''' frame = inspect.currentframe() request = None f = frame while not request and f: if 'request' in f.f_locals and isinstance(f.f_locals['request'], HttpRequest): request = f.f_locals['request'] f = f.f_back del frame return request
[ "def", "find_request", "(", ")", ":", "frame", "=", "inspect", ".", "currentframe", "(", ")", "request", "=", "None", "f", "=", "frame", "while", "not", "request", "and", "f", ":", "if", "'request'", "in", "f", ".", "f_locals", "and", "isinstance", "("...
Inspect running environment for request object. There should be one, but don't rely on it.
[ "Inspect", "running", "environment", "for", "request", "object", ".", "There", "should", "be", "one", "but", "don", "t", "rely", "on", "it", "." ]
python
train
FNNDSC/med2image
med2image/error.py
https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/error.py#L31-L73
def report( callingClass, astr_key, ab_exitToOs=1, astr_header="" ): ''' Error handling. Based on the <astr_key>, error information is extracted from _dictErr and sent to log object. If <ab_exitToOs> is False, error is considered non-fatal and processing can continue, otherwise processing terminates. ''' log = callingClass.log() b_syslog = log.syslog() log.syslog(False) if ab_exitToOs: log( Colors.RED + "\n:: FATAL ERROR :: " + Colors.NO_COLOUR ) else: log( Colors.YELLOW + "\n:: WARNING :: " + Colors.NO_COLOUR ) if len(astr_header): log( Colors.BROWN + astr_header + Colors.NO_COLOUR ) log( "\n" ) log( "\tSorry, some error seems to have occurred in:\n\t<" ) log( Colors.LIGHT_GREEN + ("%s" % callingClass.name()) + Colors.NO_COLOUR + "::") log( Colors.LIGHT_CYAN + ("%s" % inspect.stack()[2][4][0].strip()) + Colors.NO_COLOUR) log( "> called by <") try: caller = inspect.stack()[3][4][0].strip() except: caller = '__main__' log( Colors.LIGHT_GREEN + ("%s" % callingClass.name()) + Colors.NO_COLOUR + "::") log( Colors.LIGHT_CYAN + ("%s" % caller) + Colors.NO_COLOUR) log( ">\n") log( "\tWhile %s\n" % callingClass._dictErr[astr_key]['action'] ) log( "\t%s\n" % callingClass._dictErr[astr_key]['error'] ) log( "\n" ) if ab_exitToOs: log( "Returning to system with error code %d\n" % \ callingClass._dictErr[astr_key]['exitCode'] ) sys.exit( callingClass._dictErr[astr_key]['exitCode'] ) log.syslog(b_syslog) return callingClass._dictErr[astr_key]['exitCode']
[ "def", "report", "(", "callingClass", ",", "astr_key", ",", "ab_exitToOs", "=", "1", ",", "astr_header", "=", "\"\"", ")", ":", "log", "=", "callingClass", ".", "log", "(", ")", "b_syslog", "=", "log", ".", "syslog", "(", ")", "log", ".", "syslog", "...
Error handling. Based on the <astr_key>, error information is extracted from _dictErr and sent to log object. If <ab_exitToOs> is False, error is considered non-fatal and processing can continue, otherwise processing terminates.
[ "Error", "handling", "." ]
python
train
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L258-L328
def separate_particles_into_groups(s, region_size=40, bounds=None, doshift=False): """ Separates particles into convenient groups for optimization. Given a state, returns a list of groups of particles. Each group of particles are located near each other in the image. Every particle located in the desired region is contained in exactly 1 group. Parameters ---------- s : :class:`peri.states.ImageState` The peri state to find particles in. region_size : Int or 3-element list-like of ints, optional The size of the box. Groups particles into boxes of shape (region_size[0], region_size[1], region_size[2]). If region_size is a scalar, the box is a cube of length region_size. Default is 40. bounds : 2-element list-like of 3-element lists, optional The sub-region of the image over which to look for particles. bounds[0]: The lower-left corner of the image region. bounds[1]: The upper-right corner of the image region. Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire image size, i.e. the default places every particle in the image somewhere in the groups. doshift : {True, False, `'rand'`}, optional Whether or not to shift the tile boxes by half a region size, to prevent the same particles to be chosen every time. If `'rand'`, randomly chooses either True or False. Default is False Returns ------- particle_groups : List Each element of particle_groups is an int numpy.ndarray of the group of nearby particles. Only contains groups with a nonzero number of particles, so the elements don't necessarily correspond to a given image region. 
""" imtile = s.oshape.translate(-s.pad) bounding_tile = (imtile if bounds is None else Tile(bounds[0], bounds[1])) rs = (np.ones(bounding_tile.dim, dtype='int')*region_size if np.size(region_size) == 1 else np.array(region_size)) n_translate = np.ceil(bounding_tile.shape.astype('float')/rs).astype('int') particle_groups = [] tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs) if doshift == 'rand': doshift = np.random.choice([True, False]) if doshift: shift = rs // 2 n_translate += 1 else: shift = 0 deltas = np.meshgrid(*[np.arange(i) for i in n_translate]) positions = s.obj_get_positions() if bounds is None: # FIXME this (deliberately) masks a problem where optimization # places particles outside the image. However, it ensures that # all particles are in at least one group when `bounds is None`, # which is the use case within opt. The 1e-3 is to ensure that # they are inside the box and not on the edge. positions = np.clip(positions, imtile.l+1e-3, imtile.r-1e-3) groups = list(map(lambda *args: find_particles_in_tile(positions, tile.translate( np.array(args) * rs - shift)), *[d.ravel() for d in deltas])) for i in range(len(groups)-1, -1, -1): if groups[i].size == 0: groups.pop(i) assert _check_groups(s, groups) return groups
[ "def", "separate_particles_into_groups", "(", "s", ",", "region_size", "=", "40", ",", "bounds", "=", "None", ",", "doshift", "=", "False", ")", ":", "imtile", "=", "s", ".", "oshape", ".", "translate", "(", "-", "s", ".", "pad", ")", "bounding_tile", ...
Separates particles into convenient groups for optimization. Given a state, returns a list of groups of particles. Each group of particles are located near each other in the image. Every particle located in the desired region is contained in exactly 1 group. Parameters ---------- s : :class:`peri.states.ImageState` The peri state to find particles in. region_size : Int or 3-element list-like of ints, optional The size of the box. Groups particles into boxes of shape (region_size[0], region_size[1], region_size[2]). If region_size is a scalar, the box is a cube of length region_size. Default is 40. bounds : 2-element list-like of 3-element lists, optional The sub-region of the image over which to look for particles. bounds[0]: The lower-left corner of the image region. bounds[1]: The upper-right corner of the image region. Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire image size, i.e. the default places every particle in the image somewhere in the groups. doshift : {True, False, `'rand'`}, optional Whether or not to shift the tile boxes by half a region size, to prevent the same particles to be chosen every time. If `'rand'`, randomly chooses either True or False. Default is False Returns ------- particle_groups : List Each element of particle_groups is an int numpy.ndarray of the group of nearby particles. Only contains groups with a nonzero number of particles, so the elements don't necessarily correspond to a given image region.
[ "Separates", "particles", "into", "convenient", "groups", "for", "optimization", "." ]
python
valid
tBuLi/symfit
symfit/core/support.py
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L190-L200
def variables(names, **kwargs): """ Convenience function for the creation of multiple variables. For more control, consider using ``symbols(names, cls=Variable, **kwargs)`` directly. :param names: string of variable names. Example: x, y = variables('x, y') :param kwargs: kwargs to be passed onto :func:`sympy.core.symbol.symbols` :return: iterable of :class:`symfit.core.argument.Variable` objects """ return symbols(names, cls=Variable, seq=True, **kwargs)
[ "def", "variables", "(", "names", ",", "*", "*", "kwargs", ")", ":", "return", "symbols", "(", "names", ",", "cls", "=", "Variable", ",", "seq", "=", "True", ",", "*", "*", "kwargs", ")" ]
Convenience function for the creation of multiple variables. For more control, consider using ``symbols(names, cls=Variable, **kwargs)`` directly. :param names: string of variable names. Example: x, y = variables('x, y') :param kwargs: kwargs to be passed onto :func:`sympy.core.symbol.symbols` :return: iterable of :class:`symfit.core.argument.Variable` objects
[ "Convenience", "function", "for", "the", "creation", "of", "multiple", "variables", ".", "For", "more", "control", "consider", "using", "symbols", "(", "names", "cls", "=", "Variable", "**", "kwargs", ")", "directly", "." ]
python
train
aio-libs/aiohttp
aiohttp/multipart.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/multipart.py#L293-L312
async def read_chunk(self, size: int=chunk_size) -> bytes: """Reads body part content chunk of the specified size. size: chunk size """ if self._at_eof: return b'' if self._length: chunk = await self._read_chunk_from_length(size) else: chunk = await self._read_chunk_from_stream(size) self._read_bytes += len(chunk) if self._read_bytes == self._length: self._at_eof = True if self._at_eof: newline = await self._content.readline() assert newline == self._newline, \ 'reader did not read all the data or it is malformed' return chunk
[ "async", "def", "read_chunk", "(", "self", ",", "size", ":", "int", "=", "chunk_size", ")", "->", "bytes", ":", "if", "self", ".", "_at_eof", ":", "return", "b''", "if", "self", ".", "_length", ":", "chunk", "=", "await", "self", ".", "_read_chunk_from...
Reads body part content chunk of the specified size. size: chunk size
[ "Reads", "body", "part", "content", "chunk", "of", "the", "specified", "size", "." ]
python
train
UCL-INGI/INGInious
inginious/agent/docker_agent/_docker_interface.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/agent/docker_agent/_docker_interface.py#L213-L222
def close_socket(self): """ Correctly closes the socket :return: """ try: self.docker_py_sock._sock.close() # pylint: disable=protected-access except AttributeError: pass self.docker_py_sock.close()
[ "def", "close_socket", "(", "self", ")", ":", "try", ":", "self", ".", "docker_py_sock", ".", "_sock", ".", "close", "(", ")", "# pylint: disable=protected-access", "except", "AttributeError", ":", "pass", "self", ".", "docker_py_sock", ".", "close", "(", ")" ...
Correctly closes the socket :return:
[ "Correctly", "closes", "the", "socket", ":", "return", ":" ]
python
train
bretth/woven
woven/environment.py
https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/environment.py#L76-L150
def _parse_project_version(version=''): """ Returns the significant part of the version excluding the build The final forms returned can be major.minor major.minor stage (spaces will be replaced with '-') major.minor.stage major.minor-stage major.minorstage (eg 1.0rc1) major.minor.maintenance major.minor.maintenance-stage major.minor.maintenancestage Anything beyond the maintenance or stage whichever is last is ignored """ def mm_version(vers): stage = '' stage_sep = '' finalvers = '' if not vers.isdigit(): for num,char in enumerate(vers): if char.isdigit(): finalvers += str(char) elif char.isalpha(): stage = vers[num:] break elif char in [' ','-']: #sep #We will strip spaces to avoid needing to 'quote' paths stage_sep = '-' stage = vers[num+1:] break else: finalvers = vers #remove any final build numbers if ' ' in stage: stage = stage.split(' ')[0] elif '-' in stage: stage = stage.split('-')[0] return (finalvers,stage,stage_sep) v = version.split('.') if len(v)==1: return v[0] major = v[0] minor = v[1] maint = '' stage = '' if len(v)>2 and v[2]<>'0': #(1.0.0 == 1.0) maint = v[2] if len(v)>3 and v[3][0].isalpha(): stage = v[3] project_version = '.'.join([major,minor,maint,stage]) else: #Detect stage in minor minor,stage_minor,stage_minor_sep = mm_version(minor) if maint: #may be maint = '' maint, stage_maint, stage_maint_sep = mm_version(maint) else: stage_maint = ''; stage_maint_sep = '' if stage_minor: stage = stage_minor stage_sep = stage_minor_sep elif stage_maint: stage = stage_maint stage_sep = stage_maint_sep finalvers = [major,minor] if maint: finalvers.append(maint) finalvers = '.'.join(finalvers) if stage: finalvers = stage_sep.join([finalvers,stage]) project_version = finalvers return project_version
[ "def", "_parse_project_version", "(", "version", "=", "''", ")", ":", "def", "mm_version", "(", "vers", ")", ":", "stage", "=", "''", "stage_sep", "=", "''", "finalvers", "=", "''", "if", "not", "vers", ".", "isdigit", "(", ")", ":", "for", "num", ",...
Returns the significant part of the version excluding the build The final forms returned can be major.minor major.minor stage (spaces will be replaced with '-') major.minor.stage major.minor-stage major.minorstage (eg 1.0rc1) major.minor.maintenance major.minor.maintenance-stage major.minor.maintenancestage Anything beyond the maintenance or stage whichever is last is ignored
[ "Returns", "the", "significant", "part", "of", "the", "version", "excluding", "the", "build", "The", "final", "forms", "returned", "can", "be", "major", ".", "minor", "major", ".", "minor", "stage", "(", "spaces", "will", "be", "replaced", "with", "-", ")"...
python
train
blazelibs/blazeutils
blazeutils/spreadsheets.py
https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/spreadsheets.py#L63-L72
def workbook_to_reader(xlwt_wb): """ convert xlwt Workbook instance to an xlrd instance for reading """ _xlrd_required() fh = BytesIO() xlwt_wb.save(fh) # prep for reading fh.seek(0) return xlrd.open_workbook(file_contents=fh.read())
[ "def", "workbook_to_reader", "(", "xlwt_wb", ")", ":", "_xlrd_required", "(", ")", "fh", "=", "BytesIO", "(", ")", "xlwt_wb", ".", "save", "(", "fh", ")", "# prep for reading", "fh", ".", "seek", "(", "0", ")", "return", "xlrd", ".", "open_workbook", "("...
convert xlwt Workbook instance to an xlrd instance for reading
[ "convert", "xlwt", "Workbook", "instance", "to", "an", "xlrd", "instance", "for", "reading" ]
python
train
sony/nnabla
python/src/nnabla/parametric_functions.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1732-L1755
def embed(inp, n_inputs, n_features, initializer=None, fix_parameters=False, apply_w=None): """ Embed. Embed slices a matrix/tensor with indexing array/tensor. Weights are initialized with :obj:`nnabla.initializer.UniformInitializer` within the range of :math:`-\\sqrt{3}` and :math:`\\sqrt{3}`. Args: x(~nnabla.Variable): [Integer] Indices with shape :math:`(I_0, ..., I_N)` n_inputs : number of possible inputs, words or vocabraries n_features : number of embedding features fix_parameters (bool): When set to `True`, the embedding weight matrix will not be updated. apply_w (function): Lambda, function, or callable object applied to the weights. Returns: ~nnabla.Variable: Output with shape :math:`(I_0, ..., I_N, W_1, ..., W_M)` """ if not initializer: initializer = UniformInitializer((-np.sqrt(3.), np.sqrt(3))) w = get_parameter_or_create("W", [n_inputs, n_features], initializer, True, not fix_parameters) if apply_w is not None: w = apply_w(w) return F.embed(inp, w)
[ "def", "embed", "(", "inp", ",", "n_inputs", ",", "n_features", ",", "initializer", "=", "None", ",", "fix_parameters", "=", "False", ",", "apply_w", "=", "None", ")", ":", "if", "not", "initializer", ":", "initializer", "=", "UniformInitializer", "(", "("...
Embed. Embed slices a matrix/tensor with indexing array/tensor. Weights are initialized with :obj:`nnabla.initializer.UniformInitializer` within the range of :math:`-\\sqrt{3}` and :math:`\\sqrt{3}`. Args: x(~nnabla.Variable): [Integer] Indices with shape :math:`(I_0, ..., I_N)` n_inputs : number of possible inputs, words or vocabraries n_features : number of embedding features fix_parameters (bool): When set to `True`, the embedding weight matrix will not be updated. apply_w (function): Lambda, function, or callable object applied to the weights. Returns: ~nnabla.Variable: Output with shape :math:`(I_0, ..., I_N, W_1, ..., W_M)`
[ "Embed", "." ]
python
train
pyblish/pyblish-qml
pyblish_qml/host.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/host.py#L19-L43
def register_dispatch_wrapper(wrapper): """Register a dispatch wrapper for servers The wrapper must have this exact signature: (func, *args, **kwargs) """ signature = inspect.getargspec(wrapper) if any([len(signature.args) != 1, signature.varargs is None, signature.keywords is None]): raise TypeError("Wrapper signature mismatch") def _wrapper(func, *args, **kwargs): """Exception handling""" try: return wrapper(func, *args, **kwargs) except Exception as e: # Kill subprocess _state["currentServer"].stop() traceback.print_exc() raise e _state["dispatchWrapper"] = _wrapper
[ "def", "register_dispatch_wrapper", "(", "wrapper", ")", ":", "signature", "=", "inspect", ".", "getargspec", "(", "wrapper", ")", "if", "any", "(", "[", "len", "(", "signature", ".", "args", ")", "!=", "1", ",", "signature", ".", "varargs", "is", "None"...
Register a dispatch wrapper for servers The wrapper must have this exact signature: (func, *args, **kwargs)
[ "Register", "a", "dispatch", "wrapper", "for", "servers" ]
python
train
thomasvandoren/bugzscout-py
bugzscout/ext/cli.py
https://github.com/thomasvandoren/bugzscout-py/blob/514528e958a97e0e7b36870037c5c69661511824/bugzscout/ext/cli.py#L63-L94
def _parse_args(): """Parse and return command line arguments.""" parser = argparse.ArgumentParser( description=__doc__, formatter_class=_CliFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose output.') fb_group = parser.add_argument_group('FogBugz arguments') fb_group.add_argument( '-u', '--url', help=( 'URL for bugzscout requests to be sent. Should be something ' 'like .../scoutSubmit.asp.')) fb_group.add_argument( '--user', help='User to designate when submitting via bugzscout.') fb_group.add_argument( '--project', help='Fogbugz project to file cases under.') fb_group.add_argument( '--area', help='Fogbugz area to file cases under.') error_group = parser.add_argument_group('error arguments') error_group.add_argument('-e', '--extra', help='Extra data to send with error.') error_group.add_argument('--default-message', help='Set default message if case is new.') error_group.add_argument('description', help=('Description of error. Will be matched ' 'against existing cases.')) parser.set_defaults(**_defaults()) return parser.parse_args()
[ "def", "_parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "__doc__", ",", "formatter_class", "=", "_CliFormatter", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "action", "=", ...
Parse and return command line arguments.
[ "Parse", "and", "return", "command", "line", "arguments", "." ]
python
train