repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
DLR-RM/RAFCON
source/rafcon/gui/views/graphical_editor.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/views/graphical_editor.py#L250-L281
def _configure(self, *args):
    """Set up the viewport after a resize or other redraw trigger.

    Configures an orthogonal projection covering the widget's full
    allocation so that all elements are shown.
    """
    # Grab the OpenGL drawable and rendering context for this widget.
    drawable = self.get_gl_drawable()
    context = self.get_gl_context()
    # logger.debug("configure")

    # Bail out unless an OpenGL drawing section can be started.
    if not drawable or not drawable.gl_begin(context):
        return False

    # Use the whole widget area as the viewport.
    allocation = self.get_allocation()
    glViewport(0, 0, allocation.width, allocation.height)

    # Reset the projection matrix and apply an aspect-correct
    # orthogonal view.
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    self._apply_orthogonal_view()

    # Reset the modelview matrix as well.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()

    # Close the OpenGL drawing section.
    drawable.gl_end()
    return False
[ "def", "_configure", "(", "self", ",", "*", "args", ")", ":", "# Obtain a reference to the OpenGL drawable", "# and rendering context.", "gldrawable", "=", "self", ".", "get_gl_drawable", "(", ")", "glcontext", "=", "self", ".", "get_gl_context", "(", ")", "# logger...
Configure viewport This method is called when the widget is resized or something triggers a redraw. The method configures the view to show all elements in an orthogonal perspective.
[ "Configure", "viewport" ]
python
train
defunkt/pystache
setup.py
https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/setup.py#L248-L279
def make_long_description():
    """
    Generate the reST long_description for setup() from source files.

    Returns the generated long_description as a unicode string.
    """
    readme_path = README_PATH

    # PyPI does not allow HTML comments, so strip them from the Markdown
    # sources first (see the setup.py docstring for background).
    readme_md = strip_html_comments(read(readme_path))
    history_md = strip_html_comments(read(HISTORY_PATH))
    license_md = """\
License
=======

""" + read(LICENSE_PATH)

    md_description = '\n\n'.join([readme_md, history_md, license_md])

    # Write the combined Markdown to a temp path so the converter can
    # read it back as a file.
    md_ext = os.path.splitext(readme_path)[1]
    md_description_path = make_temp_path(RST_DESCRIPTION_PATH, new_ext=md_ext)
    write(md_description, md_description_path)

    rst_temp_path = make_temp_path(RST_DESCRIPTION_PATH)
    long_description = convert_md_to_rst(md_path=md_description_path,
                                         rst_temp_path=rst_temp_path)

    return "\n".join([RST_LONG_DESCRIPTION_INTRO, long_description])
[ "def", "make_long_description", "(", ")", ":", "readme_path", "=", "README_PATH", "# Remove our HTML comments because PyPI does not allow it.", "# See the setup.py docstring for more info on this.", "readme_md", "=", "strip_html_comments", "(", "read", "(", "readme_path", ")", ")...
Generate the reST long_description for setup() from source files. Returns the generated long_description as a unicode string.
[ "Generate", "the", "reST", "long_description", "for", "setup", "()", "from", "source", "files", "." ]
python
train
lsst-sqre/lsst-projectmeta-kit
lsstprojectmeta/jsonld.py
https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/jsonld.py#L34-L44
def default(self, obj):
    """Encode values as JSON strings.

    This method overrides the default implementation from
    `json.JSONEncoder`, adding datetime support.
    """
    if not isinstance(obj, datetime.datetime):
        # Defer everything else to the stock encoder (which raises
        # TypeError for unserializable values).
        return json.JSONEncoder.default(self, obj)
    return self._encode_datetime(obj)
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "self", ".", "_encode_datetime", "(", "obj", ")", "# Fallback to the default encoding", "return", "json", ".", "JSONEnc...
Encode values as JSON strings. This method overrides the default implementation from `json.JSONEncoder`.
[ "Encode", "values", "as", "JSON", "strings", "." ]
python
valid
LionelAuroux/pyrser
pyrser/type_system/scope.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/scope.py#L206-L210
def intersection(self, sig: Scope) -> Scope:
    """ Create a new Set produce by the intersection of 2 Set """
    # Copy our signatures into a fresh Scope, then intersect in place.
    result = Scope(sig=self._hsig.values(), state=self.state)
    result &= sig
    return result
[ "def", "intersection", "(", "self", ",", "sig", ":", "Scope", ")", "->", "Scope", ":", "new", "=", "Scope", "(", "sig", "=", "self", ".", "_hsig", ".", "values", "(", ")", ",", "state", "=", "self", ".", "state", ")", "new", "&=", "sig", "return"...
Create a new Set produce by the intersection of 2 Set
[ "Create", "a", "new", "Set", "produce", "by", "the", "intersection", "of", "2", "Set" ]
python
test
spyder-ide/spyder
spyder/plugins/console/widgets/shell.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/widgets/shell.py#L790-L797
def _key_period(self, text):
    """Action for '.'"""
    self.insert_text(text)
    if not self.codecompletion_auto:
        return
    # Trigger auto-completion only when the last token is not a number,
    # so typing the decimal point of a float does not pop it up.
    last_obj = self.get_last_obj()
    if last_obj and not last_obj.isdigit():
        self.show_code_completion()
[ "def", "_key_period", "(", "self", ",", "text", ")", ":", "self", ".", "insert_text", "(", "text", ")", "if", "self", ".", "codecompletion_auto", ":", "# Enable auto-completion only if last token isn't a float\r", "last_obj", "=", "self", ".", "get_last_obj", "(", ...
Action for '.'
[ "Action", "for", "." ]
python
train
Kozea/pygal
pygal/graph/public.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/public.py#L150-L169
def render_sparkline(self, **kwargs):
    """Render a sparkline"""
    # Compact defaults: a fixed 200x50 box with no dots, legend,
    # labels or javascript.
    options = {
        'width': 200,
        'height': 50,
        'show_dots': False,
        'show_legend': False,
        'show_x_labels': False,
        'show_y_labels': False,
        'spacing': 0,
        'margin': 5,
        'min_scale': 1,
        'max_scale': 2,
        'explicit_size': True,
        'no_data_text': '',
        'js': (),
        'classes': (_ellipsis, 'pygal-sparkline'),
    }
    # Caller-supplied options win over the sparkline defaults.
    options.update(kwargs)
    return self.render(**options)
[ "def", "render_sparkline", "(", "self", ",", "*", "*", "kwargs", ")", ":", "spark_options", "=", "dict", "(", "width", "=", "200", ",", "height", "=", "50", ",", "show_dots", "=", "False", ",", "show_legend", "=", "False", ",", "show_x_labels", "=", "F...
Render a sparkline
[ "Render", "a", "sparkline" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L1251-L1269
def insertIndividual(self, individual):
    """
    Inserts the specified individual into this repository.
    """
    parent = individual.getParentContainer()
    try:
        models.Individual.create(
            id=individual.getId(),
            datasetId=parent.getId(),
            name=individual.getLocalId(),
            description=individual.getDescription(),
            created=individual.getCreated(),
            updated=individual.getUpdated(),
            species=json.dumps(individual.getSpecies()),
            sex=json.dumps(individual.getSex()),
            attributes=json.dumps(individual.getAttributes()))
    except Exception:
        # NOTE(review): any insert failure is reported as a duplicate
        # name, which can mask unrelated database errors — consider
        # catching the specific integrity error instead.
        raise exceptions.DuplicateNameException(
            individual.getLocalId(), parent.getLocalId())
[ "def", "insertIndividual", "(", "self", ",", "individual", ")", ":", "try", ":", "models", ".", "Individual", ".", "create", "(", "id", "=", "individual", ".", "getId", "(", ")", ",", "datasetId", "=", "individual", ".", "getParentContainer", "(", ")", "...
Inserts the specified individual into this repository.
[ "Inserts", "the", "specified", "individual", "into", "this", "repository", "." ]
python
train
gmr/tredis
tredis/client.py
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/client.py#L510-L517
def _on_closed(self):
    """Invoked by connections when they are closed."""
    self._connected.clear()
    if self._closing:
        # Expected shutdown; nothing to report.
        return
    # Unexpected close: notify the registered callback, or raise when
    # nobody is listening.
    if self._on_close_callback:
        self._on_close_callback()
    else:
        raise exceptions.ConnectionError('closed')
[ "def", "_on_closed", "(", "self", ")", ":", "self", ".", "_connected", ".", "clear", "(", ")", "if", "not", "self", ".", "_closing", ":", "if", "self", ".", "_on_close_callback", ":", "self", ".", "_on_close_callback", "(", ")", "else", ":", "raise", "...
Invoked by connections when they are closed.
[ "Invoked", "by", "connections", "when", "they", "are", "closed", "." ]
python
train
Erotemic/utool
utool/util_dict.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L553-L560
def iter_all_dict_combinations_ordered(varied_dict):
    """
    Same as all_dict_combinations but preserves order

    Yields an OrderedDict for every combination of one value per key,
    with keys kept in the iteration order of ``varied_dict``.
    """
    # Per key, the list of (key, value) pairs it can contribute.
    # dict.items() replaces the former six.iteritems call; on Python 3
    # it preserves insertion order, so behavior is unchanged.
    tups_list = [[(key, val) for val in val_list]
                 for key, val_list in varied_dict.items()]
    # The cartesian product over the per-key pair lists enumerates every
    # combination; wrap each in an OrderedDict to keep the key order.
    dict_iter = (OrderedDict(tups) for tups in it.product(*tups_list))
    return dict_iter
[ "def", "iter_all_dict_combinations_ordered", "(", "varied_dict", ")", ":", "tups_list", "=", "[", "[", "(", "key", ",", "val", ")", "for", "val", "in", "val_list", "]", "for", "(", "key", ",", "val_list", ")", "in", "six", ".", "iteritems", "(", "varied_...
Same as all_dict_combinations but preserves order
[ "Same", "as", "all_dict_combinations", "but", "preserves", "order" ]
python
train
PaulHancock/Aegean
AegeanTools/cluster.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/cluster.py#L138-L206
def regroup_vectorized(srccat, eps, far=None, dist=norm_dist):
    """
    Regroup the islands of a catalog according to their normalised distance.

    Assumes srccat is recarray-like for efficiency.
    Return a list of island groups.

    Parameters
    ----------
    srccat : np.rec.arry or pd.DataFrame
        Should have the following fields[units]:
        ra[deg], dec[deg], a[arcsec], b[arcsec], pa[deg], peak_flux[any]
    eps : float
        maximum normalised distance within which sources are considered
        to be grouped
    far : float
        (degrees) sources that are further than this distance apart will
        not be grouped, and will not be tested. Default = 0.5.
    dist : func
        a function that calculates the distance between a source and each
        element of an array of sources.
        Default = :func:`AegeanTools.cluster.norm_dist`

    Returns
    -------
    islands : list of lists
        Each island contains integer indices for members from srccat
        (in descending dec order).
    """
    if far is None:
        far = 0.5  # 10*max(a.a/3600 for a in srccat)

    # Most negative declination first.
    # XXX: kind='mergesort' ensures stable sorting for determinism.
    # Do we need this?
    order = np.argsort(srccat.dec, kind='mergesort')[::-1]

    # TODO: is it better to store groups as arrays even if appends are
    # more costly?
    groups = [[order[0]]]
    for idx in order[1:]:
        rec = srccat[idx]
        # TODO: Find out if groups are big enough for this to give us a
        # speed gain. If not, get distance to all entries in groups above
        # decmin simultaneously.
        decmin = rec.dec - far
        for group in reversed(groups):
            # Groups are dec-ordered: once an island's largest (last)
            # declination drops below decmin, no remaining island can
            # match, so idx starts a new group.
            if srccat.dec[group[-1]] < decmin:
                groups.append([idx])
                break
            # Candidate members must also be close in RA (scaled by dec).
            rafar = far / np.cos(np.radians(rec.dec))
            group_recs = np.take(srccat, group, mode='clip')
            group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]
            if len(group_recs) and dist(rec, group_recs).min() < eps:
                group.append(idx)
                break
        else:
            # No existing group matched: start a new one.
            groups.append([idx])
    # TODO?: a more numpy-like interface would return only an array
    # providing the mapping:
    #    group_idx = np.empty(len(srccat), dtype=int)
    #    for i, group in enumerate(groups):
    #        group_idx[group] = i
    #    return group_idx
    return groups
[ "def", "regroup_vectorized", "(", "srccat", ",", "eps", ",", "far", "=", "None", ",", "dist", "=", "norm_dist", ")", ":", "if", "far", "is", "None", ":", "far", "=", "0.5", "# 10*max(a.a/3600 for a in srccat)", "# most negative declination first", "# XXX: kind='me...
Regroup the islands of a catalog according to their normalised distance. Assumes srccat is recarray-like for efficiency. Return a list of island groups. Parameters ---------- srccat : np.rec.arry or pd.DataFrame Should have the following fields[units]: ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any] eps : float maximum normalised distance within which sources are considered to be grouped far : float (degrees) sources that are further than this distance apart will not be grouped, and will not be tested. Default = 0.5. dist : func a function that calculates the distance between a source and each element of an array of sources. Default = :func:`AegeanTools.cluster.norm_dist` Returns ------- islands : list of lists Each island contains integer indices for members from srccat (in descending dec order).
[ "Regroup", "the", "islands", "of", "a", "catalog", "according", "to", "their", "normalised", "distance", "." ]
python
train
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/crypto/encryption.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/crypto/encryption.py#L135-L151
def decrypt(algorithm, key, encrypted_data, associated_data):
    """Decrypts a frame body.

    :param algorithm: Algorithm used to encrypt this body
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param bytes key: Plaintext data key
    :param encrypted_data: EncryptedData containing body data
    :type encrypted_data: :class:`aws_encryption_sdk.internal.structures.EncryptedData`,
        :class:`aws_encryption_sdk.internal.structures.FrameBody`,
        or :class:`aws_encryption_sdk.internal.structures.MessageNoFrameBody`
    :param bytes associated_data: AAD string generated for body
    :type associated_data: bytes
    :returns: Plaintext of body
    :rtype: bytes
    """
    # finalize() verifies the authentication tag; it raises if the
    # ciphertext or AAD was tampered with.
    decryptor = Decryptor(
        algorithm, key, associated_data, encrypted_data.iv, encrypted_data.tag)
    plaintext = decryptor.update(encrypted_data.ciphertext)
    return plaintext + decryptor.finalize()
[ "def", "decrypt", "(", "algorithm", ",", "key", ",", "encrypted_data", ",", "associated_data", ")", ":", "decryptor", "=", "Decryptor", "(", "algorithm", ",", "key", ",", "associated_data", ",", "encrypted_data", ".", "iv", ",", "encrypted_data", ".", "tag", ...
Decrypts a frame body. :param algorithm: Algorithm used to encrypt this body :type algorithm: aws_encryption_sdk.identifiers.Algorithm :param bytes key: Plaintext data key :param encrypted_data: EncryptedData containing body data :type encrypted_data: :class:`aws_encryption_sdk.internal.structures.EncryptedData`, :class:`aws_encryption_sdk.internal.structures.FrameBody`, or :class:`aws_encryption_sdk.internal.structures.MessageNoFrameBody` :param bytes associated_data: AAD string generated for body :type associated_data: bytes :returns: Plaintext of body :rtype: bytes
[ "Decrypts", "a", "frame", "body", "." ]
python
train
AtteqCom/zsl
src/zsl/resource/json_server_resource.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/resource/json_server_resource.py#L188-L231
def read(self, params, args, data):
    """Modifies the parameters and adds metadata for read results."""
    result_count = None
    result_links = None

    params = [] if params is None else params
    args = args.copy() if args else {}

    ctx = self._create_context(params, args, data)
    row_id = ctx.get_row_id()

    # Collection reads (no row id) get pagination handling.
    if not row_id:
        self._transform_list_args(args)

        # Paged or limited requests need the total collection size.
        if 'page' in args or 'limit' in args:
            ctx = self._create_context(params, args, data)
            result_count = self._get_collection_count(ctx)

        if 'page' in args:
            result_links = _get_link_pages(
                page=args['page'],
                per_page=int(args['limit']),
                count=result_count,
                page_url=request.url
            )

        if 'limit' not in args:
            args['limit'] = 'unlimited'

    self._create_related(args)

    try:
        return ResourceResult(
            body=super(JsonServerResource, self).read(params, args, data),
            count=result_count,
            links=result_links
        )
    except NoResultFound:
        return NOT_FOUND
[ "def", "read", "(", "self", ",", "params", ",", "args", ",", "data", ")", ":", "result_count", "=", "None", "result_links", "=", "None", "if", "params", "is", "None", ":", "params", "=", "[", "]", "if", "args", ":", "args", "=", "args", ".", "copy"...
Modifies the parameters and adds metadata for read results.
[ "Modifies", "the", "parameters", "and", "adds", "metadata", "for", "read", "results", "." ]
python
train
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L2817-L2823
def update(self, key, item):
    """
    Update item into hash table with specified key and item. If key is
    already present, destroys old item and inserts new one. Use free_fn
    method to ensure deallocator is properly called on item.
    """
    # Thin ctypes wrapper around czmq's zhash_update.
    return lib.zhash_update(self._as_parameter_, key, item)
[ "def", "update", "(", "self", ",", "key", ",", "item", ")", ":", "return", "lib", ".", "zhash_update", "(", "self", ".", "_as_parameter_", ",", "key", ",", "item", ")" ]
Update item into hash table with specified key and item. If key is already present, destroys old item and inserts new one. Use free_fn method to ensure deallocator is properly called on item.
[ "Update", "item", "into", "hash", "table", "with", "specified", "key", "and", "item", ".", "If", "key", "is", "already", "present", "destroys", "old", "item", "and", "inserts", "new", "one", ".", "Use", "free_fn", "method", "to", "ensure", "deallocator", "...
python
train
authomatic/authomatic
authomatic/core.py
https://github.com/authomatic/authomatic/blob/90a9ce60cc405ae8a2bf5c3713acd5d78579a04e/authomatic/core.py#L879-L928
def deserialize(cls, config, credentials):
    """
    A *class method* which reconstructs credentials created by
    :meth:`serialize`. You can also pass it a :class:`.Credentials`
    instance.

    :param dict config:
        The same :doc:`config` used in the :func:`.login`
        to get the credentials.
    :param str credentials:
        :class:`string` The serialized credentials or
        :class:`.Credentials` instance.

    :returns:
        :class:`.Credentials`
    """
    # Accept both serialized and normal.
    if isinstance(credentials, Credentials):
        return credentials

    decoded = parse.unquote(credentials)
    split = decoded.split('\n')

    # We need the provider ID to move forward.
    # BUG FIX: str.split never yields None, so the old "is None" test
    # could never fire; check for an empty field instead.
    if not split[0]:
        raise CredentialsError(
            'To deserialize credentials you need to specify a unique '
            'integer under the "id" key in the config for each provider!')

    provider_id = int(split[0])

    # Get provider config by short name.
    provider_name = id_to_name(config, provider_id)
    cfg = config.get(provider_name)

    # Get the provider class.
    ProviderClass = resolve_provider_class(cfg.get('class_'))

    deserialized = Credentials(config)

    # BUG FIX: provider_id was previously an undefined name at this
    # point, raising NameError on every call; it is now the parsed
    # integer from the first serialized field.
    deserialized.provider_id = provider_id
    deserialized.provider_type = ProviderClass.get_type()
    deserialized.provider_type_id = split[1]
    # (the duplicate provider_class assignment was removed)
    deserialized.provider_class = ProviderClass
    deserialized.provider_name = provider_name

    # Add provider type specific properties.
    return ProviderClass.reconstruct(split[2:], deserialized, cfg)
[ "def", "deserialize", "(", "cls", ",", "config", ",", "credentials", ")", ":", "# Accept both serialized and normal.", "if", "isinstance", "(", "credentials", ",", "Credentials", ")", ":", "return", "credentials", "decoded", "=", "parse", ".", "unquote", "(", "c...
A *class method* which reconstructs credentials created by :meth:`serialize`. You can also pass it a :class:`.Credentials` instance. :param dict config: The same :doc:`config` used in the :func:`.login` to get the credentials. :param str credentials: :class:`string` The serialized credentials or :class:`.Credentials` instance. :returns: :class:`.Credentials`
[ "A", "*", "class", "method", "*", "which", "reconstructs", "credentials", "created", "by", ":", "meth", ":", "serialize", ".", "You", "can", "also", "pass", "it", "a", ":", "class", ":", ".", "Credentials", "instance", "." ]
python
test
gmr/tinman
tinman/session.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/session.py#L273-L282
def save(self):
    """Store the session data in redis.

    Runs as a tornado ``gen`` coroutine body (uses ``gen.Task`` /
    ``gen.Return``); yields the result of the redis SET command.
    """
    result = yield gen.Task(RedisSession._redis_client.set,
                            self._key,
                            self.dumps())
    LOGGER.debug('Saved session %s (%r)', self.id, result)
    raise gen.Return(result)
[ "def", "save", "(", "self", ")", ":", "result", "=", "yield", "gen", ".", "Task", "(", "RedisSession", ".", "_redis_client", ".", "set", ",", "self", ".", "_key", ",", "self", ".", "dumps", "(", ")", ")", "LOGGER", ".", "debug", "(", "'Saved session ...
Store the session data in redis :param method callback: The callback method to invoke when done
[ "Store", "the", "session", "data", "in", "redis" ]
python
train
CalebBell/thermo
thermo/critical.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/critical.py#L1009-L1071
def Meissner(Tc=None, Pc=None, Vc=None):
    r'''Old (1942) relationship for estimating critical
    properties from each other. Two of the three properties are required.
    This model uses the "critical surface", a general plot of Tc vs Pc vs
    Vc. The model used 42 organic and inorganic compounds to derive the
    equation. The general equation is in [1]_:

    .. math::
        P_c = \frac{20.8 T_c}{V_c-8}

    (with :math:`P_c` in atm, :math:`T_c` in K and :math:`V_c` in
    cm^3/mol, matching the implementation below.)

    Parameters
    ----------
    Tc : float, optional
        Critical temperature of fluid [K]
    Pc : float, optional
        Critical pressure of fluid [Pa]
    Vc : float, optional
        Critical volume of fluid [m^3/mol]

    Returns
    -------
    Tc, Pc or Vc : float
        Critical property of fluid [K], [Pa], or [m^3/mol]

    Notes
    -----
    The prediction of Tc from Pc and Vc is not tested, as this is not
    necessary anywhere, but it is implemented.
    Internal units are atm, cm^3/mol, and K. A slight error occurs when
    Pa, cm^3/mol and K are used instead, on the order of <0.2%.
    This equation is less accurate than that of Ihmels, but surprisingly
    close. The author also proposed means of estimated properties
    independently.

    Raises
    ------
    ValueError
        If fewer than two of Tc, Pc and Vc are provided.

    Examples
    --------
    Succinic acid [110-15-6]

    >>> Meissner(Tc=851.0, Vc=0.000308)
    5978445.199999999

    References
    ----------
    .. [1] Meissner, H. P., and E. M. Redding. "Prediction of Critical
       Constants." Industrial & Engineering Chemistry 34, no. 5
       (May 1, 1942): 521-26. doi:10.1021/ie50389a003.
    '''
    if Tc and Vc:
        Vc = Vc*1E6  # m^3/mol to cm^3/mol
        Pc = 20.8*Tc/(Vc-8)
        Pc = 101325*Pc  # atm to Pa
        return Pc
    elif Tc and Pc:
        Pc = Pc/101325.  # Pa to atm
        Vc = 104/5.0*Tc/Pc+8
        Vc = Vc/1E6  # cm^3/mol to m^3/mol
        return Vc
    elif Pc and Vc:
        Pc = Pc/101325.  # Pa to atm
        Vc = Vc*1E6  # m^3/mol to cm^3/mol
        Tc = 5./104.0*Pc*(Vc-8)
        return Tc
    else:
        # BUG FIX: raise ValueError rather than the bare Exception so
        # callers can catch the specific error; anyone catching the old
        # Exception still catches this.
        raise ValueError('Two of Tc, Pc, and Vc must be provided')
[ "def", "Meissner", "(", "Tc", "=", "None", ",", "Pc", "=", "None", ",", "Vc", "=", "None", ")", ":", "if", "Tc", "and", "Vc", ":", "Vc", "=", "Vc", "*", "1E6", "Pc", "=", "20.8", "*", "Tc", "/", "(", "Vc", "-", "8", ")", "Pc", "=", "10132...
r'''Old (1942) relationship for estimating critical properties from each other. Two of the three properties are required. This model uses the "critical surface", a general plot of Tc vs Pc vs Vc. The model used 42 organic and inorganic compounds to derive the equation. The general equation is in [1]_: .. math:: P_c = \frac{2.08 T_c}{V_c-8} Parameters ---------- Tc : float, optional Critical temperature of fluid [K] Pc : float, optional Critical pressure of fluid [Pa] Vc : float, optional Critical volume of fluid [m^3/mol] Returns ------- Tc, Pc or Vc : float Critical property of fluid [K], [Pa], or [m^3/mol] Notes ----- The prediction of Tc from Pc and Vc is not tested, as this is not necessary anywhere, but it is implemented. Internal units are atm, cm^3/mol, and K. A slight error occurs when Pa, cm^3/mol and K are used instead, on the order of <0.2%. This equation is less accurate than that of Ihmels, but surprisingly close. The author also proposed means of estimated properties independently. Examples -------- Succinic acid [110-15-6] >>> Meissner(Tc=851.0, Vc=0.000308) 5978445.199999999 References ---------- .. [1] Meissner, H. P., and E. M. Redding. "Prediction of Critical Constants." Industrial & Engineering Chemistry 34, no. 5 (May 1, 1942): 521-26. doi:10.1021/ie50389a003.
[ "r", "Old", "(", "1942", ")", "relationship", "for", "estimating", "critical", "properties", "from", "each", "other", ".", "Two", "of", "the", "three", "properties", "are", "required", ".", "This", "model", "uses", "the", "critical", "surface", "a", "general...
python
valid
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L3350-L3369
def fm_discriminator(Signal):
    """
    Calculates the digital FM discriminator from a real-valued time signal.

    Parameters
    ----------
    Signal : array-like
        A real-valued time signal

    Returns
    -------
    fmDiscriminator : array-like
        The digital FM discriminator of the argument signal
    """
    # Analytic (complex) representation of the input via the Hilbert
    # transform, and its complex conjugate.
    analytic = _hilbert(Signal)
    conjugate = _GetComplexConjugateArray(analytic)
    # Each sample times the conjugate of the previous sample; the phase
    # of this product is the sample-to-sample phase increment.
    delta = analytic[1:] * conjugate[:-1]
    real_part, imag_part = _GetRealImagArray(delta)
    return _np.arctan2(imag_part, real_part)
[ "def", "fm_discriminator", "(", "Signal", ")", ":", "S_analytic", "=", "_hilbert", "(", "Signal", ")", "S_analytic_star", "=", "_GetComplexConjugateArray", "(", "S_analytic", ")", "S_analytic_hat", "=", "S_analytic", "[", "1", ":", "]", "*", "S_analytic_star", "...
Calculates the digital FM discriminator from a real-valued time signal. Parameters ---------- Signal : array-like A real-valued time signal Returns ------- fmDiscriminator : array-like The digital FM discriminator of the argument signal
[ "Calculates", "the", "digital", "FM", "discriminator", "from", "a", "real", "-", "valued", "time", "signal", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/rbridge_id/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/rbridge_id/__init__.py#L572-L593
def _set_threshold_monitor(self, v, load=False):
    """
    Setter method for threshold_monitor, mapped from YANG variable
    /rbridge_id/threshold_monitor (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_threshold_monitor is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_threshold_monitor() directly.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated container type; TypeError
        # or ValueError means the value is incompatible.
        t = YANGDynClass(v,base=threshold_monitor.threshold_monitor, is_container='container', presence=False, yang_name="threshold-monitor", rest_name="threshold-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Class monitoring threshold and alert setting', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """threshold_monitor must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=threshold_monitor.threshold_monitor, is_container='container', presence=False, yang_name="threshold-monitor", rest_name="threshold-monitor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Class monitoring threshold and alert setting', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
        })
    self.__threshold_monitor = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_threshold_monitor", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",",...
Setter method for threshold_monitor, mapped from YANG variable /rbridge_id/threshold_monitor (container) If this variable is read-only (config: false) in the source YANG file, then _set_threshold_monitor is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_threshold_monitor() directly.
[ "Setter", "method", "for", "threshold_monitor", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "threshold_monitor", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "sou...
python
train
capless/valley
valley/utils/imports.py
https://github.com/capless/valley/blob/491e4203e428a9e92264e204d44a1df96a570bbc/valley/utils/imports.py#L4-L14
def import_util(imp):
    '''
    Lazily import an object (class, function, or variable) from a module,
    given its dotted path as a string.

    @param imp: dotted path, e.g. ``'package.module.attr'``
    '''
    # Split "package.module.attr" into module path and attribute name.
    module_path, attribute = imp.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, attribute)
[ "def", "import_util", "(", "imp", ")", ":", "mod_name", ",", "obj_name", "=", "imp", ".", "rsplit", "(", "'.'", ",", "1", ")", "mod", "=", "importlib", ".", "import_module", "(", "mod_name", ")", "return", "getattr", "(", "mod", ",", "obj_name", ")" ]
Lazily imports an object (a class, function, or variable) from a module, given its dotted path as a string. @param imp:
[ "Lazily", "imports", "a", "utils", "(", "class", "function", "or", "variable", ")", "from", "a", "module", ")", "from", "a", "string", "." ]
python
train
mattharrison/rst2odp
odplib/preso.py
https://github.com/mattharrison/rst2odp/blob/4adbf29b28c8207ec882f792ded07e98b1d3e7d0/odplib/preso.py#L1350-L1356
def _check_add_node(self, parent, name):
    """ Returns False if bad to make name a child of parent """
    # Links (text:a) may not be placed directly inside a draw:text-box.
    is_link = name == ns("text", "a")
    if is_link and parent.tag == ns("draw", "text-box"):
        return False
    return True
[ "def", "_check_add_node", "(", "self", ",", "parent", ",", "name", ")", ":", "if", "name", "==", "ns", "(", "\"text\"", ",", "\"a\"", ")", ":", "if", "parent", ".", "tag", "==", "ns", "(", "\"draw\"", ",", "\"text-box\"", ")", ":", "return", "False",...
Returns False if bad to make name a child of parent
[ "Returns", "False", "if", "bad", "to", "make", "name", "a", "child", "of", "parent" ]
python
train
bitshares/python-bitshares
bitshares/bitshares.py
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitshares/bitshares.py#L1253-L1274
def create_committee_member(self, url="", account=None, **kwargs):
    """ Create a committee member

        :param str url: URL to read more about the worker
        :param str account: (optional) the account to allow access
            to (defaults to ``default_account``)
    """
    # Fall back to the configured default account when none is given.
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")

    account = Account(account, blockchain_instance=self)

    # Build the raw operation; the zero fee is filled in at broadcast.
    op = operations.Committee_member_create(
        fee={"amount": 0, "asset_id": "1.3.0"},
        committee_member_account=account["id"],
        url=url,
    )
    return self.finalizeOp(op, account, "active", **kwargs)
[ "def", "create_committee_member", "(", "self", ",", "url", "=", "\"\"", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ...
Create a committee member :param str url: URL to read more about the worker :param str account: (optional) the account to allow access to (defaults to ``default_account``)
[ "Create", "a", "committee", "member" ]
python
train
hyperledger/sawtooth-core
cli/sawtooth_cli/tty.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/tty.py#L24-L37
def size(): """Determines the height and width of the console window Returns: tuple of int: The height in lines, then width in characters """ try: assert os != 'nt' and sys.stdout.isatty() rows, columns = os.popen('stty size', 'r').read().split() except (AssertionError, AttributeError, ValueError): # in case of failure, use dimensions of a full screen 13" laptop rows, columns = DEFAULT_HEIGHT, DEFAULT_WIDTH return int(rows), int(columns)
[ "def", "size", "(", ")", ":", "try", ":", "assert", "os", "!=", "'nt'", "and", "sys", ".", "stdout", ".", "isatty", "(", ")", "rows", ",", "columns", "=", "os", ".", "popen", "(", "'stty size'", ",", "'r'", ")", ".", "read", "(", ")", ".", "spl...
Determines the height and width of the console window Returns: tuple of int: The height in lines, then width in characters
[ "Determines", "the", "height", "and", "width", "of", "the", "console", "window" ]
python
train
saltstack/salt
salt/modules/ipmi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ipmi.py#L790-L826
def get_user(uid, channel=14, **kwargs): ''' Get user from uid and access on channel :param uid: user number [1:16] :param channel: number [1:7] :param kwargs: - api_host=127.0.0.1 - api_user=admin - api_pass=example - api_port=623 - api_kg=None Return Data .. code-block:: none name: (str) uid: (int) channel: (int) access: - callback (bool) - link_auth (bool) - ipmi_msg (bool) - privilege_level: (str)[callback, user, operatorm administrator, proprietary, no_access] CLI Examples: .. code-block:: bash salt-call ipmi.get_user uid=2 ''' name = get_user_name(uid, **kwargs) access = get_user_access(uid, channel, **kwargs) data = {'name': name, 'uid': uid, 'channel': channel, 'access': access['access']} return data
[ "def", "get_user", "(", "uid", ",", "channel", "=", "14", ",", "*", "*", "kwargs", ")", ":", "name", "=", "get_user_name", "(", "uid", ",", "*", "*", "kwargs", ")", "access", "=", "get_user_access", "(", "uid", ",", "channel", ",", "*", "*", "kwarg...
Get user from uid and access on channel :param uid: user number [1:16] :param channel: number [1:7] :param kwargs: - api_host=127.0.0.1 - api_user=admin - api_pass=example - api_port=623 - api_kg=None Return Data .. code-block:: none name: (str) uid: (int) channel: (int) access: - callback (bool) - link_auth (bool) - ipmi_msg (bool) - privilege_level: (str)[callback, user, operatorm administrator, proprietary, no_access] CLI Examples: .. code-block:: bash salt-call ipmi.get_user uid=2
[ "Get", "user", "from", "uid", "and", "access", "on", "channel" ]
python
train
jasonrbriggs/stomp.py
stomp/transport.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L196-L251
def notify(self, frame_type, headers=None, body=None): """ Utility function for notifying listeners of incoming and outgoing messages :param str frame_type: the type of message :param dict headers: the map of headers associated with the message :param body: the content of the message """ if frame_type == 'receipt': # logic for wait-on-receipt notification receipt = headers['receipt-id'] receipt_value = self.__receipts.get(receipt) with self.__send_wait_condition: self.set_receipt(receipt, None) self.__send_wait_condition.notify() if receipt_value == CMD_DISCONNECT: self.set_connected(False) # received a stomp 1.1+ disconnect receipt if receipt == self.__disconnect_receipt: self.disconnect_socket() self.__disconnect_receipt = None elif frame_type == 'connected': self.set_connected(True) elif frame_type == 'disconnected': self.set_connected(False) with self.__listeners_change_condition: listeners = sorted(self.listeners.items()) for (_, listener) in listeners: if not listener: continue notify_func = getattr(listener, 'on_%s' % frame_type, None) if not notify_func: log.debug("listener %s has no method on_%s", listener, frame_type) continue if frame_type in ('heartbeat', 'disconnected'): notify_func() continue if frame_type == 'connecting': notify_func(self.current_host_and_port) continue if frame_type == 'error' and not self.connected: with self.__connect_wait_condition: self.connection_error = True self.__connect_wait_condition.notify() rtn = notify_func(headers, body) if rtn: (headers, body) = rtn return (headers, body)
[ "def", "notify", "(", "self", ",", "frame_type", ",", "headers", "=", "None", ",", "body", "=", "None", ")", ":", "if", "frame_type", "==", "'receipt'", ":", "# logic for wait-on-receipt notification", "receipt", "=", "headers", "[", "'receipt-id'", "]", "rece...
Utility function for notifying listeners of incoming and outgoing messages :param str frame_type: the type of message :param dict headers: the map of headers associated with the message :param body: the content of the message
[ "Utility", "function", "for", "notifying", "listeners", "of", "incoming", "and", "outgoing", "messages" ]
python
train
salu133445/pypianoroll
pypianoroll/metrics.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/metrics.py#L47-L58
def qualified_note_rate(pianoroll, threshold=2): """Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll.""" _validate_pianoroll(pianoroll) if np.issubdtype(pianoroll.dtype, np.bool_): pianoroll = pianoroll.astype(np.uint8) padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant') diff = np.diff(padded, axis=0).reshape(-1) onsets = (diff > 0).nonzero()[0] offsets = (diff < 0).nonzero()[0] n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold) return n_qualified_notes / len(onsets)
[ "def", "qualified_note_rate", "(", "pianoroll", ",", "threshold", "=", "2", ")", ":", "_validate_pianoroll", "(", "pianoroll", ")", "if", "np", ".", "issubdtype", "(", "pianoroll", ".", "dtype", ",", "np", ".", "bool_", ")", ":", "pianoroll", "=", "pianoro...
Return the ratio of the number of the qualified notes (notes longer than `threshold` (in time step)) to the total number of notes in a pianoroll.
[ "Return", "the", "ratio", "of", "the", "number", "of", "the", "qualified", "notes", "(", "notes", "longer", "than", "threshold", "(", "in", "time", "step", "))", "to", "the", "total", "number", "of", "notes", "in", "a", "pianoroll", "." ]
python
train
tamasgal/km3pipe
km3pipe/io/__init__.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/__init__.py#L90-L113
def read_calibration( detx=None, det_id=None, from_file=False, det_id_table=None ): """Retrive calibration from file, the DB.""" from km3pipe.calib import Calibration # noqa if not (detx or det_id or from_file): return None if detx is not None: return Calibration(filename=detx) if from_file: det_ids = np.unique(det_id_table) if len(det_ids) > 1: log.critical("Multiple detector IDs found in events.") det_id = det_ids[0] if det_id is not None: if det_id < 0: log.warning( "Negative detector ID found ({0}). This is a MC " "detector and cannot be retrieved from the DB.".format(det_id) ) return None return Calibration(det_id=det_id) return None
[ "def", "read_calibration", "(", "detx", "=", "None", ",", "det_id", "=", "None", ",", "from_file", "=", "False", ",", "det_id_table", "=", "None", ")", ":", "from", "km3pipe", ".", "calib", "import", "Calibration", "# noqa", "if", "not", "(", "detx", "or...
Retrive calibration from file, the DB.
[ "Retrive", "calibration", "from", "file", "the", "DB", "." ]
python
train
adsabs/adsutils
adsutils/ads_utils.py
https://github.com/adsabs/adsutils/blob/fb9d6b4f6ed5e6ca19c552efc3cdd6466c587fdb/adsutils/ads_utils.py#L124-L150
def get_pub_abbreviation(pubstring, numBest=5, exact=None): """ Get ADS journal abbreviation ("bibstem") candidates for a given publication name. * 'exact': if True results will only be returned if an exact match was found * 'numBest': maximum number of guesses to return A list of tuples will be returned, each tuple consisting of a score and a bibstem """ if exact: # Only try to find exact matches bibstems = _defaultSourcematcher.getExactMatch(string.upper(pubstring)) else: # Allow fuzzy matching bibstems = _defaultSourcematcher.getBestMatches(string.upper(pubstring), numBest) if re.search(r"L(ett(ers)?)?$",pubstring): addit = _defaultSourcematcher.getBestMatches(re.sub(r"(?i)\s*L(ett(ers)?)?$", "", pubstring.upper()), 2) if addit: bibstems.extend(addit) bibstems.sort() # Make the list of results unique try: bibstems = list(set(bibstems)) except: bibstems = [] # Sort the list of results from highest score to lowest score bibstems.sort(key=lambda tup: tup[0], reverse=True) return bibstems
[ "def", "get_pub_abbreviation", "(", "pubstring", ",", "numBest", "=", "5", ",", "exact", "=", "None", ")", ":", "if", "exact", ":", "# Only try to find exact matches", "bibstems", "=", "_defaultSourcematcher", ".", "getExactMatch", "(", "string", ".", "upper", "...
Get ADS journal abbreviation ("bibstem") candidates for a given publication name. * 'exact': if True results will only be returned if an exact match was found * 'numBest': maximum number of guesses to return A list of tuples will be returned, each tuple consisting of a score and a bibstem
[ "Get", "ADS", "journal", "abbreviation", "(", "bibstem", ")", "candidates", "for", "a", "given", "publication", "name", ".", "*", "exact", ":", "if", "True", "results", "will", "only", "be", "returned", "if", "an", "exact", "match", "was", "found", "*", ...
python
train
alex-kostirin/pyatomac
atomac/ldtpd/core.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/core.py#L148-L170
def startprocessmonitor(self, process_name, interval=2): """ Start memory and CPU monitoring, with the time interval between each process scan @param process_name: Process name, ex: firefox-bin. @type process_name: string @param interval: Time interval between each process scan @type interval: double @return: 1 on success @rtype: integer """ if process_name in self._process_stats: # Stop previously running instance # At any point, only one process name can be tracked # If an instance already exist, then stop it self._process_stats[process_name].stop() # Create an instance of process stat self._process_stats[process_name] = ProcessStats(process_name, interval) # start monitoring the process self._process_stats[process_name].start() return 1
[ "def", "startprocessmonitor", "(", "self", ",", "process_name", ",", "interval", "=", "2", ")", ":", "if", "process_name", "in", "self", ".", "_process_stats", ":", "# Stop previously running instance", "# At any point, only one process name can be tracked", "# If an instan...
Start memory and CPU monitoring, with the time interval between each process scan @param process_name: Process name, ex: firefox-bin. @type process_name: string @param interval: Time interval between each process scan @type interval: double @return: 1 on success @rtype: integer
[ "Start", "memory", "and", "CPU", "monitoring", "with", "the", "time", "interval", "between", "each", "process", "scan" ]
python
valid
gem/oq-engine
openquake/hazardlib/gsim/dowrickrhoades_2005.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/dowrickrhoades_2005.py#L74-L96
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) # extract dictionaries of coefficients specific to required # intensity measure type C = self.COEFFS[imt] # Deltas for Tectonic Region Type and rake angles delta_R, delta_S, delta_V, delta_I = self._get_deltas(rup.rake) mean = self._compute_mean(C, rup.mag, dists.rrup, rup.hypo_depth, delta_R, delta_S, delta_V, delta_I, sites.vs30) stddevs = self._get_stddevs(C, stddev_types, sites.vs30.size) return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "assert", "all", "(", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "for", "stddev_type", "in", "stddev_typ...
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
abingham/spor
src/spor/repository/repository.py
https://github.com/abingham/spor/blob/673c8c36c99a4b9ea882f002bfb529f1eca89126/src/spor/repository/repository.py#L61-L76
def add(self, anchor): """Add a new anchor to the repository. This will create a new ID for the anchor and provision new storage for it. Returns: The storage ID for the Anchor which can be used to retrieve the anchor later. """ anchor_id = uuid.uuid4().hex anchor_path = self._anchor_path(anchor_id) with anchor_path.open(mode='wt') as f: save_anchor(f, anchor, self.root) return anchor_id
[ "def", "add", "(", "self", ",", "anchor", ")", ":", "anchor_id", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "anchor_path", "=", "self", ".", "_anchor_path", "(", "anchor_id", ")", "with", "anchor_path", ".", "open", "(", "mode", "=", "'wt'", ")...
Add a new anchor to the repository. This will create a new ID for the anchor and provision new storage for it. Returns: The storage ID for the Anchor which can be used to retrieve the anchor later.
[ "Add", "a", "new", "anchor", "to", "the", "repository", "." ]
python
train
20c/twentyc.database
twentyc/database/couchbase/client.py
https://github.com/20c/twentyc.database/blob/c6b7184d66dddafb306c94c4f98234bef1df1291/twentyc/database/couchbase/client.py#L101-L126
def get(self, key): """ Retrieve object indexed by <key> """ try: try: obj = self.bucket.get(key) except couchbase.exception.MemcachedError, inst: if str(inst) == "Memcached error #1: Not found": # for some reason the py cb client raises an error when # a key isnt found, instead we just want a none value. obj = None else: raise except: raise if obj: return json.loads(obj[2]) else: return None except: raise
[ "def", "get", "(", "self", ",", "key", ")", ":", "try", ":", "try", ":", "obj", "=", "self", ".", "bucket", ".", "get", "(", "key", ")", "except", "couchbase", ".", "exception", ".", "MemcachedError", ",", "inst", ":", "if", "str", "(", "inst", "...
Retrieve object indexed by <key>
[ "Retrieve", "object", "indexed", "by", "<key", ">" ]
python
train
jmgilman/Neolib
neolib/pyamf/util/pure.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/util/pure.py#L218-L233
def write_uchar(self, c): """ Writes an C{unsigned char} to the stream. @param c: Unsigned char @type c: C{int} @raise TypeError: Unexpected type for int C{c}. @raise OverflowError: Not in range. """ if type(c) not in python.int_types: raise TypeError('expected an int (got:%r)' % type(c)) if not 0 <= c <= 255: raise OverflowError("Not in range, %d" % c) self.write(struct.pack("B", c))
[ "def", "write_uchar", "(", "self", ",", "c", ")", ":", "if", "type", "(", "c", ")", "not", "in", "python", ".", "int_types", ":", "raise", "TypeError", "(", "'expected an int (got:%r)'", "%", "type", "(", "c", ")", ")", "if", "not", "0", "<=", "c", ...
Writes an C{unsigned char} to the stream. @param c: Unsigned char @type c: C{int} @raise TypeError: Unexpected type for int C{c}. @raise OverflowError: Not in range.
[ "Writes", "an", "C", "{", "unsigned", "char", "}", "to", "the", "stream", "." ]
python
train
PyCQA/astroid
astroid/modutils.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/modutils.py#L536-L558
def get_source_file(filename, include_no_ext=False): """given a python module's file name return the matching source file name (the filename will be returned identically if it's already an absolute path to a python source file...) :type filename: str :param filename: python module's file name :raise NoSourceFile: if no source file exists on the file system :rtype: str :return: the absolute path of the source file if it exists """ filename = os.path.abspath(_path_from_filename(filename)) base, orig_ext = os.path.splitext(filename) for ext in PY_SOURCE_EXTS: source_path = "%s.%s" % (base, ext) if os.path.exists(source_path): return source_path if include_no_ext and not orig_ext and os.path.exists(base): return base raise NoSourceFile(filename)
[ "def", "get_source_file", "(", "filename", ",", "include_no_ext", "=", "False", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "_path_from_filename", "(", "filename", ")", ")", "base", ",", "orig_ext", "=", "os", ".", "path", ".", "sp...
given a python module's file name return the matching source file name (the filename will be returned identically if it's already an absolute path to a python source file...) :type filename: str :param filename: python module's file name :raise NoSourceFile: if no source file exists on the file system :rtype: str :return: the absolute path of the source file if it exists
[ "given", "a", "python", "module", "s", "file", "name", "return", "the", "matching", "source", "file", "name", "(", "the", "filename", "will", "be", "returned", "identically", "if", "it", "s", "already", "an", "absolute", "path", "to", "a", "python", "sourc...
python
train
OpenHydrology/floodestimation
floodestimation/fehdata.py
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/fehdata.py#L82-L109
def update_available(after_days=1): """ Check whether updated NRFA data is available. :param after_days: Only check if not checked previously since a certain number of days ago :type after_days: float :return: `True` if update available, `False` if not, `None` if remote location cannot be reached. :rtype: bool or None """ never_downloaded = not bool(config.get('nrfa', 'downloaded_on', fallback=None) or None) if never_downloaded: config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow()) config.save() return True last_checked_on = config.get_datetime('nrfa', 'update_checked_on', fallback=None) or datetime.fromtimestamp(0) if datetime.utcnow() < last_checked_on + timedelta(days=after_days): return False current_version = LooseVersion(config.get('nrfa', 'version', fallback='0') or '0') try: with urlopen(config['nrfa']['oh_json_url'], timeout=10) as f: remote_version = LooseVersion(json.loads(f.read().decode('utf-8'))['nrfa_version']) config.set_datetime('nrfa', 'update_checked_on', datetime.utcnow()) config.save() return remote_version > current_version except URLError: return None
[ "def", "update_available", "(", "after_days", "=", "1", ")", ":", "never_downloaded", "=", "not", "bool", "(", "config", ".", "get", "(", "'nrfa'", ",", "'downloaded_on'", ",", "fallback", "=", "None", ")", "or", "None", ")", "if", "never_downloaded", ":",...
Check whether updated NRFA data is available. :param after_days: Only check if not checked previously since a certain number of days ago :type after_days: float :return: `True` if update available, `False` if not, `None` if remote location cannot be reached. :rtype: bool or None
[ "Check", "whether", "updated", "NRFA", "data", "is", "available", "." ]
python
train
tensorflow/cleverhans
cleverhans/compat.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/compat.py#L35-L54
def reduce_function(op_func, input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None): """ This function used to be needed to support tf 1.4 and early, but support for tf 1.4 and earlier is now dropped. :param op_func: expects the function to handle eg: tf.reduce_sum. :param input_tensor: The tensor to reduce. Should have numeric type. :param axis: The dimensions to reduce. If None (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)). :param keepdims: If true, retains reduced dimensions with length 1. :param name: A name for the operation (optional). :param reduction_indices: The old (deprecated) name for axis. :return: outputs same value as op_func. """ warnings.warn("`reduce_function` is deprecated and may be removed on or after 2019-09-08.") out = op_func(input_tensor, axis=axis, keepdims=keepdims, name=name, reduction_indices=reduction_indices) return out
[ "def", "reduce_function", "(", "op_func", ",", "input_tensor", ",", "axis", "=", "None", ",", "keepdims", "=", "None", ",", "name", "=", "None", ",", "reduction_indices", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"`reduce_function` is deprecated a...
This function used to be needed to support tf 1.4 and early, but support for tf 1.4 and earlier is now dropped. :param op_func: expects the function to handle eg: tf.reduce_sum. :param input_tensor: The tensor to reduce. Should have numeric type. :param axis: The dimensions to reduce. If None (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)). :param keepdims: If true, retains reduced dimensions with length 1. :param name: A name for the operation (optional). :param reduction_indices: The old (deprecated) name for axis. :return: outputs same value as op_func.
[ "This", "function", "used", "to", "be", "needed", "to", "support", "tf", "1", ".", "4", "and", "early", "but", "support", "for", "tf", "1", ".", "4", "and", "earlier", "is", "now", "dropped", ".", ":", "param", "op_func", ":", "expects", "the", "func...
python
train
tensorpack/tensorpack
examples/FasterRCNN/dataset.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/dataset.py#L203-L229
def load_training_roidbs(self, names): """ Args: names (list[str]): name of the training datasets, e.g. ['train2014', 'valminusminival2014'] Returns: roidbs (list[dict]): Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances. and the following keys are expected for training: file_name: str, full path to the image boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2] class: numpy array of k integers, in the range of [1, #categories], NOT [0, #categories) is_crowd: k booleans. Use k False if you don't know what it means. segmentation: k lists of numpy arrays (one for each instance). Each list of numpy arrays corresponds to the mask for one instance. Each numpy array in the list is a polygon of shape Nx2, because one mask can be represented by N polygons. If your segmentation annotations are originally masks rather than polygons, either convert it, or the augmentation will need to be changed or skipped accordingly. Include this field only if training Mask R-CNN. """ return COCODetection.load_many( cfg.DATA.BASEDIR, names, add_gt=True, add_mask=cfg.MODE_MASK)
[ "def", "load_training_roidbs", "(", "self", ",", "names", ")", ":", "return", "COCODetection", ".", "load_many", "(", "cfg", ".", "DATA", ".", "BASEDIR", ",", "names", ",", "add_gt", "=", "True", ",", "add_mask", "=", "cfg", ".", "MODE_MASK", ")" ]
Args: names (list[str]): name of the training datasets, e.g. ['train2014', 'valminusminival2014'] Returns: roidbs (list[dict]): Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances. and the following keys are expected for training: file_name: str, full path to the image boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2] class: numpy array of k integers, in the range of [1, #categories], NOT [0, #categories) is_crowd: k booleans. Use k False if you don't know what it means. segmentation: k lists of numpy arrays (one for each instance). Each list of numpy arrays corresponds to the mask for one instance. Each numpy array in the list is a polygon of shape Nx2, because one mask can be represented by N polygons. If your segmentation annotations are originally masks rather than polygons, either convert it, or the augmentation will need to be changed or skipped accordingly. Include this field only if training Mask R-CNN.
[ "Args", ":", "names", "(", "list", "[", "str", "]", ")", ":", "name", "of", "the", "training", "datasets", "e", ".", "g", ".", "[", "train2014", "valminusminival2014", "]" ]
python
train
annoviko/pyclustering
pyclustering/cluster/cure.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/cure.py#L178-L243
def __process_by_python(self): """! @brief Performs cluster analysis using python code. """ self.__create_queue() # queue self.__create_kdtree() # create k-d tree while len(self.__queue) > self.__number_cluster: cluster1 = self.__queue[0] # cluster that has nearest neighbor. cluster2 = cluster1.closest # closest cluster. self.__queue.remove(cluster1) self.__queue.remove(cluster2) self.__delete_represented_points(cluster1) self.__delete_represented_points(cluster2) merged_cluster = self.__merge_clusters(cluster1, cluster2) self.__insert_represented_points(merged_cluster) # Pointers to clusters that should be relocated is stored here. cluster_relocation_requests = [] # Check for the last cluster if len(self.__queue) > 0: merged_cluster.closest = self.__queue[0] # arbitrary cluster from queue merged_cluster.distance = self.__cluster_distance(merged_cluster, merged_cluster.closest) for item in self.__queue: distance = self.__cluster_distance(merged_cluster, item) # Check if distance between new cluster and current is the best than now. if distance < merged_cluster.distance: merged_cluster.closest = item merged_cluster.distance = distance # Check if current cluster has removed neighbor. if (item.closest is cluster1) or (item.closest is cluster2): # If previous distance was less then distance to new cluster then nearest cluster should # be found in the tree. if item.distance < distance: (item.closest, item.distance) = self.__closest_cluster(item, distance) # TODO: investigation is required. There is assumption that itself and merged cluster # should be always in list of neighbors in line with specified radius. But merged cluster # may not be in list due to error calculation, therefore it should be added manually. 
if item.closest is None: item.closest = merged_cluster item.distance = distance else: item.closest = merged_cluster item.distance = distance cluster_relocation_requests.append(item) # New cluster and updated clusters should relocated in queue self.__insert_cluster(merged_cluster) for item in cluster_relocation_requests: self.__relocate_cluster(item) # Change cluster representation self.__clusters = [cure_cluster_unit.indexes for cure_cluster_unit in self.__queue] self.__representors = [cure_cluster_unit.rep for cure_cluster_unit in self.__queue] self.__means = [cure_cluster_unit.mean for cure_cluster_unit in self.__queue]
[ "def", "__process_by_python", "(", "self", ")", ":", "self", ".", "__create_queue", "(", ")", "# queue\r", "self", ".", "__create_kdtree", "(", ")", "# create k-d tree\r", "while", "len", "(", "self", ".", "__queue", ")", ">", "self", ".", "__number_cluster", ...
! @brief Performs cluster analysis using python code.
[ "!" ]
python
valid
lucalianas/pyBaseX
pybasex/fragments.py
https://github.com/lucalianas/pyBaseX/blob/c397e7182932bdeb997313c5dbe7731516b575c8/pybasex/fragments.py#L6-L15
def build_query_fragment(query): """ <query xmlns="http://basex.org/rest"> <text><![CDATA[ (//city/name)[position() <= 5] ]]></text> </query> """ root = etree.Element('query', nsmap={None: 'http://basex.org/rest'}) text = etree.SubElement(root, 'text') text.text = etree.CDATA(query.strip()) return root
[ "def", "build_query_fragment", "(", "query", ")", ":", "root", "=", "etree", ".", "Element", "(", "'query'", ",", "nsmap", "=", "{", "None", ":", "'http://basex.org/rest'", "}", ")", "text", "=", "etree", ".", "SubElement", "(", "root", ",", "'text'", ")...
<query xmlns="http://basex.org/rest"> <text><![CDATA[ (//city/name)[position() <= 5] ]]></text> </query>
[ "<query", "xmlns", "=", "http", ":", "//", "basex", ".", "org", "/", "rest", ">", "<text", ">", "<!", "[", "CDATA", "[", "(", "//", "city", "/", "name", ")", "[", "position", "()", "<", "=", "5", "]", "]]", ">", "<", "/", "text", ">", "<", ...
python
train
DarkEnergySurvey/ugali
ugali/simulation/simulator.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/simulation/simulator.py#L400-L493
def background(self,mc_source_id=2,seed=None): """ Create a simulation of the background stellar population. Because some stars have been clipped to generate the CMD, this function tends to slightly underestimate (~1%) the background as compared to the true catalog. The simulation of background object colors relies on the data-derived CMD. As such, it is a binned random generator and thus has some fundamental limitations. - The expected number of counts per bin is drawn ra There are a few limitations of this procedure: - Colors are drawn from the CMD of the background annulus - The number of stars per CMD bin is randomized according to the CMD - The colors/mags are then uniformly distributed within the bin - This leads to trouble with large bins when the cloud-in-cells algorithm is applied to the simulated data - The positions are chosen randomly over the spherical cap of the ROI - Objects that are outside of the WARNING: The cloud-in-cells method of generating the CMD leads to some difficulties since it disperses objects from high-density zones to low density zones. 
- Magnitudes are not randomized according to their errors """ if seed is not None: np.random.seed(seed) self._setup_cmd() # Randomize the number of stars per bin according to Poisson distribution nstar_per_bin = np.random.poisson(lam=self.bkg_lambda) nstar = nstar_per_bin.sum() logger.info("Simulating %i background stars..."%nstar) if not self.config['simulate'].get('uniform'): logger.info("Generating colors from background CMD.") # Distribute the stars within each CMD bin delta_color = self.bkg_centers_color[1]-self.bkg_centers_color[0] delta_mag = self.bkg_centers_mag[1]-self.bkg_centers_mag[0] # Distribute points within each color-mag bins xx,yy = np.meshgrid(self.bkg_centers_color,self.bkg_centers_mag) color = np.repeat(xx.flatten(),repeats=nstar_per_bin.flatten()) color += np.random.uniform(-delta_color/2.,delta_color/2.,size=nstar) mag_1 = np.repeat(yy.flatten(),repeats=nstar_per_bin.flatten()) mag_1 += np.random.uniform(-delta_mag/2.,delta_mag/2.,size=nstar) else: # Uniform color-magnitude distribution logger.info("Generating uniform CMD.") mag_1 = np.random.uniform(self.config['mag']['min'],self.config['mag']['max'],size=nstar) color = np.random.uniform(self.config['color']['min'],self.config['color']['max'],size=nstar) mag_2 = mag_1 - color # Random points drawn from healpix subpixels logger.info("Generating uniform positions...") idx = np.random.randint(0,len(self.subpix)-1,size=nstar) lon,lat = pix2ang(self.nside_subpixel,self.subpix[idx]) nside_pixel = self.nside_pixel pix = ang2pix(nside_pixel, lon, lat) # There is probably a better way to do this step without creating the full HEALPix map mask = -1. * np.ones(hp.nside2npix(nside_pixel)) mask[self.roi.pixels] = self.mask.mask_1.mask_roi_sparse mag_lim_1 = mask[pix] mask = -1. 
* np.ones(hp.nside2npix(nside_pixel)) mask[self.roi.pixels] = self.mask.mask_2.mask_roi_sparse mag_lim_2 = mask[pix] mag_err_1 = self.photo_err_1(mag_lim_1 - mag_1) mag_err_2 = self.photo_err_2(mag_lim_2 - mag_2) mc_source_id = mc_source_id * np.ones(len(mag_1)) # ADW: Should magnitudes be randomized by the erros? #mag_1 += (np.random.normal(size=len(mag_1)) * mag_err_1) #mag_2 += (np.random.normal(size=len(mag_2)) * mag_err_2) select = (mag_lim_1>mag_1)&(mag_lim_2>mag_2) ### # Make sure objects lie within the original cmd (should be done later...) ### select &= (ugali.utils.binning.take2D(self.mask.solid_angle_cmd, color, mag_1, ### self.roi.bins_color, self.roi.bins_mag) > 0) logger.info("Clipping %i simulated background stars..."%(~select).sum()) hdu = ugali.observation.catalog.makeHDU(self.config,mag_1[select],mag_err_1[select], mag_2[select],mag_err_2[select], lon[select],lat[select],mc_source_id[select]) catalog = ugali.observation.catalog.Catalog(self.config, data=hdu.data) return catalog
[ "def", "background", "(", "self", ",", "mc_source_id", "=", "2", ",", "seed", "=", "None", ")", ":", "if", "seed", "is", "not", "None", ":", "np", ".", "random", ".", "seed", "(", "seed", ")", "self", ".", "_setup_cmd", "(", ")", "# Randomize the num...
Create a simulation of the background stellar population. Because some stars have been clipped to generate the CMD, this function tends to slightly underestimate (~1%) the background as compared to the true catalog. The simulation of background object colors relies on the data-derived CMD. As such, it is a binned random generator and thus has some fundamental limitations. - The expected number of counts per bin is drawn ra There are a few limitations of this procedure: - Colors are drawn from the CMD of the background annulus - The number of stars per CMD bin is randomized according to the CMD - The colors/mags are then uniformly distributed within the bin - This leads to trouble with large bins when the cloud-in-cells algorithm is applied to the simulated data - The positions are chosen randomly over the spherical cap of the ROI - Objects that are outside of the WARNING: The cloud-in-cells method of generating the CMD leads to some difficulties since it disperses objects from high-density zones to low density zones. - Magnitudes are not randomized according to their errors
[ "Create", "a", "simulation", "of", "the", "background", "stellar", "population", ".", "Because", "some", "stars", "have", "been", "clipped", "to", "generate", "the", "CMD", "this", "function", "tends", "to", "slightly", "underestimate", "(", "~1%", ")", "the",...
python
train
AguaClara/aguaclara
aguaclara/core/physchem.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/physchem.py#L121-L128
def re_rect(FlowRate, Width, DistCenter, Nu, openchannel): """Return the Reynolds Number for a rectangular channel.""" #Checking input validity - inputs not checked here are checked by #functions this function calls. ut.check_range([FlowRate, ">0", "Flow rate"], [Nu, ">0", "Nu"]) return (4 * FlowRate * radius_hydraulic(Width, DistCenter, openchannel).magnitude / (Width * DistCenter * Nu))
[ "def", "re_rect", "(", "FlowRate", ",", "Width", ",", "DistCenter", ",", "Nu", ",", "openchannel", ")", ":", "#Checking input validity - inputs not checked here are checked by", "#functions this function calls.", "ut", ".", "check_range", "(", "[", "FlowRate", ",", "\">...
Return the Reynolds Number for a rectangular channel.
[ "Return", "the", "Reynolds", "Number", "for", "a", "rectangular", "channel", "." ]
python
train
bwohlberg/sporco
sporco/dictlrn/prlcnscdl.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/prlcnscdl.py#L249-L262
def step_group(k): """Do a single iteration over cbpdn and ccmod steps that can be performed independently for each slice `k` of the input data set. """ cbpdn_xstep(k) if mp_xrlx != 1.0: cbpdn_relax(k) cbpdn_ystep(k) cbpdn_ustep(k) ccmod_setcoef(k) ccmod_xstep(k) if mp_drlx != 1.0: ccmod_relax(k)
[ "def", "step_group", "(", "k", ")", ":", "cbpdn_xstep", "(", "k", ")", "if", "mp_xrlx", "!=", "1.0", ":", "cbpdn_relax", "(", "k", ")", "cbpdn_ystep", "(", "k", ")", "cbpdn_ustep", "(", "k", ")", "ccmod_setcoef", "(", "k", ")", "ccmod_xstep", "(", "k...
Do a single iteration over cbpdn and ccmod steps that can be performed independently for each slice `k` of the input data set.
[ "Do", "a", "single", "iteration", "over", "cbpdn", "and", "ccmod", "steps", "that", "can", "be", "performed", "independently", "for", "each", "slice", "k", "of", "the", "input", "data", "set", "." ]
python
train
brentp/cruzdb
cruzdb/__init__.py
https://github.com/brentp/cruzdb/blob/9068d46e25952f4a929dde0242beb31fa4c7e89a/cruzdb/__init__.py#L449-L484
def annotate(self, fname, tables, feature_strand=False, in_memory=False, header=None, out=sys.stdout, parallel=False): """ annotate a file with a number of tables Parameters ---------- fname : str or file file name or file-handle tables : list list of tables with which to annotate `fname` feature_strand : bool if this is True, then the up/downstream designations are based on the features in `tables` rather than the features in `fname` in_memoory : bool if True, then tables are read into memory. This usually makes the annotation much faster if there are more than 500 features in `fname` and the number of features in the table is less than 100K. header : str header to print out (if True, use existing header) out : file where to print output parallel : bool if True, use multiprocessing library to execute the annotation of each chromosome in parallel. Uses more memory. """ from .annotate import annotate return annotate(self, fname, tables, feature_strand, in_memory, header=header, out=out, parallel=parallel)
[ "def", "annotate", "(", "self", ",", "fname", ",", "tables", ",", "feature_strand", "=", "False", ",", "in_memory", "=", "False", ",", "header", "=", "None", ",", "out", "=", "sys", ".", "stdout", ",", "parallel", "=", "False", ")", ":", "from", ".",...
annotate a file with a number of tables Parameters ---------- fname : str or file file name or file-handle tables : list list of tables with which to annotate `fname` feature_strand : bool if this is True, then the up/downstream designations are based on the features in `tables` rather than the features in `fname` in_memoory : bool if True, then tables are read into memory. This usually makes the annotation much faster if there are more than 500 features in `fname` and the number of features in the table is less than 100K. header : str header to print out (if True, use existing header) out : file where to print output parallel : bool if True, use multiprocessing library to execute the annotation of each chromosome in parallel. Uses more memory.
[ "annotate", "a", "file", "with", "a", "number", "of", "tables" ]
python
train
pbrisk/mathtoolspy
mathtoolspy/solver/minimize_algorithm_1dim_brent.py
https://github.com/pbrisk/mathtoolspy/blob/d0d35b45d20f346ba8a755e53ed0aa182fab43dd/mathtoolspy/solver/minimize_algorithm_1dim_brent.py#L25-L102
def minimize_algorithm_1dim_brent(fct, _a, _b, _c, tolerance=DOUBLE_TOL): ''' Finds the minimum of the given function f. The arguments are the given function f, and given a bracketing triplet of abscissas A, B, C (such that B is between A and C, and f(B) is less than both f(A) and f(C)) and the Tolerance. This routine isolates the minimum to a fractional precision of about tol using Brent's method. The abscissa of of the minimum is returned as xmin, and the minimum value is returned as brent, the returned function value. ''' ''' ZEPS is a small number that protects against trying to achieve fractional accuracy for a minimum that happens to be exactly zero. ''' ZEPS = 1.0e-10 a = _a if _a < _c else _c b = _a if _a > _c else _c assert a < _b and _b < b x = w = v = _b; fv = fw = fx = fct(x) tol1 = tolerance d = e = 0.0 e_temp = fu = u = xm = 0.0 iterations = 0 while (True): xm = 0.5 * (a + b) tol1 = tolerance * abs(x) + ZEPS tol2 = 2.0 * tol1; if abs(x - xm) <= tol2 - 0.5 * (b - a): return (x, fx) if abs(e) > tol1: r = (x - w) * (fx - fv) q = (x - v) * (fx - fw) p = (x - v) * q - (x - w) * r q = 2.0 * (q - r) if q > 0.0: p = -p q = abs(q) e_temp = e e = d if abs(p) >= abs(0.5 * q * e_temp) or p <= q * (a - x) or p >= q * (b - x): e = a - x if x >= xm else b - x d = GOLD * e else: d = p / q u = x + d if u - a < tol2 or b - u < tol2: d = abs_sign(tol1, xm - x) else: e = a - x if x >= xm else b - x d = GOLD * e u = x + d if abs(d) >= tol1 else x + abs_sign(tol1, d) fu = fct(u); if fu <= fx: if u >= x: a = x else: b = x v, w, x = shift(w, x, u) fv, fw, fx = shift(fw, fx, fu) else: if u < x: a = u else: b = u if fu <= fw or float_equal(w, x): v = w w = u fv = fw fw = fu elif float_equal(fu, fv) or float_equal(v, x) or float_equal(v, w): v = u fv = fu iterations = iterations + 1 if iterations > 10000: return (None, None)
[ "def", "minimize_algorithm_1dim_brent", "(", "fct", ",", "_a", ",", "_b", ",", "_c", ",", "tolerance", "=", "DOUBLE_TOL", ")", ":", "''' ZEPS is a small number that protects against trying to achieve fractional accuracy\n for a minimum that happens to be exactly zero. '''", "ZEP...
Finds the minimum of the given function f. The arguments are the given function f, and given a bracketing triplet of abscissas A, B, C (such that B is between A and C, and f(B) is less than both f(A) and f(C)) and the Tolerance. This routine isolates the minimum to a fractional precision of about tol using Brent's method. The abscissa of of the minimum is returned as xmin, and the minimum value is returned as brent, the returned function value.
[ "Finds", "the", "minimum", "of", "the", "given", "function", "f", ".", "The", "arguments", "are", "the", "given", "function", "f", "and", "given", "a", "bracketing", "triplet", "of", "abscissas", "A", "B", "C", "(", "such", "that", "B", "is", "between", ...
python
train
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L964-L984
def do_visualize(self, line): """Visualize an ontology - ie wrapper for export command""" if not self.current: self._help_noontology() return line = line.split() try: # from ..viz.builder import action_visualize from ..ontodocs.builder import action_visualize except: self._print("This command requires the ontodocs package: `pip install ontodocs`") return import webbrowser url = action_visualize(args=self.current['file'], fromshell=True) if url: webbrowser.open(url) return
[ "def", "do_visualize", "(", "self", ",", "line", ")", ":", "if", "not", "self", ".", "current", ":", "self", ".", "_help_noontology", "(", ")", "return", "line", "=", "line", ".", "split", "(", ")", "try", ":", "# from ..viz.builder import action_visualize",...
Visualize an ontology - ie wrapper for export command
[ "Visualize", "an", "ontology", "-", "ie", "wrapper", "for", "export", "command" ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L3829-L3837
def SetView(self, view: int) -> bool: """ Call IUIAutomationMultipleViewPattern::SetCurrentView. Set the view of the control. view: int, the control-specific view identifier. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-getviewname """ return self.pattern.SetCurrentView(view) == S_OK
[ "def", "SetView", "(", "self", ",", "view", ":", "int", ")", "->", "bool", ":", "return", "self", ".", "pattern", ".", "SetCurrentView", "(", "view", ")", "==", "S_OK" ]
Call IUIAutomationMultipleViewPattern::SetCurrentView. Set the view of the control. view: int, the control-specific view identifier. Return bool, True if succeed otherwise False. Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-getviewname
[ "Call", "IUIAutomationMultipleViewPattern", "::", "SetCurrentView", ".", "Set", "the", "view", "of", "the", "control", ".", "view", ":", "int", "the", "control", "-", "specific", "view", "identifier", ".", "Return", "bool", "True", "if", "succeed", "otherwise", ...
python
valid
sdispater/orator
orator/schema/grammars/grammar.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/grammars/grammar.py#L152-L156
def _get_commands_by_name(self, blueprint, name): """ Get all of the commands with a given name. """ return list(filter(lambda value: value.name == name, blueprint.get_commands()))
[ "def", "_get_commands_by_name", "(", "self", ",", "blueprint", ",", "name", ")", ":", "return", "list", "(", "filter", "(", "lambda", "value", ":", "value", ".", "name", "==", "name", ",", "blueprint", ".", "get_commands", "(", ")", ")", ")" ]
Get all of the commands with a given name.
[ "Get", "all", "of", "the", "commands", "with", "a", "given", "name", "." ]
python
train
saltstack/salt
salt/cli/caller.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/caller.py#L104-L109
def print_grains(self): ''' Print out the grains ''' grains = self.minion.opts.get('grains') or salt.loader.grains(self.opts) salt.output.display_output({'local': grains}, 'grains', self.opts)
[ "def", "print_grains", "(", "self", ")", ":", "grains", "=", "self", ".", "minion", ".", "opts", ".", "get", "(", "'grains'", ")", "or", "salt", ".", "loader", ".", "grains", "(", "self", ".", "opts", ")", "salt", ".", "output", ".", "display_output"...
Print out the grains
[ "Print", "out", "the", "grains" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/gff3.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/gff3.py#L272-L284
def _parseAttrs(self, attrsStr): """ Parse the attributes and values """ attributes = dict() for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr): name, vals = self._parseAttrVal(attrStr) if name in attributes: raise GFF3Exception( "duplicated attribute name: {}".format(name), self.fileName, self.lineNumber) attributes[name] = vals return attributes
[ "def", "_parseAttrs", "(", "self", ",", "attrsStr", ")", ":", "attributes", "=", "dict", "(", ")", "for", "attrStr", "in", "self", ".", "SPLIT_ATTR_COL_RE", ".", "split", "(", "attrsStr", ")", ":", "name", ",", "vals", "=", "self", ".", "_parseAttrVal", ...
Parse the attributes and values
[ "Parse", "the", "attributes", "and", "values" ]
python
train
eumis/pyviews
pyviews/rendering/node.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/rendering/node.py#L20-L27
def get_inst_type(xml_node: XmlNode): '''Returns type by xml node''' (module_path, class_name) = (xml_node.namespace, xml_node.name) try: return import_module(module_path).__dict__[class_name] except (KeyError, ImportError, ModuleNotFoundError): message = 'Import "{0}.{1}" is failed.'.format(module_path, class_name) raise RenderingError(message, xml_node.view_info)
[ "def", "get_inst_type", "(", "xml_node", ":", "XmlNode", ")", ":", "(", "module_path", ",", "class_name", ")", "=", "(", "xml_node", ".", "namespace", ",", "xml_node", ".", "name", ")", "try", ":", "return", "import_module", "(", "module_path", ")", ".", ...
Returns type by xml node
[ "Returns", "type", "by", "xml", "node" ]
python
train
tanghaibao/goatools
goatools/gosubdag/plot/gosubdag_plot.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/gosubdag_plot.py#L152-L155
def _plt_pydot(self, fout_img): """Plot using the pydot graphics engine.""" dag = self.get_pydot_graph() self.wr_pydot_dag(fout_img, dag)
[ "def", "_plt_pydot", "(", "self", ",", "fout_img", ")", ":", "dag", "=", "self", ".", "get_pydot_graph", "(", ")", "self", ".", "wr_pydot_dag", "(", "fout_img", ",", "dag", ")" ]
Plot using the pydot graphics engine.
[ "Plot", "using", "the", "pydot", "graphics", "engine", "." ]
python
train
tuxpiper/cloudcast
cloudcast/iscm/phased.py
https://github.com/tuxpiper/cloudcast/blob/06ca62045c483e9c3e7ee960ba70d90ea6a13776/cloudcast/iscm/phased.py#L135-L145
def get_dict_repr(self): """ Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements """ return dict( phase_name = self.phase_name, phase_type = self.phase_type, actions = self.actions )
[ "def", "get_dict_repr", "(", "self", ")", ":", "return", "dict", "(", "phase_name", "=", "self", ".", "phase_name", ",", "phase_type", "=", "self", ".", "phase_type", ",", "actions", "=", "self", ".", "actions", ")" ]
Return a dictionary representation of this phase. This will be used for checksumming, in order to uniquely compare instance images against their requirements
[ "Return", "a", "dictionary", "representation", "of", "this", "phase", ".", "This", "will", "be", "used", "for", "checksumming", "in", "order", "to", "uniquely", "compare", "instance", "images", "against", "their", "requirements" ]
python
train
Rapptz/discord.py
discord/iterators.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/iterators.py#L571-L579
async def _retrieve_guilds_before_strategy(self, retrieve): """Retrieve guilds using before parameter.""" before = self.before.id if self.before else None data = await self.get_guilds(retrieve, before=before) if len(data): if self.limit is not None: self.limit -= retrieve self.before = Object(id=int(data[-1]['id'])) return data
[ "async", "def", "_retrieve_guilds_before_strategy", "(", "self", ",", "retrieve", ")", ":", "before", "=", "self", ".", "before", ".", "id", "if", "self", ".", "before", "else", "None", "data", "=", "await", "self", ".", "get_guilds", "(", "retrieve", ",",...
Retrieve guilds using before parameter.
[ "Retrieve", "guilds", "using", "before", "parameter", "." ]
python
train
astropy/photutils
photutils/utils/interpolation.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/interpolation.py#L289-L370
def interpolate_masked_data(data, mask, error=None, background=None): """ Interpolate over masked pixels in data and optional error or background images. The value of masked pixels are replaced by the mean value of the connected neighboring non-masked pixels. This function is intended for single, isolated masked pixels (e.g. hot/warm pixels). Parameters ---------- data : array_like or `~astropy.units.Quantity` The data array. mask : array_like (bool) A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` must have the same shape as ``data``. background : array_like, or `~astropy.units.Quantity`, optional The pixel-wise background level of the input ``data``. ``background`` must have the same shape as ``data``. Returns ------- data : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``data`` with interpolated masked pixels. error : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``error`` with interpolated masked pixels. `None` if input ``error`` is not input. background : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``background`` with interpolated masked pixels. `None` if input ``background`` is not input. 
""" if data.shape != mask.shape: raise ValueError('data and mask must have the same shape') data_out = np.copy(data) # do not alter input data mask_idx = mask.nonzero() if mask_idx[0].size == 0: raise ValueError('All items in data are masked') for x in zip(*mask_idx): X = np.array([[max(x[i] - 1, 0), min(x[i] + 1, data.shape[i] - 1)] for i in range(len(data.shape))]) goodpix = ~mask[X] if not np.any(goodpix): warnings.warn('The masked pixel at "{}" is completely ' 'surrounded by (connected) masked pixels, ' 'thus unable to interpolate'.format(x,), AstropyUserWarning) continue data_out[x] = np.mean(data[X][goodpix]) if background is not None: if background.shape != data.shape: raise ValueError('background and data must have the same ' 'shape') background_out = np.copy(background) background_out[x] = np.mean(background[X][goodpix]) else: background_out = None if error is not None: if error.shape != data.shape: raise ValueError('error and data must have the same ' 'shape') error_out = np.copy(error) error_out[x] = np.sqrt(np.mean(error[X][goodpix]**2)) else: error_out = None return data_out, error_out, background_out
[ "def", "interpolate_masked_data", "(", "data", ",", "mask", ",", "error", "=", "None", ",", "background", "=", "None", ")", ":", "if", "data", ".", "shape", "!=", "mask", ".", "shape", ":", "raise", "ValueError", "(", "'data and mask must have the same shape'"...
Interpolate over masked pixels in data and optional error or background images. The value of masked pixels are replaced by the mean value of the connected neighboring non-masked pixels. This function is intended for single, isolated masked pixels (e.g. hot/warm pixels). Parameters ---------- data : array_like or `~astropy.units.Quantity` The data array. mask : array_like (bool) A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like or `~astropy.units.Quantity`, optional The pixel-wise Gaussian 1-sigma errors of the input ``data``. ``error`` must have the same shape as ``data``. background : array_like, or `~astropy.units.Quantity`, optional The pixel-wise background level of the input ``data``. ``background`` must have the same shape as ``data``. Returns ------- data : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``data`` with interpolated masked pixels. error : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``error`` with interpolated masked pixels. `None` if input ``error`` is not input. background : `~numpy.ndarray` or `~astropy.units.Quantity` Input ``background`` with interpolated masked pixels. `None` if input ``background`` is not input.
[ "Interpolate", "over", "masked", "pixels", "in", "data", "and", "optional", "error", "or", "background", "images", "." ]
python
train
vingd/encrypted-pickle-python
encryptedpickle/encryptedpickle.py
https://github.com/vingd/encrypted-pickle-python/blob/7656233598e02e65971f69e11849a0f288b2b2a5/encryptedpickle/encryptedpickle.py#L190-L193
def set_signature_passphrases(self, signature_passphrases): '''Set signature passphrases''' self.signature_passphrases = self._update_dict(signature_passphrases, {}, replace_data=True)
[ "def", "set_signature_passphrases", "(", "self", ",", "signature_passphrases", ")", ":", "self", ".", "signature_passphrases", "=", "self", ".", "_update_dict", "(", "signature_passphrases", ",", "{", "}", ",", "replace_data", "=", "True", ")" ]
Set signature passphrases
[ "Set", "signature", "passphrases" ]
python
valid
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxworkflow.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxworkflow.py#L277-L295
def move_stage(self, stage, new_index, edit_version=None, **kwargs): ''' :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID :type stage: int or string :param new_index: The new position in the order of stages that the specified stage should have (where 0 indicates the first stage) :type new_index: int :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional) :type edit_version: int Removes the specified stage from the workflow ''' stage_id = self._get_stage_id(stage) move_stage_input = {"stage": stage_id, "newIndex": new_index} self._add_edit_version_to_request(move_stage_input, edit_version) try: dxpy.api.workflow_move_stage(self._dxid, move_stage_input, **kwargs) finally: self.describe()
[ "def", "move_stage", "(", "self", ",", "stage", ",", "new_index", ",", "edit_version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "stage_id", "=", "self", ".", "_get_stage_id", "(", "stage", ")", "move_stage_input", "=", "{", "\"stage\"", ":", "stag...
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID :type stage: int or string :param new_index: The new position in the order of stages that the specified stage should have (where 0 indicates the first stage) :type new_index: int :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional) :type edit_version: int Removes the specified stage from the workflow
[ ":", "param", "stage", ":", "A", "number", "for", "the", "stage", "index", "(", "for", "the", "nth", "stage", "starting", "from", "0", ")", "or", "a", "string", "of", "the", "stage", "index", "name", "or", "ID", ":", "type", "stage", ":", "int", "o...
python
train
bprinty/gems
gems/datatypes.py
https://github.com/bprinty/gems/blob/3ff76407af0e71621dada744cd964611e998699c/gems/datatypes.py#L428-L435
def items(self): """ Return keys for object, if they are available. """ if self.meta_type == 'list': return self._list elif self.meta_type == 'dict': return self._dict.items()
[ "def", "items", "(", "self", ")", ":", "if", "self", ".", "meta_type", "==", "'list'", ":", "return", "self", ".", "_list", "elif", "self", ".", "meta_type", "==", "'dict'", ":", "return", "self", ".", "_dict", ".", "items", "(", ")" ]
Return keys for object, if they are available.
[ "Return", "keys", "for", "object", "if", "they", "are", "available", "." ]
python
valid
vlukes/dicom2fem
dicom2fem/base.py
https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/base.py#L430-L448
def copy(self, deep=False, name=None): """Make a (deep) copy of self. Parameters: deep : bool Make a deep copy. name : str Name of the copy, with default self.name + '_copy'. """ if deep: other = deepcopy(self) else: other = copy(self) if hasattr(self, 'name'): other.name = get_default(name, self.name + '_copy') return other
[ "def", "copy", "(", "self", ",", "deep", "=", "False", ",", "name", "=", "None", ")", ":", "if", "deep", ":", "other", "=", "deepcopy", "(", "self", ")", "else", ":", "other", "=", "copy", "(", "self", ")", "if", "hasattr", "(", "self", ",", "'...
Make a (deep) copy of self. Parameters: deep : bool Make a deep copy. name : str Name of the copy, with default self.name + '_copy'.
[ "Make", "a", "(", "deep", ")", "copy", "of", "self", "." ]
python
train
minio/minio-py
examples/progress.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/examples/progress.py#L131-L141
def seconds_to_time(seconds): """ Consistent time format to be displayed on the elapsed time in screen. :param seconds: seconds """ minutes, seconds = divmod(int(seconds), 60) hours, m = divmod(minutes, 60) if hours: return _HOURS_OF_ELAPSED % (hours, m, seconds) else: return _MINUTES_OF_ELAPSED % (m, seconds)
[ "def", "seconds_to_time", "(", "seconds", ")", ":", "minutes", ",", "seconds", "=", "divmod", "(", "int", "(", "seconds", ")", ",", "60", ")", "hours", ",", "m", "=", "divmod", "(", "minutes", ",", "60", ")", "if", "hours", ":", "return", "_HOURS_OF_...
Consistent time format to be displayed on the elapsed time in screen. :param seconds: seconds
[ "Consistent", "time", "format", "to", "be", "displayed", "on", "the", "elapsed", "time", "in", "screen", ".", ":", "param", "seconds", ":", "seconds" ]
python
train
raymontag/kppy
kppy/database.py
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L810-L815
def _get_passwordkey(self): """This method just hashes self.password.""" sha = SHA256.new() sha.update(self.password.encode('utf-8')) return sha.digest()
[ "def", "_get_passwordkey", "(", "self", ")", ":", "sha", "=", "SHA256", ".", "new", "(", ")", "sha", ".", "update", "(", "self", ".", "password", ".", "encode", "(", "'utf-8'", ")", ")", "return", "sha", ".", "digest", "(", ")" ]
This method just hashes self.password.
[ "This", "method", "just", "hashes", "self", ".", "password", "." ]
python
train
ska-sa/purr
Purr/MainWindow.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/MainWindow.py#L744-L748
def _addDPFiles(self, *files): """callback to add DPs corresponding to files.""" # quiet flag is always true self.new_entry_dialog.addDataProducts(self.purrer.makeDataProducts( [(file, True) for file in files], unbanish=True, unignore=True))
[ "def", "_addDPFiles", "(", "self", ",", "*", "files", ")", ":", "# quiet flag is always true", "self", ".", "new_entry_dialog", ".", "addDataProducts", "(", "self", ".", "purrer", ".", "makeDataProducts", "(", "[", "(", "file", ",", "True", ")", "for", "file...
callback to add DPs corresponding to files.
[ "callback", "to", "add", "DPs", "corresponding", "to", "files", "." ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/consensus/notifier.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/consensus/notifier.py#L169-L175
def notify_engine_activated(self, chain_head): """The consensus engine has been activated.""" chain_head_bytes = chain_head.SerializeToString() self._notify( "consensus_notifier_notify_engine_activated", chain_head_bytes, len(chain_head_bytes))
[ "def", "notify_engine_activated", "(", "self", ",", "chain_head", ")", ":", "chain_head_bytes", "=", "chain_head", ".", "SerializeToString", "(", ")", "self", ".", "_notify", "(", "\"consensus_notifier_notify_engine_activated\"", ",", "chain_head_bytes", ",", "len", "...
The consensus engine has been activated.
[ "The", "consensus", "engine", "has", "been", "activated", "." ]
python
train
odlgroup/odl
odl/solvers/functional/default_functionals.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/default_functionals.py#L2022-L2103
def proximal(self): """Return the proximal operator. Raises ------ NotImplementedError if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or infinity """ if self.outernorm.exponent != 1: raise NotImplementedError('`proximal` only implemented for ' '`outer_exp==1`') if self.pwisenorm.exponent not in [1, 2, np.inf]: raise NotImplementedError('`proximal` only implemented for ' '`singular_vector_exp` in [1, 2, inf]') def nddot(a, b): """Compute pointwise matrix product in the last indices.""" return np.einsum('...ij,...jk->...ik', a, b) func = self # Add epsilon to fix rounding errors, i.e. make sure that when we # project on the unit ball, we actually end up slightly inside the unit # ball. Without, we may end up slightly outside. dtype = getattr(self.domain, 'dtype', float) eps = np.finfo(dtype).resolution * 10 class NuclearNormProximal(Operator): """Proximal operator of `NuclearNorm`.""" def __init__(self, sigma): self.sigma = float(sigma) super(NuclearNormProximal, self).__init__( func.domain, func.domain, linear=False) def _call(self, x): """Return ``self(x)``.""" arr = func._asarray(x) # Compute SVD U, s, Vt = np.linalg.svd(arr, full_matrices=False) # transpose pointwise V = Vt.swapaxes(-1, -2) # Take pseudoinverse of s sinv = s.copy() sinv[sinv != 0] = 1 / sinv[sinv != 0] # Take pointwise proximal operator of s w.r.t. 
the norm # on the singular vectors if func.pwisenorm.exponent == 1: abss = np.abs(s) - (self.sigma - eps) sprox = np.sign(s) * np.maximum(abss, 0) elif func.pwisenorm.exponent == 2: s_reordered = moveaxis(s, -1, 0) snorm = func.pwisenorm(s_reordered).asarray() snorm = np.maximum(self.sigma, snorm, out=snorm) sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s elif func.pwisenorm.exponent == np.inf: snorm = np.sum(np.abs(s), axis=-1) snorm = np.maximum(self.sigma, snorm, out=snorm) sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s else: raise RuntimeError # Compute s matrix sproxsinv = (sprox * sinv)[..., :, None] # Compute the final result result = nddot(nddot(arr, V), sproxsinv * Vt) # Cast to vector and return. Note array and vector have # different shapes. return func._asvector(result) def __repr__(self): """Return ``repr(self)``.""" return '{!r}.proximal({})'.format(func, self.sigma) return NuclearNormProximal
[ "def", "proximal", "(", "self", ")", ":", "if", "self", ".", "outernorm", ".", "exponent", "!=", "1", ":", "raise", "NotImplementedError", "(", "'`proximal` only implemented for '", "'`outer_exp==1`'", ")", "if", "self", ".", "pwisenorm", ".", "exponent", "not",...
Return the proximal operator. Raises ------ NotImplementedError if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or infinity
[ "Return", "the", "proximal", "operator", "." ]
python
train
Microsoft/nni
tools/nni_trial_tool/rest_utils.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_trial_tool/rest_utils.py#L34-L44
def rest_post(url, data, timeout, rethrow_exception=False): '''Call rest post method''' try: response = requests.post(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\ data=data, timeout=timeout) return response except Exception as e: if rethrow_exception is True: raise print('Get exception {0} when sending http post to url {1}'.format(str(e), url)) return None
[ "def", "rest_post", "(", "url", ",", "data", ",", "timeout", ",", "rethrow_exception", "=", "False", ")", ":", "try", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "headers", "=", "{", "'Accept'", ":", "'application/json'", ",", "'Conte...
Call rest post method
[ "Call", "rest", "post", "method" ]
python
train
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L724-L754
def searchForThreads(self, name, limit=10): """ Find and get a thread by its name :param name: Name of the thread :param limit: The max. amount of groups to fetch :return: :class:`models.User`, :class:`models.Group` and :class:`models.Page` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed """ params = {"search": name, "limit": limit} j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_THREAD, params=params)) rtn = [] for node in j[name]["threads"]["nodes"]: if node["__typename"] == "User": rtn.append(User._from_graphql(node)) elif node["__typename"] == "MessageThread": # MessageThread => Group thread rtn.append(Group._from_graphql(node)) elif node["__typename"] == "Page": rtn.append(Page._from_graphql(node)) elif node["__typename"] == "Group": # We don't handle Facebook "Groups" pass else: log.warning( "Unknown type {} in {}".format(repr(node["__typename"]), node) ) return rtn
[ "def", "searchForThreads", "(", "self", ",", "name", ",", "limit", "=", "10", ")", ":", "params", "=", "{", "\"search\"", ":", "name", ",", "\"limit\"", ":", "limit", "}", "j", "=", "self", ".", "graphql_request", "(", "GraphQL", "(", "query", "=", "...
Find and get a thread by its name :param name: Name of the thread :param limit: The max. amount of groups to fetch :return: :class:`models.User`, :class:`models.Group` and :class:`models.Page` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed
[ "Find", "and", "get", "a", "thread", "by", "its", "name" ]
python
train
peopledoc/django-agnocomplete
agnocomplete/core.py
https://github.com/peopledoc/django-agnocomplete/blob/9bf21db2f2036ba5059b843acd32902a09192053/agnocomplete/core.py#L458-L466
def build_filtered_queryset(self, query, **kwargs): """ Build and return the fully-filtered queryset """ # Take the basic queryset qs = self.get_queryset() # filter it via the query conditions qs = qs.filter(self.get_queryset_filters(query)) return self.build_extra_filtered_queryset(qs, **kwargs)
[ "def", "build_filtered_queryset", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "# Take the basic queryset", "qs", "=", "self", ".", "get_queryset", "(", ")", "# filter it via the query conditions", "qs", "=", "qs", ".", "filter", "(", "self", ...
Build and return the fully-filtered queryset
[ "Build", "and", "return", "the", "fully", "-", "filtered", "queryset" ]
python
train
iceb0y/aiowrap
aiowrap/wrap.py
https://github.com/iceb0y/aiowrap/blob/7a155e68c0faee0eea7a3f43c1e96a36ccc2fd84/aiowrap/wrap.py#L5-L18
def wrap_async(func): """Wraps an asynchronous function into a synchronous function.""" @functools.wraps(func) def wrapped(*args, **kwargs): fut = asyncio.ensure_future(func(*args, **kwargs)) cur = greenlet.getcurrent() def callback(fut): try: cur.switch(fut.result()) except BaseException as e: cur.throw(e) fut.add_done_callback(callback) return cur.parent.switch() return wrapped
[ "def", "wrap_async", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "fut", "=", "asyncio", ".", "ensure_future", "(", "func", "(", "*", "args", ",", ...
Wraps an asynchronous function into a synchronous function.
[ "Wraps", "an", "asynchronous", "function", "into", "a", "synchronous", "function", "." ]
python
train
harlowja/fasteners
fasteners/_utils.py
https://github.com/harlowja/fasteners/blob/8f3bbab0204a50037448a8fad7a6bf12eb1a2695/fasteners/_utils.py#L47-L57
def canonicalize_path(path): """Canonicalizes a potential path. Returns a binary string encoded into filesystem encoding. """ if isinstance(path, six.binary_type): return path if isinstance(path, six.text_type): return _fsencode(path) else: return canonicalize_path(str(path))
[ "def", "canonicalize_path", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "six", ".", "binary_type", ")", ":", "return", "path", "if", "isinstance", "(", "path", ",", "six", ".", "text_type", ")", ":", "return", "_fsencode", "(", "path", ...
Canonicalizes a potential path. Returns a binary string encoded into filesystem encoding.
[ "Canonicalizes", "a", "potential", "path", "." ]
python
train
saltstack/salt
salt/auth/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/auth/__init__.py#L288-L308
def authenticate_eauth(self, load): ''' Authenticate a user by the external auth module specified in load. Return True on success or False on failure. ''' if 'eauth' not in load: log.warning('Authentication failure of type "eauth" occurred.') return False if load['eauth'] not in self.opts['external_auth']: log.warning('The eauth system "%s" is not enabled', load['eauth']) log.warning('Authentication failure of type "eauth" occurred.') return False # Perform the actual authentication. If we fail here, do not # continue. if not self.time_auth(load): log.warning('Authentication failure of type "eauth" occurred.') return False return True
[ "def", "authenticate_eauth", "(", "self", ",", "load", ")", ":", "if", "'eauth'", "not", "in", "load", ":", "log", ".", "warning", "(", "'Authentication failure of type \"eauth\" occurred.'", ")", "return", "False", "if", "load", "[", "'eauth'", "]", "not", "i...
Authenticate a user by the external auth module specified in load. Return True on success or False on failure.
[ "Authenticate", "a", "user", "by", "the", "external", "auth", "module", "specified", "in", "load", ".", "Return", "True", "on", "success", "or", "False", "on", "failure", "." ]
python
train
fermiPy/fermipy
fermipy/diffuse/diffuse_src_manager.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/diffuse_src_manager.py#L456-L485
def make_diffuse_comp_info_dict(**kwargs): """Build and return the information about the diffuse components """ library_yamlfile = kwargs.pop('library', 'models/library.yaml') components = kwargs.pop('components', None) if components is None: comp_yamlfile = kwargs.pop('comp', 'config/binning.yaml') components = Component.build_from_yamlfile(comp_yamlfile) gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs)) dmm = kwargs.get('DiffuseModelManager', DiffuseModelManager(**kwargs)) if library_yamlfile is None or library_yamlfile == 'None': diffuse_comps = {} else: diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml( library_yamlfile) diffuse_comp_info_dict = dmm.make_diffuse_comp_info_dict( diffuse_comps, components) for diffuse_value in diffuse_comps.values(): if diffuse_value is None: continue if diffuse_value['model_type'] != 'galprop_rings': continue versions = diffuse_value['versions'] for version in versions: galprop_dict = gmm.make_diffuse_comp_info_dict(version) diffuse_comp_info_dict.update(galprop_dict) return dict(comp_info_dict=diffuse_comp_info_dict, GalpropMapManager=gmm, DiffuseModelManager=dmm)
[ "def", "make_diffuse_comp_info_dict", "(", "*", "*", "kwargs", ")", ":", "library_yamlfile", "=", "kwargs", ".", "pop", "(", "'library'", ",", "'models/library.yaml'", ")", "components", "=", "kwargs", ".", "pop", "(", "'components'", ",", "None", ")", "if", ...
Build and return the information about the diffuse components
[ "Build", "and", "return", "the", "information", "about", "the", "diffuse", "components" ]
python
train
PiotrDabkowski/pyjsparser
pyjsparser/parser.py
https://github.com/PiotrDabkowski/pyjsparser/blob/5465d037b30e334cb0997f2315ec1e451b8ad4c1/pyjsparser/parser.py#L518-L608
def _interpret_regexp(self, string, flags): '''Perform sctring escape - for regexp literals''' self.index = 0 self.length = len(string) self.source = string self.lineNumber = 0 self.lineStart = 0 octal = False st = '' inside_square = 0 while (self.index < self.length): template = '[%s]' if not inside_square else '%s' ch = self.source[self.index] self.index += 1 if ch == '\\': ch = self.source[self.index] self.index += 1 if (not isLineTerminator(ch)): if ch == 'u': digs = self.source[self.index:self.index + 4] if len(digs) == 4 and all(isHexDigit(d) for d in digs): st += template % unichr(int(digs, 16)) self.index += 4 else: st += 'u' elif ch == 'x': digs = self.source[self.index:self.index + 2] if len(digs) == 2 and all(isHexDigit(d) for d in digs): st += template % unichr(int(digs, 16)) self.index += 2 else: st += 'x' # special meaning - single char. elif ch == '0': st += '\\0' elif ch == 'n': st += '\\n' elif ch == 'r': st += '\\r' elif ch == 't': st += '\\t' elif ch == 'f': st += '\\f' elif ch == 'v': st += '\\v' # unescape special single characters like . so that they are interpreted literally elif ch in REGEXP_SPECIAL_SINGLE: st += '\\' + ch # character groups elif ch == 'b': st += '\\b' elif ch == 'B': st += '\\B' elif ch == 'w': st += '\\w' elif ch == 'W': st += '\\W' elif ch == 'd': st += '\\d' elif ch == 'D': st += '\\D' elif ch == 's': st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff' elif ch == 'S': st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff' else: if isDecimalDigit(ch): num = ch while self.index < self.length and isDecimalDigit( self.source[self.index]): num += self.source[self.index] self.index += 1 st += '\\' + num else: st += ch # DONT ESCAPE!!! 
else: self.lineNumber += 1 if (ch == '\r' and self.source[self.index] == '\n'): self.index += 1 self.lineStart = self.index else: if ch == '[': inside_square = True elif ch == ']': inside_square = False st += ch # print string, 'was transformed to', st return st
[ "def", "_interpret_regexp", "(", "self", ",", "string", ",", "flags", ")", ":", "self", ".", "index", "=", "0", "self", ".", "length", "=", "len", "(", "string", ")", "self", ".", "source", "=", "string", "self", ".", "lineNumber", "=", "0", "self", ...
Perform sctring escape - for regexp literals
[ "Perform", "sctring", "escape", "-", "for", "regexp", "literals" ]
python
train
Nic30/hwt
hwt/simulator/hdlSimulator.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/simulator/hdlSimulator.py#L310-L324
def _scheduleApplyValues(self) -> None: """ Apply stashed values to signals """ assert not self._applyValPlaned, self.now self._add_process(self._applyValues(), PRIORITY_APPLY_COMB) self._applyValPlaned = True if self._runSeqProcessesPlaned: # if runSeqProcesses is already scheduled return assert not self._seqProcsToRun and not self._runSeqProcessesPlaned, self.now self._add_process(self._runSeqProcesses(), PRIORITY_APPLY_SEQ) self._runSeqProcessesPlaned = True
[ "def", "_scheduleApplyValues", "(", "self", ")", "->", "None", ":", "assert", "not", "self", ".", "_applyValPlaned", ",", "self", ".", "now", "self", ".", "_add_process", "(", "self", ".", "_applyValues", "(", ")", ",", "PRIORITY_APPLY_COMB", ")", "self", ...
Apply stashed values to signals
[ "Apply", "stashed", "values", "to", "signals" ]
python
test
yunojuno/elasticsearch-django
elasticsearch_django/models.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L308-L342
def as_search_action(self, *, index, action): """ Return an object as represented in a bulk api operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only every carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary. """ if action not in ("index", "update", "delete"): raise ValueError("Action must be 'index', 'update' or 'delete'.") document = { "_index": index, "_type": self.search_doc_type, "_op_type": action, "_id": self.pk, } if action == "index": document["_source"] = self.as_search_document(index=index) elif action == "update": document["doc"] = self.as_search_document(index=index) return document
[ "def", "as_search_action", "(", "self", ",", "*", ",", "index", ",", "action", ")", ":", "if", "action", "not", "in", "(", "\"index\"", ",", "\"update\"", ",", "\"delete\"", ")", ":", "raise", "ValueError", "(", "\"Action must be 'index', 'update' or 'delete'.\"...
Return an object as represented in a bulk api operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only every carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary.
[ "Return", "an", "object", "as", "represented", "in", "a", "bulk", "api", "operation", "." ]
python
train
uw-it-aca/uw-restclients-nws
uw_nws/__init__.py
https://github.com/uw-it-aca/uw-restclients-nws/blob/ec6fd14342ffc883d14bcb53b2fe9bc288696027/uw_nws/__init__.py#L393-L411
def update_person(self, person): """ Update an existing person :param person: is the updated person that the client wants to update """ self._validate_regid(person.person_id) self._validate_subscriber_id(person.surrogate_id) for attr in MANAGED_ATTRIBUTES: person.attributes.pop(attr, None) url = "/notification/v1/person/{}".format(person.person_id) response = NWS_DAO().putURL( url, self._write_headers(), self._json_body(person.json_data())) if response.status != 204: raise DataFailureException(url, response.status, response.data) return response.status
[ "def", "update_person", "(", "self", ",", "person", ")", ":", "self", ".", "_validate_regid", "(", "person", ".", "person_id", ")", "self", ".", "_validate_subscriber_id", "(", "person", ".", "surrogate_id", ")", "for", "attr", "in", "MANAGED_ATTRIBUTES", ":",...
Update an existing person :param person: is the updated person that the client wants to update
[ "Update", "an", "existing", "person", ":", "param", "person", ":", "is", "the", "updated", "person", "that", "the", "client", "wants", "to", "update" ]
python
train
zeth/inputs
inputs.py
https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L2268-L2278
def _get_key_value(self, event, event_type): """Get the key value.""" if event_type == 10: value = 1 elif event_type == 11: value = 0 elif event_type == 12: value = self._get_flag_value(event) else: value = -1 return value
[ "def", "_get_key_value", "(", "self", ",", "event", ",", "event_type", ")", ":", "if", "event_type", "==", "10", ":", "value", "=", "1", "elif", "event_type", "==", "11", ":", "value", "=", "0", "elif", "event_type", "==", "12", ":", "value", "=", "s...
Get the key value.
[ "Get", "the", "key", "value", "." ]
python
train
ic-labs/django-icekit
glamkit_collections/etl/base.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/glamkit_collections/etl/base.py#L17-L29
def items_to_extract(self, offset=0, length=None): """ Return an iterable of specific items to extract. As a side-effect, set self.items_to_extract_length. :param offset: where to start extracting :param length: how many to extract :return: An iterable of the specific """ endoffset = length and offset + length qs = self.origin_data()[offset:endoffset] self.items_to_extract_length = qs.count() return qs
[ "def", "items_to_extract", "(", "self", ",", "offset", "=", "0", ",", "length", "=", "None", ")", ":", "endoffset", "=", "length", "and", "offset", "+", "length", "qs", "=", "self", ".", "origin_data", "(", ")", "[", "offset", ":", "endoffset", "]", ...
Return an iterable of specific items to extract. As a side-effect, set self.items_to_extract_length. :param offset: where to start extracting :param length: how many to extract :return: An iterable of the specific
[ "Return", "an", "iterable", "of", "specific", "items", "to", "extract", ".", "As", "a", "side", "-", "effect", "set", "self", ".", "items_to_extract_length", "." ]
python
train
pteichman/cobe
cobe/brain.py
https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L197-L303
def reply(self, text, loop_ms=500, max_len=None): """Reply to a string of text. If the input is not already Unicode, it will be decoded as utf-8.""" if type(text) != types.UnicodeType: # Assume that non-Unicode text is encoded as utf-8, which # should be somewhat safe in the modern world. text = text.decode("utf-8", "ignore") tokens = self.tokenizer.split(text) input_ids = map(self.graph.get_token_by_text, tokens) # filter out unknown words and non-words from the potential pivots pivot_set = self._filter_pivots(input_ids) # Conflate the known ids with the stems of their words if self.stemmer is not None: self._conflate_stems(pivot_set, tokens) # If we didn't recognize any word tokens in the input, pick # something random from the database and babble. if len(pivot_set) == 0: pivot_set = self._babble() score_cache = {} best_score = -1.0 best_reply = None # Loop for approximately loop_ms milliseconds. This can either # take more (if the first reply takes a long time to generate) # or less (if the _generate_replies search ends early) time, # but it should stay roughly accurate. start = time.time() end = start + loop_ms * 0.001 count = 0 all_replies = [] _start = time.time() for edges, pivot_node in self._generate_replies(pivot_set): reply = Reply(self.graph, tokens, input_ids, pivot_node, edges) if max_len and self._too_long(max_len, reply): continue key = reply.edge_ids if key not in score_cache: with trace_us("Brain.evaluate_reply_us"): score = self.scorer.score(reply) score_cache[key] = score else: # skip scoring, we've already seen this reply score = -1 if score > best_score: best_reply = reply best_score = score # dump all replies to the console if debugging is enabled if log.isEnabledFor(logging.DEBUG): all_replies.append((score, reply)) count += 1 if time.time() > end: break if best_reply is None: # we couldn't find any pivot words in _babble(), so we're # working with an essentially empty brain. 
Use the classic # MegaHAL reply: return "I don't know enough to answer you yet!" _time = time.time() - _start self.scorer.end(best_reply) if log.isEnabledFor(logging.DEBUG): replies = [(score, reply.to_text()) for score, reply in all_replies] replies.sort() for score, text in replies: log.debug("%f %s", score, text) trace("Brain.reply_input_token_count", len(tokens)) trace("Brain.known_word_token_count", len(pivot_set)) trace("Brain.reply_us", _time) trace("Brain.reply_count", count, _time) trace("Brain.best_reply_score", int(best_score * 1000)) trace("Brain.best_reply_length", len(best_reply.edge_ids)) log.debug("made %d replies (%d unique) in %f seconds" % (count, len(score_cache), _time)) if len(text) > 60: msg = text[0:60] + "..." else: msg = text log.info("[%s] %d %f", msg, count, best_score) # look up the words for these tokens with trace_us("Brain.reply_words_lookup_us"): text = best_reply.to_text() return text
[ "def", "reply", "(", "self", ",", "text", ",", "loop_ms", "=", "500", ",", "max_len", "=", "None", ")", ":", "if", "type", "(", "text", ")", "!=", "types", ".", "UnicodeType", ":", "# Assume that non-Unicode text is encoded as utf-8, which", "# should be somewha...
Reply to a string of text. If the input is not already Unicode, it will be decoded as utf-8.
[ "Reply", "to", "a", "string", "of", "text", ".", "If", "the", "input", "is", "not", "already", "Unicode", "it", "will", "be", "decoded", "as", "utf", "-", "8", "." ]
python
train
refenv/cijoe
modules/cij/runner.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/modules/cij/runner.py#L322-L337
def tsuite_enter(trun, tsuite): """Triggers when entering the given testsuite""" if trun["conf"]["VERBOSE"]: cij.emph("rnr:tsuite:enter { name: %r }" % tsuite["name"]) rcode = 0 for hook in tsuite["hooks"]["enter"]: # ENTER-hooks rcode = script_run(trun, hook) if rcode: break if trun["conf"]["VERBOSE"]: cij.emph("rnr:tsuite:enter { rcode: %r } " % rcode, rcode) return rcode
[ "def", "tsuite_enter", "(", "trun", ",", "tsuite", ")", ":", "if", "trun", "[", "\"conf\"", "]", "[", "\"VERBOSE\"", "]", ":", "cij", ".", "emph", "(", "\"rnr:tsuite:enter { name: %r }\"", "%", "tsuite", "[", "\"name\"", "]", ")", "rcode", "=", "0", "for...
Triggers when entering the given testsuite
[ "Triggers", "when", "entering", "the", "given", "testsuite" ]
python
valid
iotile/coretools
transport_plugins/jlink/iotile_transport_jlink/jlink.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/jlink/iotile_transport_jlink/jlink.py#L179-L207
def probe_async(self, callback): """Send advertisements for all connected devices. Args: callback (callable): A callback for when the probe operation has completed. callback should have signature callback(adapter_id, success, failure_reason) where: success: bool failure_reason: None if success is True, otherwise a reason for why we could not probe """ def _on_finished(_name, control_info, exception): if exception is not None: callback(self.id, False, str(exception)) return self._control_info = control_info try: info = { 'connection_string': "direct", 'uuid': control_info.uuid, 'signal_strength': 100 } self._trigger_callback('on_scan', self.id, info, self.ExpirationTime) finally: callback(self.id, True, None) self._control_thread.command(JLinkControlThread.FIND_CONTROL, _on_finished, self._device_info.ram_start, self._device_info.ram_size)
[ "def", "probe_async", "(", "self", ",", "callback", ")", ":", "def", "_on_finished", "(", "_name", ",", "control_info", ",", "exception", ")", ":", "if", "exception", "is", "not", "None", ":", "callback", "(", "self", ".", "id", ",", "False", ",", "str...
Send advertisements for all connected devices. Args: callback (callable): A callback for when the probe operation has completed. callback should have signature callback(adapter_id, success, failure_reason) where: success: bool failure_reason: None if success is True, otherwise a reason for why we could not probe
[ "Send", "advertisements", "for", "all", "connected", "devices", "." ]
python
train
untwisted/untwisted
untwisted/expect.py
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/expect.py#L84-L90
def destroy(self): """ Unregister up from untwisted reactor. It is needed to call self.terminate() first to kill the process. """ core.gear.pool.remove(self) self.base.clear()
[ "def", "destroy", "(", "self", ")", ":", "core", ".", "gear", ".", "pool", ".", "remove", "(", "self", ")", "self", ".", "base", ".", "clear", "(", ")" ]
Unregister up from untwisted reactor. It is needed to call self.terminate() first to kill the process.
[ "Unregister", "up", "from", "untwisted", "reactor", ".", "It", "is", "needed", "to", "call", "self", ".", "terminate", "()", "first", "to", "kill", "the", "process", "." ]
python
train
Unity-Technologies/ml-agents
ml-agents/mlagents/trainers/curriculum.py
https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/curriculum.py#L96-L112
def get_config(self, lesson=None): """ Returns reset parameters which correspond to the lesson. :param lesson: The lesson you want to get the config of. If None, the current lesson is returned. :return: The configuration of the reset parameters. """ if not self.data: return {} if lesson is None: lesson = self.lesson_num lesson = max(0, min(lesson, self.max_lesson_num)) config = {} parameters = self.data['parameters'] for key in parameters: config[key] = parameters[key][lesson] return config
[ "def", "get_config", "(", "self", ",", "lesson", "=", "None", ")", ":", "if", "not", "self", ".", "data", ":", "return", "{", "}", "if", "lesson", "is", "None", ":", "lesson", "=", "self", ".", "lesson_num", "lesson", "=", "max", "(", "0", ",", "...
Returns reset parameters which correspond to the lesson. :param lesson: The lesson you want to get the config of. If None, the current lesson is returned. :return: The configuration of the reset parameters.
[ "Returns", "reset", "parameters", "which", "correspond", "to", "the", "lesson", ".", ":", "param", "lesson", ":", "The", "lesson", "you", "want", "to", "get", "the", "config", "of", ".", "If", "None", "the", "current", "lesson", "is", "returned", ".", ":...
python
train
widdowquinn/pyani
pyani/anib.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L134-L147
def get_fraglength_dict(fastafiles): """Returns dictionary of sequence fragment lengths, keyed by query name. - fastafiles - list of FASTA input whole sequence files Loops over input files and, for each, produces a dictionary with fragment lengths, keyed by sequence ID. These are returned as a dictionary with the keys being query IDs derived from filenames. """ fraglength_dict = {} for filename in fastafiles: qname = os.path.split(filename)[-1].split("-fragments")[0] fraglength_dict[qname] = get_fragment_lengths(filename) return fraglength_dict
[ "def", "get_fraglength_dict", "(", "fastafiles", ")", ":", "fraglength_dict", "=", "{", "}", "for", "filename", "in", "fastafiles", ":", "qname", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "[", "-", "1", "]", ".", "split", "(", "\"-fra...
Returns dictionary of sequence fragment lengths, keyed by query name. - fastafiles - list of FASTA input whole sequence files Loops over input files and, for each, produces a dictionary with fragment lengths, keyed by sequence ID. These are returned as a dictionary with the keys being query IDs derived from filenames.
[ "Returns", "dictionary", "of", "sequence", "fragment", "lengths", "keyed", "by", "query", "name", "." ]
python
train
datadesk/python-documentcloud
documentcloud/__init__.py
https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L1154-L1163
def get_document(self, id): """ Retrieves a particular document from this project. """ obj_list = self.document_list matches = [i for i in obj_list if str(i.id) == str(id)] if not matches: raise DoesNotExistError("The resource you've requested does not \ exist or is unavailable without the proper credentials.") return matches[0]
[ "def", "get_document", "(", "self", ",", "id", ")", ":", "obj_list", "=", "self", ".", "document_list", "matches", "=", "[", "i", "for", "i", "in", "obj_list", "if", "str", "(", "i", ".", "id", ")", "==", "str", "(", "id", ")", "]", "if", "not", ...
Retrieves a particular document from this project.
[ "Retrieves", "a", "particular", "document", "from", "this", "project", "." ]
python
train
QInfer/python-qinfer
src/qinfer/tomography/plotting_tools.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/tomography/plotting_tools.py#L156-L202
def plot_rebit_prior(prior, rebit_axes=REBIT_AXES, n_samples=2000, true_state=None, true_size=250, force_mean=None, legend=True, mean_color_index=2 ): """ Plots rebit states drawn from a given prior. :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over rebit states to plot. :param list rebit_axes: List containing indices for the :math:`x` and :math:`z` axes. :param int n_samples: Number of samples to draw from the prior. :param np.ndarray true_state: State to be plotted as a "true" state for comparison. """ pallette = plt.rcParams['axes.color_cycle'] plot_rebit_modelparams(prior.sample(n_samples), c=pallette[0], label='Prior', rebit_axes=rebit_axes ) if true_state is not None: plot_rebit_modelparams(true_state, c=pallette[1], label='True', marker='*', s=true_size, rebit_axes=rebit_axes ) if hasattr(prior, '_mean') or force_mean is not None: mean = force_mean if force_mean is not None else prior._mean plot_rebit_modelparams( prior._basis.state_to_modelparams(mean)[None, :], edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3, label='Mean', rebit_axes=rebit_axes ) plot_decorate_rebits(prior.basis, rebit_axes=rebit_axes ) if legend: plt.legend(loc='lower left', ncol=3, scatterpoints=1)
[ "def", "plot_rebit_prior", "(", "prior", ",", "rebit_axes", "=", "REBIT_AXES", ",", "n_samples", "=", "2000", ",", "true_state", "=", "None", ",", "true_size", "=", "250", ",", "force_mean", "=", "None", ",", "legend", "=", "True", ",", "mean_color_index", ...
Plots rebit states drawn from a given prior. :param qinfer.tomography.DensityOperatorDistribution prior: Distribution over rebit states to plot. :param list rebit_axes: List containing indices for the :math:`x` and :math:`z` axes. :param int n_samples: Number of samples to draw from the prior. :param np.ndarray true_state: State to be plotted as a "true" state for comparison.
[ "Plots", "rebit", "states", "drawn", "from", "a", "given", "prior", "." ]
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L765-L846
def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas objects from multiple tables Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator Exceptions ---------- raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS """ # default to single select where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, str): return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs) if not isinstance(keys, (list, tuple)): raise TypeError("keys must be a list/tuple") if not len(keys): raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] # collect the tables tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) # validate rows nrows = None for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: raise KeyError("Invalid table [{key}]".format(key=k)) if not t.is_table: raise TypeError( "object [{obj}] is not a table, and cannot be used in all " "select as multiple".format(obj=t.pathname) ) if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError( "all tables must have exactly the same nrows!") # axis is the concentation axes axis = list({t.non_index_axes[0][0] for t in tbls})[0] def func(_start, _stop, _where): # retrieve the objs, _where is 
always passed as a set of # coordinates here objs = [t.read(where=_where, columns=columns, start=_start, stop=_stop, **kwargs) for t in tbls] # concat and return return concat(objs, axis=axis, verify_integrity=False)._consolidate() # create the iterator it = TableIterator(self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result(coordinates=True)
[ "def", "select_as_multiple", "(", "self", ",", "keys", ",", "where", "=", "None", ",", "selector", "=", "None", ",", "columns", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "iterator", "=", "False", ",", "chunksize", "=", "N...
Retrieve pandas objects from multiple tables Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator Exceptions ---------- raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS
[ "Retrieve", "pandas", "objects", "from", "multiple", "tables" ]
python
train
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/spep.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/spep.py#L20-L47
def function(self, x, y, theta_E, gamma, e1, e2, center_x=0, center_y=0): """ :param x: set of x-coordinates :type x: array of size (n) :param theta_E: Einstein radius of lense :type theta_E: float. :param gamma: power law slope of mass profifle :type gamma: <2 float :param q: Axis ratio :type q: 0<q<1 :param phi_G: position angel of SES :type q: 0<phi_G<pi/2 :returns: function :raises: AttributeError, KeyError """ phi_G, q = param_util.ellipticity2phi_q(e1, e2) gamma, q = self._param_bounds(gamma, q) theta_E *= q x_shift = x - center_x y_shift = y - center_y E = theta_E / (((3 - gamma) / 2.) ** (1. / (1 - gamma)) * np.sqrt(q)) #E = phi_E eta = -gamma+3 xt1 = np.cos(phi_G)*x_shift+np.sin(phi_G)*y_shift xt2 = -np.sin(phi_G)*x_shift+np.cos(phi_G)*y_shift p2 = xt1**2+xt2**2/q**2 s2 = 0. # softening return 2 * E**2/eta**2 * ((p2 + s2)/E**2)**(eta/2)
[ "def", "function", "(", "self", ",", "x", ",", "y", ",", "theta_E", ",", "gamma", ",", "e1", ",", "e2", ",", "center_x", "=", "0", ",", "center_y", "=", "0", ")", ":", "phi_G", ",", "q", "=", "param_util", ".", "ellipticity2phi_q", "(", "e1", ","...
:param x: set of x-coordinates :type x: array of size (n) :param theta_E: Einstein radius of lense :type theta_E: float. :param gamma: power law slope of mass profifle :type gamma: <2 float :param q: Axis ratio :type q: 0<q<1 :param phi_G: position angel of SES :type q: 0<phi_G<pi/2 :returns: function :raises: AttributeError, KeyError
[ ":", "param", "x", ":", "set", "of", "x", "-", "coordinates", ":", "type", "x", ":", "array", "of", "size", "(", "n", ")", ":", "param", "theta_E", ":", "Einstein", "radius", "of", "lense", ":", "type", "theta_E", ":", "float", ".", ":", "param", ...
python
train
kgiusti/pyngus
pyngus/connection.py
https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/pyngus/connection.py#L461-L533
def process(self, now): """Perform connection state processing.""" if self._pn_connection is None: LOG.error("Connection.process() called on destroyed connection!") return 0 # do nothing until the connection has been opened if self._pn_connection.state & proton.Endpoint.LOCAL_UNINIT: return 0 if self._pn_sasl and not self._sasl_done: # wait until SASL has authenticated if (_PROTON_VERSION < (0, 10)): if self._pn_sasl.state not in (proton.SASL.STATE_PASS, proton.SASL.STATE_FAIL): LOG.debug("SASL in progress. State=%s", str(self._pn_sasl.state)) if self._handler: with self._callback_lock: self._handler.sasl_step(self, self._pn_sasl) return self._next_deadline self._sasl_done = True if self._handler: with self._callback_lock: self._handler.sasl_done(self, self._pn_sasl, self._pn_sasl.outcome) else: if self._pn_sasl.outcome is not None: self._sasl_done = True if self._handler: with self._callback_lock: self._handler.sasl_done(self, self._pn_sasl, self._pn_sasl.outcome) # process timer events: timer_deadline = self._expire_timers(now) transport_deadline = self._pn_transport.tick(now) if timer_deadline and transport_deadline: self._next_deadline = min(timer_deadline, transport_deadline) else: self._next_deadline = timer_deadline or transport_deadline # process events from proton: pn_event = self._pn_collector.peek() while pn_event: # LOG.debug("pn_event: %s received", pn_event.type) if _Link._handle_proton_event(pn_event, self): pass elif self._handle_proton_event(pn_event): pass elif _SessionProxy._handle_proton_event(pn_event, self): pass self._pn_collector.pop() pn_event = self._pn_collector.peek() # check for connection failure after processing all pending # engine events: if self._error: if self._handler: # nag application until connection is destroyed self._next_deadline = now with self._callback_lock: self._handler.connection_failed(self, self._error) elif (self._endpoint_state == self._CLOSED and self._read_done and self._write_done): # invoke closed callback 
after endpoint has fully closed and # all pending I/O has completed: if self._handler: with self._callback_lock: self._handler.connection_closed(self) return self._next_deadline
[ "def", "process", "(", "self", ",", "now", ")", ":", "if", "self", ".", "_pn_connection", "is", "None", ":", "LOG", ".", "error", "(", "\"Connection.process() called on destroyed connection!\"", ")", "return", "0", "# do nothing until the connection has been opened", ...
Perform connection state processing.
[ "Perform", "connection", "state", "processing", "." ]
python
test
steffann/pylisp
pylisp/packet/ip/udp.py
https://github.com/steffann/pylisp/blob/907340f0c7ef2c4d4fe0c8e0a48df5be0d969407/pylisp/packet/ip/udp.py#L26-L39
def sanitize(self): ''' Check if the current settings conform to the RFC and fix where possible ''' # Check ports if not isinstance(self.source_port, numbers.Integral) \ or self.source_port < 0 \ or self.source_port >= 2 ** 16: raise ValueError('Invalid source port') if not isinstance(self.destination_port, numbers.Integral) \ or self.destination_port < 0 \ or self.destination_port >= 2 ** 16: raise ValueError('Invalid destination port')
[ "def", "sanitize", "(", "self", ")", ":", "# Check ports", "if", "not", "isinstance", "(", "self", ".", "source_port", ",", "numbers", ".", "Integral", ")", "or", "self", ".", "source_port", "<", "0", "or", "self", ".", "source_port", ">=", "2", "**", ...
Check if the current settings conform to the RFC and fix where possible
[ "Check", "if", "the", "current", "settings", "conform", "to", "the", "RFC", "and", "fix", "where", "possible" ]
python
train
rsalmei/clearly
clearly/server.py
https://github.com/rsalmei/clearly/blob/fd784843d13f0fed28fc192565bec3668f1363f4/clearly/server.py#L148-L154
def find_task(self, request, context): """Finds one specific task.""" _log_request(request, context) task = self.listener.memory.tasks.get(request.task_uuid) if not task: return clearly_pb2.TaskMessage() return ClearlyServer._event_to_pb(task)[1]
[ "def", "find_task", "(", "self", ",", "request", ",", "context", ")", ":", "_log_request", "(", "request", ",", "context", ")", "task", "=", "self", ".", "listener", ".", "memory", ".", "tasks", ".", "get", "(", "request", ".", "task_uuid", ")", "if", ...
Finds one specific task.
[ "Finds", "one", "specific", "task", "." ]
python
train
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1432-L1446
def simxPackInts(intList): ''' Please have a look at the function description/documentation in the V-REP user manual ''' if sys.version_info[0] == 3: s=bytes() for i in range(len(intList)): s=s+struct.pack('<i',intList[i]) s=bytearray(s) else: s='' for i in range(len(intList)): s+=struct.pack('<i',intList[i]) return s
[ "def", "simxPackInts", "(", "intList", ")", ":", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", ":", "s", "=", "bytes", "(", ")", "for", "i", "in", "range", "(", "len", "(", "intList", ")", ")", ":", "s", "=", "s", "+", "struct", ...
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
QInfer/python-qinfer
src/qinfer/tomography/bases.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/tomography/bases.py#L323-L336
def state_to_modelparams(self, state): """ Converts a QuTiP-represented state into a model parameter vector. :param qutip.Qobj state: State to be converted. :rtype: :class:`np.ndarray` :return: The representation of the given state in this basis, as a vector of real parameters. """ basis = self.flat() data = state.data.todense().view(np.ndarray).flatten() # NB: assumes Hermitian state and basis! return np.real(np.dot(basis.conj(), data))
[ "def", "state_to_modelparams", "(", "self", ",", "state", ")", ":", "basis", "=", "self", ".", "flat", "(", ")", "data", "=", "state", ".", "data", ".", "todense", "(", ")", ".", "view", "(", "np", ".", "ndarray", ")", ".", "flatten", "(", ")", "...
Converts a QuTiP-represented state into a model parameter vector. :param qutip.Qobj state: State to be converted. :rtype: :class:`np.ndarray` :return: The representation of the given state in this basis, as a vector of real parameters.
[ "Converts", "a", "QuTiP", "-", "represented", "state", "into", "a", "model", "parameter", "vector", "." ]
python
train
marten-de-vries/Flask-WebSub
flask_websub/hub/__init__.py
https://github.com/marten-de-vries/Flask-WebSub/blob/422d5b597245554c47e881483f99cae7c57a81ba/flask_websub/hub/__init__.py#L66-L103
def init_celery(self, celery): """Registers the celery tasks on the hub object.""" count = next(self.counter) def task_with_hub(f, **opts): @functools.wraps(f) def wrapper(*args, **kwargs): return f(self, *args, **kwargs) # Make sure newer instances don't overwride older ones. wrapper.__name__ = wrapper.__name__ + '_' + str(count) return celery.task(**opts)(wrapper) # tasks for internal use: self.subscribe = task_with_hub(subscribe) self.unsubscribe = task_with_hub(unsubscribe) max_attempts = self.config.get('MAX_ATTEMPTS', 10) make_req = task_with_hub(make_request_retrying, bind=True, max_retries=max_attempts) self.make_request_retrying = make_req # user facing tasks # wrapped by send_change_notification: self.send_change = task_with_hub(send_change_notification) # wrapped by cleanup_expired_subscriptions @task_with_hub def cleanup(hub): self.storage.cleanup_expired_subscriptions() self.cleanup = cleanup # wrapped by schedule_cleanup def schedule(every_x_seconds=A_DAY): celery.add_periodic_task(every_x_seconds, self.cleanup_expired_subscriptions.s()) self.schedule = schedule
[ "def", "init_celery", "(", "self", ",", "celery", ")", ":", "count", "=", "next", "(", "self", ".", "counter", ")", "def", "task_with_hub", "(", "f", ",", "*", "*", "opts", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", ...
Registers the celery tasks on the hub object.
[ "Registers", "the", "celery", "tasks", "on", "the", "hub", "object", "." ]
python
train
aio-libs/aioredis
aioredis/commands/hash.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/hash.py#L104-L106
def hset(self, key, field, value): """Set the string value of a hash field.""" return self.execute(b'HSET', key, field, value)
[ "def", "hset", "(", "self", ",", "key", ",", "field", ",", "value", ")", ":", "return", "self", ".", "execute", "(", "b'HSET'", ",", "key", ",", "field", ",", "value", ")" ]
Set the string value of a hash field.
[ "Set", "the", "string", "value", "of", "a", "hash", "field", "." ]
python
train
ihmeuw/vivarium
src/vivarium/framework/results_writer.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/results_writer.py#L50-L79
def write_output(self, data, file_name, key=None): """Writes output data to disk. Parameters ---------- data: pandas.DataFrame or dict The data to write to disk. file_name: str The name of the file to write. key: str, optional The lookup key for the sub_directory to write results to, if any. """ path = os.path.join(self._directories[key], file_name) extension = file_name.split('.')[-1] if extension == 'yaml': with open(path, 'w') as f: yaml.dump(data, f) elif extension == 'hdf': # to_hdf breaks with categorical dtypes. categorical_columns = data.dtypes[data.dtypes == 'category'].index data.loc[:, categorical_columns] = data.loc[:, categorical_columns].astype('object') # Writing to an hdf over and over balloons the file size so write to new file and move it over to avoid data.to_hdf(path + "update", 'data') if os.path.exists(path): os.remove(path) os.rename(path + "update", path) else: raise NotImplementedError( f"Only 'yaml' and 'hdf' file types are supported. You requested {extension}")
[ "def", "write_output", "(", "self", ",", "data", ",", "file_name", ",", "key", "=", "None", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_directories", "[", "key", "]", ",", "file_name", ")", "extension", "=", "file_name"...
Writes output data to disk. Parameters ---------- data: pandas.DataFrame or dict The data to write to disk. file_name: str The name of the file to write. key: str, optional The lookup key for the sub_directory to write results to, if any.
[ "Writes", "output", "data", "to", "disk", "." ]
python
train
juju/python-libjuju
juju/utils.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/utils.py#L88-L115
async def run_with_interrupt(task, *events, loop=None): """ Awaits a task while allowing it to be interrupted by one or more `asyncio.Event`s. If the task finishes without the events becoming set, the results of the task will be returned. If the event become set, the task will be cancelled ``None`` will be returned. :param task: Task to run :param events: One or more `asyncio.Event`s which, if set, will interrupt `task` and cause it to be cancelled. :param loop: Optional event loop to use other than the default. """ loop = loop or asyncio.get_event_loop() task = asyncio.ensure_future(task, loop=loop) event_tasks = [loop.create_task(event.wait()) for event in events] done, pending = await asyncio.wait([task] + event_tasks, loop=loop, return_when=asyncio.FIRST_COMPLETED) for f in pending: f.cancel() # cancel unfinished tasks for f in done: f.exception() # prevent "exception was not retrieved" errors if task in done: return task.result() # may raise exception else: return None
[ "async", "def", "run_with_interrupt", "(", "task", ",", "*", "events", ",", "loop", "=", "None", ")", ":", "loop", "=", "loop", "or", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "asyncio", ".", "ensure_future", "(", "task", ",", "loop", "=...
Awaits a task while allowing it to be interrupted by one or more `asyncio.Event`s. If the task finishes without the events becoming set, the results of the task will be returned. If the event become set, the task will be cancelled ``None`` will be returned. :param task: Task to run :param events: One or more `asyncio.Event`s which, if set, will interrupt `task` and cause it to be cancelled. :param loop: Optional event loop to use other than the default.
[ "Awaits", "a", "task", "while", "allowing", "it", "to", "be", "interrupted", "by", "one", "or", "more", "asyncio", ".", "Event", "s", "." ]
python
train
rvswift/EB
EB/builder/postanalysis/postanalysis_io.py
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/postanalysis/postanalysis_io.py#L27-L92
def print_extended_help(): """ print a detailed help message :return: """ w = textwrap.TextWrapper() w.expand_tabs = False w.width = 85 w.initial_indent = '\t' w.subsequent_indent = '\t ' print('') print(textwrap.fill("<postanalyze> Complete parameter list:", initial_indent='')) print('') cmd = "--input : (required) csv file to split into training and test sets" print(w.fill(cmd)) cmd = "\t\tColumns should be as follows:" print(w.fill(cmd)) print('') cmd="\t\t id, status, receptor_1, receptor_2, ..., receptor_N" print(w.fill(cmd)) cmd="\t\t CH44, 1, -9.7, -9.3, ..., -10.2" print(w.fill(cmd)) cmd="\t\t ZN44, 0, -6.6, -6.1, ..., -6.8" print(w.fill(cmd)) print('') cmd="\t\tid is a unique molecular identifier" print(w.fill(cmd)) cmd="\t\tstatus takes a value of '1' if the molecule is active and '0' otherwise." print(w.fill(cmd)) cmd="\t\treceptor_1 through receptor_N are docking scores." print(w.fill(cmd)) print('') outname = "--outname : (required) the prefix of the outputfiles." print(w.fill(outname)) print('') ensemble_list = "--ensemble_list : (required) a list of csv files that contain the queries in\ the ensemble. For example, 'Ensemble_1_queries.csv Ensemble_2_queries.csv\ Ensemble_3_queries.csv ...'" print(w.fill(ensemble_list)) print('') compare = "--compare : (optional) Compare the virtual screening results for the\ ensembles specified after the '--ensemble_list' flag. No more than two ensembles\ may be specified at once." print(w.fill(compare)) print('') fpf = "--fpf : (optional) Evaluate ensemble performance at the set of specified FPF values. \ By default, values of '0.0001', '0.001', '0.01', and '0.05' are considered, if they are defined." print(w.fill(fpf)) print('') plot = "--plot : (optional) Generate ROC plots of the input ensembles and their\ members." 
print(w.fill(plot)) print('') roc_data = "--write_roc : (optional) if the '--write_roc' flag is set, a 'ROC_DATA' \ directory will be created, & ROC data points will be written there for each\ ensemble. The default is not to write ROC data points." print(w.fill(roc_data)) print('')
[ "def", "print_extended_help", "(", ")", ":", "w", "=", "textwrap", ".", "TextWrapper", "(", ")", "w", ".", "expand_tabs", "=", "False", "w", ".", "width", "=", "85", "w", ".", "initial_indent", "=", "'\\t'", "w", ".", "subsequent_indent", "=", "'\\t '",...
print a detailed help message :return:
[ "print", "a", "detailed", "help", "message", ":", "return", ":" ]
python
train
SheffieldML/GPy
GPy/plotting/gpy_plot/gp_plots.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/plotting/gpy_plot/gp_plots.py#L222-L258
def plot_density(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, apply_link=False, visible_dims=None, which_data_ycols='all', levels=35, label='gp density', predict_kw=None, **kwargs): """ Plot the confidence interval between the percentiles lower and upper. E.g. the 95% confidence interval is $2.5, 97.5$. Note: Only implemented for one dimension! You can deactivate the legend for this one plot by supplying None to label. Give the Y_metadata in the predict_kw if you need it. :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits :type plot_limits: np.array :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v. :type fixed_inputs: a list of tuples :param int resolution: The resolution of the prediction [default:200] :param bool plot_raw: plot the latent function (usually denoted f) only? :param bool apply_link: whether to apply the link function of the GP to the raw prediction. :param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints) :param array-like which_data_ycols: which columns of y to plot (array-like or list of ints) :param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you. :param dict predict_kw: the keyword arguments for the prediction. 
If you want to plot a specific kernel give dict(kern=<specific kernel>) in here """ canvas, kwargs = pl().new_canvas(**kwargs) X = get_x_y_var(self)[0] helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution) helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw, apply_link, np.linspace(2.5, 97.5, levels*2), get_which_data_ycols(self, which_data_ycols), predict_kw) plots = _plot_density(self, canvas, helper_data, helper_prediction, label, **kwargs) return pl().add_to_canvas(canvas, plots)
[ "def", "plot_density", "(", "self", ",", "plot_limits", "=", "None", ",", "fixed_inputs", "=", "None", ",", "resolution", "=", "None", ",", "plot_raw", "=", "False", ",", "apply_link", "=", "False", ",", "visible_dims", "=", "None", ",", "which_data_ycols", ...
Plot the confidence interval between the percentiles lower and upper. E.g. the 95% confidence interval is $2.5, 97.5$. Note: Only implemented for one dimension! You can deactivate the legend for this one plot by supplying None to label. Give the Y_metadata in the predict_kw if you need it. :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits :type plot_limits: np.array :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v. :type fixed_inputs: a list of tuples :param int resolution: The resolution of the prediction [default:200] :param bool plot_raw: plot the latent function (usually denoted f) only? :param bool apply_link: whether to apply the link function of the GP to the raw prediction. :param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints) :param array-like which_data_ycols: which columns of y to plot (array-like or list of ints) :param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you. :param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
[ "Plot", "the", "confidence", "interval", "between", "the", "percentiles", "lower", "and", "upper", ".", "E", ".", "g", ".", "the", "95%", "confidence", "interval", "is", "$2", ".", "5", "97", ".", "5$", ".", "Note", ":", "Only", "implemented", "for", "...
python
train
helixyte/everest
everest/url.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/url.py#L161-L170
def make_filter_string(cls, filter_specification): """ Converts the given filter specification to a CQL filter expression. """ registry = get_current_registry() visitor_cls = registry.getUtility(IFilterSpecificationVisitor, name=EXPRESSION_KINDS.CQL) visitor = visitor_cls() filter_specification.accept(visitor) return str(visitor.expression)
[ "def", "make_filter_string", "(", "cls", ",", "filter_specification", ")", ":", "registry", "=", "get_current_registry", "(", ")", "visitor_cls", "=", "registry", ".", "getUtility", "(", "IFilterSpecificationVisitor", ",", "name", "=", "EXPRESSION_KINDS", ".", "CQL"...
Converts the given filter specification to a CQL filter expression.
[ "Converts", "the", "given", "filter", "specification", "to", "a", "CQL", "filter", "expression", "." ]
python
train
orb-framework/orb
orb/core/query.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/query.py#L994-L1013
def lessThan(self, value): """ Sets the operator type to Query.Op.LessThan and sets the value to the inputted value. :param value <variant> :return <Query> :sa lessThan :usage |>>> from orb import Query as Q |>>> query = Q('test').lessThan(1) |>>> print query |test less_than 1 """ newq = self.copy() newq.setOp(Query.Op.LessThan) newq.setValue(value) return newq
[ "def", "lessThan", "(", "self", ",", "value", ")", ":", "newq", "=", "self", ".", "copy", "(", ")", "newq", ".", "setOp", "(", "Query", ".", "Op", ".", "LessThan", ")", "newq", ".", "setValue", "(", "value", ")", "return", "newq" ]
Sets the operator type to Query.Op.LessThan and sets the value to the inputted value. :param value <variant> :return <Query> :sa lessThan :usage |>>> from orb import Query as Q |>>> query = Q('test').lessThan(1) |>>> print query |test less_than 1
[ "Sets", "the", "operator", "type", "to", "Query", ".", "Op", ".", "LessThan", "and", "sets", "the", "value", "to", "the", "inputted", "value", ".", ":", "param", "value", "<variant", ">", ":", "return", "<Query", ">", ":", "sa", "lessThan", ":", "usage...
python
train
getsentry/rb
rb/cluster.py
https://github.com/getsentry/rb/blob/569d1d13311f6c04bae537fc17e75da430e4ec45/rb/cluster.py#L147-L152
def disconnect_pools(self): """Disconnects all connections from the internal pools.""" with self._lock: for pool in self._pools.itervalues(): pool.disconnect() self._pools.clear()
[ "def", "disconnect_pools", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "for", "pool", "in", "self", ".", "_pools", ".", "itervalues", "(", ")", ":", "pool", ".", "disconnect", "(", ")", "self", ".", "_pools", ".", "clear", "(", ")" ]
Disconnects all connections from the internal pools.
[ "Disconnects", "all", "connections", "from", "the", "internal", "pools", "." ]
python
train
CleanCut/green
green/loader.py
https://github.com/CleanCut/green/blob/6434515302472363b7d10135be76ed8cd3934d80/green/loader.py#L333-L375
def toParallelTargets(suite, targets): """ Produce a list of targets which should be tested in parallel. For the most part this will be a list of test modules. The exception is when a dotted name representing something more granular than a module was input (like an individal test case or test method) """ targets = filter(lambda x: x != '.', targets) # First, convert the suite to a proto test list - proto tests nicely # parse things like the fully dotted name of the test and the # finest-grained module it belongs to, which simplifies our job. proto_test_list = toProtoTestList(suite) # Extract a list of the modules that all of the discovered tests are in modules = set([x.module for x in proto_test_list]) # Get the list of user-specified targets that are NOT modules non_module_targets = [] for target in targets: if not list(filter(None, [target in x for x in modules])): non_module_targets.append(target) # Main loop -- iterating through all loaded test methods parallel_targets = [] for test in proto_test_list: found = False for target in non_module_targets: # target is a dotted name of either a test case or test method # here test.dotted name is always a dotted name of a method if (target in test.dotted_name): if target not in parallel_targets: # Explicitly specified targets get their own entry to # run parallel to everything else parallel_targets.append(target) found = True break if found: continue # This test does not appear to be part of a specified target, so # its entire module must have been discovered, so just add the # whole module to the list if we haven't already. if test.module not in parallel_targets: parallel_targets.append(test.module) return parallel_targets
[ "def", "toParallelTargets", "(", "suite", ",", "targets", ")", ":", "targets", "=", "filter", "(", "lambda", "x", ":", "x", "!=", "'.'", ",", "targets", ")", "# First, convert the suite to a proto test list - proto tests nicely", "# parse things like the fully dotted name...
Produce a list of targets which should be tested in parallel. For the most part this will be a list of test modules. The exception is when a dotted name representing something more granular than a module was input (like an individal test case or test method)
[ "Produce", "a", "list", "of", "targets", "which", "should", "be", "tested", "in", "parallel", "." ]
python
train