repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
econ-ark/HARK
HARK/ConsumptionSaving/ConsAggShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsAggShockModel.py#L241-L255
def getRfree(self):
    '''
    Returns an array of size self.AgentCount with self.RfreeNow in every entry.

    Parameters
    ----------
    None

    Returns
    -------
    RfreeNow : np.array
        Array of size self.AgentCount with risk free interest rate for each agent.
    '''
    # Broadcast the scalar interest factor across every agent of this type.
    return np.full(self.AgentCount, self.RfreeNow)
[ "def", "getRfree", "(", "self", ")", ":", "RfreeNow", "=", "self", ".", "RfreeNow", "*", "np", ".", "ones", "(", "self", ".", "AgentCount", ")", "return", "RfreeNow" ]
Returns an array of size self.AgentCount with self.RfreeNow in every entry. Parameters ---------- None Returns ------- RfreeNow : np.array Array of size self.AgentCount with risk free interest rate for each agent.
[ "Returns", "an", "array", "of", "size", "self", ".", "AgentCount", "with", "self", ".", "RfreeNow", "in", "every", "entry", "." ]
python
train
26.4
mitsei/dlkit
dlkit/json_/resource/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/managers.py#L1329-L1344
def get_bin_query_session(self, proxy):
    """Gets the bin query session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_bin_query()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_bin_query()`` is ``true``.*

    """
    # Only hand out a session when the capability is advertised.
    if self.supports_bin_query():
        # pylint: disable=no-member
        return sessions.BinQuerySession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
[ "def", "get_bin_query_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_bin_query", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "BinQuerySession", "(...
Gets the bin query session. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.resource.BinQuerySession) - a ``BinQuerySession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_bin_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_bin_query()`` is ``true``.*
[ "Gets", "the", "bin", "query", "session", "." ]
python
train
43.3125
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1592-L1614
def makeStatic():
    """ Provide static access to underscore class """
    # Select both bound methods (py2) and plain functions (py3) on the class.
    p = lambda value: inspect.ismethod(value) or inspect.isfunction(value)
    for eachMethod in inspect.getmembers(underscore, predicate=p):
        m = eachMethod[0]
        if not hasattr(_, m):
            # `caller` takes the method name as a parameter so each generated
            # `execute` closes over its own name rather than the loop
            # variable (avoids the late-binding closure pitfall).
            def caller(a):
                def execute(*args):
                    # First positional arg becomes the wrapped object; the
                    # rest are forwarded to the underscore method.
                    if len(args) == 1:
                        r = getattr(underscore(args[0]), a)()
                    elif len(args) > 1:
                        rargs = args[1:]
                        r = getattr(underscore(args[0]), a)(*rargs)
                    else:
                        r = getattr(underscore([]), a)()
                    return r
                return execute
            _.__setattr__(m, caller(m))
    # put the class itself as a parameter so that we can use it on outside
    _.__setattr__("underscore", underscore)
    _.templateSettings = {}
[ "def", "makeStatic", "(", ")", ":", "p", "=", "lambda", "value", ":", "inspect", ".", "ismethod", "(", "value", ")", "or", "inspect", ".", "isfunction", "(", "value", ")", "for", "eachMethod", "in", "inspect", ".", "getmembers", "(", "underscore", ",", ...
Provide static access to underscore class
[ "Provide", "static", "access", "to", "underscore", "class" ]
python
train
44.478261
spulec/moto
scripts/scaffold.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/scripts/scaffold.py#L279-L301
def get_function_in_models(service, operation):
    """refers to definition of API in botocore, and autogenerates function

    You can see example of elbv2 from link below.
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json

    Args:
        service (str): boto3 service name (e.g. 'elbv2').
        operation (str): snake_case operation name (e.g. 'describe_listeners').

    Returns:
        str: Python source text of a stub backend method for the operation.
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    inputs = op_model.input_shape.members
    # Operations without an output shape expose no 'members' attribute.
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
    if input_names:
        body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
    else:
        # BUG FIX: this branch previously emitted the literal string
        # 'def {}(self)\n' -- the placeholder was never formatted and the
        # trailing colon was missing, generating invalid Python.
        body = 'def {}(self):\n'.format(operation)
    body += ' # implement here\n'
    body += ' return {}\n\n'.format(', '.join(output_names))
    return body
[ "def", "get_function_in_models", "(", "service", ",", "operation", ")", ":", "client", "=", "boto3", ".", "client", "(", "service", ")", "aws_operation_name", "=", "to_upper_camel_case", "(", "operation", ")", "op_model", "=", "client", ".", "_service_model", "....
refers to definition of API in botocore, and autogenerates function You can see example of elbv2 from link below. https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
[ "refers", "to", "definition", "of", "API", "in", "botocore", "and", "autogenerates", "function", "You", "can", "see", "example", "of", "elbv2", "from", "link", "below", ".", "https", ":", "//", "github", ".", "com", "/", "boto", "/", "botocore", "/", "bl...
python
train
45.695652
molmod/molmod
molmod/pairff.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/pairff.py#L202-L219
def yield_pair_energies(self, index1, index2):
    """Yields pairs ((s(r_ij), v(bar{r}_ij))

    Generates the (scalar, vector-part) contributions for the atom pair
    (index1, index2): charge-charge, dipole-dipole and, when both kinds of
    terms are present, the charge-dipole cross terms.
    """
    inv_r = 1/self.distances[index1, index2]
    have_charges = self.charges is not None
    if have_charges:
        q_a = self.charges[index1]
        q_b = self.charges[index2]
        # Coulomb term: q_a*q_b/r.
        yield q_a*q_b*inv_r, 1
    if self.dipoles is not None:
        inv_r3 = inv_r**3
        inv_r5 = inv_r**5
        rel = self.deltas[index1, index2]
        mu_a = self.dipoles[index1]
        mu_b = self.dipoles[index2]
        # Dipole-dipole interaction, split into its two tensor pieces.
        yield inv_r3*np.dot(mu_a, mu_b), 1
        yield -3*inv_r5, np.dot(mu_a, rel)*np.dot(rel, mu_b)
        if have_charges:
            # Charge-dipole cross terms.
            yield q_a*inv_r3, np.dot(mu_b, rel)
            yield q_b*inv_r3, np.dot(mu_a, -rel)
[ "def", "yield_pair_energies", "(", "self", ",", "index1", ",", "index2", ")", ":", "d_1", "=", "1", "/", "self", ".", "distances", "[", "index1", ",", "index2", "]", "if", "self", ".", "charges", "is", "not", "None", ":", "c1", "=", "self", ".", "c...
Yields pairs ((s(r_ij), v(bar{r}_ij))
[ "Yields", "pairs", "((", "s", "(", "r_ij", ")", "v", "(", "bar", "{", "r", "}", "_ij", "))" ]
python
train
40.166667
ThreatConnect-Inc/tcex
tcex/tcex_ti/mappings/tcex_ti_mappings.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/mappings/tcex_ti_mappings.py#L644-L678
def attribute(self, attribute_id, action='GET', params=None):
    """
    Gets the attribute from a Group/Indicator or Victim

    Args:
        action: either 'GET' or 'DELETE'; anything else is reported as an error.
        params: optional dict of query parameters (used for GET only).
        attribute_id: id of the attribute to retrieve or delete.

    Returns:
        attribute json
    """
    if params is None:
        params = {}
    # Attributes can only be read or removed on an object that already exists.
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    if action == 'GET':
        return self.tc_requests.get_attribute(
            self.api_type,
            self.api_sub_type,
            self.unique_id,
            attribute_id,
            owner=self.owner,
            params=params,
        )
    elif action == 'DELETE':
        return self.tc_requests.delete_attribute(
            self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner
        )
    else:
        # Unknown verb: report it and fall through to None.
        self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action])
    return None
[ "def", "attribute", "(", "self", ",", "attribute_id", ",", "action", "=", "'GET'", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "not", "self", ".", "can_update", "(", ")", ":", "self", ".",...
Gets the attribute from a Group/Indicator or Victim Args: action: params: attribute_id: Returns: attribute json
[ "Gets", "the", "attribute", "from", "a", "Group", "/", "Indicator", "or", "Victim" ]
python
train
27.457143
learningequality/ricecooker
ricecooker/managers/tree.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/managers/tree.py#L148-L170
def upload_tree(self):
    """upload_tree: sends processed channel data to server to create tree

    Args: None
    Returns: link to uploadedchannel
    """
    from datetime import datetime
    started_at = datetime.now()
    root, channel_id = self.add_channel()
    self.node_count_dict = {"upload_count": 0, "total_count": self.channel.count()}
    config.LOGGER.info("\tPreparing fields...")
    self.truncate_fields(self.channel)
    self.add_nodes(root, self.channel)
    # One retry pass for any nodes that failed to build the first time.
    if self.check_failed(print_warning=False):
        retry_queue = self.failed_node_builds
        self.failed_node_builds = {}
        self.reattempt_failed(retry_queue)
        self.check_failed()
    channel_id, channel_link = self.commit_channel(channel_id)
    finished_at = datetime.now()
    config.LOGGER.info("Upload time: {time}s".format(time=(finished_at - started_at).total_seconds()))
    return channel_id, channel_link
[ "def", "upload_tree", "(", "self", ")", ":", "from", "datetime", "import", "datetime", "start_time", "=", "datetime", ".", "now", "(", ")", "root", ",", "channel_id", "=", "self", ".", "add_channel", "(", ")", "self", ".", "node_count_dict", "=", "{", "\...
upload_tree: sends processed channel data to server to create tree Args: None Returns: link to uploadedchannel
[ "upload_tree", ":", "sends", "processed", "channel", "data", "to", "server", "to", "create", "tree", "Args", ":", "None", "Returns", ":", "link", "to", "uploadedchannel" ]
python
train
41.869565
tdryer/hangups
hangups/auth.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/auth.py#L220-L235
def get_auth_stdin(refresh_token_filename, manual_login=False):
    """Simple wrapper for :func:`get_auth` that prompts the user using stdin.

    Args:
        refresh_token_filename (str): Path to file where refresh token will
            be cached.
        manual_login (bool): If true, prompt user to log in through a browser
            and enter authorization code manually. Defaults to false.

    Raises:
        GoogleAuthError: If authentication with Google fails.
    """
    # Wire the stdin-based credential prompt into the generic auth flow.
    token_cache = RefreshTokenCache(refresh_token_filename)
    prompt = CredentialsPrompt()
    return get_auth(prompt, token_cache, manual_login=manual_login)
[ "def", "get_auth_stdin", "(", "refresh_token_filename", ",", "manual_login", "=", "False", ")", ":", "refresh_token_cache", "=", "RefreshTokenCache", "(", "refresh_token_filename", ")", "return", "get_auth", "(", "CredentialsPrompt", "(", ")", ",", "refresh_token_cache"...
Simple wrapper for :func:`get_auth` that prompts the user using stdin. Args: refresh_token_filename (str): Path to file where refresh token will be cached. manual_login (bool): If true, prompt user to log in through a browser and enter authorization code manually. Defaults to false. Raises: GoogleAuthError: If authentication with Google fails.
[ "Simple", "wrapper", "for", ":", "func", ":", "get_auth", "that", "prompts", "the", "user", "using", "stdin", "." ]
python
valid
39.875
explosion/thinc
thinc/api.py
https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/api.py#L367-L407
def foreach(layer, drop_factor=1.0):
    """Map a layer across list items"""
    def foreach_fwd(docs, drop=0.0):
        sents = []
        lengths = []
        for doc in docs:
            # Keep only non-empty sentences.
            doc_sents = [sent for sent in doc if len(sent)]
            # Randomly drop sentences during training, scaled by drop_factor.
            subset = [
                s for s in doc_sents if numpy.random.random() >= drop * drop_factor
            ]
            if subset:
                sents.extend(subset)
                lengths.append(len(subset))
            else:
                # Never drop a whole document: keep one random sentence.
                # NOTE(review): assumes each doc has at least one non-empty
                # sentence -- doc_sents[0] raises IndexError otherwise; confirm.
                numpy.random.shuffle(doc_sents)
                sents.append(doc_sents[0])
                lengths.append(1)
        # Run the wrapped layer on the flat sentence list, then restore the
        # per-document nesting using the recorded lengths.
        flat, bp_flat = layer.begin_update(sents, drop=0.0)
        output = layer.ops.unflatten(flat, lengths)
        def foreach_bwd(d_output, sgd=None):
            # Flatten gradients, backprop through the wrapped layer, then
            # re-nest per document (if a gradient is produced at all).
            d_flat = layer.ops.flatten(d_output)
            d_sents = bp_flat(d_flat, sgd=sgd)
            if d_sents is None:
                return d_sents
            else:
                return layer.ops.unflatten(d_sents, lengths)
        return output, foreach_bwd
    model = wrap(foreach_fwd, layer)
    def _run_foreach_child_hooks(model, X, y):
        # Forward on-data hooks to child layers using the first list item.
        for layer in model._layers:
            for hook in layer.on_data_hooks:
                hook(layer, X[0], y[0])
    model.on_data_hooks = [_run_foreach_child_hooks]
    return model
[ "def", "foreach", "(", "layer", ",", "drop_factor", "=", "1.0", ")", ":", "def", "foreach_fwd", "(", "docs", ",", "drop", "=", "0.0", ")", ":", "sents", "=", "[", "]", "lengths", "=", "[", "]", "for", "doc", "in", "docs", ":", "doc_sents", "=", "...
Map a layer across list items
[ "Map", "a", "layer", "across", "list", "items" ]
python
train
31.170732
tensorflow/tensor2tensor
tensor2tensor/utils/decoding.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/decoding.py#L782-L795
def save_video(video, save_path_template):
    """Save frames of the videos into files."""
    # PIL is an optional dependency; fail loudly with an actionable message.
    try:
        from PIL import Image  # pylint: disable=g-import-not-at-top
    except ImportError as e:
        tf.logging.warning(
            "Showing and saving an image requires PIL library to be "
            "installed: %s", e)
        raise NotImplementedError("Image display and save not implemented.")
    # Write each frame to the path produced by formatting the frame index.
    for index, frame in enumerate(video):
        target = save_path_template.format(index)
        with tf.gfile.Open(target, "wb") as handle:
            Image.fromarray(np.uint8(frame)).save(handle)
[ "def", "save_video", "(", "video", ",", "save_path_template", ")", ":", "try", ":", "from", "PIL", "import", "Image", "# pylint: disable=g-import-not-at-top", "except", "ImportError", "as", "e", ":", "tf", ".", "logging", ".", "warning", "(", "\"Showing and saving...
Save frames of the videos into files.
[ "Save", "frames", "of", "the", "videos", "into", "files", "." ]
python
train
38.714286
ForensicArtifacts/artifacts
artifacts/writer.py
https://github.com/ForensicArtifacts/artifacts/blob/044a63bfb4448af33d085c69066c80f9505ae7ca/artifacts/writer.py#L72-L83
def FormatArtifacts(self, artifacts):
    """Formats artifacts to desired output format.

    Args:
      artifacts (list[ArtifactDefinition]): artifact definitions.

    Returns:
      str: formatted string of artifact definition.
    """
    # Serialize the dictionary form of every artifact as one JSON list.
    return json.dumps([definition.AsDict() for definition in artifacts])
[ "def", "FormatArtifacts", "(", "self", ",", "artifacts", ")", ":", "artifact_definitions", "=", "[", "artifact", ".", "AsDict", "(", ")", "for", "artifact", "in", "artifacts", "]", "json_data", "=", "json", ".", "dumps", "(", "artifact_definitions", ")", "re...
Formats artifacts to desired output format. Args: artifacts (list[ArtifactDefinition]): artifact definitions. Returns: str: formatted string of artifact definition.
[ "Formats", "artifacts", "to", "desired", "output", "format", "." ]
python
train
30.916667
boriel/zxbasic
zxbpp.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbpp.py#L118-L136
def search_filename(fname, lineno, local_first):
    """Search a filename into the list of the include path.

    If local_first is true, it will try first in the current directory of
    the file being analyzed.
    """
    fname = api.utils.sanitize_filename(fname)
    if local_first:
        search_dirs = [CURRENT_DIR] + INCLUDEPATH
    else:
        search_dirs = list(INCLUDEPATH)
    if OPTIONS.include_path.value:
        search_dirs.extend(OPTIONS.include_path.value.split(':'))
    if os.path.isabs(fname):
        # Absolute paths are used verbatim if they exist.
        if os.path.isfile(fname):
            return fname
    else:
        # Relative paths are resolved against each include directory in order.
        for candidate_dir in search_dirs:
            candidate = api.utils.sanitize_filename(os.path.join(candidate_dir, fname))
            if os.path.exists(candidate):
                return candidate
    # Nothing matched: report the error and return an empty path.
    error(lineno, "file '%s' not found" % fname)
    return ''
[ "def", "search_filename", "(", "fname", ",", "lineno", ",", "local_first", ")", ":", "fname", "=", "api", ".", "utils", ".", "sanitize_filename", "(", "fname", ")", "i_path", "=", "[", "CURRENT_DIR", "]", "+", "INCLUDEPATH", "if", "local_first", "else", "l...
Search a filename into the list of the include path. If local_first is true, it will try first in the current directory of the file being analyzed.
[ "Search", "a", "filename", "into", "the", "list", "of", "the", "include", "path", ".", "If", "local_first", "is", "true", "it", "will", "try", "first", "in", "the", "current", "directory", "of", "the", "file", "being", "analyzed", "." ]
python
train
39.578947
ungarj/mapchete
mapchete/io/raster.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L290-L324
def read_raster_no_crs(input_file, indexes=None, gdal_opts=None):
    """
    Wrapper function around rasterio.open().read().

    Parameters
    ----------
    input_file : str
        Path to file
    indexes : int or list
        Band index or list of band indexes to be read.

    Returns
    -------
    MaskedArray

    Raises
    ------
    FileNotFoundError
        if file cannot be found.
    """
    # Rasterio/GDAL emit noisy warnings for some rasters; silence them here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            with rasterio.Env(
                **get_gdal_options(
                    gdal_opts, is_remote=path_is_remote(input_file, s3=True)
                )
            ):
                with rasterio.open(input_file, "r") as src:
                    return src.read(indexes=indexes, masked=True)
        except RasterioIOError as e:
            # Translate rasterio's "missing file" messages into the builtin
            # FileNotFoundError; anything else is re-raised unchanged via the
            # for/else (the else runs only when no message matched).
            for i in ("does not exist in the file system", "No such file or directory"):
                if i in str(e):
                    raise FileNotFoundError("%s not found" % input_file)
            else:
                raise
[ "def", "read_raster_no_crs", "(", "input_file", ",", "indexes", "=", "None", ",", "gdal_opts", "=", "None", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "try", ":", "with", "ra...
Wrapper function around rasterio.open().read(). Parameters ---------- input_file : str Path to file indexes : int or list Band index or list of band indexes to be read. Returns ------- MaskedArray Raises ------ FileNotFoundError if file cannot be found.
[ "Wrapper", "function", "around", "rasterio", ".", "open", "()", ".", "read", "()", "." ]
python
valid
29.342857
econ-ark/HARK
HARK/core.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/core.py#L721-L801
def solveAgent(agent,verbose):
    '''
    Solve the dynamic model for one agent type. This function iterates on "cycles"
    of an agent's model either a given number of times or until solution convergence
    if an infinite horizon model is used (with agent.cycles = 0).

    Parameters
    ----------
    agent : AgentType
        The microeconomic AgentType whose dynamic problem is to be solved.
    verbose : boolean
        If True, solution progress is printed to screen (when cycles != 1).

    Returns
    -------
    solution : [Solution]
        A list of solutions to the one period problems that the agent will
        encounter in his "lifetime".  Returns in reverse chronological order.
    '''
    # Record the flow of time when the Agent began the process, and make sure time is flowing backwards
    original_time_flow = agent.time_flow
    agent.timeRev()

    # Check to see whether this is an (in)finite horizon problem
    cycles_left = agent.cycles
    infinite_horizon = cycles_left == 0

    # Initialize the solution, which includes the terminal solution if it's not a pseudo-terminal period
    solution = []
    if not agent.pseudo_terminal:
        solution.append(deepcopy(agent.solution_terminal))

    # Initialize the process, then loop over cycles
    solution_last = agent.solution_terminal
    go = True
    completed_cycles = 0
    max_cycles = 5000 # escape clause
    if verbose:
        # NOTE(review): clock() is presumably time.clock imported at module
        # level; it was removed in Python 3.8 -- confirm before upgrading.
        t_last = clock()
    while go:
        # Solve a cycle of the model, recording it if horizon is finite
        solution_cycle = solveOneCycle(agent,solution_last)
        if not infinite_horizon:
            solution += solution_cycle

        # Check for termination: identical solutions across cycle iterations or run out of cycles
        solution_now = solution_cycle[-1]
        if infinite_horizon:
            if completed_cycles > 0:
                solution_distance = solution_now.distance(solution_last)
                go = (solution_distance > agent.tolerance and completed_cycles < max_cycles)
            else: # Assume solution does not converge after only one cycle
                solution_distance = 100.0
                go = True
        else:
            cycles_left += -1
            go = cycles_left > 0

        # Update the "last period solution"
        solution_last = solution_now
        completed_cycles += 1

        # Display progress if requested
        if verbose:
            t_now = clock()
            if infinite_horizon:
                print('Finished cycle #' + str(completed_cycles) + ' in ' + str(t_now-t_last) +
                      ' seconds, solution distance = ' + str(solution_distance))
            else:
                print('Finished cycle #' + str(completed_cycles) + ' of ' + str(agent.cycles) +
                      ' in ' + str(t_now-t_last) + ' seconds.')
            t_last = t_now

    # Record the last cycle if horizon is infinite (solution is still empty!)
    if infinite_horizon:
        solution = solution_cycle # PseudoTerminal=False impossible for infinite horizon

    # Restore the direction of time to its original orientation, then return the solution
    if original_time_flow:
        agent.timeFwd()
    return solution
[ "def", "solveAgent", "(", "agent", ",", "verbose", ")", ":", "# Record the flow of time when the Agent began the process, and make sure time is flowing backwards", "original_time_flow", "=", "agent", ".", "time_flow", "agent", ".", "timeRev", "(", ")", "# Check to see whether t...
Solve the dynamic model for one agent type. This function iterates on "cycles" of an agent's model either a given number of times or until solution convergence if an infinite horizon model is used (with agent.cycles = 0). Parameters ---------- agent : AgentType The microeconomic AgentType whose dynamic problem is to be solved. verbose : boolean If True, solution progress is printed to screen (when cycles != 1). Returns ------- solution : [Solution] A list of solutions to the one period problems that the agent will encounter in his "lifetime". Returns in reverse chronological order.
[ "Solve", "the", "dynamic", "model", "for", "one", "agent", "type", ".", "This", "function", "iterates", "on", "cycles", "of", "an", "agent", "s", "model", "either", "a", "given", "number", "of", "times", "or", "until", "solution", "convergence", "if", "an"...
python
train
39.074074
echonest/pyechonest
examples/try_new_things.py
https://github.com/echonest/pyechonest/blob/d8c7af6c1da699b50b2f4b1bd3c0febe72e7f1ee/examples/try_new_things.py#L95-L109
def write_xspf(f, tuples):
    """send me a list of (artist,title,mp3_url)"""
    # Emit a minimal XSPF playlist document around the given tracks.
    writer = XmlWriter(f, indentAmount=' ')
    writer.prolog()
    writer.start('playlist', {'xmlns': 'http://xspf.org/ns/0/', 'version': '1'})
    writer.start('trackList')
    for track in tuples:
        writer.start('track')
        writer.elem('creator', track[0])
        writer.elem('title', track[1])
        writer.elem('location', track[2])
        writer.end()
    writer.end()
    writer.end()
    f.close()
[ "def", "write_xspf", "(", "f", ",", "tuples", ")", ":", "xml", "=", "XmlWriter", "(", "f", ",", "indentAmount", "=", "' '", ")", "xml", ".", "prolog", "(", ")", "xml", ".", "start", "(", "'playlist'", ",", "{", "'xmlns'", ":", "'http://xspf.org/ns/0/'...
send me a list of (artist,title,mp3_url)
[ "send", "me", "a", "list", "of", "(", "artist", "title", "mp3_url", ")" ]
python
train
29.866667
chemlab/chemlab
chemlab/graphics/renderers/sphere.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/graphics/renderers/sphere.py#L86-L93
def update_positions(self, positions):
    '''Update the sphere positions.
    '''
    # Translate the template vertices (radii already applied, centered at
    # the origin) by each sphere's new center, then push to the renderer.
    translated = self.sphs_verts_radii.copy()
    translated += positions.reshape(self.n_spheres, 1, 3)
    self.tr.update_vertices(translated)
    self.poslist = positions
[ "def", "update_positions", "(", "self", ",", "positions", ")", ":", "sphs_verts", "=", "self", ".", "sphs_verts_radii", ".", "copy", "(", ")", "sphs_verts", "+=", "positions", ".", "reshape", "(", "self", ".", "n_spheres", ",", "1", ",", "3", ")", "self"...
Update the sphere positions.
[ "Update", "the", "sphere", "positions", "." ]
python
train
34.125
cfobel/si-prefix
si_prefix/__init__.py
https://github.com/cfobel/si-prefix/blob/274fdf47f65d87d0b7a2e3c80f267db63d042c59/si_prefix/__init__.py#L224-L263
def si_parse(value):
    '''
    Parse a value expressed using SI prefix units to a floating point number.

    Parameters
    ----------
    value : str or unicode
        Value expressed using SI prefix units (as returned by
        :func:`si_format` function).

    Returns
    -------
    float
        Parsed numeric value.

    .. versionchanged:: 1.0
        Use unicode string for SI unit to support micro (i.e., µ) character.

    .. seealso:: `Issue #4`_.

    .. _`Issue #4`: https://github.com/cfobel/si-prefix/issues/4
    '''
    # BUG FIX: the fraction groups previously used an unescaped '.', which
    # matches *any* character (e.g. '1x5' was accepted by the pattern and
    # then crashed inside float()).  Escape it so that only a real decimal
    # point is accepted.
    CRE_10E_NUMBER = re.compile(r'^\s*(?P<integer>[\+\-]?\d+)?'
                                r'(?P<fraction>\.\d+)?\s*([eE]\s*'
                                r'(?P<expof10>[\+\-]?\d+))?$')
    CRE_SI_NUMBER = re.compile(r'^\s*(?P<number>(?P<integer>[\+\-]?\d+)?'
                               r'(?P<fraction>\.\d+)?)\s*'
                               u'(?P<si_unit>[%s])?\s*$' % SI_PREFIX_UNITS)
    match = CRE_10E_NUMBER.match(value)
    if match:
        # Plain or exponent notation: can be parsed using `float` directly.
        assert(match.group('integer') is not None or
               match.group('fraction') is not None)
        return float(value)
    match = CRE_SI_NUMBER.match(value)
    assert(match.group('integer') is not None or
           match.group('fraction') is not None)
    d = match.groupdict()
    # A missing SI unit means "no prefix"; ' ' is its slot in SI_PREFIX_UNITS.
    si_unit = d['si_unit'] if d['si_unit'] else ' '
    prefix_levels = (len(SI_PREFIX_UNITS) - 1) // 2
    # Each prefix step away from the center of SI_PREFIX_UNITS is 10**3.
    scale = 10 ** (3 * (SI_PREFIX_UNITS.index(si_unit) - prefix_levels))
    return float(d['number']) * scale
[ "def", "si_parse", "(", "value", ")", ":", "CRE_10E_NUMBER", "=", "re", ".", "compile", "(", "r'^\\s*(?P<integer>[\\+\\-]?\\d+)?'", "r'(?P<fraction>.\\d+)?\\s*([eE]\\s*'", "r'(?P<expof10>[\\+\\-]?\\d+))?$'", ")", "CRE_SI_NUMBER", "=", "re", ".", "compile", "(", "r'^\\s*(?...
Parse a value expressed using SI prefix units to a floating point number. Parameters ---------- value : str or unicode Value expressed using SI prefix units (as returned by :func:`si_format` function). .. versionchanged:: 1.0 Use unicode string for SI unit to support micro (i.e., µ) character. .. seealso:: `Issue #4`_. .. _`Issue #4`: https://github.com/cfobel/si-prefix/issues/4
[ "Parse", "a", "value", "expressed", "using", "SI", "prefix", "units", "to", "a", "floating", "point", "number", "." ]
python
train
36.4
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/storage_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/storage_v1_api.py#L1531-L1554
def read_volume_attachment(self, name, **kwargs):  # noqa: E501
    """read_volume_attachment  # noqa: E501

    read the specified VolumeAttachment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_volume_attachment(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the VolumeAttachment (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1VolumeAttachment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request data only (strip status code and headers).  Both the
    # sync and async paths delegate to the same lower-level call, which
    # returns either the data or a thread-like object respectively.
    kwargs['_return_http_data_only'] = True
    return self.read_volume_attachment_with_http_info(name, **kwargs)  # noqa: E501
[ "def", "read_volume_attachment", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "rea...
read_volume_attachment # noqa: E501 read the specified VolumeAttachment # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_volume_attachment(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the VolumeAttachment (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1VolumeAttachment If the method is called asynchronously, returns the request thread.
[ "read_volume_attachment", "#", "noqa", ":", "E501" ]
python
train
51.958333
SHDShim/pytheos
pytheos/eqn_bm3.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_bm3.py#L96-L107
def bm3_k(p, v0, k0, k0p):
    """
    calculate bulk modulus, wrapper for cal_k_bm3
    cannot handle uncertainties

    :param p: pressure
    :param v0: volume at reference conditions
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at different conditions
    :return: bulk modulus at high pressure
    """
    # Pack the EOS parameters in the order cal_k_bm3 expects.
    eos_params = [v0, k0, k0p]
    return cal_k_bm3(p, eos_params)
[ "def", "bm3_k", "(", "p", ",", "v0", ",", "k0", ",", "k0p", ")", ":", "return", "cal_k_bm3", "(", "p", ",", "[", "v0", ",", "k0", ",", "k0p", "]", ")" ]
calculate bulk modulus, wrapper for cal_k_bm3 cannot handle uncertainties :param p: pressure :param v0: volume at reference conditions :param k0: bulk modulus at reference conditions :param k0p: pressure derivative of bulk modulus at different conditions :return: bulk modulus at high pressure
[ "calculate", "bulk", "modulus", "wrapper", "for", "cal_k_bm3", "cannot", "handle", "uncertainties" ]
python
train
32.75
pysathq/pysat
pysat/formula.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/formula.py#L1239-L1291
def from_fp(self, file_pointer, comment_lead=['c']):
    """
    Read a CNF+ formula from a file pointer. A file pointer should be
    specified as an argument. The only default argument is
    ``comment_lead``, which can be used for parsing specific comment
    lines.

    :param file_pointer: a file pointer to read the formula from.
    :param comment_lead: a list of characters leading comment lines

    :type file_pointer: file pointer
    :type comment_lead: list(str)

    Usage example:

    .. code-block:: python

        >>> with open('some-file.cnf+', 'r') as fp:
        ...     cnf1 = CNFPlus()
        ...     cnf1.from_fp(fp)
        >>>
        >>> with open('another-file.cnf+', 'r') as fp:
        ...     cnf2 = CNFPlus(from_fp=fp)
    """
    self.nv = 0
    self.clauses = []
    self.atmosts = []
    self.comments = []
    # 'p' always marks the preamble/comment family of lines.
    lead_chars = ('p',) + tuple(comment_lead)

    for raw in file_pointer:
        stripped = raw.strip()
        if not stripped:
            continue
        if stripped[0] in lead_chars:
            # Keep every comment line except the 'p cnf+ ' preamble itself.
            if not stripped.startswith('p cnf+ '):
                self.comments.append(stripped)
            continue
        if stripped[-1] == '0':
            # Normal clause, terminated by 0.
            literals = [int(tok) for tok in stripped.split()[:-1]]
            self.nv = max([abs(lit) for lit in literals] + [self.nv])
            self.clauses.append(literals)
        else:
            # Atmost/atleast constraint: "<literals> <op> <bound>".
            tokens = stripped.split()
            literals = [int(tok) for tok in tokens[:-2]]
            bound = int(tokens[-1])
            self.nv = max([abs(lit) for lit in literals] + [self.nv])
            if tokens[-2][0] == '>':
                # Convert an atleast constraint into an atmost constraint.
                literals = [-lit for lit in literals]
                bound = len(literals) - bound
            self.atmosts.append([literals, bound])
[ "def", "from_fp", "(", "self", ",", "file_pointer", ",", "comment_lead", "=", "[", "'c'", "]", ")", ":", "self", ".", "nv", "=", "0", "self", ".", "clauses", "=", "[", "]", "self", ".", "atmosts", "=", "[", "]", "self", ".", "comments", "=", "[",...
Read a CNF+ formula from a file pointer. A file pointer should be specified as an argument. The only default argument is ``comment_lead``, which can be used for parsing specific comment lines. :param file_pointer: a file pointer to read the formula from. :param comment_lead: a list of characters leading comment lines :type file_pointer: file pointer :type comment_lead: list(str) Usage example: .. code-block:: python >>> with open('some-file.cnf+', 'r') as fp: ... cnf1 = CNFPlus() ... cnf1.from_fp(fp) >>> >>> with open('another-file.cnf+', 'r') as fp: ... cnf2 = CNFPlus(from_fp=fp)
[ "Read", "a", "CNF", "+", "formula", "from", "a", "file", "pointer", ".", "A", "file", "pointer", "should", "be", "specified", "as", "an", "argument", ".", "The", "only", "default", "argument", "is", "comment_lead", "which", "can", "be", "used", "for", "p...
python
train
37.622642
nccgroup/opinel
opinel/services/cloudformation.py
https://github.com/nccgroup/opinel/blob/2d4f5b96e0a1f9cb0356629f4f87e4ed99ce2606/opinel/services/cloudformation.py#L154-L176
def get_stackset_ready_accounts(credentials, account_ids, quiet=True): """ Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role :param credentials: AWS credentials to use when calling sts:assumerole :param org_account_ids: List of AWS accounts to check for Stackset configuration :return: List of account IDs in which assuming the stackset execution role worked """ api_client = connect_service('sts', credentials, silent=True) configured_account_ids = [] for account_id in account_ids: try: role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id api_client.assume_role(RoleArn=role_arn, RoleSessionName='opinel-get_stackset_ready_accounts') configured_account_ids.append(account_id) except Exception as e: pass if len(configured_account_ids) != len(account_ids) and not quiet: printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_account_ids)) printDebug(str(configured_account_ids)) return configured_account_ids
[ "def", "get_stackset_ready_accounts", "(", "credentials", ",", "account_ids", ",", "quiet", "=", "True", ")", ":", "api_client", "=", "connect_service", "(", "'sts'", ",", "credentials", ",", "silent", "=", "True", ")", "configured_account_ids", "=", "[", "]", ...
Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role :param credentials: AWS credentials to use when calling sts:assumerole :param org_account_ids: List of AWS accounts to check for Stackset configuration :return: List of account IDs in which assuming the stackset execution role worked
[ "Verify", "which", "AWS", "accounts", "have", "been", "configured", "for", "CloudFormation", "stack", "set", "by", "attempting", "to", "assume", "the", "stack", "set", "execution", "role" ]
python
train
53.391304
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/utils.py
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/utils.py#L162-L199
def wait_for_provision_state(baremetal_client, node_uuid, provision_state, loops=10, sleep=1): """Wait for a given Provisioning state in Ironic Discoverd Updating the provisioning state is an async operation, we need to wait for it to be completed. :param baremetal_client: Instance of Ironic client :type baremetal_client: ironicclient.v1.client.Client :param node_uuid: The Ironic node UUID :type node_uuid: str :param provision_state: The provisioning state name to wait for :type provision_state: str :param loops: How many times to loop :type loops: int :param sleep: How long to sleep between loops :type sleep: int """ for _ in range(0, loops): node = baremetal_client.node.get(node_uuid) if node is None: # The node can't be found in ironic, so we don't need to wait for # the provision state return True if node.provision_state == provision_state: return True time.sleep(sleep) return False
[ "def", "wait_for_provision_state", "(", "baremetal_client", ",", "node_uuid", ",", "provision_state", ",", "loops", "=", "10", ",", "sleep", "=", "1", ")", ":", "for", "_", "in", "range", "(", "0", ",", "loops", ")", ":", "node", "=", "baremetal_client", ...
Wait for a given Provisioning state in Ironic Discoverd Updating the provisioning state is an async operation, we need to wait for it to be completed. :param baremetal_client: Instance of Ironic client :type baremetal_client: ironicclient.v1.client.Client :param node_uuid: The Ironic node UUID :type node_uuid: str :param provision_state: The provisioning state name to wait for :type provision_state: str :param loops: How many times to loop :type loops: int :param sleep: How long to sleep between loops :type sleep: int
[ "Wait", "for", "a", "given", "Provisioning", "state", "in", "Ironic", "Discoverd" ]
python
train
27.631579
PixelwarStudio/PyTree
Tree/draw.py
https://github.com/PixelwarStudio/PyTree/blob/f14b25ea145da6b00d836e34251d2a4c823766dc/Tree/draw.py#L42-L62
def _get_color(self, age): """Get the fill color depending on age. Args: age (int): The age of the branch/es Returns: tuple: (r, g, b) """ if age == self.tree.age: return self.leaf_color color = self.stem_color tree = self.tree if len(color) == 3: return color diff = [color[i+3]-color[i] for i in range(3)] per_age = [diff[i]/(tree.age-1) for i in range(3)] return tuple([int(color[i]+per_age[i]*age) for i in range(3)])
[ "def", "_get_color", "(", "self", ",", "age", ")", ":", "if", "age", "==", "self", ".", "tree", ".", "age", ":", "return", "self", ".", "leaf_color", "color", "=", "self", ".", "stem_color", "tree", "=", "self", ".", "tree", "if", "len", "(", "colo...
Get the fill color depending on age. Args: age (int): The age of the branch/es Returns: tuple: (r, g, b)
[ "Get", "the", "fill", "color", "depending", "on", "age", "." ]
python
train
25.809524
google/grumpy
third_party/pypy/datetime.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/datetime.py#L1354-L1368
def replace(self, hour=None, minute=None, second=None, microsecond=None, tzinfo=True): """Return a new time with new values for the specified fields.""" if hour is None: hour = self.hour if minute is None: minute = self.minute if second is None: second = self.second if microsecond is None: microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo return time.__new__(type(self), hour, minute, second, microsecond, tzinfo)
[ "def", "replace", "(", "self", ",", "hour", "=", "None", ",", "minute", "=", "None", ",", "second", "=", "None", ",", "microsecond", "=", "None", ",", "tzinfo", "=", "True", ")", ":", "if", "hour", "is", "None", ":", "hour", "=", "self", ".", "ho...
Return a new time with new values for the specified fields.
[ "Return", "a", "new", "time", "with", "new", "values", "for", "the", "specified", "fields", "." ]
python
valid
38.8
yyuu/botornado
boto/ec2/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L286-L313
def deregister_image(self, image_id, delete_snapshot=False): """ Unregister an AMI. :type image_id: string :param image_id: the ID of the Image to unregister :type delete_snapshot: bool :param delete_snapshot: Set to True if we should delete the snapshot associated with an EBS volume mounted at /dev/sda1 :rtype: bool :return: True if successful """ snapshot_id = None if delete_snapshot: image = self.get_image(image_id) for key in image.block_device_mapping: if key == "/dev/sda1": snapshot_id = image.block_device_mapping[key].snapshot_id break result = self.get_status('DeregisterImage', {'ImageId':image_id}, verb='POST') if result and snapshot_id: return result and self.delete_snapshot(snapshot_id) return result
[ "def", "deregister_image", "(", "self", ",", "image_id", ",", "delete_snapshot", "=", "False", ")", ":", "snapshot_id", "=", "None", "if", "delete_snapshot", ":", "image", "=", "self", ".", "get_image", "(", "image_id", ")", "for", "key", "in", "image", "....
Unregister an AMI. :type image_id: string :param image_id: the ID of the Image to unregister :type delete_snapshot: bool :param delete_snapshot: Set to True if we should delete the snapshot associated with an EBS volume mounted at /dev/sda1 :rtype: bool :return: True if successful
[ "Unregister", "an", "AMI", "." ]
python
train
35.642857
mseclab/PyJFuzz
pyjfuzz/core/pjf_process_monitor.py
https://github.com/mseclab/PyJFuzz/blob/f777067076f62c9ab74ffea6e90fd54402b7a1b4/pyjfuzz/core/pjf_process_monitor.py#L101-L127
def start_monitor(self, standalone=True): """ Run command in a loop and check exit status plus restart process when needed """ try: self.start() cmdline = shlex.split(self.config.process_to_monitor) if standalone: signal.signal(signal.SIGINT, self.shutdown) self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE) while self.process and not self.finished: self.process.wait() if self._is_sigsegv(self.process.returncode): if self.config.debug: print("[\033[92mINFO\033[0m] Process crashed with \033[91mSIGSEGV\033[0m, waiting for testcase...") while not self.got_testcase(): time.sleep(1) self.save_testcase(self.testcase[-10:]) # just take last 10 testcases if self.process: self.process = subprocess.Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE) except OSError: self.shutdown() self.process = False self.got_testcase = lambda: True raise PJFProcessExecutionError("Binary <%s> does not exist" % cmdline[0]) except Exception as e: raise PJFBaseException("Unknown error please send log to author")
[ "def", "start_monitor", "(", "self", ",", "standalone", "=", "True", ")", ":", "try", ":", "self", ".", "start", "(", ")", "cmdline", "=", "shlex", ".", "split", "(", "self", ".", "config", ".", "process_to_monitor", ")", "if", "standalone", ":", "sign...
Run command in a loop and check exit status plus restart process when needed
[ "Run", "command", "in", "a", "loop", "and", "check", "exit", "status", "plus", "restart", "process", "when", "needed" ]
python
test
50.407407
cmutel/constructive_geometries
constructive_geometries/cg.py
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/cg.py#L143-L182
def construct_rest_of_worlds_mapping(self, excluded, fp=None): """Construct topo mapping file for ``excluded``. ``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``. Topo mapping has the data format: .. code-block:: python { 'data': [ ['location label', ['topo face integer ids']], ], 'metadata': { 'filename': 'name of face definitions file', 'field': 'field with uniquely identifies the fields in ``filename``', 'sha256': 'SHA 256 hash of ``filename``' } } """ metadata = { 'filename': 'faces.gpkg', 'field': 'id', 'sha256': sha256(self.faces_fp) } data = [] for key, locations in excluded.items(): for location in locations: assert location in self.locations, "Can't find location {}".format(location) included = self.all_faces.difference( {face for loc in locations for face in self.data[loc]} ) data.append((key, sorted(included))) obj = {'data': data, 'metadata': metadata} if fp: with open(fp, "w") as f: json.dump(obj, f, indent=2) else: return obj
[ "def", "construct_rest_of_worlds_mapping", "(", "self", ",", "excluded", ",", "fp", "=", "None", ")", ":", "metadata", "=", "{", "'filename'", ":", "'faces.gpkg'", ",", "'field'", ":", "'id'", ",", "'sha256'", ":", "sha256", "(", "self", ".", "faces_fp", "...
Construct topo mapping file for ``excluded``. ``excluded`` must be a **dictionary** of {"rest-of-world label": ["names", "of", "excluded", "locations"]}``. Topo mapping has the data format: .. code-block:: python { 'data': [ ['location label', ['topo face integer ids']], ], 'metadata': { 'filename': 'name of face definitions file', 'field': 'field with uniquely identifies the fields in ``filename``', 'sha256': 'SHA 256 hash of ``filename``' } }
[ "Construct", "topo", "mapping", "file", "for", "excluded", "." ]
python
train
34.95
morpframework/morpfw
morpfw/interfaces.py
https://github.com/morpframework/morpfw/blob/803fbf29714e6f29456482f1cfbdbd4922b020b0/morpfw/interfaces.py#L168-L172
def aggregate(self, query: Optional[dict] = None, group: Optional[dict] = None, order_by: Union[None, list, tuple] = None) -> list: """return aggregation result based on specified rulez query and group""" raise NotImplementedError
[ "def", "aggregate", "(", "self", ",", "query", ":", "Optional", "[", "dict", "]", "=", "None", ",", "group", ":", "Optional", "[", "dict", "]", "=", "None", ",", "order_by", ":", "Union", "[", "None", ",", "list", ",", "tuple", "]", "=", "None", ...
return aggregation result based on specified rulez query and group
[ "return", "aggregation", "result", "based", "on", "specified", "rulez", "query", "and", "group" ]
python
train
55.6
cloudendpoints/endpoints-python
endpoints/endpoints_dispatcher.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/endpoints_dispatcher.py#L149-L172
def dispatch(self, request, start_response): """Handles dispatch to apiserver handlers. This typically ends up calling start_response and returning the entire body of the response. Args: request: An ApiRequest, the request from the user. start_response: A function with semantics defined in PEP-333. Returns: A string, the body of the response. """ # Check if this matches any of our special handlers. dispatched_response = self.dispatch_non_api_requests(request, start_response) if dispatched_response is not None: return dispatched_response # Call the service. try: return self.call_backend(request, start_response) except errors.RequestError as error: return self._handle_request_error(request, error, start_response)
[ "def", "dispatch", "(", "self", ",", "request", ",", "start_response", ")", ":", "# Check if this matches any of our special handlers.", "dispatched_response", "=", "self", ".", "dispatch_non_api_requests", "(", "request", ",", "start_response", ")", "if", "dispatched_res...
Handles dispatch to apiserver handlers. This typically ends up calling start_response and returning the entire body of the response. Args: request: An ApiRequest, the request from the user. start_response: A function with semantics defined in PEP-333. Returns: A string, the body of the response.
[ "Handles", "dispatch", "to", "apiserver", "handlers", "." ]
python
train
35.166667
deathbeds/importnb
src/importnb/remote.py
https://github.com/deathbeds/importnb/blob/ec870d1f8ab99fd5b363267f89787a3e442a779f/src/importnb/remote.py#L88-L96
def Remote(path=None, loader=Notebook, **globals): """A remote notebook finder. Place a `*` into a url to generalize the finder. It returns a context manager """ class Remote(RemoteMixin, loader): ... return Remote(path=path, **globals)
[ "def", "Remote", "(", "path", "=", "None", ",", "loader", "=", "Notebook", ",", "*", "*", "globals", ")", ":", "class", "Remote", "(", "RemoteMixin", ",", "loader", ")", ":", "...", "return", "Remote", "(", "path", "=", "path", ",", "*", "*", "glob...
A remote notebook finder. Place a `*` into a url to generalize the finder. It returns a context manager
[ "A", "remote", "notebook", "finder", ".", "Place", "a", "*", "into", "a", "url", "to", "generalize", "the", "finder", ".", "It", "returns", "a", "context", "manager" ]
python
train
28.888889
UCL-INGI/INGInious
inginious/common/task_factory.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/common/task_factory.py#L95-L120
def update_task_descriptor_content(self, courseid, taskid, content, force_extension=None): """ Update the task descriptor with the dict in content :param courseid: the course id of the course :param taskid: the task id of the task :param content: the content to put in the task file :param force_extension: If None, save it the same format. Else, save with the given extension :raise InvalidNameException, TaskNotFoundException, TaskUnreadableException """ if not id_checker(courseid): raise InvalidNameException("Course with invalid name: " + courseid) if not id_checker(taskid): raise InvalidNameException("Task with invalid name: " + taskid) if force_extension is None: path_to_descriptor, descriptor_manager = self._get_task_descriptor_info(courseid, taskid) elif force_extension in self.get_available_task_file_extensions(): path_to_descriptor = "task." + force_extension descriptor_manager = self._task_file_managers[force_extension] else: raise TaskReaderNotFoundException() try: self.get_task_fs(courseid, taskid).put(path_to_descriptor, descriptor_manager.dump(content)) except: raise TaskNotFoundException()
[ "def", "update_task_descriptor_content", "(", "self", ",", "courseid", ",", "taskid", ",", "content", ",", "force_extension", "=", "None", ")", ":", "if", "not", "id_checker", "(", "courseid", ")", ":", "raise", "InvalidNameException", "(", "\"Course with invalid ...
Update the task descriptor with the dict in content :param courseid: the course id of the course :param taskid: the task id of the task :param content: the content to put in the task file :param force_extension: If None, save it the same format. Else, save with the given extension :raise InvalidNameException, TaskNotFoundException, TaskUnreadableException
[ "Update", "the", "task", "descriptor", "with", "the", "dict", "in", "content", ":", "param", "courseid", ":", "the", "course", "id", "of", "the", "course", ":", "param", "taskid", ":", "the", "task", "id", "of", "the", "task", ":", "param", "content", ...
python
train
50.384615
saltstack/salt
salt/modules/ssh.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ssh.py#L52-L80
def _refine_enc(enc): ''' Return the properly formatted ssh value for the authorized encryption key type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string if using higher enc. If the type is not found, raise CommandExecutionError. ''' rsa = ['r', 'rsa', 'ssh-rsa'] dss = ['d', 'dsa', 'dss', 'ssh-dss'] ecdsa = ['e', 'ecdsa', 'ecdsa-sha2-nistp521', 'ecdsa-sha2-nistp384', 'ecdsa-sha2-nistp256'] ed25519 = ['ed25519', 'ssh-ed25519'] if enc in rsa: return 'ssh-rsa' elif enc in dss: return 'ssh-dss' elif enc in ecdsa: # ecdsa defaults to ecdsa-sha2-nistp256 # otherwise enc string is actual encoding string if enc in ['e', 'ecdsa']: return 'ecdsa-sha2-nistp256' return enc elif enc in ed25519: return 'ssh-ed25519' else: raise CommandExecutionError( 'Incorrect encryption key type \'{0}\'.'.format(enc) )
[ "def", "_refine_enc", "(", "enc", ")", ":", "rsa", "=", "[", "'r'", ",", "'rsa'", ",", "'ssh-rsa'", "]", "dss", "=", "[", "'d'", ",", "'dsa'", ",", "'dss'", ",", "'ssh-dss'", "]", "ecdsa", "=", "[", "'e'", ",", "'ecdsa'", ",", "'ecdsa-sha2-nistp521'"...
Return the properly formatted ssh value for the authorized encryption key type. ecdsa defaults to 256 bits, must give full ecdsa enc schema string if using higher enc. If the type is not found, raise CommandExecutionError.
[ "Return", "the", "properly", "formatted", "ssh", "value", "for", "the", "authorized", "encryption", "key", "type", ".", "ecdsa", "defaults", "to", "256", "bits", "must", "give", "full", "ecdsa", "enc", "schema", "string", "if", "using", "higher", "enc", ".",...
python
train
33.068966
EelcoHoogendoorn/Numpy_arraysetops_EP
numpy_indexed/funcs.py
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L199-L216
def rank(keys, axis=semantics.axis_default): """where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys """ index = as_index(keys, axis) return index.rank
[ "def", "rank", "(", "keys", ",", "axis", "=", "semantics", ".", "axis_default", ")", ":", "index", "=", "as_index", "(", "keys", ",", "axis", ")", "return", "index", ".", "rank" ]
where each item is in the pecking order. Parameters ---------- keys : indexable object Returns ------- ndarray, [keys.size], int unique integers, ranking the sorting order Notes ----- we should have that index.sorted[index.rank] == keys
[ "where", "each", "item", "is", "in", "the", "pecking", "order", "." ]
python
train
21.166667
sorgerlab/indra
indra/sources/trips/analyze_ekbs.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L20-L47
def get_args(node): """Return the arguments of a node in the event graph.""" arg_roles = {} args = node.findall('arg') + \ [node.find('arg1'), node.find('arg2'), node.find('arg3')] for arg in args: if arg is not None: id = arg.attrib.get('id') if id is not None: arg_roles[arg.attrib['role']] = (arg.attrib['id'], arg) # Now look at possible inevent links if node.find('features') is not None: inevents = node.findall('features/inevent') for inevent in inevents: if 'id' in inevent.attrib: arg_roles['inevent'] = (inevent.attrib['id'], inevent) ptms = node.findall('features/ptm') + node.findall('features/no-ptm') for ptm in ptms: if 'id' in inevent.attrib: arg_roles['ptm'] = (inevent.attrib['id'], ptm) # And also look for assoc-with links aw = node.find('assoc-with') if aw is not None: aw_id = aw.attrib['id'] arg_roles['assoc-with'] = (aw_id, aw) return arg_roles
[ "def", "get_args", "(", "node", ")", ":", "arg_roles", "=", "{", "}", "args", "=", "node", ".", "findall", "(", "'arg'", ")", "+", "[", "node", ".", "find", "(", "'arg1'", ")", ",", "node", ".", "find", "(", "'arg2'", ")", ",", "node", ".", "fi...
Return the arguments of a node in the event graph.
[ "Return", "the", "arguments", "of", "a", "node", "in", "the", "event", "graph", "." ]
python
train
37.285714
apragacz/django-rest-registration
rest_registration/api/views/register_email.py
https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register_email.py#L33-L58
def register_email(request): ''' Register new email. ''' user = request.user serializer = RegisterEmailSerializer(data=request.data) serializer.is_valid(raise_exception=True) email = serializer.validated_data['email'] template_config = ( registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES) if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED: signer = RegisterEmailSigner({ 'user_id': user.pk, 'email': email, }, request=request) send_verification_notification( user, signer, template_config, email=email) else: email_field = get_user_setting('EMAIL_FIELD') setattr(user, email_field, email) user.save() return get_ok_response('Register email link email sent')
[ "def", "register_email", "(", "request", ")", ":", "user", "=", "request", ".", "user", "serializer", "=", "RegisterEmailSerializer", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "ema...
Register new email.
[ "Register", "new", "email", "." ]
python
train
30.730769
rodricios/eatiht
eatiht/v2.py
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L113-L158
def get_html_tree(filename_url_or_filelike): """From some file path, input stream, or URL, construct and return an HTML tree. """ try: handler = ( HTTPSHandler if filename_url_or_filelike.lower().startswith('https') else HTTPHandler ) cj = CookieJar() opener = build_opener(handler) opener.add_handler(HTTPCookieProcessor(cj)) resp = opener.open(filename_url_or_filelike) except(AttributeError): content = filename_url_or_filelike.read() encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html except(ValueError): content = filename_url_or_filelike encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html try: content = resp.read() finally: resp.close() encoding = chardet.detect(content)['encoding'] parsed_html = html.parse(BytesIO(content), html.HTMLParser(encoding=encoding, remove_blank_text=True)) return parsed_html
[ "def", "get_html_tree", "(", "filename_url_or_filelike", ")", ":", "try", ":", "handler", "=", "(", "HTTPSHandler", "if", "filename_url_or_filelike", ".", "lower", "(", ")", ".", "startswith", "(", "'https'", ")", "else", "HTTPHandler", ")", "cj", "=", "Cookie...
From some file path, input stream, or URL, construct and return an HTML tree.
[ "From", "some", "file", "path", "input", "stream", "or", "URL", "construct", "and", "return", "an", "HTML", "tree", "." ]
python
train
32.847826
aio-libs/aiohttp
aiohttp/streams.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/streams.py#L472-L484
def _read_nowait(self, n: int) -> bytes: """ Read not more than n bytes, or whole buffer is n == -1 """ chunks = [] while self._buffer: chunk = self._read_nowait_chunk(n) chunks.append(chunk) if n != -1: n -= len(chunk) if n == 0: break return b''.join(chunks) if chunks else b''
[ "def", "_read_nowait", "(", "self", ",", "n", ":", "int", ")", "->", "bytes", ":", "chunks", "=", "[", "]", "while", "self", ".", "_buffer", ":", "chunk", "=", "self", ".", "_read_nowait_chunk", "(", "n", ")", "chunks", ".", "append", "(", "chunk", ...
Read not more than n bytes, or whole buffer is n == -1
[ "Read", "not", "more", "than", "n", "bytes", "or", "whole", "buffer", "is", "n", "==", "-", "1" ]
python
train
29.923077
theislab/scanpy
scanpy/tools/_sim.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/tools/_sim.py#L675-L683
def coupl_model8(self): """ Variant of toggle switch. """ self.Coupl = 0.5*self.Adj_signed # reduce the value of the coupling of the repressing genes # otherwise completely unstable solutions are obtained for x in np.nditer(self.Coupl,op_flags=['readwrite']): if x < -1e-6: x[...] = -0.2
[ "def", "coupl_model8", "(", "self", ")", ":", "self", ".", "Coupl", "=", "0.5", "*", "self", ".", "Adj_signed", "# reduce the value of the coupling of the repressing genes", "# otherwise completely unstable solutions are obtained", "for", "x", "in", "np", ".", "nditer", ...
Variant of toggle switch.
[ "Variant", "of", "toggle", "switch", "." ]
python
train
39.444444
rene-aguirre/pywinusb
examples/mute_led.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/examples/mute_led.py#L13-L49
def set_mute(mute_value): "Browse for mute usages and set value" all_mutes = ( \ (0x8, 0x9), # LED page (0x1, 0xA7), # desktop page (0xb, 0x2f), ) all_target_usages = [hid.get_full_usage_id(u[0], u[1]) for u in all_mutes] # usually you'll find and open the target device, here we'll browse for the # current connected devices all_devices = hid.find_all_hid_devices() success = 0 if not all_devices: print("Can't any HID device!") else: # search for our target usage # target pageId, usageId for device in all_devices: try: device.open() # target 'to set' value could be in feature or output reports for report in device.find_output_reports() + device.find_feature_reports(): for target_usage in all_target_usages: if target_usage in report: # set our value and send report[target_usage] = value report.send() success += 1 finally: device.close() # fit to sys.exit() proper result values print("{0} Mute usage(s) set\n".format(success)) if success: return 0 return -1
[ "def", "set_mute", "(", "mute_value", ")", ":", "all_mutes", "=", "(", "(", "0x8", ",", "0x9", ")", ",", "# LED page", "(", "0x1", ",", "0xA7", ")", ",", "# desktop page", "(", "0xb", ",", "0x2f", ")", ",", ")", "all_target_usages", "=", "[", "hid", ...
Browse for mute usages and set value
[ "Browse", "for", "mute", "usages", "and", "set", "value" ]
python
train
35.351351
cloudtools/stacker
stacker/actions/base.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/actions/base.py#L100-L113
def stack_template_key_name(blueprint): """Given a blueprint, produce an appropriate key name. Args: blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint object to create the key from. Returns: string: Key name resulting from blueprint. """ name = blueprint.name return "stack_templates/%s/%s-%s.json" % (blueprint.context.get_fqn(name), name, blueprint.version)
[ "def", "stack_template_key_name", "(", "blueprint", ")", ":", "name", "=", "blueprint", ".", "name", "return", "\"stack_templates/%s/%s-%s.json\"", "%", "(", "blueprint", ".", "context", ".", "get_fqn", "(", "name", ")", ",", "name", ",", "blueprint", ".", "ve...
Given a blueprint, produce an appropriate key name. Args: blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint object to create the key from. Returns: string: Key name resulting from blueprint.
[ "Given", "a", "blueprint", "produce", "an", "appropriate", "key", "name", "." ]
python
train
36.571429
pybel/pybel
src/pybel/struct/graph.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L413-L416
def _add_two_way_unqualified_edge(self, u: BaseEntity, v: BaseEntity, relation: str) -> str: """Add an unqualified edge both ways.""" self.add_unqualified_edge(v, u, relation) return self.add_unqualified_edge(u, v, relation)
[ "def", "_add_two_way_unqualified_edge", "(", "self", ",", "u", ":", "BaseEntity", ",", "v", ":", "BaseEntity", ",", "relation", ":", "str", ")", "->", "str", ":", "self", ".", "add_unqualified_edge", "(", "v", ",", "u", ",", "relation", ")", "return", "s...
Add an unqualified edge both ways.
[ "Add", "an", "unqualified", "edge", "both", "ways", "." ]
python
train
61.25
prthkms/alex
alex/handler.py
https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L43-L50
def make_executable(query): """make_executable(query) -- give executable permissions to a given file """ filename = support.get_file_name(query) if(os.path.isfile(filename)): os.system('chmod +x '+filename) else: print 'file not found'
[ "def", "make_executable", "(", "query", ")", ":", "filename", "=", "support", ".", "get_file_name", "(", "query", ")", "if", "(", "os", ".", "path", ".", "isfile", "(", "filename", ")", ")", ":", "os", ".", "system", "(", "'chmod +x '", "+", "filename"...
make_executable(query) -- give executable permissions to a given file
[ "make_executable", "(", "query", ")", "--", "give", "executable", "permissions", "to", "a", "given", "file" ]
python
train
29.625
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L291-L316
def version(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Return the version of a Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.version ''' query = 'SELECT setting FROM pg_catalog.pg_settings ' \ 'WHERE name = \'server_version\'' cmd = _psql_cmd('-c', query, '-t', host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) ret = _run_psql( cmd, runas=runas, password=password, host=host, port=port, user=user) for line in salt.utils.itertools.split(ret['stdout'], '\n'): # Just return the first line return line
[ "def", "version", "(", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ")", ":", "query", "=", "'SELECT setting FROM pg_catalog.pg_setting...
Return the version of a Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.version
[ "Return", "the", "version", "of", "a", "Postgres", "server", "." ]
python
train
30.384615
albahnsen/CostSensitiveClassification
costcla/models/cost_tree.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/models/cost_tree.py#L654-L676
def pruning(self, X, y, cost_mat): """ Function that prune the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. """ self.tree_.tree_pruned = copy.deepcopy(self.tree_.tree) if self.tree_.n_nodes > 0: self._pruning(X, y, cost_mat) nodes_pruned = self._nodes(self.tree_.tree_pruned) self.tree_.n_nodes_pruned = len(nodes_pruned)
[ "def", "pruning", "(", "self", ",", "X", ",", "y", ",", "cost_mat", ")", ":", "self", ".", "tree_", ".", "tree_pruned", "=", "copy", ".", "deepcopy", "(", "self", ".", "tree_", ".", "tree", ")", "if", "self", ".", "tree_", ".", "n_nodes", ">", "0...
Function that prune the decision tree. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y_true : array indicator matrix Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4] Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example.
[ "Function", "that", "prune", "the", "decision", "tree", "." ]
python
train
35.478261
reingart/pyafipws
wsremcarne.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wsremcarne.py#L143-L148
def __analizar_evento(self, ret): "Comprueba y extrae el wvento informativo si existen en la respuesta XML" evt = ret.get('evento') if evt: self.Eventos = [evt] self.Evento = "%(codigo)s: %(descripcion)s" % evt
[ "def", "__analizar_evento", "(", "self", ",", "ret", ")", ":", "evt", "=", "ret", ".", "get", "(", "'evento'", ")", "if", "evt", ":", "self", ".", "Eventos", "=", "[", "evt", "]", "self", ".", "Evento", "=", "\"%(codigo)s: %(descripcion)s\"", "%", "evt...
Comprueba y extrae el wvento informativo si existen en la respuesta XML
[ "Comprueba", "y", "extrae", "el", "wvento", "informativo", "si", "existen", "en", "la", "respuesta", "XML" ]
python
train
42.166667
draios/python-sdc-client
sdcclient/_monitor.py
https://github.com/draios/python-sdc-client/blob/47f83415842048778939b90944f64386a3bcb205/sdcclient/_monitor.py#L811-L866
def convert_scope_string_to_expression(scope): '''**Description** Internal function to convert a filter string to a filter object to be used with dashboards. ''' # # NOTE: The supported grammar is not perfectly aligned with the grammar supported by the Sysdig backend. # Proper grammar implementation will happen soon. # For practical purposes, the parsing will have equivalent results. # if scope is None or not scope: return [True, []] expressions = [] string_expressions = scope.strip(' \t\n\r').split(' and ') expression_re = re.compile('^(?P<not>not )?(?P<operand>[^ ]+) (?P<operator>=|!=|in|contains|starts with) (?P<value>(:?"[^"]+"|\'[^\']+\'|\(.+\)|.+))$') for string_expression in string_expressions: matches = expression_re.match(string_expression) if matches is None: return [False, 'invalid scope format'] is_not_operator = matches.group('not') is not None if matches.group('operator') == 'in': list_value = matches.group('value').strip(' ()') value_matches = re.findall('(:?\'[^\',]+\')|(:?"[^",]+")|(:?[,]+)', list_value) if len(value_matches) == 0: return [False, 'invalid scope value list format'] value_matches = map(lambda v: v[0] if v[0] else v[1], value_matches) values = map(lambda v: v.strip(' "\''), value_matches) else: values = [matches.group('value').strip('"\'')] operator_parse_dict = { 'in': 'in' if not is_not_operator else 'notIn', '=': 'equals' if not is_not_operator else 'notEquals', '!=': 'notEquals' if not is_not_operator else 'equals', 'contains': 'contains' if not is_not_operator else 'notContains', 'starts with': 'startsWith' } operator = operator_parse_dict.get(matches.group('operator'), None) if operator is None: return [False, 'invalid scope operator'] expressions.append({ 'operand': matches.group('operand'), 'operator': operator, 'value': values }) return [True, expressions]
[ "def", "convert_scope_string_to_expression", "(", "scope", ")", ":", "#", "# NOTE: The supported grammar is not perfectly aligned with the grammar supported by the Sysdig backend.", "# Proper grammar implementation will happen soon.", "# For practical purposes, the parsing will have equivalent res...
**Description** Internal function to convert a filter string to a filter object to be used with dashboards.
[ "**", "Description", "**", "Internal", "function", "to", "convert", "a", "filter", "string", "to", "a", "filter", "object", "to", "be", "used", "with", "dashboards", "." ]
python
test
41.410714
saltstack/salt
salt/states/pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L840-L884
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed
[ "def", "_verify_install", "(", "desired", ",", "new_pkgs", ",", "ignore_epoch", "=", "False", ",", "new_caps", "=", "None", ")", ":", "ok", "=", "[", "]", "failed", "=", "[", "]", "if", "not", "new_caps", ":", "new_caps", "=", "dict", "(", ")", "for"...
Determine whether or not the installed packages match what was requested in the SLS file.
[ "Determine", "whether", "or", "not", "the", "installed", "packages", "match", "what", "was", "requested", "in", "the", "SLS", "file", "." ]
python
train
38.555556
ReadabilityHoldings/python-readability-api
readability/clients.py
https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L350-L360
def post(self, url, post_params=None): """ Make an HTTP POST request to the Parser API. :param url: url to which to make the request :param post_params: POST data to send along. Expected to be a dict. """ post_params['token'] = self.token params = urlencode(post_params) logger.debug('Making POST request to %s with body %s', url, params) return requests.post(url, data=params)
[ "def", "post", "(", "self", ",", "url", ",", "post_params", "=", "None", ")", ":", "post_params", "[", "'token'", "]", "=", "self", ".", "token", "params", "=", "urlencode", "(", "post_params", ")", "logger", ".", "debug", "(", "'Making POST request to %s ...
Make an HTTP POST request to the Parser API. :param url: url to which to make the request :param post_params: POST data to send along. Expected to be a dict.
[ "Make", "an", "HTTP", "POST", "request", "to", "the", "Parser", "API", "." ]
python
train
40
tensorflow/tensor2tensor
tensor2tensor/envs/rendered_env_problem.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/rendered_env_problem.py#L66-L85
def example_reading_spec(self): """Return a mix of env and video data fields and decoders.""" video_fields, video_decoders = ( video_utils.VideoProblem.example_reading_spec(self)) env_fields, env_decoders = env_problem.EnvProblem.example_reading_spec(self) # Remove raw observations field since we want to capture them as videos. env_fields.pop(env_problem.OBSERVATION_FIELD) env_decoders.pop(env_problem.OBSERVATION_FIELD) # Add frame number spec and decoder. env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64) env_decoders[ _FRAME_NUMBER_FIELD] = tf.contrib.slim.tfexample_decoder.Tensor( _FRAME_NUMBER_FIELD) # Add video fields and decoders env_fields.update(video_fields) env_decoders.update(video_decoders) return env_fields, env_decoders
[ "def", "example_reading_spec", "(", "self", ")", ":", "video_fields", ",", "video_decoders", "=", "(", "video_utils", ".", "VideoProblem", ".", "example_reading_spec", "(", "self", ")", ")", "env_fields", ",", "env_decoders", "=", "env_problem", ".", "EnvProblem",...
Return a mix of env and video data fields and decoders.
[ "Return", "a", "mix", "of", "env", "and", "video", "data", "fields", "and", "decoders", "." ]
python
train
41.25
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/module.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L952-L1005
def get_module_by_name(self, modName): """ @type modName: int @param modName: Name of the module to look for, as returned by L{Module.get_name}. If two or more modules with the same name are loaded, only one of the matching modules is returned. You can also pass a full pathname to the DLL file. This works correctly even if two modules with the same name are loaded from different paths. @rtype: L{Module} @return: C{Module} object that best matches the given name. Returns C{None} if no C{Module} can be found. """ # Convert modName to lowercase. # This helps make case insensitive string comparisons. modName = modName.lower() # modName is an absolute pathname. if PathOperations.path_is_absolute(modName): for lib in self.iter_modules(): if modName == lib.get_filename().lower(): return lib return None # Stop trying to match the name. # Get all the module names. # This prevents having to iterate through the module list # more than once. modDict = [ ( lib.get_name(), lib ) for lib in self.iter_modules() ] modDict = dict(modDict) # modName is a base filename. if modName in modDict: return modDict[modName] # modName is a base filename without extension. filepart, extpart = PathOperations.split_extension(modName) if filepart and extpart: if filepart in modDict: return modDict[filepart] # modName is a base address. try: baseAddress = HexInput.integer(modName) except ValueError: return None if self.has_module(baseAddress): return self.get_module(baseAddress) # Module not found. return None
[ "def", "get_module_by_name", "(", "self", ",", "modName", ")", ":", "# Convert modName to lowercase.", "# This helps make case insensitive string comparisons.", "modName", "=", "modName", ".", "lower", "(", ")", "# modName is an absolute pathname.", "if", "PathOperations", "....
@type modName: int @param modName: Name of the module to look for, as returned by L{Module.get_name}. If two or more modules with the same name are loaded, only one of the matching modules is returned. You can also pass a full pathname to the DLL file. This works correctly even if two modules with the same name are loaded from different paths. @rtype: L{Module} @return: C{Module} object that best matches the given name. Returns C{None} if no C{Module} can be found.
[ "@type", "modName", ":", "int", "@param", "modName", ":", "Name", "of", "the", "module", "to", "look", "for", "as", "returned", "by", "L", "{", "Module", ".", "get_name", "}", ".", "If", "two", "or", "more", "modules", "with", "the", "same", "name", ...
python
train
35.185185
MacHu-GWU/angora-project
angora/crawler/simplecrawler.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/simplecrawler.py#L194-L219
def html(self, url, timeout=None): """High level method to get http request response in text. smartly handle the encoding problem. """ response = self.get_response(url, timeout=timeout) if response: domain = self.get_domain(url) if domain in self.domain_encoding_map: # domain have been visited try: # apply extreme decoding html = self.decoder.decode(response.content, self.domain_encoding_map[domain])[0] return html except Exception as e: print(e) return None else: # never visit this domain try: html, encoding = self.decoder.autodecode(response.content) # save chardet analysis result self.domain_encoding_map[domain] = encoding return html except Exception as e: print(e) return None else: return None
[ "def", "html", "(", "self", ",", "url", ",", "timeout", "=", "None", ")", ":", "response", "=", "self", ".", "get_response", "(", "url", ",", "timeout", "=", "timeout", ")", "if", "response", ":", "domain", "=", "self", ".", "get_domain", "(", "url",...
High level method to get http request response in text. smartly handle the encoding problem.
[ "High", "level", "method", "to", "get", "http", "request", "response", "in", "text", ".", "smartly", "handle", "the", "encoding", "problem", "." ]
python
train
42
gem/oq-engine
openquake/hazardlib/probability_map.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/probability_map.py#L187-L191
def array(self): """ The underlying array of shape (N, L, I) """ return numpy.array([self[sid].array for sid in sorted(self)])
[ "def", "array", "(", "self", ")", ":", "return", "numpy", ".", "array", "(", "[", "self", "[", "sid", "]", ".", "array", "for", "sid", "in", "sorted", "(", "self", ")", "]", ")" ]
The underlying array of shape (N, L, I)
[ "The", "underlying", "array", "of", "shape", "(", "N", "L", "I", ")" ]
python
train
30.8
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L558-L595
def create_bird_config_files(bird_configuration): """Create bird configuration files per IP version. Creates bird configuration files if they don't exist. It also creates the directories where we store the history of changes, if this functionality is enabled. Arguments: bird_configuration (dict): A dictionary with settings for bird. Returns: None Raises: ValueError if we can't create bird configuration files and the directory to store the history of changes in bird configuration file. """ for ip_version in bird_configuration: # This creates the file if it doesn't exist. config_file = bird_configuration[ip_version]['config_file'] try: touch(config_file) except OSError as exc: raise ValueError("failed to create {f}:{e}" .format(f=config_file, e=exc)) if bird_configuration[ip_version]['keep_changes']: history_dir = os.path.join(os.path.dirname(config_file), 'history') try: os.mkdir(history_dir) except FileExistsError: pass except OSError as exc: raise ValueError("failed to make directory {d} for keeping a " "history of changes for {b}:{e}" .format(d=history_dir, b=config_file, e=exc)) else: print("{d} is created".format(d=history_dir))
[ "def", "create_bird_config_files", "(", "bird_configuration", ")", ":", "for", "ip_version", "in", "bird_configuration", ":", "# This creates the file if it doesn't exist.", "config_file", "=", "bird_configuration", "[", "ip_version", "]", "[", "'config_file'", "]", "try", ...
Create bird configuration files per IP version. Creates bird configuration files if they don't exist. It also creates the directories where we store the history of changes, if this functionality is enabled. Arguments: bird_configuration (dict): A dictionary with settings for bird. Returns: None Raises: ValueError if we can't create bird configuration files and the directory to store the history of changes in bird configuration file.
[ "Create", "bird", "configuration", "files", "per", "IP", "version", "." ]
python
train
38.631579
rsinger86/drf-flex-fields
rest_flex_fields/serializers.py
https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L50-L75
def _make_expanded_field_serializer( self, name, nested_expand, nested_fields, nested_omit ): """ Returns an instance of the dynamically created nested serializer. """ field_options = self.expandable_fields[name] serializer_class = field_options[0] serializer_settings = copy.deepcopy(field_options[1]) if name in nested_expand: serializer_settings["expand"] = nested_expand[name] if name in nested_fields: serializer_settings["fields"] = nested_fields[name] if name in nested_omit: serializer_settings["omit"] = nested_omit[name] if serializer_settings.get("source") == name: del serializer_settings["source"] if type(serializer_class) == str: serializer_class = self._import_serializer_class(serializer_class) return serializer_class(**serializer_settings)
[ "def", "_make_expanded_field_serializer", "(", "self", ",", "name", ",", "nested_expand", ",", "nested_fields", ",", "nested_omit", ")", ":", "field_options", "=", "self", ".", "expandable_fields", "[", "name", "]", "serializer_class", "=", "field_options", "[", "...
Returns an instance of the dynamically created nested serializer.
[ "Returns", "an", "instance", "of", "the", "dynamically", "created", "nested", "serializer", "." ]
python
train
34.884615
gamechanger/dusty
dusty/commands/run.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/commands/run.py#L113-L131
def restart_apps_or_services(app_or_service_names=None): """Restart any containers associated with Dusty, or associated with the provided app_or_service_names.""" if app_or_service_names: log_to_client("Restarting the following apps or services: {}".format(', '.join(app_or_service_names))) else: log_to_client("Restarting all active containers associated with Dusty") if app_or_service_names: specs = spec_assembler.get_assembled_specs() specs_list = [specs['apps'][app_name] for app_name in app_or_service_names if app_name in specs['apps']] repos = set() for spec in specs_list: if spec['repo']: repos = repos.union(spec_assembler.get_same_container_repos_from_spec(spec)) nfs.update_nfs_with_repos(repos) else: nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False)) compose.restart_running_services(app_or_service_names)
[ "def", "restart_apps_or_services", "(", "app_or_service_names", "=", "None", ")", ":", "if", "app_or_service_names", ":", "log_to_client", "(", "\"Restarting the following apps or services: {}\"", ".", "format", "(", "', '", ".", "join", "(", "app_or_service_names", ")", ...
Restart any containers associated with Dusty, or associated with the provided app_or_service_names.
[ "Restart", "any", "containers", "associated", "with", "Dusty", "or", "associated", "with", "the", "provided", "app_or_service_names", "." ]
python
valid
51.157895
innogames/polysh
polysh/console.py
https://github.com/innogames/polysh/blob/fbea36f3bc9f47a62d72040c48dad1776124dae3/polysh/console.py#L40-L56
def console_output(msg, logging_msg=None): """Use instead of print, to clear the status information before printing""" assert isinstance(msg, bytes) assert isinstance(logging_msg, bytes) or logging_msg is None from polysh import remote_dispatcher remote_dispatcher.log(logging_msg or msg) if remote_dispatcher.options.interactive: from polysh.stdin import the_stdin_thread the_stdin_thread.no_raw_input() global last_status_length if last_status_length: safe_write('\r{}\r'.format( last_status_length * ' ').encode()) last_status_length = 0 safe_write(msg)
[ "def", "console_output", "(", "msg", ",", "logging_msg", "=", "None", ")", ":", "assert", "isinstance", "(", "msg", ",", "bytes", ")", "assert", "isinstance", "(", "logging_msg", ",", "bytes", ")", "or", "logging_msg", "is", "None", "from", "polysh", "impo...
Use instead of print, to clear the status information before printing
[ "Use", "instead", "of", "print", "to", "clear", "the", "status", "information", "before", "printing" ]
python
train
37.764706
eternnoir/pyTelegramBotAPI
telebot/__init__.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L674-L688
def send_document(self, chat_id, data, reply_to_message_id=None, caption=None, reply_markup=None, parse_mode=None, disable_notification=None, timeout=None): """ Use this method to send general files. :param chat_id: :param data: :param reply_to_message_id: :param reply_markup: :param parse_mode: :param disable_notification: :return: API reply. """ return types.Message.de_json( apihelper.send_data(self.token, chat_id, data, 'document', reply_to_message_id, reply_markup, parse_mode, disable_notification, timeout, caption=caption))
[ "def", "send_document", "(", "self", ",", "chat_id", ",", "data", ",", "reply_to_message_id", "=", "None", ",", "caption", "=", "None", ",", "reply_markup", "=", "None", ",", "parse_mode", "=", "None", ",", "disable_notification", "=", "None", ",", "timeout"...
Use this method to send general files. :param chat_id: :param data: :param reply_to_message_id: :param reply_markup: :param parse_mode: :param disable_notification: :return: API reply.
[ "Use", "this", "method", "to", "send", "general", "files", ".", ":", "param", "chat_id", ":", ":", "param", "data", ":", ":", "param", "reply_to_message_id", ":", ":", "param", "reply_markup", ":", ":", "param", "parse_mode", ":", ":", "param", "disable_no...
python
train
44.933333
MartinThoma/hwrt
hwrt/datasets/crohme_eval.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/datasets/crohme_eval.py#L21-L41
def evaluate_dir(sample_dir): """Evaluate all recordings in `sample_dir`. Parameters ---------- sample_dir : string The path to a directory with *.inkml files. Returns ------- list of dictionaries Each dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'latex' and 'probability' """ results = [] if sample_dir[-1] == "/": sample_dir = sample_dir[:-1] for filename in glob.glob("%s/*.inkml" % sample_dir): results.append(evaluate_inkml(filename)) return results
[ "def", "evaluate_dir", "(", "sample_dir", ")", ":", "results", "=", "[", "]", "if", "sample_dir", "[", "-", "1", "]", "==", "\"/\"", ":", "sample_dir", "=", "sample_dir", "[", ":", "-", "1", "]", "for", "filename", "in", "glob", ".", "glob", "(", "...
Evaluate all recordings in `sample_dir`. Parameters ---------- sample_dir : string The path to a directory with *.inkml files. Returns ------- list of dictionaries Each dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'latex' and 'probability'
[ "Evaluate", "all", "recordings", "in", "sample_dir", "." ]
python
train
29.761905
RJT1990/pyflux
pyflux/gas/gasllt.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasllt.py#L389-L448
def _sim_prediction(self, theta, theta_t, Y, scores, h, t_params, simulations): """ Simulates a h-step ahead mean prediction Parameters ---------- theta : np.array The past predicted values theta_t : np.array The past local linear trend Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables simulations : int How many simulations to perform Returns ---------- Matrix of simulations """ model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_params) sim_vector = np.zeros([simulations,h]) for n in range(0,simulations): Y_exp = Y.copy() theta_exp = theta.copy() theta_t_exp = theta_t.copy() scores_exp = scores.copy() #(TODO: vectorize the inner construction here) for t in range(0,h): new_value1 = theta_t_exp[-1] + theta_exp[-1] + t_params[0]*scores_exp[-1] new_value2 = theta_t_exp[-1] + t_params[1]*scores_exp[-1] if self.model_name2 == "Exponential": rnd_value = self.family.draw_variable(1.0/self.link(new_value1),model_scale,model_shape,model_skewness,1)[0] else: rnd_value = self.family.draw_variable(self.link(new_value1),model_scale,model_shape,model_skewness,1)[0] Y_exp = np.append(Y_exp,[rnd_value]) theta_exp = np.append(theta_exp,[new_value1]) # For indexing consistency theta_t_exp = np.append(theta_t_exp,[new_value2]) scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero sim_vector[n] = Y_exp[-h:] return np.transpose(sim_vector)
[ "def", "_sim_prediction", "(", "self", ",", "theta", ",", "theta_t", ",", "Y", ",", "scores", ",", "h", ",", "t_params", ",", "simulations", ")", ":", "model_scale", ",", "model_shape", ",", "model_skewness", "=", "self", ".", "_get_scale_and_shape", "(", ...
Simulates a h-step ahead mean prediction Parameters ---------- theta : np.array The past predicted values theta_t : np.array The past local linear trend Y : np.array The past data scores : np.array The past scores h : int How many steps ahead for the prediction t_params : np.array A vector of (transformed) latent variables simulations : int How many simulations to perform Returns ---------- Matrix of simulations
[ "Simulates", "a", "h", "-", "step", "ahead", "mean", "prediction" ]
python
train
33.083333
pinterest/pymemcache
pymemcache/client/base.py
https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L258-L261
def check_key(self, key): """Checks key and add key_prefix.""" return _check_key(key, allow_unicode_keys=self.allow_unicode_keys, key_prefix=self.key_prefix)
[ "def", "check_key", "(", "self", ",", "key", ")", ":", "return", "_check_key", "(", "key", ",", "allow_unicode_keys", "=", "self", ".", "allow_unicode_keys", ",", "key_prefix", "=", "self", ".", "key_prefix", ")" ]
Checks key and add key_prefix.
[ "Checks", "key", "and", "add", "key_prefix", "." ]
python
train
49
rodluger/everest
everest/detrender.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/detrender.py#L375-L397
def cv_compute(self, b, A, B, C, mK, f, m1, m2): ''' Compute the model (cross-validation step only) for chunk :py:obj:`b`. ''' A = np.sum([l * a for l, a in zip(self.lam[b], A) if l is not None], axis=0) B = np.sum([l * b for l, b in zip(self.lam[b], B) if l is not None], axis=0) W = np.linalg.solve(mK + A + C, f) if self.transit_model is None: model = np.dot(B, W) else: w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W) for n, l in enumerate(self.lam[b]) if l is not None]) model = np.dot(np.hstack( [self.X(n, m1) for n, l in enumerate(self.lam[b]) if l is not None]), w_pld) model -= np.nanmedian(model) return model
[ "def", "cv_compute", "(", "self", ",", "b", ",", "A", ",", "B", ",", "C", ",", "mK", ",", "f", ",", "m1", ",", "m2", ")", ":", "A", "=", "np", ".", "sum", "(", "[", "l", "*", "a", "for", "l", ",", "a", "in", "zip", "(", "self", ".", "...
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
[ "Compute", "the", "model", "(", "cross", "-", "validation", "step", "only", ")", "for", "chunk", ":", "py", ":", "obj", ":", "b", "." ]
python
train
37.782609
apache/spark
python/pyspark/sql/functions.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L314-L329
def approx_count_distinct(col, rsd=None): """Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`. :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`countDistinct` >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)] """ sc = SparkContext._active_spark_context if rsd is None: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col)) else: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd) return Column(jc)
[ "def", "approx_count_distinct", "(", "col", ",", "rsd", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "rsd", "is", "None", ":", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "approx_count_distinct", "(", "_to_...
Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`. :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`countDistinct` >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)]
[ "Aggregate", "function", ":", "returns", "a", "new", ":", "class", ":", "Column", "for", "approximate", "distinct", "count", "of", "column", "col", "." ]
python
train
40.1875
deschler/django-modeltranslation
modeltranslation/widgets.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/widgets.py#L53-L58
def media(self): """ Combines media of both components and adds a small script that unchecks the clear box, when a value in any wrapped input is modified. """ return self.widget.media + self.checkbox.media + Media(self.Media)
[ "def", "media", "(", "self", ")", ":", "return", "self", ".", "widget", ".", "media", "+", "self", ".", "checkbox", ".", "media", "+", "Media", "(", "self", ".", "Media", ")" ]
Combines media of both components and adds a small script that unchecks the clear box, when a value in any wrapped input is modified.
[ "Combines", "media", "of", "both", "components", "and", "adds", "a", "small", "script", "that", "unchecks", "the", "clear", "box", "when", "a", "value", "in", "any", "wrapped", "input", "is", "modified", "." ]
python
train
43.333333
BeyondTheClouds/enoslib
docs/tutorials/using-tasks/step2.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/docs/tutorials/using-tasks/step2.py#L32-L40
def up(force=True, env=None, **kwargs): "Starts a new experiment" inventory = os.path.join(os.getcwd(), "hosts") conf = Configuration.from_dictionnary(provider_conf) provider = Enos_vagrant(conf) roles, networks = provider.init() check_networks(roles, networks) env["roles"] = roles env["networks"] = networks
[ "def", "up", "(", "force", "=", "True", ",", "env", "=", "None", ",", "*", "*", "kwargs", ")", ":", "inventory", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "\"hosts\"", ")", "conf", "=", "Configuration", ".", ...
Starts a new experiment
[ "Starts", "a", "new", "experiment" ]
python
train
37
Clinical-Genomics/scout
scout/adapter/mongo/index.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/index.py#L74-L79
def drop_indexes(self): """Delete all indexes for the database""" LOG.warning("Dropping all indexe") for collection_name in INDEXES: LOG.warning("Dropping all indexes for collection name %s", collection_name) self.db[collection_name].drop_indexes()
[ "def", "drop_indexes", "(", "self", ")", ":", "LOG", ".", "warning", "(", "\"Dropping all indexe\"", ")", "for", "collection_name", "in", "INDEXES", ":", "LOG", ".", "warning", "(", "\"Dropping all indexes for collection name %s\"", ",", "collection_name", ")", "sel...
Delete all indexes for the database
[ "Delete", "all", "indexes", "for", "the", "database" ]
python
test
48.5
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1022-L1046
def to_native_types(self, slicer=None, **kwargs): """ Format specified values of `self` and return them. Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values """ values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs)
[ "def", "to_native_types", "(", "self", ",", "slicer", "=", "None", ",", "*", "*", "kwargs", ")", ":", "values", "=", "self", "if", "slicer", "is", "not", "None", ":", "values", "=", "values", "[", "slicer", "]", "return", "values", ".", "_format_native...
Format specified values of `self` and return them. Parameters ---------- slicer : int, array-like An indexer into `self` that specifies which values are used in the formatting process. kwargs : dict Options for specifying how the values should be formatted. These options include the following: 1) na_rep : str The value that serves as a placeholder for NULL values 2) quoting : bool or None Whether or not there are quoted values in `self` 3) date_format : str The format used to represent date-like values
[ "Format", "specified", "values", "of", "self", "and", "return", "them", "." ]
python
train
34.96
lemieuxl/pyGenClean
pyGenClean/DupSNPs/duplicated_snps.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/DupSNPs/duplicated_snps.py#L933-L1063
def computeStatistics(tped, tfam, snps): """Computes the completion and concordance of each SNPs. :param tped: a representation of the ``tped``. :param tfam: a representation of the ``tfam`` :param snps: the position of the duplicated markers in the ``tped``. :type tped: numpy.array :type tfam: list :type snps: dict :returns: a tuple containing the completion of duplicated markers (:py:class:`numpy.array`) as first element, and the concordance (:py:class:`dict`) of duplicated markers, as last element. A marker's completion is compute using this formula (where :math:`G_i` is the set of genotypes for the marker :math:`i`): .. math:: Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||} {||G_i||} The pairwise concordance between duplicated markers is compute as follow (where :math:`G_i` and :math:`G_j` are the sets of genotypes for markers :math:`i` and :math:`j`, respectively): .. math:: Concordance_{i,j} = \\frac{ ||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0|| }{ ||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0|| } Hence, we only computes the numerators and denominators of the completion and concordance, for future reference. .. note:: When the genotypes are not comparable, the function tries to flip one of the genotype to see if it becomes comparable. 
""" # The completion data type completion = np.array([[0 for i in xrange(len(tped))], [0 for i in xrange(len(tped))]]) # The concordance data type concordance = {} for snpID in snps.keys(): nbDup = len(snps[snpID]) concordance[snpID] = [ np.asmatrix(np.zeros((nbDup, nbDup), dtype=int)), np.asmatrix(np.zeros((nbDup, nbDup), dtype=int)) ] # The women and the no sex menIndex = np.where(tfam[:, 4] == "1") womenIndex = np.where(tfam[:, 4] == "2") noSexIndex = np.where(tfam[:, 4] == "0") for snpID, indexes in snps.iteritems(): nbDup = len(indexes) currGenotypes = tped[indexes, 4:] chromosome, position = snpID # if chromosome == "24": # # Remove the heterozygous men # menToRemove = getIndexOfHeteroMen(currGenotypes, menIndex) # # Remove the women and the no sex # currGenotypes = np.delete(currGenotypes, # np.hstack((womenIndex, noSexIndex, # menToRemove)), 1) # elif chromosome == "23": # # Remove the heterozygous men # menToRemove = getIndexOfHeteroMen(currGenotypes, menIndex) # # Remove the no sex # currGenotypes = np.delete(currGenotypes, # np.hstack((noSexIndex, menToRemove)), # 1) for i in xrange(nbDup): # Compute completion here completion[0][indexes[i]] = len( np.where(currGenotypes[i] != "0 0")[0] ) completion[1][indexes[i]] = len(currGenotypes[i]) for j in xrange(i+1, nbDup): # Compute concordance here # Removing samples with at least one null genotype nullGenotypeIndexes = np.where( np.any(currGenotypes[[i, j]] == "0 0", 0) ) subGenotypes = np.delete( currGenotypes, nullGenotypeIndexes, 1, ) # Finding the errors in the subseted genotypes errorIndexes = np.where(subGenotypes[i] != subGenotypes[j])[0] nbDiff = len(errorIndexes) for k in errorIndexes: # Getting the genotypes genotype1 = set(subGenotypes[i, k].split(" ")) genotype2 = set(subGenotypes[j, k].split(" ")) # Checking for flips if len(genotype1) == len(genotype2): # Both have the same number of different alleles, # so they might be flipped genotype2 = flipGenotype(genotype2) if genotype1 == genotype2: # 
The genotypes are equivalent after the flip nbDiff -= 1 # Updating the concordance nbTot = len(subGenotypes[i]) concordance[snpID][0][i, j] = nbTot - nbDiff concordance[snpID][0][j, i] = nbTot - nbDiff if nbTot == 0: # We will have a division by 0... nbTot = 1 concordance[snpID][1][i, j] = nbTot concordance[snpID][1][j, i] = nbTot for snpID in concordance.iterkeys(): for i in range(len(concordance[snpID][0])): concordance[snpID][0][i, i] = 1 concordance[snpID][1][i, i] = 1 return completion, concordance
[ "def", "computeStatistics", "(", "tped", ",", "tfam", ",", "snps", ")", ":", "# The completion data type", "completion", "=", "np", ".", "array", "(", "[", "[", "0", "for", "i", "in", "xrange", "(", "len", "(", "tped", ")", ")", "]", ",", "[", "0", ...
Computes the completion and concordance of each SNPs. :param tped: a representation of the ``tped``. :param tfam: a representation of the ``tfam`` :param snps: the position of the duplicated markers in the ``tped``. :type tped: numpy.array :type tfam: list :type snps: dict :returns: a tuple containing the completion of duplicated markers (:py:class:`numpy.array`) as first element, and the concordance (:py:class:`dict`) of duplicated markers, as last element. A marker's completion is compute using this formula (where :math:`G_i` is the set of genotypes for the marker :math:`i`): .. math:: Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||} {||G_i||} The pairwise concordance between duplicated markers is compute as follow (where :math:`G_i` and :math:`G_j` are the sets of genotypes for markers :math:`i` and :math:`j`, respectively): .. math:: Concordance_{i,j} = \\frac{ ||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0|| }{ ||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0|| } Hence, we only computes the numerators and denominators of the completion and concordance, for future reference. .. note:: When the genotypes are not comparable, the function tries to flip one of the genotype to see if it becomes comparable.
[ "Computes", "the", "completion", "and", "concordance", "of", "each", "SNPs", "." ]
python
train
38.839695
Jajcus/pyxmpp2
pyxmpp2/ext/muc/muc.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muc.py#L494-L504
def send_message(self,body): """ Send a message to the room. :Parameters: - `body`: the message body. :Types: - `body`: `unicode` """ m=Message(to_jid=self.room_jid.bare(),stanza_type="groupchat",body=body) self.manager.stream.send(m)
[ "def", "send_message", "(", "self", ",", "body", ")", ":", "m", "=", "Message", "(", "to_jid", "=", "self", ".", "room_jid", ".", "bare", "(", ")", ",", "stanza_type", "=", "\"groupchat\"", ",", "body", "=", "body", ")", "self", ".", "manager", ".", ...
Send a message to the room. :Parameters: - `body`: the message body. :Types: - `body`: `unicode`
[ "Send", "a", "message", "to", "the", "room", "." ]
python
valid
27.727273
DataBiosphere/toil
src/toil/wdl/wdl_analysis.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/wdl/wdl_analysis.py#L883-L927
def parse_declaration_expressn_fncall(self, name, params, es): """ Parses out cromwell's built-in function calls. Some of these are special and need minor adjustments, for example length(), which is equivalent to python's len() function. Or sub, which is equivalent to re.sub(), but needs a rearrangement of input variables. Known to be supported: sub, size, read_tsv, length, select_first. :param name: :param params: :param es: :return: """ # name of the function if isinstance(name, wdl_parser.Terminal): if name.str: # use python's built-in for length() if name.source_string == 'length': es = es + 'len(' elif name.source_string == 'stdout': return es + 'stdout' else: es = es + name.source_string + '(' else: raise NotImplementedError elif isinstance(name, wdl_parser.Ast): raise NotImplementedError elif isinstance(name, wdl_parser.AstList): raise NotImplementedError # use python's re.sub() for sub() if name.source_string == 'sub': es_params = self.parse_declaration_expressn_fncall_SUBparams(params) else: es_params = self.parse_declaration_expressn_fncall_normalparams(params) if name.source_string == 'glob': return es + es_params + ', tempDir)' elif name.source_string == 'size': return es + es_params + ', fileStore=fileStore)' else: return es + es_params + ')'
[ "def", "parse_declaration_expressn_fncall", "(", "self", ",", "name", ",", "params", ",", "es", ")", ":", "# name of the function", "if", "isinstance", "(", "name", ",", "wdl_parser", ".", "Terminal", ")", ":", "if", "name", ".", "str", ":", "# use python's bu...
Parses out cromwell's built-in function calls. Some of these are special and need minor adjustments, for example length(), which is equivalent to python's len() function. Or sub, which is equivalent to re.sub(), but needs a rearrangement of input variables. Known to be supported: sub, size, read_tsv, length, select_first. :param name: :param params: :param es: :return:
[ "Parses", "out", "cromwell", "s", "built", "-", "in", "function", "calls", "." ]
python
train
36.911111
d0c-s4vage/pfp
pfp/interp.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L281-L293
def push(self, new_scope=None): """Create a new scope :returns: TODO """ if new_scope is None: new_scope = { "types": {}, "vars": {} } self._curr_scope = new_scope self._dlog("pushing new scope, scope level = {}".format(self.level())) self._scope_stack.append(self._curr_scope)
[ "def", "push", "(", "self", ",", "new_scope", "=", "None", ")", ":", "if", "new_scope", "is", "None", ":", "new_scope", "=", "{", "\"types\"", ":", "{", "}", ",", "\"vars\"", ":", "{", "}", "}", "self", ".", "_curr_scope", "=", "new_scope", "self", ...
Create a new scope :returns: TODO
[ "Create", "a", "new", "scope", ":", "returns", ":", "TODO" ]
python
train
29.076923
pymc-devs/pymc
pymc/StepMethods.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L292-L300
def loglike(self): ''' The summed log-probability of all stochastic variables that depend on self.stochastics, with self.stochastics removed. ''' sum = logp_of_set(self.children) if self.verbose > 2: print_('\t' + self._id + ' Current log-likelihood ', sum) return sum
[ "def", "loglike", "(", "self", ")", ":", "sum", "=", "logp_of_set", "(", "self", ".", "children", ")", "if", "self", ".", "verbose", ">", "2", ":", "print_", "(", "'\\t'", "+", "self", ".", "_id", "+", "' Current log-likelihood '", ",", "sum", ")", "...
The summed log-probability of all stochastic variables that depend on self.stochastics, with self.stochastics removed.
[ "The", "summed", "log", "-", "probability", "of", "all", "stochastic", "variables", "that", "depend", "on", "self", ".", "stochastics", "with", "self", ".", "stochastics", "removed", "." ]
python
train
36.444444
yunojuno/elasticsearch-django
elasticsearch_django/apps.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/apps.py#L112-L137
def _update_search_index(*, instance, index, update_fields): """Process index / update search index update actions.""" if not _in_search_queryset(instance=instance, index=index): logger.debug( "Object (%r) is not in search queryset, ignoring update.", instance ) return try: if update_fields: pre_update.send( sender=instance.__class__, instance=instance, index=index, update_fields=update_fields, ) if settings.auto_sync(instance): instance.update_search_document( index=index, update_fields=update_fields ) else: pre_index.send(sender=instance.__class__, instance=instance, index=index) if settings.auto_sync(instance): instance.index_search_document(index=index) except Exception: logger.exception("Error handling 'post_save' signal for %s", instance)
[ "def", "_update_search_index", "(", "*", ",", "instance", ",", "index", ",", "update_fields", ")", ":", "if", "not", "_in_search_queryset", "(", "instance", "=", "instance", ",", "index", "=", "index", ")", ":", "logger", ".", "debug", "(", "\"Object (%r) is...
Process index / update search index update actions.
[ "Process", "index", "/", "update", "search", "index", "update", "actions", "." ]
python
train
38.384615
havardgulldahl/jottalib
src/jottalib/JFS.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L1103-L1180
def up(self, path, fileobject, upload_callback=None, resume_offset=None): "Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API" """ *** WHAT DID I DO?: created file *** POST https://up.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/testFolder/testFile.txt?cphash=d41d8cd98f00b204e9800998ecf8427e HTTP/1.1 User-Agent: Desktop_Jottacloud 3.0.22.203 Windows_8 6.2.9200 x86_64 Authorization: Basic ****************** X-JottaAPIVersion: 2.2 X-Jfs-DeviceName: **CENSORED** JCreated: 2014-10-26T12:33:09Z+00:00 JModified: 2014-10-26T12:33:09Z+00:00 JMd5: d41d8cd98f00b204e9800998ecf8427e JSize: 0 jx_csid: dOq1NCRer6uxuR/bFxihasj4QzBU3Tn7S2jVF1CE71YW1fGhxPFYYsw2T0XYjnJBtxKQzhWixmg+u5kp8bJtvMpIFHbhSDmPPSk+PVBf2UdFhXxli4YEII9a97eO4XBfn5QWAV1LJ2Z9l59jmnLkJQgfOyexkuQbxHdSLgQPXu8= jx_lisence: M1v3p31oQf2OXvyAn2GvfS2I2oiMXrw+cofuMVHHI/2K+wlxhj22VkON6fN6fJMsGNcMzvcFYfmKPgL0Yf8TCO5A/6ULk6N8LctY3+fPegx+Jgbyc4hh0IXwnOdqa+UZ6Lg1ub4VXr5XnX3P3IxeVDg0VbcJnzv4TbFA+oMXmfM= Content-Type: application/octet-stream Content-Length: 0 Connection: Keep-Alive Accept-Encoding: gzip Accept-Language: nb-NO,en,* Host: up.jottacloud.com """ url = path.replace('www.jottacloud.com', 'up.jottacloud.com') # Calculate file length fileobject.seek(0,2) contentlen = fileobject.tell() # Rewind read head to correct offset # If we're resuming an incomplete upload, continue from that offset try: fileobject.seek(resume_offset) except TypeError as e: if resume_offset is None: fileobject.seek(0) except IOError as e: log.exception(e) log.warning('Could not seek to file offset %r, re-starting upload of %r from 0', resume_offset, url) fileobject.seek(0) # Calculate file md5 hash md5hash = calculate_md5(fileobject) log.debug('posting content (len %s, hash %s) to url %r', contentlen, md5hash, url) try: mtime = os.path.getmtime(fileobject.name) timestamp = datetime.datetime.fromtimestamp(mtime).isoformat() except Exception as e: if hasattr(fileobject, 'name'): log.exception('Problems getting 
mtime from fileobjet: %r', e) timestamp = datetime.datetime.now().isoformat() params = {'cphash': md5hash} m = requests_toolbelt.MultipartEncoder({ 'md5': ('', md5hash), 'modified': ('', timestamp), 'created': ('', timestamp), 'file': (os.path.basename(url), fileobject, 'application/octet-stream'), }) headers = {'JMd5':md5hash, 'JCreated': timestamp, 'JModified': timestamp, 'X-Jfs-DeviceName': 'Jotta', 'JSize': str(contentlen), # headers have to be strings or bytes , cf #122 'jx_csid': '', 'jx_lisence': '', 'content-type': m.content_type, } fileobject.seek(0) # rewind read index for requests.post files = {'md5': ('', md5hash), 'modified': ('', timestamp), 'created': ('', timestamp), 'file': (os.path.basename(url), fileobject, 'application/octet-stream')} return self.post(url, None, files=files, params=params, extra_headers=headers, upload_callback=upload_callback)
[ "def", "up", "(", "self", ",", "path", ",", "fileobject", ",", "upload_callback", "=", "None", ",", "resume_offset", "=", "None", ")", ":", "\"\"\"\n\n *** WHAT DID I DO?: created file\n ***\n\n POST https://up.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/testF...
Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API
[ "Upload", "a", "fileobject", "to", "path", "HTTP", "POST", "-", "ing", "to", "up", ".", "jottacloud", ".", "com", "using", "the", "JottaCloud", "API" ]
python
train
46
tensorflow/datasets
tensorflow_datasets/image/cifar.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/image/cifar.py#L110-L127
def _generate_examples(self, filepaths): """Generate CIFAR examples as dicts. Shared across CIFAR-{10, 100}. Uses self._cifar_info as configuration. Args: filepaths (list[str]): The files to use to generate the data. Yields: The cifar examples, as defined in the dataset info features. """ label_keys = self._cifar_info.label_keys for path in filepaths: for labels, np_image in _load_data(path, len(label_keys)): row = dict(zip(label_keys, labels)) row["image"] = np_image yield row
[ "def", "_generate_examples", "(", "self", ",", "filepaths", ")", ":", "label_keys", "=", "self", ".", "_cifar_info", ".", "label_keys", "for", "path", "in", "filepaths", ":", "for", "labels", ",", "np_image", "in", "_load_data", "(", "path", ",", "len", "(...
Generate CIFAR examples as dicts. Shared across CIFAR-{10, 100}. Uses self._cifar_info as configuration. Args: filepaths (list[str]): The files to use to generate the data. Yields: The cifar examples, as defined in the dataset info features.
[ "Generate", "CIFAR", "examples", "as", "dicts", "." ]
python
train
30.111111
pgmpy/pgmpy
pgmpy/readwrite/BIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/BIF.py#L69-L85
def get_variable_grammar(self): """ A method that returns variable grammar """ # Defining a expression for valid word word_expr = Word(alphanums + '_' + '-') word_expr2 = Word(initChars=printables, excludeChars=['{', '}', ',', ' ']) name_expr = Suppress('variable') + word_expr + Suppress('{') state_expr = ZeroOrMore(word_expr2 + Optional(Suppress(","))) # Defining a variable state expression variable_state_expr = Suppress('type') + Suppress(word_expr) + Suppress('[') + Suppress(Word(nums)) + \ Suppress(']') + Suppress('{') + Group(state_expr) + Suppress('}') + Suppress(';') # variable states is of the form type description [args] { val1, val2 }; (comma may or may not be present) property_expr = Suppress('property') + CharsNotIn(';') + Suppress(';') # Creating a expr to find property return name_expr, variable_state_expr, property_expr
[ "def", "get_variable_grammar", "(", "self", ")", ":", "# Defining a expression for valid word", "word_expr", "=", "Word", "(", "alphanums", "+", "'_'", "+", "'-'", ")", "word_expr2", "=", "Word", "(", "initChars", "=", "printables", ",", "excludeChars", "=", "["...
A method that returns variable grammar
[ "A", "method", "that", "returns", "variable", "grammar" ]
python
train
55.882353
jtambasco/modesolverpy
modesolverpy/structure_base.py
https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L142-L151
def y(self): ''' np.array: The grid points in y. ''' if None not in (self.y_min, self.y_max, self.y_step) and \ self.y_min != self.y_max: y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step) else: y = np.array([]) return y
[ "def", "y", "(", "self", ")", ":", "if", "None", "not", "in", "(", "self", ".", "y_min", ",", "self", ".", "y_max", ",", "self", ".", "y_step", ")", "and", "self", ".", "y_min", "!=", "self", ".", "y_max", ":", "y", "=", "np", ".", "arange", ...
np.array: The grid points in y.
[ "np", ".", "array", ":", "The", "grid", "points", "in", "y", "." ]
python
train
31.5
scanny/python-pptx
pptx/oxml/shapes/autoshape.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/shapes/autoshape.py#L89-L97
def add_moveTo(self, x, y): """Return a newly created `a:moveTo` subtree with point *(x, y)*. The new `a:moveTo` element is appended to this `a:path` element. """ moveTo = self._add_moveTo() pt = moveTo._add_pt() pt.x, pt.y = x, y return moveTo
[ "def", "add_moveTo", "(", "self", ",", "x", ",", "y", ")", ":", "moveTo", "=", "self", ".", "_add_moveTo", "(", ")", "pt", "=", "moveTo", ".", "_add_pt", "(", ")", "pt", ".", "x", ",", "pt", ".", "y", "=", "x", ",", "y", "return", "moveTo" ]
Return a newly created `a:moveTo` subtree with point *(x, y)*. The new `a:moveTo` element is appended to this `a:path` element.
[ "Return", "a", "newly", "created", "a", ":", "moveTo", "subtree", "with", "point", "*", "(", "x", "y", ")", "*", "." ]
python
train
32.555556
rakanalh/pocket-api
pocket/__init__.py
https://github.com/rakanalh/pocket-api/blob/d8222dd34e3aa5e545f9b8ba407fa277c734ab82/pocket/__init__.py#L61-L76
def bulk_add(self, item_id, ref_id=None, tags=None, time=None, title=None, url=None): """ Add an item to list See: https://getpocket.com/developer/docs/v3/modify :param item_id: int :param ref_id: tweet_id :param tags: list of tags :param time: time of action :param title: given title :param url: item url :return: self for chaining :rtype: Pocket """ self._add_action('add') return self
[ "def", "bulk_add", "(", "self", ",", "item_id", ",", "ref_id", "=", "None", ",", "tags", "=", "None", ",", "time", "=", "None", ",", "title", "=", "None", ",", "url", "=", "None", ")", ":", "self", ".", "_add_action", "(", "'add'", ")", "return", ...
Add an item to list See: https://getpocket.com/developer/docs/v3/modify :param item_id: int :param ref_id: tweet_id :param tags: list of tags :param time: time of action :param title: given title :param url: item url :return: self for chaining :rtype: Pocket
[ "Add", "an", "item", "to", "list", "See", ":", "https", ":", "//", "getpocket", ".", "com", "/", "developer", "/", "docs", "/", "v3", "/", "modify", ":", "param", "item_id", ":", "int", ":", "param", "ref_id", ":", "tweet_id", ":", "param", "tags", ...
python
train
31.375
ccubed/Shosetsu
Shosetsu/Parsing.py
https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L16-L35
async def parse_release_results(soup): """ Parse Releases search pages. :param soup: The BS4 class object :return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel. It contains a Date released, Platform, Ages group and Name. """ soup = list(soup.find_all('table', class_='stripe')[0].children)[1:] releases = [] for item in soup: child = list(item.children) temp_rel = {'date': None, 'ages': None, 'platform': None, 'name': None} temp_rel['date'] = child[0].string temp_rel['ages'] = child[1].string temp_rel['platform'] = child[2].abbr.get('title') temp_rel['name'] = child[3].a.string releases.append(temp_rel) del temp_rel return releases
[ "async", "def", "parse_release_results", "(", "soup", ")", ":", "soup", "=", "list", "(", "soup", ".", "find_all", "(", "'table'", ",", "class_", "=", "'stripe'", ")", "[", "0", "]", ".", "children", ")", "[", "1", ":", "]", "releases", "=", "[", "...
Parse Releases search pages. :param soup: The BS4 class object :return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel. It contains a Date released, Platform, Ages group and Name.
[ "Parse", "Releases", "search", "pages", "." ]
python
test
39.65
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py#L41-L54
def delete_agent_cloud(self, agent_cloud_id): """DeleteAgentCloud. [Preview API] :param int agent_cloud_id: :rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>` """ route_values = {} if agent_cloud_id is not None: route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int') response = self._send(http_method='DELETE', location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9', version='5.1-preview.1', route_values=route_values) return self._deserialize('TaskAgentCloud', response)
[ "def", "delete_agent_cloud", "(", "self", ",", "agent_cloud_id", ")", ":", "route_values", "=", "{", "}", "if", "agent_cloud_id", "is", "not", "None", ":", "route_values", "[", "'agentCloudId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'agent_c...
DeleteAgentCloud. [Preview API] :param int agent_cloud_id: :rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>`
[ "DeleteAgentCloud", ".", "[", "Preview", "API", "]", ":", "param", "int", "agent_cloud_id", ":", ":", "rtype", ":", ":", "class", ":", "<TaskAgentCloud", ">", "<azure", ".", "devops", ".", "v5_1", ".", "task", "-", "agent", ".", "models", ".", "TaskAgent...
python
train
50.071429
spyder-ide/spyder
spyder/plugins/editor/panels/scrollflag.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/scrollflag.py#L185-L188
def get_scrollbar_value_height(self): """Return the value span height of the scrollbar""" vsb = self.editor.verticalScrollBar() return vsb.maximum()-vsb.minimum()+vsb.pageStep()
[ "def", "get_scrollbar_value_height", "(", "self", ")", ":", "vsb", "=", "self", ".", "editor", ".", "verticalScrollBar", "(", ")", "return", "vsb", ".", "maximum", "(", ")", "-", "vsb", ".", "minimum", "(", ")", "+", "vsb", ".", "pageStep", "(", ")" ]
Return the value span height of the scrollbar
[ "Return", "the", "value", "span", "height", "of", "the", "scrollbar" ]
python
train
49.5
dagster-io/dagster
python_modules/libraries/dagster-gcp/dagster_gcp/types.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-gcp/dagster_gcp/types.py#L86-L93
def _is_valid_dataset(config_value): '''Datasets must be of form "project.dataset" or "dataset" ''' return re.match( # regex matches: project.table -- OR -- table r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'$|^' + RE_DS_TABLE + r'$', config_value, )
[ "def", "_is_valid_dataset", "(", "config_value", ")", ":", "return", "re", ".", "match", "(", "# regex matches: project.table -- OR -- table", "r'^'", "+", "RE_PROJECT", "+", "r'\\.'", "+", "RE_DS_TABLE", "+", "r'$|^'", "+", "RE_DS_TABLE", "+", "r'$'", ",", "confi...
Datasets must be of form "project.dataset" or "dataset"
[ "Datasets", "must", "be", "of", "form", "project", ".", "dataset", "or", "dataset" ]
python
test
35.25
openstack/networking-cisco
networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/9148d96f9b39_rename_tenantid_to_projectid.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/9148d96f9b39_rename_tenantid_to_projectid.py#L35-L47
def get_inspector(): """Reuse inspector""" global _INSPECTOR if _INSPECTOR: return _INSPECTOR else: bind = op.get_bind() _INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind) return _INSPECTOR
[ "def", "get_inspector", "(", ")", ":", "global", "_INSPECTOR", "if", "_INSPECTOR", ":", "return", "_INSPECTOR", "else", ":", "bind", "=", "op", ".", "get_bind", "(", ")", "_INSPECTOR", "=", "sa", ".", "engine", ".", "reflection", ".", "Inspector", ".", "...
Reuse inspector
[ "Reuse", "inspector" ]
python
train
18.153846
carsonyl/pypac
pypac/os_settings.py
https://github.com/carsonyl/pypac/blob/9e14a9e84a1ec5513a4fa819573073942fed0980/pypac/os_settings.py#L53-L76
def autoconfig_url_from_preferences(): """ Get the PAC ``AutoConfigURL`` value from the macOS System Preferences. This setting is visible as the "URL" field in System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration. :return: The value from the registry, or None if the value isn't configured or available. Note that it may be local filesystem path instead of a URL. :rtype: str|None :raises NotDarwinError: If called on a non-macOS/OSX platform. """ if not ON_DARWIN: raise NotDarwinError() try: config = SystemConfiguration.SCDynamicStoreCopyProxies(None) except AttributeError: return # Key or value not found. if all(('ProxyAutoConfigEnable' in config, 'ProxyAutoConfigURLString' in config, not config.get('ProxyAutoDiscoveryEnable', 0))): # Only return a value if it is enabled, not empty, and auto discovery is disabled. return str(config['ProxyAutoConfigURLString'])
[ "def", "autoconfig_url_from_preferences", "(", ")", ":", "if", "not", "ON_DARWIN", ":", "raise", "NotDarwinError", "(", ")", "try", ":", "config", "=", "SystemConfiguration", ".", "SCDynamicStoreCopyProxies", "(", "None", ")", "except", "AttributeError", ":", "ret...
Get the PAC ``AutoConfigURL`` value from the macOS System Preferences. This setting is visible as the "URL" field in System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration. :return: The value from the registry, or None if the value isn't configured or available. Note that it may be local filesystem path instead of a URL. :rtype: str|None :raises NotDarwinError: If called on a non-macOS/OSX platform.
[ "Get", "the", "PAC", "AutoConfigURL", "value", "from", "the", "macOS", "System", "Preferences", ".", "This", "setting", "is", "visible", "as", "the", "URL", "field", "in", "System", "Preferences", ">", "Network", ">", "Advanced", "...", ">", "Proxies", ">", ...
python
train
42.75
google/flatbuffers
python/flatbuffers/table.py
https://github.com/google/flatbuffers/blob/6cc30b3272d79c85db7d4871ac0aa69541dc89de/python/flatbuffers/table.py#L66-L75
def Vector(self, off): """Vector retrieves the start of data of the vector whose offset is stored at "off" in this object.""" N.enforce_number(off, N.UOffsetTFlags) off += self.Pos x = off + self.Get(N.UOffsetTFlags, off) # data starts after metadata containing the vector length x += N.UOffsetTFlags.bytewidth return x
[ "def", "Vector", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "off", "+=", "self", ".", "Pos", "x", "=", "off", "+", "self", ".", "Get", "(", "N", ".", "UOffsetTFlags", ",", "off"...
Vector retrieves the start of data of the vector whose offset is stored at "off" in this object.
[ "Vector", "retrieves", "the", "start", "of", "data", "of", "the", "vector", "whose", "offset", "is", "stored", "at", "off", "in", "this", "object", "." ]
python
train
37.8
biolink/ontobio
ontobio/io/parsereport.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/parsereport.py#L26-L39
def message(self, message: Message, rule: Optional[int]) -> None: """ Add a message to the appropriate list of messages. If `rule` refers to a valid id range for a go rule, the message is entered in a list keyed by the full gorule-{id}. Otherwise, if `rule` is None, or outside the id range, then we put this in the catch-all "other" keyed list of messages. """ rule_id = self._rule_id(rule) if rule_id not in self.messages: self.messages[rule_id] = [] if len(self.messages[rule_id]) < self._rule_message_cap: self.messages[rule_id].append(message)
[ "def", "message", "(", "self", ",", "message", ":", "Message", ",", "rule", ":", "Optional", "[", "int", "]", ")", "->", "None", ":", "rule_id", "=", "self", ".", "_rule_id", "(", "rule", ")", "if", "rule_id", "not", "in", "self", ".", "messages", ...
Add a message to the appropriate list of messages. If `rule` refers to a valid id range for a go rule, the message is entered in a list keyed by the full gorule-{id}. Otherwise, if `rule` is None, or outside the id range, then we put this in the catch-all "other" keyed list of messages.
[ "Add", "a", "message", "to", "the", "appropriate", "list", "of", "messages", ".", "If", "rule", "refers", "to", "a", "valid", "id", "range", "for", "a", "go", "rule", "the", "message", "is", "entered", "in", "a", "list", "keyed", "by", "the", "full", ...
python
train
45.714286
aleju/imgaug
imgaug/external/poly_point_isect_py2py3.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/external/poly_point_isect_py2py3.py#L1042-L1053
def iter_items(self, start_key=None, end_key=None, reverse=False): """Iterates over the (key, value) items of the associated tree, in ascending order if reverse is True, iterate in descending order, reverse defaults to False""" # optimized iterator (reduced method calls) - faster on CPython but slower on pypy if self.is_empty(): return [] if reverse: return self._iter_items_backward(start_key, end_key) else: return self._iter_items_forward(start_key, end_key)
[ "def", "iter_items", "(", "self", ",", "start_key", "=", "None", ",", "end_key", "=", "None", ",", "reverse", "=", "False", ")", ":", "# optimized iterator (reduced method calls) - faster on CPython but slower on pypy", "if", "self", ".", "is_empty", "(", ")", ":", ...
Iterates over the (key, value) items of the associated tree, in ascending order if reverse is True, iterate in descending order, reverse defaults to False
[ "Iterates", "over", "the", "(", "key", "value", ")", "items", "of", "the", "associated", "tree", "in", "ascending", "order", "if", "reverse", "is", "True", "iterate", "in", "descending", "order", "reverse", "defaults", "to", "False" ]
python
valid
45.5
quantopian/zipline
zipline/lib/labelarray.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L303-L312
def as_int_array(self): """ Convert self into a regular ndarray of ints. This is an O(1) operation. It does not copy the underlying data. """ return self.view( type=ndarray, dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), )
[ "def", "as_int_array", "(", "self", ")", ":", "return", "self", ".", "view", "(", "type", "=", "ndarray", ",", "dtype", "=", "unsigned_int_dtype_with_size_in_bytes", "(", "self", ".", "itemsize", ")", ",", ")" ]
Convert self into a regular ndarray of ints. This is an O(1) operation. It does not copy the underlying data.
[ "Convert", "self", "into", "a", "regular", "ndarray", "of", "ints", "." ]
python
train
29.9
simon-anders/htseq
python2/HTSeq/__init__.py
https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python2/HTSeq/__init__.py#L144-L175
def parse_GFF_attribute_string(attrStr, extra_return_first_value=False): """Parses a GFF attribute string and returns it as a dictionary. If 'extra_return_first_value' is set, a pair is returned: the dictionary and the value of the first attribute. This might be useful if this is the ID. """ if attrStr.endswith("\n"): attrStr = attrStr[:-1] d = {} first_val = "_unnamed_" for (i, attr) in itertools.izip( itertools.count(), _HTSeq.quotesafe_split(attrStr)): if _re_attr_empty.match(attr): continue if attr.count('"') not in (0, 2): raise ValueError( "The attribute string seems to contain mismatched quotes.") mo = _re_attr_main.match(attr) if not mo: raise ValueError("Failure parsing GFF attribute line") val = mo.group(2) if val.startswith('"') and val.endswith('"'): val = val[1:-1] d[intern(mo.group(1))] = intern(val) if extra_return_first_value and i == 0: first_val = val if extra_return_first_value: return (d, first_val) else: return d
[ "def", "parse_GFF_attribute_string", "(", "attrStr", ",", "extra_return_first_value", "=", "False", ")", ":", "if", "attrStr", ".", "endswith", "(", "\"\\n\"", ")", ":", "attrStr", "=", "attrStr", "[", ":", "-", "1", "]", "d", "=", "{", "}", "first_val", ...
Parses a GFF attribute string and returns it as a dictionary. If 'extra_return_first_value' is set, a pair is returned: the dictionary and the value of the first attribute. This might be useful if this is the ID.
[ "Parses", "a", "GFF", "attribute", "string", "and", "returns", "it", "as", "a", "dictionary", "." ]
python
train
35.90625
ewels/MultiQC
multiqc/modules/hicpro/hicpro.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/hicpro/hicpro.py#L301-L320
def hicpro_pairing_chart (self): """ Generate Pairing chart """ # Specify the order of the different possible categories keys = OrderedDict() keys['Unique_paired_alignments'] = { 'color': '#005ce6', 'name': 'Uniquely Aligned' } keys['Low_qual_pairs'] = { 'color': '#b97b35', 'name': 'Low Quality' } keys['Pairs_with_singleton'] = { 'color': '#ff9933', 'name': 'Singleton' } keys['Multiple_pairs_alignments'] = { 'color': '#e67300', 'name': 'Multi Aligned' } keys['Unmapped_airs'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' } # Config for the plot config = { 'id': 'hicpro_pairing_stats_plot', 'title': 'HiC-Pro: Pairing Statistics', 'ylab': '# Reads', 'cpswitch_counts_label': 'Number of Reads' } return bargraph.plot(self.hicpro_data, keys, config)
[ "def", "hicpro_pairing_chart", "(", "self", ")", ":", "# Specify the order of the different possible categories", "keys", "=", "OrderedDict", "(", ")", "keys", "[", "'Unique_paired_alignments'", "]", "=", "{", "'color'", ":", "'#005ce6'", ",", "'name'", ":", "'Uniquel...
Generate Pairing chart
[ "Generate", "Pairing", "chart" ]
python
train
44.4
knipknap/exscript
Exscript/protocols/telnetlib.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/telnetlib.py#L379-L391
def read_very_lazy(self): """Return any data available in the cooked queue (very lazy). Raise EOFError if connection closed and no data available. Return '' if no cooked data available otherwise. Don't block. """ buf = self.cookedq.getvalue() self.cookedq.seek(0) self.cookedq.truncate() if not buf and self.eof and not self.rawq: raise EOFError('telnet connection closed') return buf
[ "def", "read_very_lazy", "(", "self", ")", ":", "buf", "=", "self", ".", "cookedq", ".", "getvalue", "(", ")", "self", ".", "cookedq", ".", "seek", "(", "0", ")", "self", ".", "cookedq", ".", "truncate", "(", ")", "if", "not", "buf", "and", "self",...
Return any data available in the cooked queue (very lazy). Raise EOFError if connection closed and no data available. Return '' if no cooked data available otherwise. Don't block.
[ "Return", "any", "data", "available", "in", "the", "cooked", "queue", "(", "very", "lazy", ")", "." ]
python
train
35.307692
gwastro/pycbc
pycbc/waveform/ringdown.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L202-L218
def lm_ffinal(freqs, damping_times, modes): """Return the maximum f_final of the modes given, with f_final the frequency at which the amplitude falls to 1/1000 of the peak amplitude """ f_max = {} for lmn in modes: l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) for n in range(nmodes): f_max['%d%d%d' %(l,m,n)] = qnm_freq_decay(freqs['%d%d%d' %(l,m,n)], damping_times['%d%d%d' %(l,m,n)], 1./1000) f_final = max(f_max.values()) if f_final > max_freq: f_final = max_freq return f_final
[ "def", "lm_ffinal", "(", "freqs", ",", "damping_times", ",", "modes", ")", ":", "f_max", "=", "{", "}", "for", "lmn", "in", "modes", ":", "l", ",", "m", ",", "nmodes", "=", "int", "(", "lmn", "[", "0", "]", ")", ",", "int", "(", "lmn", "[", "...
Return the maximum f_final of the modes given, with f_final the frequency at which the amplitude falls to 1/1000 of the peak amplitude
[ "Return", "the", "maximum", "f_final", "of", "the", "modes", "given", "with", "f_final", "the", "frequency", "at", "which", "the", "amplitude", "falls", "to", "1", "/", "1000", "of", "the", "peak", "amplitude" ]
python
train
34.235294
ansible/molecule
molecule/provisioner/lint/ansible_lint.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/provisioner/lint/ansible_lint.py#L64-L86
def bake(self): """ Bake an `ansible-lint` command so it's ready to execute and returns None. :return: None """ options = self.options default_exclude_list = options.pop('default_exclude') options_exclude_list = options.pop('exclude') excludes = default_exclude_list + options_exclude_list x_list = options.pop('x') exclude_args = ['--exclude={}'.format(exclude) for exclude in excludes] x_args = tuple(('-x', x) for x in x_list) self._ansible_lint_command = sh.ansible_lint.bake( options, exclude_args, sum(x_args, ()), self._playbook, _env=self.env, _out=LOG.out, _err=LOG.error)
[ "def", "bake", "(", "self", ")", ":", "options", "=", "self", ".", "options", "default_exclude_list", "=", "options", ".", "pop", "(", "'default_exclude'", ")", "options_exclude_list", "=", "options", ".", "pop", "(", "'exclude'", ")", "excludes", "=", "defa...
Bake an `ansible-lint` command so it's ready to execute and returns None. :return: None
[ "Bake", "an", "ansible", "-", "lint", "command", "so", "it", "s", "ready", "to", "execute", "and", "returns", "None", "." ]
python
train
32.565217
tisimst/mcerp
mcerp/umath.py
https://github.com/tisimst/mcerp/blob/2bb8260c9ad2d58a806847f1b627b6451e407de1/mcerp/umath.py#L161-L169
def fabs(x): """ Absolute value function """ if isinstance(x, UncertainFunction): mcpts = np.fabs(x._mcpts) return UncertainFunction(mcpts) else: return np.fabs(x)
[ "def", "fabs", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "UncertainFunction", ")", ":", "mcpts", "=", "np", ".", "fabs", "(", "x", ".", "_mcpts", ")", "return", "UncertainFunction", "(", "mcpts", ")", "else", ":", "return", "np", ".", ...
Absolute value function
[ "Absolute", "value", "function" ]
python
train
22.111111
h2oai/h2o-3
h2o-py/h2o/model/regression.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/regression.py#L69-L78
def h2o_median_absolute_error(y_actual, y_predicted): """ Median absolute error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :returns: median absolute error loss (best is 0.0) """ ModelBase._check_targets(y_actual, y_predicted) return (y_predicted - y_actual).abs().median()
[ "def", "h2o_median_absolute_error", "(", "y_actual", ",", "y_predicted", ")", ":", "ModelBase", ".", "_check_targets", "(", "y_actual", ",", "y_predicted", ")", "return", "(", "y_predicted", "-", "y_actual", ")", ".", "abs", "(", ")", ".", "median", "(", ")"...
Median absolute error regression loss :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :returns: median absolute error loss (best is 0.0)
[ "Median", "absolute", "error", "regression", "loss" ]
python
test
36.7
tomasbasham/dominos
dominos/api.py
https://github.com/tomasbasham/dominos/blob/59729a8bdca0ae30a84115a0e93e9b1f259faf0e/dominos/api.py#L224-L236
def process_payment(self): ''' Proceed with payment using the payment method selected earlier. :return: A response having processes the payment. :rtype: requests.Response ''' params = { '__RequestVerificationToken': self.session.cookies, 'method': 'submit' } return self.__post('/PaymentOptions/Proceed', json=params)
[ "def", "process_payment", "(", "self", ")", ":", "params", "=", "{", "'__RequestVerificationToken'", ":", "self", ".", "session", ".", "cookies", ",", "'method'", ":", "'submit'", "}", "return", "self", ".", "__post", "(", "'/PaymentOptions/Proceed'", ",", "js...
Proceed with payment using the payment method selected earlier. :return: A response having processes the payment. :rtype: requests.Response
[ "Proceed", "with", "payment", "using", "the", "payment", "method", "selected", "earlier", "." ]
python
test
30.384615
chimera0/accel-brain-code
Generative-Adversarial-Networks/pygan/generativemodel/autoencodermodel/encoder_decoder_model.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Generative-Adversarial-Networks/pygan/generativemodel/autoencodermodel/encoder_decoder_model.py#L68-L78
def draw(self): ''' Draws samples from the `fake` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = self.noise_sampler.generate() _ = self.__encoder_decoder_controller.encoder.inference(observed_arr) arr = self.__encoder_decoder_controller.encoder.get_feature_points() return arr
[ "def", "draw", "(", "self", ")", ":", "observed_arr", "=", "self", ".", "noise_sampler", ".", "generate", "(", ")", "_", "=", "self", ".", "__encoder_decoder_controller", ".", "encoder", ".", "inference", "(", "observed_arr", ")", "arr", "=", "self", ".", ...
Draws samples from the `fake` distribution. Returns: `np.ndarray` of samples.
[ "Draws", "samples", "from", "the", "fake", "distribution", ".", "Returns", ":", "np", ".", "ndarray", "of", "samples", "." ]
python
train
33.909091
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L1887-L1902
def set_default_unit_all(self, twig=None, unit=None, **kwargs): """ TODO: add documentation """ if twig is not None and unit is None: # then try to support value as the first argument if no matches with twigs if isinstance(unit, u.Unit) or not isinstance(twig, str): unit = twig twig = None elif not len(self.filter(twig=twig, check_default=check_default, **kwargs)): unit = twig twig = None for param in self.filter(twig=twig, **kwargs).to_list(): param.set_default_unit(unit)
[ "def", "set_default_unit_all", "(", "self", ",", "twig", "=", "None", ",", "unit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "twig", "is", "not", "None", "and", "unit", "is", "None", ":", "# then try to support value as the first argument if no mat...
TODO: add documentation
[ "TODO", ":", "add", "documentation" ]
python
train
38.5
keras-rl/keras-rl
rl/callbacks.py
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/callbacks.py#L81-L89
def on_step_end(self, step, logs={}): """ Called at end of each step for each callback in callbackList""" for callback in self.callbacks: # Check if callback supports the more appropriate `on_step_end` callback. # If not, fall back to `on_batch_end` to be compatible with built-in Keras callbacks. if callable(getattr(callback, 'on_step_end', None)): callback.on_step_end(step, logs=logs) else: callback.on_batch_end(step, logs=logs)
[ "def", "on_step_end", "(", "self", ",", "step", ",", "logs", "=", "{", "}", ")", ":", "for", "callback", "in", "self", ".", "callbacks", ":", "# Check if callback supports the more appropriate `on_step_end` callback.", "# If not, fall back to `on_batch_end` to be compatible...
Called at end of each step for each callback in callbackList
[ "Called", "at", "end", "of", "each", "step", "for", "each", "callback", "in", "callbackList" ]
python
train
57.888889
mlperf/training
rnn_translator/pytorch/seq2seq/inference/inference.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/inference/inference.py#L87-L145
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None, summary=False, reference_path=None): """ Runs translation on test dataset. :param calc_bleu: if True compares results with reference and computes BLEU score :param epoch: index of the current epoch :param iteration: index of the current iteration :param eval_path: path to the file for saving results :param summary: if True prints summary :param reference_path: path to the file with reference translation """ if self.cuda: test_bleu = torch.cuda.FloatTensor([0]) break_training = torch.cuda.LongTensor([0]) else: test_bleu = torch.FloatTensor([0]) break_training = torch.LongTensor([0]) if eval_path is None: eval_path = self.build_eval_path(epoch, iteration) detok_eval_path = eval_path + '.detok' with contextlib.suppress(FileNotFoundError): os.remove(eval_path) os.remove(detok_eval_path) rank = get_rank() logging.info(f'Running evaluation on test set') self.model.eval() torch.cuda.empty_cache() output = self.evaluate(epoch, iteration, summary) output = output[:len(self.loader.dataset)] output = self.loader.dataset.unsort(output) if rank == 0: with open(eval_path, 'a') as eval_file: eval_file.writelines(output) if calc_bleu: self.run_detokenizer(eval_path) test_bleu[0] = self.run_sacrebleu(detok_eval_path, reference_path) if summary: logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}') if self.target_bleu and test_bleu[0] >= self.target_bleu: logging.info(f'Target accuracy reached') break_training[0] = 1 barrier() torch.cuda.empty_cache() logging.info(f'Finished evaluation on test set') if self.distributed: dist.broadcast(break_training, 0) dist.broadcast(test_bleu, 0) return test_bleu[0].item(), break_training[0].item()
[ "def", "run", "(", "self", ",", "calc_bleu", "=", "True", ",", "epoch", "=", "None", ",", "iteration", "=", "None", ",", "eval_path", "=", "None", ",", "summary", "=", "False", ",", "reference_path", "=", "None", ")", ":", "if", "self", ".", "cuda", ...
Runs translation on test dataset. :param calc_bleu: if True compares results with reference and computes BLEU score :param epoch: index of the current epoch :param iteration: index of the current iteration :param eval_path: path to the file for saving results :param summary: if True prints summary :param reference_path: path to the file with reference translation
[ "Runs", "translation", "on", "test", "dataset", "." ]
python
train
37.033898