repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
adrianliaw/PyCuber
pycuber/solver/cfop/f2l.py
https://github.com/adrianliaw/PyCuber/blob/e44b5ba48c831b964ce73d046fb813222771853f/pycuber/solver/cfop/f2l.py#L135-L147
def combining_successors(state, last_action=()):
    """Successors function for finding path of combining F2L pair.

    Yields (formula, new_state) pairs for every legal next move, pruning
    moves that would undo or trivially extend ``last_action``.
    """
    ((corner, edge), (L, U, F, D, R, B)) = state

    candidates = []
    # A lone quarter/half U turn must not follow another single U turn.
    if len(last_action) != 1:
        candidates.extend([Formula("U"), Formula("U'"), Formula("U2")])
    # Avoid immediately repeating an R-based or F-based trigger.
    if "R" not in last_action:
        candidates.extend([Formula("R U R'"), Formula("R U' R'"), Formula("R U2 R'")])
    if "F" not in last_action:
        candidates.extend([Formula("F' U F"), Formula("F' U' F"), Formula("F' U2 F")])

    for formula in candidates:
        rotated = (corner, edge)
        for step in formula:
            rotated = F2LPairSolver._rotate(rotated, step)
        yield formula, (rotated, (L, U, F, D, R, B))
[ "def", "combining_successors", "(", "state", ",", "last_action", "=", "(", ")", ")", ":", "(", "(", "corner", ",", "edge", ")", ",", "(", "L", ",", "U", ",", "F", ",", "D", ",", "R", ",", "B", ")", ")", "=", "state", "U_turns", "=", "[", "For...
Successors function for finding path of combining F2L pair.
[ "Successors", "function", "for", "finding", "path", "of", "combining", "F2L", "pair", "." ]
python
train
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L264-L269
def morphTo(self, region):
    """Change shape of this region to match the given ``Region`` object"""
    # Reject None/falsy values as well as objects of the wrong type.
    if not (region and isinstance(region, Region)):
        raise TypeError("morphTo expected a Region object")
    self.setROI(region)
    return self
[ "def", "morphTo", "(", "self", ",", "region", ")", ":", "if", "not", "region", "or", "not", "isinstance", "(", "region", ",", "Region", ")", ":", "raise", "TypeError", "(", "\"morphTo expected a Region object\"", ")", "self", ".", "setROI", "(", "region", ...
Change shape of this region to match the given ``Region`` object
[ "Change", "shape", "of", "this", "region", "to", "match", "the", "given", "Region", "object" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L8349-L8359
def serial_udb_extra_f13_encode(self, sue_week_no, sue_lat_origin, sue_lon_origin, sue_alt_origin):
    """
    Backwards compatible version of SERIAL_UDB_EXTRA F13: format

    sue_week_no               : Serial UDB Extra GPS Week Number (int16_t)
    sue_lat_origin            : Serial UDB Extra MP Origin Latitude (int32_t)
    sue_lon_origin            : Serial UDB Extra MP Origin Longitude (int32_t)
    sue_alt_origin            : Serial UDB Extra MP Origin Altitude Above Sea Level (int32_t)
    """
    fields = (sue_week_no, sue_lat_origin, sue_lon_origin, sue_alt_origin)
    return MAVLink_serial_udb_extra_f13_message(*fields)
[ "def", "serial_udb_extra_f13_encode", "(", "self", ",", "sue_week_no", ",", "sue_lat_origin", ",", "sue_lon_origin", ",", "sue_alt_origin", ")", ":", "return", "MAVLink_serial_udb_extra_f13_message", "(", "sue_week_no", ",", "sue_lat_origin", ",", "sue_lon_origin", ",", ...
Backwards compatible version of SERIAL_UDB_EXTRA F13: format sue_week_no : Serial UDB Extra GPS Week Number (int16_t) sue_lat_origin : Serial UDB Extra MP Origin Latitude (int32_t) sue_lon_origin : Serial UDB Extra MP Origin Longitude (int32_t) sue_alt_origin : Serial UDB Extra MP Origin Altitude Above Sea Level (int32_t)
[ "Backwards", "compatible", "version", "of", "SERIAL_UDB_EXTRA", "F13", ":", "format" ]
python
train
Qiskit/qiskit-terra
qiskit/extensions/simulator/snapshot.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/extensions/simulator/snapshot.py#L72-L84
def label(self, name):
    """Set snapshot label to name

    Args:
        name (str or None): label to assign unitary

    Raises:
        TypeError: name is not string or None.
    """
    # Only plain strings are accepted; anything else (including None)
    # is rejected, matching the original setter's behaviour.
    if not isinstance(name, str):
        raise TypeError('label expects a string')
    self._label = name
[ "def", "label", "(", "self", ",", "name", ")", ":", "if", "isinstance", "(", "name", ",", "str", ")", ":", "self", ".", "_label", "=", "name", "else", ":", "raise", "TypeError", "(", "'label expects a string'", ")" ]
Set snapshot label to name Args: name (str or None): label to assign unitary Raises: TypeError: name is not string or None.
[ "Set", "snapshot", "label", "to", "name" ]
python
test
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L412-L416
def namespaced_view_name(view_name, metric_prefix):
    """ create string to be used as metric type """
    # Fall back to the OpenCensus custom-metric namespace when no
    # prefix is supplied (None or empty string).
    prefix = metric_prefix if metric_prefix else "custom.googleapis.com/opencensus"
    joined = os.path.join(prefix, view_name)
    # Normalise Windows-style separators so the type is always '/'-delimited.
    return joined.replace('\\', '/')
[ "def", "namespaced_view_name", "(", "view_name", ",", "metric_prefix", ")", ":", "metric_prefix", "=", "metric_prefix", "or", "\"custom.googleapis.com/opencensus\"", "return", "os", ".", "path", ".", "join", "(", "metric_prefix", ",", "view_name", ")", ".", "replace...
create string to be used as metric type
[ "create", "string", "to", "be", "used", "as", "metric", "type" ]
python
train
nanoporetech/ont_fast5_api
ont_fast5_api/fast5_file.py
https://github.com/nanoporetech/ont_fast5_api/blob/352b3903155fcf4f19234c4f429dcefaa6d6bc4a/ont_fast5_api/fast5_file.py#L454-L468
def get_analysis_config(self, group_name):
    """ Gets any config data saved for the analysis.

    :param group_name: The name of the analysis group.
    :returns: A dictionary of dictionaries. Each key represents
        an analysis step. Each value is a dictionary containing the
        analysis parameters as key/value pairs. Returns None if no
        configuration exists for the analysis.
    """
    self.assert_open()
    config_group = 'Analyses/{}/Configuration'.format(group_name)
    if config_group not in self.handle:
        return None
    return self._parse_attribute_tree(config_group)
[ "def", "get_analysis_config", "(", "self", ",", "group_name", ")", ":", "self", ".", "assert_open", "(", ")", "group", "=", "'Analyses/{}/Configuration'", ".", "format", "(", "group_name", ")", "config", "=", "None", "if", "group", "in", "self", ".", "handle...
Gets any config data saved for the analysis. :param group_name: The name of the analysis group. :returns: A dictionary of dictionaries. Each key represents an analysis step. Each value is a dictionary containing the analysis parameters as key/value pairs. Returns None if no configuration exists for the analysis.
[ "Gets", "any", "config", "data", "saved", "for", "the", "analysis", "." ]
python
train
ewels/MultiQC
multiqc/modules/clipandmerge/clipandmerge.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/clipandmerge/clipandmerge.py#L78-L92
def clipandmerge_general_stats_table(self):
    """ Take the parsed stats from the ClipAndMerge report and add it to the
    basic stats table at the top of the report """
    # Single column: percentage of reads that were merged.
    percentage_col = {
        'title': '% Merged',
        'description': 'Percentage of reads merged',
        'min': 0,
        'max': 100,
        'suffix': '%',
        'scale': 'Greens',
        'format': '{:,.2f}',
    }
    headers = OrderedDict()
    headers['percentage'] = percentage_col
    self.general_stats_addcols(self.clipandmerge_data, headers)
[ "def", "clipandmerge_general_stats_table", "(", "self", ")", ":", "headers", "=", "OrderedDict", "(", ")", "headers", "[", "'percentage'", "]", "=", "{", "'title'", ":", "'% Merged'", ",", "'description'", ":", "'Percentage of reads merged'", ",", "'min'", ":", ...
Take the parsed stats from the ClipAndMerge report and add it to the basic stats table at the top of the report
[ "Take", "the", "parsed", "stats", "from", "the", "ClipAndMerge", "report", "and", "add", "it", "to", "the", "basic", "stats", "table", "at", "the", "top", "of", "the", "report" ]
python
train
crytic/slither
slither/core/declarations/function.py
https://github.com/crytic/slither/blob/04c147f7e50223c6af458ca430befae747ccd259/slither/core/declarations/function.py#L926-L938
def get_summary(self):
    """
    Return the function summary

    Returns:
        (str, str, str, list(str), list(str), listr(str), list(str), list(str);
        contract_name, name, visibility, modifiers, vars read, vars written,
        internal_calls, external_calls_as_expressions
    """
    def _as_str(items):
        # Normalise every element to its string representation.
        return [str(item) for item in items]

    return (
        self.contract.name,
        self.full_name,
        self.visibility,
        _as_str(self.modifiers),
        _as_str(self.state_variables_read + self.solidity_variables_read),
        _as_str(self.state_variables_written),
        _as_str(self.internal_calls),
        _as_str(self.external_calls_as_expressions),
    )
[ "def", "get_summary", "(", "self", ")", ":", "return", "(", "self", ".", "contract", ".", "name", ",", "self", ".", "full_name", ",", "self", ".", "visibility", ",", "[", "str", "(", "x", ")", "for", "x", "in", "self", ".", "modifiers", "]", ",", ...
Return the function summary Returns: (str, str, str, list(str), list(str), listr(str), list(str), list(str); contract_name, name, visibility, modifiers, vars read, vars written, internal_calls, external_calls_as_expressions
[ "Return", "the", "function", "summary", "Returns", ":", "(", "str", "str", "str", "list", "(", "str", ")", "list", "(", "str", ")", "listr", "(", "str", ")", "list", "(", "str", ")", "list", "(", "str", ")", ";", "contract_name", "name", "visibility"...
python
train
dmbee/seglearn
seglearn/split.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L92-L114
def _ts_slice(self, Xt, y): ''' takes time series data, and splits each series into temporal folds ''' Ns = len(Xt) Xt_new = [] for i in range(self.n_splits): for j in range(Ns): Njs = int(len(Xt[j]) / self.n_splits) Xt_new.append(Xt[j][(Njs * i):(Njs * (i + 1))]) Xt_new = np.array(Xt_new) if len(np.atleast_1d(y[0])) == len(Xt[0]): # y is a time series y_new = [] for i in range(self.n_splits): for j in range(Ns): Njs = int(len(y[j]) / self.n_splits) y_new.append(y[j][(Njs * i):(Njs * (i + 1))]) y_new = np.array(y_new) else: # y is contextual to each series y_new = np.concatenate([y for i in range(self.n_splits)]) return Xt_new, y_new
[ "def", "_ts_slice", "(", "self", ",", "Xt", ",", "y", ")", ":", "Ns", "=", "len", "(", "Xt", ")", "Xt_new", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_splits", ")", ":", "for", "j", "in", "range", "(", "Ns", ")", ":", "N...
takes time series data, and splits each series into temporal folds
[ "takes", "time", "series", "data", "and", "splits", "each", "series", "into", "temporal", "folds" ]
python
train
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L688-L712
def headerData(self, section, orientation, role):
    """Get the information to put in the header."""
    if role == Qt.TextAlignmentRole:
        if orientation == Qt.Horizontal:
            return Qt.AlignCenter | Qt.AlignBottom
        return Qt.AlignRight | Qt.AlignVCenter
    if role not in (Qt.DisplayRole, Qt.ToolTipRole):
        return None
    # Nothing to show for a degenerate (single-column) horizontal header.
    if self.axis == 1 and self._shape[1] <= 1:
        return None
    orient_axis = 0 if orientation == Qt.Horizontal else 1
    if self.model.header_shape[orient_axis] > 1:
        header = section
    else:
        header = self.model.header(self.axis, section)
        # Don't perform any conversion on strings
        # because it leads to differences between
        # the data present in the dataframe and
        # what is shown by Spyder
        if not is_type_text_string(header):
            header = to_text_string(header)
    return header
[ "def", "headerData", "(", "self", ",", "section", ",", "orientation", ",", "role", ")", ":", "if", "role", "==", "Qt", ".", "TextAlignmentRole", ":", "if", "orientation", "==", "Qt", ".", "Horizontal", ":", "return", "Qt", ".", "AlignCenter", "|", "Qt", ...
Get the information to put in the header.
[ "Get", "the", "information", "to", "put", "in", "the", "header", "." ]
python
train
Guake/guake
guake/guake_app.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/guake_app.py#L870-L879
def gen_accel_switch_tabN(self, N):
    """Generates callback (which called by accel key) to go to the Nth tab.
    """
    def callback(*args):
        # Only switch when N is a valid page index; always report the
        # accelerator as handled.
        page_count = self.get_notebook().get_n_pages()
        if 0 <= N < page_count:
            self.get_notebook().set_current_page(N)
        return True

    return callback
[ "def", "gen_accel_switch_tabN", "(", "self", ",", "N", ")", ":", "def", "callback", "(", "*", "args", ")", ":", "if", "0", "<=", "N", "<", "self", ".", "get_notebook", "(", ")", ".", "get_n_pages", "(", ")", ":", "self", ".", "get_notebook", "(", "...
Generates callback (which called by accel key) to go to the Nth tab.
[ "Generates", "callback", "(", "which", "called", "by", "accel", "key", ")", "to", "go", "to", "the", "Nth", "tab", "." ]
python
train
HiPERCAM/hcam_widgets
hcam_widgets/gtc/corba.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/gtc/corba.py#L40-L44
def get_object(self, binding_name, cls):
    """
    Get a reference to a remote object using CORBA
    """
    # Delegate to the shared connection state, passing ourselves as owner.
    state = self._state
    return state.get_object(self, binding_name, cls)
[ "def", "get_object", "(", "self", ",", "binding_name", ",", "cls", ")", ":", "return", "self", ".", "_state", ".", "get_object", "(", "self", ",", "binding_name", ",", "cls", ")" ]
Get a reference to a remote object using CORBA
[ "Get", "a", "reference", "to", "a", "remote", "object", "using", "CORBA" ]
python
train
pteichman/cobe
cobe/brain.py
https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L80-L87
def start_batch_learning(self):
    """Begin a series of batch learn operations. Data will not be
    committed to the database until stop_batch_learning is called. Learn
    text using the normal learn(text) method."""
    self._learning = True
    # Keep the journal in memory for the duration of the batch, and
    # drop reply indexes so bulk inserts are fast.
    cursor = self.graph.cursor()
    cursor.execute("PRAGMA journal_mode=memory")
    self.graph.drop_reply_indexes()
[ "def", "start_batch_learning", "(", "self", ")", ":", "self", ".", "_learning", "=", "True", "self", ".", "graph", ".", "cursor", "(", ")", ".", "execute", "(", "\"PRAGMA journal_mode=memory\"", ")", "self", ".", "graph", ".", "drop_reply_indexes", "(", ")" ...
Begin a series of batch learn operations. Data will not be committed to the database until stop_batch_learning is called. Learn text using the normal learn(text) method.
[ "Begin", "a", "series", "of", "batch", "learn", "operations", ".", "Data", "will", "not", "be", "committed", "to", "the", "database", "until", "stop_batch_learning", "is", "called", ".", "Learn", "text", "using", "the", "normal", "learn", "(", "text", ")", ...
python
train
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L495-L562
def determine_file_type(fname):
    """
    Detect file type.

    The following file types are supported:
    BED, narrowPeak, FASTA, list of chr:start-end regions

    If the extension is bed, fa, fasta or narrowPeak, we will believe this
    without checking!

    Parameters
    ----------
    fname : str
        File name.

    Returns
    -------
    filetype : str
        Filename in lower-case.
    """
    # BUGFIX: the original checked `isinstance(fname, unicode)` which raises
    # NameError on Python 3, and passed format args to ValueError without
    # actually formatting the message.
    if not isinstance(fname, str):
        raise ValueError("{} is not a file name!".format(fname))
    if not os.path.isfile(fname):
        raise ValueError("{} is not a file!".format(fname))

    # BUGFIX: os.path.splitext keeps the leading dot (".bed"), so the
    # original comparisons against "bed"/"fa"/... could never match and the
    # documented extension shortcut never fired. Strip the dot first.
    ext = os.path.splitext(fname)[1].lower().lstrip(".")
    if ext == "bed":
        return "bed"
    if ext in ("fa", "fasta"):
        return "fasta"
    if ext == "narrowpeak":
        return "narrowpeak"

    # Content sniffing: try FASTA first.
    try:
        Fasta(fname)
        return "fasta"
    except Exception:
        # Not parseable as FASTA; fall through to line-based detection.
        pass

    # Read first line that is not a comment or an UCSC-specific line
    p = re.compile(r'^(#|track|browser)')
    line = ""
    with open(fname) as f:
        for line in f.readlines():
            line = line.strip()
            if not p.search(line):
                break

    if not line:
        # Empty file, or only comment/track/browser lines; the original
        # would crash here with an unbound `line`.
        return "unknown"

    region_p = re.compile(r'^(.+):(\d+)-(\d+)$')
    if region_p.search(line):
        return "region"

    vals = line.split("\t")
    if len(vals) >= 3:
        # Columns 2 and 3 must be integer coordinates for BED-like formats.
        try:
            int(vals[1]), int(vals[2])
        except ValueError:
            return "unknown"
        if len(vals) == 10:
            try:
                int(vals[4]), int(vals[9])
                return "narrowpeak"
            except ValueError:
                # As far as I know there is no 10-column BED format
                return "unknown"
        return "bed"

    # Catch-all
    return "unknown"
[ "def", "determine_file_type", "(", "fname", ")", ":", "if", "not", "(", "isinstance", "(", "fname", ",", "str", ")", "or", "isinstance", "(", "fname", ",", "unicode", ")", ")", ":", "raise", "ValueError", "(", "\"{} is not a file name!\"", ",", "fname", ")...
Detect file type. The following file types are supported: BED, narrowPeak, FASTA, list of chr:start-end regions If the extension is bed, fa, fasta or narrowPeak, we will believe this without checking! Parameters ---------- fname : str File name. Returns ------- filetype : str Filename in lower-case.
[ "Detect", "file", "type", "." ]
python
train
KieranWynn/pyquaternion
pyquaternion/quaternion.py
https://github.com/KieranWynn/pyquaternion/blob/d2aad7f3fb0d4b9cc23aa72b390e9b2e1273eae9/pyquaternion/quaternion.py#L488-L495
def _normalise(self): """Object is guaranteed to be a unit quaternion after calling this operation UNLESS the object is equivalent to Quaternion(0) """ if not self.is_unit(): n = self.norm if n > 0: self.q = self.q / n
[ "def", "_normalise", "(", "self", ")", ":", "if", "not", "self", ".", "is_unit", "(", ")", ":", "n", "=", "self", ".", "norm", "if", "n", ">", "0", ":", "self", ".", "q", "=", "self", ".", "q", "/", "n" ]
Object is guaranteed to be a unit quaternion after calling this operation UNLESS the object is equivalent to Quaternion(0)
[ "Object", "is", "guaranteed", "to", "be", "a", "unit", "quaternion", "after", "calling", "this", "operation", "UNLESS", "the", "object", "is", "equivalent", "to", "Quaternion", "(", "0", ")" ]
python
train
saltstack/salt
salt/utils/gitfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1640-L1666
def init_remote(self):
    '''
    Initialize/attach to a remote using pygit2. Return a boolean which
    will let the calling function know whether or not a new repo was
    initialized by this function.
    '''
    # Make pygit2 resolve the global git config from $HOME; works around:
    # https://github.com/libgit2/pygit2/issues/339
    # https://github.com/libgit2/libgit2/issues/2122
    pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = os.path.expanduser('~')

    new = False
    if os.listdir(self.cachedir):
        # Repo cachedir exists, try to attach
        try:
            self.repo = pygit2.Repository(self.cachedir)
        except KeyError:
            log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
            return new
    else:
        # Repo cachedir is empty, initialize a new repo there
        self.repo = pygit2.init_repository(self.cachedir)
        new = True

    self.gitdir = salt.utils.path.join(self.repo.workdir, '.git')
    self.enforce_git_config()
    return new
[ "def", "init_remote", "(", "self", ")", ":", "# https://github.com/libgit2/pygit2/issues/339", "# https://github.com/libgit2/libgit2/issues/2122", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "pygit2", ".", "settings", ".", "search_path", "[", "...
Initialize/attach to a remote using pygit2. Return a boolean which will let the calling function know whether or not a new repo was initialized by this function.
[ "Initialize", "/", "attach", "to", "a", "remote", "using", "pygit2", ".", "Return", "a", "boolean", "which", "will", "let", "the", "calling", "function", "know", "whether", "or", "not", "a", "new", "repo", "was", "initialized", "by", "this", "function", "....
python
train
materialsproject/pymatgen
pymatgen/analysis/wulff.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/wulff.py#L271-L298
def _get_simpx_plane(self):
    """
    Locate the plane for simpx of on wulff_cv, by comparing the center of
    the simpx triangle with the plane functions.

    Returns a (on_wulff, surface_area) pair indexed by facet: whether each
    facet appears on the Wulff shape, and its accumulated triangle area.
    """
    on_wulff = [False] * len(self.miller_list)
    surface_area = [0.0] * len(self.miller_list)
    for simpx in self.wulff_cv_simp:
        # Triangle vertices and their centroid.
        pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
        center = np.sum(pts, 0) / 3.0
        # check whether the center of the simplices is on one plane
        for plane in self.facets:
            # Distance of the centroid from the plane n.x = e_surf;
            # a small tolerance decides membership.
            abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
            if abs_diff < 1e-5:
                on_wulff[plane.index] = True
                surface_area[plane.index] += get_tri_area(pts)

                plane.points.append(pts)
                # Record all three triangle edges; duplicates mark
                # interior edges and are filtered out below.
                plane.outer_lines.append([simpx[0], simpx[1]])
                plane.outer_lines.append([simpx[1], simpx[2]])
                plane.outer_lines.append([simpx[0], simpx[2]])
                # already find the plane, move to the next simplices
                break
    for plane in self.facets:
        plane.outer_lines.sort()
        # An edge shared by exactly two triangles of the same facet is
        # interior; keep only boundary edges.
        plane.outer_lines = [line for line in plane.outer_lines
                             if plane.outer_lines.count(line) != 2]
    return on_wulff, surface_area
[ "def", "_get_simpx_plane", "(", "self", ")", ":", "on_wulff", "=", "[", "False", "]", "*", "len", "(", "self", ".", "miller_list", ")", "surface_area", "=", "[", "0.0", "]", "*", "len", "(", "self", ".", "miller_list", ")", "for", "simpx", "in", "sel...
Locate the plane for simpx of on wulff_cv, by comparing the center of the simpx triangle with the plane functions.
[ "Locate", "the", "plane", "for", "simpx", "of", "on", "wulff_cv", "by", "comparing", "the", "center", "of", "the", "simpx", "triangle", "with", "the", "plane", "functions", "." ]
python
train
pytroll/posttroll
posttroll/subscriber.py
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L166-L172
def _add_hook(self, socket, callback): """Generic hook. The passed socket has to be "receive only". """ self._hooks.append(socket) self._hooks_cb[socket] = callback if self.poller: self.poller.register(socket, POLLIN)
[ "def", "_add_hook", "(", "self", ",", "socket", ",", "callback", ")", ":", "self", ".", "_hooks", ".", "append", "(", "socket", ")", "self", ".", "_hooks_cb", "[", "socket", "]", "=", "callback", "if", "self", ".", "poller", ":", "self", ".", "poller...
Generic hook. The passed socket has to be "receive only".
[ "Generic", "hook", ".", "The", "passed", "socket", "has", "to", "be", "receive", "only", "." ]
python
train
ehansis/ozelot
ozelot/orm/base.py
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/orm/base.py#L187-L228
def render_diagram(out_base):
    """Render a data model diagram

    Included in the diagram are all classes from the model registry.
    For your project, write a small script that imports all models that you
    would like to have included and then calls this function.

    .. note:: This function requires the 'dot' executable from the GraphViz
              package to be installed and its location configured in your
              `project_config.py` variable :attr:`DOT_EXECUTABLE`.

    Args:
        out_base (str): output base path (file endings will be appended)
    """
    import codecs
    import subprocess

    import sadisplay

    # generate class descriptions
    description = sadisplay.describe(
        list(model_registry.values()),
        show_methods=False,
        show_properties=True,
        show_indexes=True,
    )

    # write description in DOT format
    dot_path = out_base + '.dot'
    with codecs.open(dot_path, 'w', encoding='utf-8') as dot_file:
        dot_file.write(sadisplay.dot(description))

    # check existence of DOT_EXECUTABLE variable and file
    if not hasattr(config, 'DOT_EXECUTABLE'):
        raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
    if not os.path.exists(config.DOT_EXECUTABLE):
        raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))

    # render to image using DOT
    # noinspection PyUnresolvedReferences
    subprocess.check_call([
        config.DOT_EXECUTABLE,
        '-T', 'png',
        '-o', out_base + '.png',
        dot_path,
    ])
[ "def", "render_diagram", "(", "out_base", ")", ":", "import", "codecs", "import", "subprocess", "import", "sadisplay", "# generate class descriptions", "desc", "=", "sadisplay", ".", "describe", "(", "list", "(", "model_registry", ".", "values", "(", ")", ")", "...
Render a data model diagram Included in the diagram are all classes from the model registry. For your project, write a small script that imports all models that you would like to have included and then calls this function. .. note:: This function requires the 'dot' executable from the GraphViz package to be installed and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`. Args: out_base (str): output base path (file endings will be appended)
[ "Render", "a", "data", "model", "diagram" ]
python
train
soravux/scoop
scoop/utils.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/utils.py#L144-L157
def getHosts(filename=None, hostlist=None):
    """Return a list of hosts depending on the environment"""
    # Explicit sources take precedence over environment detection.
    if filename:
        return getHostsFromFile(filename)
    if hostlist:
        return getHostsFromList(hostlist)
    # Dispatch on the detected scheduler environment.
    scheduler_sources = {
        "SLURM": getHostsFromSLURM,
        "PBS": getHostsFromPBS,
        "SGE": getHostsFromSGE,
    }
    source = scheduler_sources.get(getEnv(), getDefaultHosts)
    return source()
[ "def", "getHosts", "(", "filename", "=", "None", ",", "hostlist", "=", "None", ")", ":", "if", "filename", ":", "return", "getHostsFromFile", "(", "filename", ")", "elif", "hostlist", ":", "return", "getHostsFromList", "(", "hostlist", ")", "elif", "getEnv",...
Return a list of hosts depending on the environment
[ "Return", "a", "list", "of", "hosts", "depending", "on", "the", "environment" ]
python
train
erinxocon/spotify-local
src/spotify_local/core.py
https://github.com/erinxocon/spotify-local/blob/8188eef221e3d8b9f408ff430d80e74560360459/src/spotify_local/core.py#L78-L84
def emit(self, event, *args, **kwargs):
    """Send out an event and call it's associated functions

    :param event: Name of the event to trigger
    """
    handlers = self._registered_events[event]
    for handler in handlers.values():
        handler(*args, **kwargs)
[ "def", "emit", "(", "self", ",", "event", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "func", "in", "self", ".", "_registered_events", "[", "event", "]", ".", "values", "(", ")", ":", "func", "(", "*", "args", ",", "*", "*", "kw...
Send out an event and call it's associated functions :param event: Name of the event to trigger
[ "Send", "out", "an", "event", "and", "call", "it", "s", "associated", "functions" ]
python
train
junzis/pyModeS
pyModeS/decoder/bds/__init__.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/__init__.py#L30-L89
def is50or60(msg, spd_ref, trk_ref, alt_ref):
    """Use reference ground speed and trk to determine BDS50 and DBS60.

    Args:
        msg (String): 28 bytes hexadecimal message string
        spd_ref (float): reference speed (ADS-B ground speed), kts
        trk_ref (float): reference track (ADS-B track angle), deg
        alt_ref (float): reference altitude (ADS-B altitude), ft

    Returns:
        String or None: BDS version, or possible versions, or None if nothing
            matches.
    """
    def vxy(v, angle):
        # Decompose a speed along a heading/track angle (degrees) into
        # east/north velocity components.
        vx = v * np.sin(np.radians(angle))
        vy = v * np.cos(np.radians(angle))
        return vx, vy

    # Must pass both candidate decoders' plausibility checks to be ambiguous.
    if not (bds50.is50(msg) and bds60.is60(msg)):
        return None
    h50 = bds50.trk50(msg)
    v50 = bds50.gs50(msg)
    if h50 is None or v50 is None:
        # Cannot decode BDS50 fields: stay ambiguous.
        return 'BDS50,BDS60'
    h60 = bds60.hdg60(msg)
    m60 = bds60.mach60(msg)
    i60 = bds60.ias60(msg)
    if h60 is None or (m60 is None and i60 is None):
        # Cannot decode BDS60 fields: stay ambiguous.
        return 'BDS50,BDS60'
    m60 = np.nan if m60 is None else m60
    i60 = np.nan if i60 is None else i60
    # Candidate velocity vectors: BDS50 ground speed, BDS60 via Mach,
    # BDS60 via IAS (both converted to TAS at the reference altitude).
    XY5 = vxy(v50*aero.kts, h50)
    XY6m = vxy(aero.mach2tas(m60, alt_ref*aero.ft), h60)
    XY6i = vxy(aero.cas2tas(i60*aero.kts, alt_ref*aero.ft), h60)
    allbds = ['BDS50', 'BDS60', 'BDS60']
    X = np.array([XY5, XY6m, XY6i])
    Mu = np.array(vxy(spd_ref*aero.kts, trk_ref))
    # compute Mahalanobis distance matrix
    # Cov = [[20**2, 0], [0, 20**2]]
    # mmatrix = np.sqrt(np.dot(np.dot(X-Mu, np.linalg.inv(Cov)), (X-Mu).T))
    # dist = np.diag(mmatrix)
    # since the covariance matrix is identity matrix,
    # M-dist is same as eculidian distance
    try:
        dist = np.linalg.norm(X-Mu, axis=1)
        # nanargmin raises ValueError when every candidate distance is NaN.
        BDS = allbds[np.nanargmin(dist)]
    except ValueError:
        return 'BDS50,BDS60'
    return BDS
[ "def", "is50or60", "(", "msg", ",", "spd_ref", ",", "trk_ref", ",", "alt_ref", ")", ":", "def", "vxy", "(", "v", ",", "angle", ")", ":", "vx", "=", "v", "*", "np", ".", "sin", "(", "np", ".", "radians", "(", "angle", ")", ")", "vy", "=", "v",...
Use reference ground speed and trk to determine BDS50 and DBS60. Args: msg (String): 28 bytes hexadecimal message string spd_ref (float): reference speed (ADS-B ground speed), kts trk_ref (float): reference track (ADS-B track angle), deg alt_ref (float): reference altitude (ADS-B altitude), ft Returns: String or None: BDS version, or possible versions, or None if nothing matches.
[ "Use", "reference", "ground", "speed", "and", "trk", "to", "determine", "BDS50", "and", "DBS60", "." ]
python
train
raphaelgyory/django-rest-messaging
rest_messaging/compat.py
https://github.com/raphaelgyory/django-rest-messaging/blob/c9d5405fed7db2d79ec5c93c721a8fe42ea86958/rest_messaging/compat.py#L67-L75
def compat_get_paginated_response(view, page):
    """ get_paginated_response is unknown to DRF 3.0 """
    if DRFVLIST[0] == 3 and DRFVLIST[1] >= 1:
        # DRF >= 3.1: use the view's paginator.
        from rest_messaging.serializers import ComplexMessageSerializer  # circular import
        serializer = ComplexMessageSerializer(page, many=True)
        return view.get_paginated_response(serializer.data)
    # DRF 3.0 fallback: build the response from the pagination serializer.
    serializer = view.get_pagination_serializer(page)
    return Response(serializer.data)
[ "def", "compat_get_paginated_response", "(", "view", ",", "page", ")", ":", "if", "DRFVLIST", "[", "0", "]", "==", "3", "and", "DRFVLIST", "[", "1", "]", ">=", "1", ":", "from", "rest_messaging", ".", "serializers", "import", "ComplexMessageSerializer", "# c...
get_paginated_response is unknown to DRF 3.0
[ "get_paginated_response", "is", "unknown", "to", "DRF", "3", ".", "0" ]
python
train
artisanofcode/python-broadway
broadway/errors.py
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/broadway/errors.py#L40-L45
def init_app(application):
    """ Associates the error handler """
    # Route every default HTTP error code through the shared handler.
    for status_code in werkzeug.exceptions.default_exceptions:
        application.register_error_handler(status_code, handle_http_exception)
[ "def", "init_app", "(", "application", ")", ":", "for", "code", "in", "werkzeug", ".", "exceptions", ".", "default_exceptions", ":", "application", ".", "register_error_handler", "(", "code", ",", "handle_http_exception", ")" ]
Associates the error handler
[ "Associates", "the", "error", "handler" ]
python
train
nuagenetworks/bambou
bambou/nurest_root_object.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_root_object.py#L118-L124
def get_resource_url(self):
    """ Get resource complete url """
    cls = self.__class__
    resource = cls.resource_name
    base = cls.rest_base_url()
    return "%s/%s" % (base, resource)
[ "def", "get_resource_url", "(", "self", ")", ":", "name", "=", "self", ".", "__class__", ".", "resource_name", "url", "=", "self", ".", "__class__", ".", "rest_base_url", "(", ")", "return", "\"%s/%s\"", "%", "(", "url", ",", "name", ")" ]
Get resource complete url
[ "Get", "resource", "complete", "url" ]
python
train
topic2k/pygcgen
pygcgen/generator.py
https://github.com/topic2k/pygcgen/blob/c41701815df2c8c3a57fd5f7b8babe702127c8a1/pygcgen/generator.py#L797-L813
def get_filtered_pull_requests(self, pull_requests): """ This method fetches missing params for PR and filter them by specified options. It include add all PR's with labels from options.include_labels and exclude all from options.exclude_labels. :param list(dict) pull_requests: All pull requests. :rtype: list(dict) :return: Filtered pull requests. """ pull_requests = self.filter_by_labels(pull_requests, "pull requests") pull_requests = self.filter_merged_pull_requests(pull_requests) if self.options.verbose > 1: print("\tremaining pull requests: {}".format(len(pull_requests))) return pull_requests
[ "def", "get_filtered_pull_requests", "(", "self", ",", "pull_requests", ")", ":", "pull_requests", "=", "self", ".", "filter_by_labels", "(", "pull_requests", ",", "\"pull requests\"", ")", "pull_requests", "=", "self", ".", "filter_merged_pull_requests", "(", "pull_r...
This method fetches missing params for PR and filter them by specified options. It include add all PR's with labels from options.include_labels and exclude all from options.exclude_labels. :param list(dict) pull_requests: All pull requests. :rtype: list(dict) :return: Filtered pull requests.
[ "This", "method", "fetches", "missing", "params", "for", "PR", "and", "filter", "them", "by", "specified", "options", ".", "It", "include", "add", "all", "PR", "s", "with", "labels", "from", "options", ".", "include_labels", "and", "exclude", "all", "from", ...
python
valid
rootpy/rootpy
rootpy/io/pickler.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/io/pickler.py#L272-L291
def load(self, key=None): """Read a pickled object representation from the open file.""" if key is None: key = '_pickle' obj = None if _compat_hooks: save = _compat_hooks[0]() try: self.__n += 1 s = self.__file.Get(key + ';{0:d}'.format(self.__n)) self.__io.setvalue(s.GetName()) if sys.version_info[0] < 3: obj = pickle.Unpickler.load(self) else: obj = super(Unpickler, self).load() self.__io.reopen() finally: if _compat_hooks: save = _compat_hooks[1](save) return obj
[ "def", "load", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "key", "=", "'_pickle'", "obj", "=", "None", "if", "_compat_hooks", ":", "save", "=", "_compat_hooks", "[", "0", "]", "(", ")", "try", ":", "self", "."...
Read a pickled object representation from the open file.
[ "Read", "a", "pickled", "object", "representation", "from", "the", "open", "file", "." ]
python
train
mehmetg/streak_client
streak_client/streak_client.py
https://github.com/mehmetg/streak_client/blob/46575510b4e4163a4a3cc06f7283a1ae377cdce6/streak_client/streak_client.py#L925-L948
def update_reminder(self, reminder): '''Creates a reminder with the provided attributes. Args: reminder updated reminder of StreakReminder type return (status code, reminder dict) ''' uri = '/'.join([self.api_uri, self.reminders_suffix, ]) #req sanity check payload = None if type(reminder) is not StreakReminder: return requests.codes.bad_request, None payload = reminder.to_dict(rw = True) try: uri = '/'.join([uri, reminder.attributes['key']]) except KeyError: return requests.codes.bad_request, None code, data = self._req('post', uri , json.dumps(payload)) return code, data
[ "def", "update_reminder", "(", "self", ",", "reminder", ")", ":", "uri", "=", "'/'", ".", "join", "(", "[", "self", ".", "api_uri", ",", "self", ".", "reminders_suffix", ",", "]", ")", "#req sanity check", "payload", "=", "None", "if", "type", "(", "re...
Creates a reminder with the provided attributes. Args: reminder updated reminder of StreakReminder type return (status code, reminder dict)
[ "Creates", "a", "reminder", "with", "the", "provided", "attributes", ".", "Args", ":", "reminder", "updated", "reminder", "of", "StreakReminder", "type", "return", "(", "status", "code", "reminder", "dict", ")" ]
python
train
tsnaomi/finnsyll
finnsyll/syllabifier.py
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/syllabifier.py#L114-L150
def annotate(self, word): '''Annotate 'word' for syllabification, stress, weights, and vowels.''' info = [] # e.g., [ ('\'nak.su.`tus.ta', 'PUSU', 'HLHL', 'AUUA'), ] for syllabification, _ in syllabify(self.normalize(word), stress=True): stresses = '' weights = '' vowels = '' for syll in syllable_split(syllabification): try: vowels += get_vowel(syll) weights += get_weight(syll) stresses += {'\'': 'P', '`': 'S'}.get(syll[0], 'U') except AttributeError: # if the syllable is vowel-less... if syll[-1].isalpha(): stresses += '*' weights += '*' vowels += '*' else: stresses += ' ' weights += ' ' vowels += ' ' info.append(( syllabification, stresses, weights, vowels, )) return info
[ "def", "annotate", "(", "self", ",", "word", ")", ":", "info", "=", "[", "]", "# e.g., [ ('\\'nak.su.`tus.ta', 'PUSU', 'HLHL', 'AUUA'), ]", "for", "syllabification", ",", "_", "in", "syllabify", "(", "self", ".", "normalize", "(", "word", ")", ",", "stress", "...
Annotate 'word' for syllabification, stress, weights, and vowels.
[ "Annotate", "word", "for", "syllabification", "stress", "weights", "and", "vowels", "." ]
python
train
Miserlou/django-knockout-modeler
knockout_modeler/ko.py
https://github.com/Miserlou/django-knockout-modeler/blob/714d21cc5ed008f132cea01dbae9f214c2bf1b76/knockout_modeler/ko.py#L107-L123
def ko_bindings(model): """ Given a model, returns the Knockout data bindings. """ try: if isinstance(model, str): modelName = model else: modelName = model.__class__.__name__ modelBindingsString = "ko.applyBindings(new " + modelName + "ViewModel(), $('#" + modelName.lower() + "s')[0]);" return modelBindingsString except Exception as e: logger.error(e) return ''
[ "def", "ko_bindings", "(", "model", ")", ":", "try", ":", "if", "isinstance", "(", "model", ",", "str", ")", ":", "modelName", "=", "model", "else", ":", "modelName", "=", "model", ".", "__class__", ".", "__name__", "modelBindingsString", "=", "\"ko.applyB...
Given a model, returns the Knockout data bindings.
[ "Given", "a", "model", "returns", "the", "Knockout", "data", "bindings", "." ]
python
train
timmahrt/ProMo
promo/morph_utils/modify_pitch_accent.py
https://github.com/timmahrt/ProMo/blob/99d9f5cc01ff328a62973c5a5da910cc905ae4d5/promo/morph_utils/modify_pitch_accent.py#L149-L163
def reintegrate(self, fullPointList): ''' Integrates the pitch values of the accent into a larger pitch contour ''' # Erase the original region of the accent fullPointList = _deletePoints(fullPointList, self.minT, self.maxT) # Erase the new region of the accent fullPointList = self.deleteOverlapping(fullPointList) # Add the accent into the full pitch list outputPointList = fullPointList + self.pointList outputPointList.sort() return outputPointList
[ "def", "reintegrate", "(", "self", ",", "fullPointList", ")", ":", "# Erase the original region of the accent", "fullPointList", "=", "_deletePoints", "(", "fullPointList", ",", "self", ".", "minT", ",", "self", ".", "maxT", ")", "# Erase the new region of the accent", ...
Integrates the pitch values of the accent into a larger pitch contour
[ "Integrates", "the", "pitch", "values", "of", "the", "accent", "into", "a", "larger", "pitch", "contour" ]
python
train
Varkal/chuda
chuda/commands.py
https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/commands.py#L92-L109
def setup(self, app): ''' Setup properties from parent app on the command ''' self.logger = app.logger self.shell.logger = self.logger if not self.command_name: raise EmptyCommandNameException() self.app = app self.arguments_declaration = self.arguments self.arguments = app.arguments if self.use_subconfig: _init_config(self) else: self.config = self.app.config
[ "def", "setup", "(", "self", ",", "app", ")", ":", "self", ".", "logger", "=", "app", ".", "logger", "self", ".", "shell", ".", "logger", "=", "self", ".", "logger", "if", "not", "self", ".", "command_name", ":", "raise", "EmptyCommandNameException", "...
Setup properties from parent app on the command
[ "Setup", "properties", "from", "parent", "app", "on", "the", "command" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L1314-L1329
def __callHandler(self, callback, event, *params): """ Calls a "pre" or "post" handler, if set. @type callback: function @param callback: Callback function to call. @type event: L{ExceptionEvent} @param event: Breakpoint hit event. @type params: tuple @param params: Parameters for the callback function. """ if callback is not None: event.hook = self callback(event, *params)
[ "def", "__callHandler", "(", "self", ",", "callback", ",", "event", ",", "*", "params", ")", ":", "if", "callback", "is", "not", "None", ":", "event", ".", "hook", "=", "self", "callback", "(", "event", ",", "*", "params", ")" ]
Calls a "pre" or "post" handler, if set. @type callback: function @param callback: Callback function to call. @type event: L{ExceptionEvent} @param event: Breakpoint hit event. @type params: tuple @param params: Parameters for the callback function.
[ "Calls", "a", "pre", "or", "post", "handler", "if", "set", "." ]
python
train
svasilev94/GraphLibrary
graphlibrary/paths.py
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/paths.py#L6-L23
def find_all_paths(G, start, end, path=[]): """ Find all paths between vertices start and end in graph. """ path = path + [start] if start == end: return [path] if start not in G.vertices: raise GraphInsertError("Vertex %s doesn't exist." % (start,)) if end not in G.vertices: raise GraphInsertError("Vertex %s doesn't exist." % (end,)) paths = [] for vertex in G.vertices[start]: if vertex not in path: newpaths = find_all_paths(G, vertex, end, path) for newpath in newpaths: paths.append(newpath) return paths
[ "def", "find_all_paths", "(", "G", ",", "start", ",", "end", ",", "path", "=", "[", "]", ")", ":", "path", "=", "path", "+", "[", "start", "]", "if", "start", "==", "end", ":", "return", "[", "path", "]", "if", "start", "not", "in", "G", ".", ...
Find all paths between vertices start and end in graph.
[ "Find", "all", "paths", "between", "vertices", "start", "and", "end", "in", "graph", "." ]
python
train
Stewori/pytypes
pytypes/typechecker.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/typechecker.py#L726-L741
def typechecked_func(func, force = False, argType = None, resType = None, prop_getter = False): """Works like typechecked, but is only applicable to functions, methods and properties. """ if not pytypes.checking_enabled and not pytypes.do_logging_in_typechecked: return func assert(_check_as_func(func)) if not force and is_no_type_check(func): return func if hasattr(func, 'do_typecheck'): func.do_typecheck = True return func elif hasattr(func, 'do_logging'): # actually shouldn't happen return _typeinspect_func(func, True, func.do_logging, argType, resType, prop_getter) else: return _typeinspect_func(func, True, False, argType, resType, prop_getter)
[ "def", "typechecked_func", "(", "func", ",", "force", "=", "False", ",", "argType", "=", "None", ",", "resType", "=", "None", ",", "prop_getter", "=", "False", ")", ":", "if", "not", "pytypes", ".", "checking_enabled", "and", "not", "pytypes", ".", "do_l...
Works like typechecked, but is only applicable to functions, methods and properties.
[ "Works", "like", "typechecked", "but", "is", "only", "applicable", "to", "functions", "methods", "and", "properties", "." ]
python
train
wal-e/wal-e
wal_e/worker/pg/wal_transfer.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/worker/pg/wal_transfer.py#L146-L162
def start(self, segment): """Begin transfer for an indicated wal segment.""" if self.closed: raise UserCritical(msg='attempt to transfer wal after closing', hint='report a bug') g = gevent.Greenlet(self.transferer, segment) g.link(self._complete_execution) self.greenlets.add(g) # Increment .expect before starting the greenlet, or else a # very unlucky .join could be fooled as to when pool is # complete. self.expect += 1 g.start()
[ "def", "start", "(", "self", ",", "segment", ")", ":", "if", "self", ".", "closed", ":", "raise", "UserCritical", "(", "msg", "=", "'attempt to transfer wal after closing'", ",", "hint", "=", "'report a bug'", ")", "g", "=", "gevent", ".", "Greenlet", "(", ...
Begin transfer for an indicated wal segment.
[ "Begin", "transfer", "for", "an", "indicated", "wal", "segment", "." ]
python
train
edx/ecommerce-worker
ecommerce_worker/sailthru/v1/tasks.py
https://github.com/edx/ecommerce-worker/blob/55246961d805b1f64d661a5c0bae0a216589401f/ecommerce_worker/sailthru/v1/tasks.py#L22-L25
def schedule_retry(self, config): """Schedule a retry""" raise self.retry(countdown=config.get('SAILTHRU_RETRY_SECONDS'), max_retries=config.get('SAILTHRU_RETRY_ATTEMPTS'))
[ "def", "schedule_retry", "(", "self", ",", "config", ")", ":", "raise", "self", ".", "retry", "(", "countdown", "=", "config", ".", "get", "(", "'SAILTHRU_RETRY_SECONDS'", ")", ",", "max_retries", "=", "config", ".", "get", "(", "'SAILTHRU_RETRY_ATTEMPTS'", ...
Schedule a retry
[ "Schedule", "a", "retry" ]
python
test
adamrehn/slidingwindow
slidingwindow/SlidingWindow.py
https://github.com/adamrehn/slidingwindow/blob/17ea9395b48671e8cb7321b9510c6b25fec5e45f/slidingwindow/SlidingWindow.py#L100-L143
def generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms = []): """ Generates a set of sliding windows for a dataset with the specified dimensions and order. """ # If the input data is smaller than the specified window size, # clip the window size to the input size on both dimensions windowSizeX = min(maxWindowSize, width) windowSizeY = min(maxWindowSize, height) # Compute the window overlap and step size windowOverlapX = int(math.floor(windowSizeX * overlapPercent)) windowOverlapY = int(math.floor(windowSizeY * overlapPercent)) stepSizeX = windowSizeX - windowOverlapX stepSizeY = windowSizeY - windowOverlapY # Determine how many windows we will need in order to cover the input data lastX = width - windowSizeX lastY = height - windowSizeY xOffsets = list(range(0, lastX+1, stepSizeX)) yOffsets = list(range(0, lastY+1, stepSizeY)) # Unless the input data dimensions are exact multiples of the step size, # we will need one additional row and column of windows to get 100% coverage if len(xOffsets) == 0 or xOffsets[-1] != lastX: xOffsets.append(lastX) if len(yOffsets) == 0 or yOffsets[-1] != lastY: yOffsets.append(lastY) # Generate the list of windows windows = [] for xOffset in xOffsets: for yOffset in yOffsets: for transform in [None] + transforms: windows.append(SlidingWindow( x=xOffset, y=yOffset, w=windowSizeX, h=windowSizeY, dimOrder=dimOrder, transform=transform )) return windows
[ "def", "generateForSize", "(", "width", ",", "height", ",", "dimOrder", ",", "maxWindowSize", ",", "overlapPercent", ",", "transforms", "=", "[", "]", ")", ":", "# If the input data is smaller than the specified window size,", "# clip the window size to the input size on both...
Generates a set of sliding windows for a dataset with the specified dimensions and order.
[ "Generates", "a", "set", "of", "sliding", "windows", "for", "a", "dataset", "with", "the", "specified", "dimensions", "and", "order", "." ]
python
train
apache/incubator-superset
superset/utils/decorators.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/decorators.py#L46-L118
def etag_cache(max_age, check_perms=bool): """ A decorator for caching views and handling etag conditional requests. The decorator adds headers to GET requests that help with caching: Last- Modified, Expires and ETag. It also handles conditional requests, when the client send an If-Matches header. If a cache is set, the decorator will cache GET responses, bypassing the dataframe serialization. POST requests will still benefit from the dataframe cache for requests that produce the same SQL. """ def decorator(f): @wraps(f) def wrapper(*args, **kwargs): # check if the user can access the resource check_perms(*args, **kwargs) # for POST requests we can't set cache headers, use the response # cache nor use conditional requests; this will still use the # dataframe cache in `superset/viz.py`, though. if request.method == 'POST': return f(*args, **kwargs) response = None if cache: try: # build the cache key from the function arguments and any # other additional GET arguments (like `form_data`, eg). 
key_args = list(args) key_kwargs = kwargs.copy() key_kwargs.update(request.args) cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs) response = cache.get(cache_key) except Exception: # pylint: disable=broad-except if app.debug: raise logging.exception('Exception possibly due to cache backend.') # if no response was cached, compute it using the wrapped function if response is None: response = f(*args, **kwargs) # add headers for caching: Last Modified, Expires and ETag response.cache_control.public = True response.last_modified = datetime.utcnow() expiration = max_age if max_age != 0 else FAR_FUTURE response.expires = \ response.last_modified + timedelta(seconds=expiration) response.add_etag() # if we have a cache, store the response from the request if cache: try: cache.set(cache_key, response, timeout=max_age) except Exception: # pylint: disable=broad-except if app.debug: raise logging.exception('Exception possibly due to cache backend.') return response.make_conditional(request) if cache: wrapper.uncached = f wrapper.cache_timeout = max_age wrapper.make_cache_key = \ cache._memoize_make_cache_key( # pylint: disable=protected-access make_name=None, timeout=max_age) return wrapper return decorator
[ "def", "etag_cache", "(", "max_age", ",", "check_perms", "=", "bool", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# check if the user can access ...
A decorator for caching views and handling etag conditional requests. The decorator adds headers to GET requests that help with caching: Last- Modified, Expires and ETag. It also handles conditional requests, when the client send an If-Matches header. If a cache is set, the decorator will cache GET responses, bypassing the dataframe serialization. POST requests will still benefit from the dataframe cache for requests that produce the same SQL.
[ "A", "decorator", "for", "caching", "views", "and", "handling", "etag", "conditional", "requests", "." ]
python
train
AguaClara/aguaclara
aguaclara/design/floc.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/floc.py#L129-L138
def W_min_HS_ratio(self): """Calculate the minimum flocculator channel width, given the minimum ratio between expansion height (H) and baffle spacing (S). :returns: Minimum channel width given H_e/S :rtype: float * centimeter """ return ((self.HS_RATIO_MIN * self.Q / self.downstream_H) * (self.BAFFLE_K / (2 * self.downstream_H * pc.viscosity_kinematic(self.temp) * self.vel_grad_avg ** 2)) ** (1/3) ).to(u.cm)
[ "def", "W_min_HS_ratio", "(", "self", ")", ":", "return", "(", "(", "self", ".", "HS_RATIO_MIN", "*", "self", ".", "Q", "/", "self", ".", "downstream_H", ")", "*", "(", "self", ".", "BAFFLE_K", "/", "(", "2", "*", "self", ".", "downstream_H", "*", ...
Calculate the minimum flocculator channel width, given the minimum ratio between expansion height (H) and baffle spacing (S). :returns: Minimum channel width given H_e/S :rtype: float * centimeter
[ "Calculate", "the", "minimum", "flocculator", "channel", "width", "given", "the", "minimum", "ratio", "between", "expansion", "height", "(", "H", ")", "and", "baffle", "spacing", "(", "S", ")", ".", ":", "returns", ":", "Minimum", "channel", "width", "given"...
python
train
mitsei/dlkit
dlkit/filesystem_adapter/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/filesystem_adapter/repository/objects.py#L1342-L1403
def set_data(self, data=None): """Sets the content data. arg: data (osid.transport.DataInputStream): the content data raise: InvalidArgument - ``data`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``data`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ def has_secondary_storage(): return 'secondary_data_store_path' in self._config_map extension = data.name.split('.')[-1] data_store_path = self._config_map['data_store_path'] if has_secondary_storage(): secondary_data_store_path = self._config_map['secondary_data_store_path'] if '_id' in self._my_map: filename = self._my_map['_id'] # remove any old file that is set if str(self._my_map['_id']) not in self._my_map['url']: os.remove(self._my_map['url']) if has_secondary_storage(): old_path = '{0}/repository/AssetContent'.format(data_store_path) secondary_file_location = self._my_map['url'].replace(old_path, secondary_data_store_path) os.remove(secondary_file_location) else: filename = ObjectId() filesystem_location = '{0}/repository/AssetContent/'.format(data_store_path) if not os.path.isdir(filesystem_location): os.makedirs(filesystem_location) file_location = '{0}{1}.{2}'.format(filesystem_location, str(filename), extension) data.seek(0) with open(file_location, 'wb') as file_handle: file_handle.write(data.read()) # this URL should be a filesystem path...relative # to the setting in runtime self._payload.set_url(file_location) # if set, also make a backup copy in the secondary_data_store_path if has_secondary_storage(): data.seek(0) if not os.path.isdir(secondary_data_store_path): os.makedirs(secondary_data_store_path) file_location = '{0}/{1}.{2}'.format(secondary_data_store_path, str(filename), extension) with open(file_location, 'wb') as file_handle: file_handle.write(data.read())
[ "def", "set_data", "(", "self", ",", "data", "=", "None", ")", ":", "def", "has_secondary_storage", "(", ")", ":", "return", "'secondary_data_store_path'", "in", "self", ".", "_config_map", "extension", "=", "data", ".", "name", ".", "split", "(", "'.'", "...
Sets the content data. arg: data (osid.transport.DataInputStream): the content data raise: InvalidArgument - ``data`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``data`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "content", "data", "." ]
python
train
juju/charm-helpers
charmhelpers/core/services/base.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/services/base.py#L213-L220
def get_service(self, service_name): """ Given the name of a registered service, return its service definition. """ service = self.services.get(service_name) if not service: raise KeyError('Service not registered: %s' % service_name) return service
[ "def", "get_service", "(", "self", ",", "service_name", ")", ":", "service", "=", "self", ".", "services", ".", "get", "(", "service_name", ")", "if", "not", "service", ":", "raise", "KeyError", "(", "'Service not registered: %s'", "%", "service_name", ")", ...
Given the name of a registered service, return its service definition.
[ "Given", "the", "name", "of", "a", "registered", "service", "return", "its", "service", "definition", "." ]
python
train
saltstack/salt
salt/modules/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nova.py#L448-L463
def lock(instance_id, profile=None, **kwargs): ''' Lock an instance instance_id ID of the instance to be locked CLI Example: .. code-block:: bash salt '*' nova.lock 1138 ''' conn = _auth(profile, **kwargs) return conn.lock(instance_id)
[ "def", "lock", "(", "instance_id", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "_auth", "(", "profile", ",", "*", "*", "kwargs", ")", "return", "conn", ".", "lock", "(", "instance_id", ")" ]
Lock an instance instance_id ID of the instance to be locked CLI Example: .. code-block:: bash salt '*' nova.lock 1138
[ "Lock", "an", "instance" ]
python
train
PyCQA/pylint
pylint/config.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/config.py#L816-L825
def load_defaults(self): """initialize the provider using default values""" for opt, optdict in self.options: action = optdict.get("action") if action != "callback": # callback action have no default if optdict is None: optdict = self.get_option_def(opt) default = optdict.get("default") self.set_option(opt, default, action, optdict)
[ "def", "load_defaults", "(", "self", ")", ":", "for", "opt", ",", "optdict", "in", "self", ".", "options", ":", "action", "=", "optdict", ".", "get", "(", "\"action\"", ")", "if", "action", "!=", "\"callback\"", ":", "# callback action have no default", "if"...
initialize the provider using default values
[ "initialize", "the", "provider", "using", "default", "values" ]
python
test
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/message.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/message.py#L609-L613
def _fail(self, request_id, failure, duration): """Publish a CommandFailedEvent.""" self.listeners.publish_command_failure( duration, failure, self.name, request_id, self.sock_info.address, self.op_id)
[ "def", "_fail", "(", "self", ",", "request_id", ",", "failure", ",", "duration", ")", ":", "self", ".", "listeners", ".", "publish_command_failure", "(", "duration", ",", "failure", ",", "self", ".", "name", ",", "request_id", ",", "self", ".", "sock_info"...
Publish a CommandFailedEvent.
[ "Publish", "a", "CommandFailedEvent", "." ]
python
train
saltstack/salt
salt/modules/rh_ip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L1263-L1291
def build_network_settings(**settings): ''' Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings> ''' # Read current configuration and store default values current_network_settings = _parse_rh_config(_RH_NETWORK_FILE) # Build settings opts = _parse_network_settings(settings, current_network_settings) try: template = JINJA.get_template('network.jinja') except jinja2.exceptions.TemplateNotFound: log.error('Could not load template network.jinja') return '' network = template.render(opts) if settings['test']: return _read_temp(network) # Write settings _write_file_network(network, _RH_NETWORK_FILE) return _read_file(_RH_NETWORK_FILE)
[ "def", "build_network_settings", "(", "*", "*", "settings", ")", ":", "# Read current configuration and store default values", "current_network_settings", "=", "_parse_rh_config", "(", "_RH_NETWORK_FILE", ")", "# Build settings", "opts", "=", "_parse_network_settings", "(", "...
Build the global network script. CLI Example: .. code-block:: bash salt '*' ip.build_network_settings <settings>
[ "Build", "the", "global", "network", "script", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L2477-L2485
def start_and_wait(self, *args, **kwargs): """ Helper method to start the task and wait for completion. Mainly used when we are submitting the task via the shell without passing through a queue manager. """ self.start(*args, **kwargs) retcode = self.wait() return retcode
[ "def", "start_and_wait", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "start", "(", "*", "args", ",", "*", "*", "kwargs", ")", "retcode", "=", "self", ".", "wait", "(", ")", "return", "retcode" ]
Helper method to start the task and wait for completion. Mainly used when we are submitting the task via the shell without passing through a queue manager.
[ "Helper", "method", "to", "start", "the", "task", "and", "wait", "for", "completion", "." ]
python
train
konture/CloeePy
cloeepy/logger.py
https://github.com/konture/CloeePy/blob/dcb21284d2df405d92ac6868ea7215792c9323b9/cloeepy/logger.py#L59-L68
def _set_log_level(self): """ Inspects config and sets the log level as instance attr. If not present in config, default is "INFO". """ # set log level on logger log_level = "INFO" if hasattr(self._config, "level") and self._config.level.upper() in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]: log_level = self._config.level.upper() self._log_level = log_level
[ "def", "_set_log_level", "(", "self", ")", ":", "# set log level on logger", "log_level", "=", "\"INFO\"", "if", "hasattr", "(", "self", ".", "_config", ",", "\"level\"", ")", "and", "self", ".", "_config", ".", "level", ".", "upper", "(", ")", "in", "[", ...
Inspects config and sets the log level as instance attr. If not present in config, default is "INFO".
[ "Inspects", "config", "and", "sets", "the", "log", "level", "as", "instance", "attr", ".", "If", "not", "present", "in", "config", "default", "is", "INFO", "." ]
python
train
flatangle/flatlib
flatlib/protocols/temperament.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/protocols/temperament.py#L211-L245
def scores(factors): """ Computes the score of temperaments and elements. """ temperaments = { const.CHOLERIC: 0, const.MELANCHOLIC: 0, const.SANGUINE: 0, const.PHLEGMATIC: 0 } qualities = { const.HOT: 0, const.COLD: 0, const.DRY: 0, const.HUMID: 0 } for factor in factors: element = factor['element'] # Score temperament temperament = props.base.elementTemperament[element] temperaments[temperament] += 1 # Score qualities tqualities = props.base.temperamentQuality[temperament] qualities[tqualities[0]] += 1 qualities[tqualities[1]] += 1 return { 'temperaments': temperaments, 'qualities': qualities }
[ "def", "scores", "(", "factors", ")", ":", "temperaments", "=", "{", "const", ".", "CHOLERIC", ":", "0", ",", "const", ".", "MELANCHOLIC", ":", "0", ",", "const", ".", "SANGUINE", ":", "0", ",", "const", ".", "PHLEGMATIC", ":", "0", "}", "qualities",...
Computes the score of temperaments and elements.
[ "Computes", "the", "score", "of", "temperaments", "and", "elements", "." ]
python
train
cthoyt/onto2nx
src/onto2nx/utils.py
https://github.com/cthoyt/onto2nx/blob/94c86e5e187cca67534afe0260097177b66e02c8/src/onto2nx/utils.py#L26-L35
def get_uri_name(url): """Gets the file name from the end of the URL. Only useful for PyBEL's testing though since it looks specifically if the file is from the weird owncloud resources distributed by Fraunhofer""" url_parsed = urlparse(url) url_parts = url_parsed.path.split('/') log.info('url parts: %s', url_parts) return url_parts[-1]
[ "def", "get_uri_name", "(", "url", ")", ":", "url_parsed", "=", "urlparse", "(", "url", ")", "url_parts", "=", "url_parsed", ".", "path", ".", "split", "(", "'/'", ")", "log", ".", "info", "(", "'url parts: %s'", ",", "url_parts", ")", "return", "url_par...
Gets the file name from the end of the URL. Only useful for PyBEL's testing though since it looks specifically if the file is from the weird owncloud resources distributed by Fraunhofer
[ "Gets", "the", "file", "name", "from", "the", "end", "of", "the", "URL", ".", "Only", "useful", "for", "PyBEL", "s", "testing", "though", "since", "it", "looks", "specifically", "if", "the", "file", "is", "from", "the", "weird", "owncloud", "resources", ...
python
train
jazzband/django-discover-jenkins
discover_jenkins/runner.py
https://github.com/jazzband/django-discover-jenkins/blob/c0c859dfdd571de6e8f63865dfc8ebac6bab1d07/discover_jenkins/runner.py#L13-L32
def get_tasks(): """Get the imported task classes for each task that will be run""" task_classes = [] for task_path in TASKS: try: module, classname = task_path.rsplit('.', 1) except ValueError: raise ImproperlyConfigured('%s isn\'t a task module' % task_path) try: mod = import_module(module) except ImportError as e: raise ImproperlyConfigured('Error importing task %s: "%s"' % (module, e)) try: task_class = getattr(mod, classname) except AttributeError: raise ImproperlyConfigured('Task module "%s" does not define a ' '"%s" class' % (module, classname)) task_classes.append(task_class) return task_classes
[ "def", "get_tasks", "(", ")", ":", "task_classes", "=", "[", "]", "for", "task_path", "in", "TASKS", ":", "try", ":", "module", ",", "classname", "=", "task_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "raise", "Imprope...
Get the imported task classes for each task that will be run
[ "Get", "the", "imported", "task", "classes", "for", "each", "task", "that", "will", "be", "run" ]
python
valid
intiocean/pyinter
pyinter/interval.py
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/interval.py#L28-L36
def openclosed(lower_value, upper_value): """Helper function to construct an interval object with a open lower and closed upper. For example: >>> openclosed(100.2, 800.9) (100.2, 800.9] """ return Interval(Interval.OPEN, lower_value, upper_value, Interval.CLOSED)
[ "def", "openclosed", "(", "lower_value", ",", "upper_value", ")", ":", "return", "Interval", "(", "Interval", ".", "OPEN", ",", "lower_value", ",", "upper_value", ",", "Interval", ".", "CLOSED", ")" ]
Helper function to construct an interval object with a open lower and closed upper. For example: >>> openclosed(100.2, 800.9) (100.2, 800.9]
[ "Helper", "function", "to", "construct", "an", "interval", "object", "with", "a", "open", "lower", "and", "closed", "upper", "." ]
python
train
pybel/pybel
src/pybel/struct/query/query.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/query/query.py#L111-L119
def run(self, manager): """Run this query and returns the resulting BEL graph. :param manager: A cache manager :rtype: Optional[pybel.BELGraph] """ universe = self._get_universe(manager) graph = self.seeding.run(universe) return self.pipeline.run(graph, universe=universe)
[ "def", "run", "(", "self", ",", "manager", ")", ":", "universe", "=", "self", ".", "_get_universe", "(", "manager", ")", "graph", "=", "self", ".", "seeding", ".", "run", "(", "universe", ")", "return", "self", ".", "pipeline", ".", "run", "(", "grap...
Run this query and returns the resulting BEL graph. :param manager: A cache manager :rtype: Optional[pybel.BELGraph]
[ "Run", "this", "query", "and", "returns", "the", "resulting", "BEL", "graph", "." ]
python
train
GeospatialPython/pyshp
shapefile.py
https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L1041-L1045
def iterShapeRecords(self): """Returns a generator of combination geometry/attribute records for all records in a shapefile.""" for shape, record in izip(self.iterShapes(), self.iterRecords()): yield ShapeRecord(shape=shape, record=record)
[ "def", "iterShapeRecords", "(", "self", ")", ":", "for", "shape", ",", "record", "in", "izip", "(", "self", ".", "iterShapes", "(", ")", ",", "self", ".", "iterRecords", "(", ")", ")", ":", "yield", "ShapeRecord", "(", "shape", "=", "shape", ",", "re...
Returns a generator of combination geometry/attribute records for all records in a shapefile.
[ "Returns", "a", "generator", "of", "combination", "geometry", "/", "attribute", "records", "for", "all", "records", "in", "a", "shapefile", "." ]
python
train
PGower/PyCanvas
pycanvas/apis/pages.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/pages.py#L53-L101
def update_create_front_page_courses(self, course_id, wiki_page_body=None, wiki_page_editing_roles=None, wiki_page_notify_of_update=None, wiki_page_published=None, wiki_page_title=None): """ Update/create front page. Update the title or contents of the front page """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - wiki_page[title] """The title for the new page. NOTE: changing a page's title will change its url. The updated url will be returned in the result.""" if wiki_page_title is not None: data["wiki_page[title]"] = wiki_page_title # OPTIONAL - wiki_page[body] """The content for the new page.""" if wiki_page_body is not None: data["wiki_page[body]"] = wiki_page_body # OPTIONAL - wiki_page[editing_roles] """Which user roles are allowed to edit this page. Any combination of these roles is allowed (separated by commas). "teachers":: Allows editing by teachers in the course. "students":: Allows editing by students in the course. "members":: For group wikis, allows editing by members of the group. 
"public":: Allows editing by any user.""" if wiki_page_editing_roles is not None: self._validate_enum(wiki_page_editing_roles, ["teachers", "students", "members", "public"]) data["wiki_page[editing_roles]"] = wiki_page_editing_roles # OPTIONAL - wiki_page[notify_of_update] """Whether participants should be notified when this page changes.""" if wiki_page_notify_of_update is not None: data["wiki_page[notify_of_update]"] = wiki_page_notify_of_update # OPTIONAL - wiki_page[published] """Whether the page is published (true) or draft state (false).""" if wiki_page_published is not None: data["wiki_page[published]"] = wiki_page_published self.logger.debug("PUT /api/v1/courses/{course_id}/front_page with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/front_page".format(**path), data=data, params=params, single_item=True)
[ "def", "update_create_front_page_courses", "(", "self", ",", "course_id", ",", "wiki_page_body", "=", "None", ",", "wiki_page_editing_roles", "=", "None", ",", "wiki_page_notify_of_update", "=", "None", ",", "wiki_page_published", "=", "None", ",", "wiki_page_title", ...
Update/create front page. Update the title or contents of the front page
[ "Update", "/", "create", "front", "page", ".", "Update", "the", "title", "or", "contents", "of", "the", "front", "page" ]
python
train
wummel/linkchecker
linkcheck/checker/httpurl.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/httpurl.py#L118-L152
def check_connection (self): """ Check a URL with HTTP protocol. Here is an excerpt from RFC 1945 with common response codes: The first digit of the Status-Code defines the class of response. The last two digits do not have any categorization role. There are 5 values for the first digit: - 1xx: Informational - Not used, but reserved for future use - 2xx: Success - The action was successfully received, understood, and accepted. - 3xx: Redirection - Further action must be taken in order to complete the request - 4xx: Client Error - The request contains bad syntax or cannot be fulfilled - 5xx: Server Error - The server failed to fulfill an apparently valid request """ self.session = self.aggregate.get_request_session() # set the proxy, so a 407 status after this is an error self.set_proxy(self.aggregate.config["proxy"].get(self.scheme)) self.construct_auth() # check robots.txt if not self.allows_robots(self.url): self.add_info(_("Access denied by robots.txt, checked only syntax.")) self.set_result(_("syntax OK")) self.do_check_content = False return # check the http connection request = self.build_request() self.send_request(request) self._add_response_info() self.follow_redirections(request) self.check_response() if self.allows_simple_recursion(): self.parse_header_links()
[ "def", "check_connection", "(", "self", ")", ":", "self", ".", "session", "=", "self", ".", "aggregate", ".", "get_request_session", "(", ")", "# set the proxy, so a 407 status after this is an error", "self", ".", "set_proxy", "(", "self", ".", "aggregate", ".", ...
Check a URL with HTTP protocol. Here is an excerpt from RFC 1945 with common response codes: The first digit of the Status-Code defines the class of response. The last two digits do not have any categorization role. There are 5 values for the first digit: - 1xx: Informational - Not used, but reserved for future use - 2xx: Success - The action was successfully received, understood, and accepted. - 3xx: Redirection - Further action must be taken in order to complete the request - 4xx: Client Error - The request contains bad syntax or cannot be fulfilled - 5xx: Server Error - The server failed to fulfill an apparently valid request
[ "Check", "a", "URL", "with", "HTTP", "protocol", ".", "Here", "is", "an", "excerpt", "from", "RFC", "1945", "with", "common", "response", "codes", ":", "The", "first", "digit", "of", "the", "Status", "-", "Code", "defines", "the", "class", "of", "respons...
python
train
facelessuser/wcmatch
wcmatch/_wcparse.py
https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/_wcparse.py#L756-L858
def _sequence(self, i): """Handle character group.""" result = ['['] end_range = 0 escape_hyphen = -1 removed = False last_posix = False c = next(i) if c in ('!', '^'): # Handle negate char result.append('^') c = next(i) if c == '[': last_posix = self._handle_posix(i, result, 0) if not last_posix: result.append(re.escape(c)) c = next(i) elif c in ('-', ']'): result.append(re.escape(c)) c = next(i) while c != ']': if c == '-': if last_posix: result.append('\\' + c) last_posix = False elif i.index - 1 > escape_hyphen: # Found a range delimiter. # Mark the next two characters as needing to be escaped if hyphens. # The next character would be the end char range (s-e), # and the one after that would be the potential start char range # of a new range (s-es-e), so neither can be legitimate range delimiters. result.append(c) escape_hyphen = i.index + 1 end_range = i.index elif end_range and i.index - 1 >= end_range: if self._sequence_range_check(result, '\\' + c): removed = True end_range = 0 else: result.append('\\' + c) c = next(i) continue last_posix = False if c == '[': last_posix = self._handle_posix(i, result, end_range) if last_posix: c = next(i) continue if c == '\\': # Handle escapes subindex = i.index try: value = self._references(i, True) except PathNameException: raise StopIteration except StopIteration: i.rewind(i.index - subindex) value = r'\\' elif c == '/': if self.pathname: raise StopIteration value = c elif c in SET_OPERATORS: # Escape &, |, and ~ to avoid &&, ||, and ~~ value = '\\' + c else: # Anything else value = c if end_range and i.index - 1 >= end_range: if self._sequence_range_check(result, value): removed = True end_range = 0 else: result.append(value) c = next(i) result.append(']') # Bad range removed. if removed: value = "".join(result) if value == '[]': # We specified some ranges, but they are all # out of reach. Create an impossible sequence to match. 
result = ['[^%s]' % ('\x00-\xff' if self.is_bytes else uniprops.UNICODE_RANGE)] elif value == '[^]': # We specified some range, but hey are all # out of reach. Since this is exclusive # that means we can match *anything*. result = ['[%s]' % ('\x00-\xff' if self.is_bytes else uniprops.UNICODE_RANGE)] else: result = [value] if self.pathname or self.after_start: return self._restrict_sequence() + ''.join(result) return ''.join(result)
[ "def", "_sequence", "(", "self", ",", "i", ")", ":", "result", "=", "[", "'['", "]", "end_range", "=", "0", "escape_hyphen", "=", "-", "1", "removed", "=", "False", "last_posix", "=", "False", "c", "=", "next", "(", "i", ")", "if", "c", "in", "("...
Handle character group.
[ "Handle", "character", "group", "." ]
python
train
scidash/sciunit
sciunit/utils.py
https://github.com/scidash/sciunit/blob/41b2e38c45c0776727ab1f281a572b65be19cea1/sciunit/utils.py#L352-L376
def import_all_modules(package, skip=None, verbose=False, prefix="", depth=0): """Recursively imports all subpackages, modules, and submodules of a given package. 'package' should be an imported package, not a string. 'skip' is a list of modules or subpackages not to import. """ skip = [] if skip is None else skip for ff, modname, ispkg in pkgutil.walk_packages(path=package.__path__, prefix=prefix, onerror=lambda x: None): if ff.path not in package.__path__[0]: # Solves weird bug continue if verbose: print('\t'*depth,modname) if modname in skip: if verbose: print('\t'*depth,'*Skipping*') continue module = '%s.%s' % (package.__name__,modname) subpackage = importlib.import_module(module) if ispkg: import_all_modules(subpackage, skip=skip, verbose=verbose,depth=depth+1)
[ "def", "import_all_modules", "(", "package", ",", "skip", "=", "None", ",", "verbose", "=", "False", ",", "prefix", "=", "\"\"", ",", "depth", "=", "0", ")", ":", "skip", "=", "[", "]", "if", "skip", "is", "None", "else", "skip", "for", "ff", ",", ...
Recursively imports all subpackages, modules, and submodules of a given package. 'package' should be an imported package, not a string. 'skip' is a list of modules or subpackages not to import.
[ "Recursively", "imports", "all", "subpackages", "modules", "and", "submodules", "of", "a", "given", "package", ".", "package", "should", "be", "an", "imported", "package", "not", "a", "string", ".", "skip", "is", "a", "list", "of", "modules", "or", "subpacka...
python
train
ioos/cc-plugin-ncei
cc_plugin_ncei/ncei_timeseries.py
https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_timeseries.py#L154-L171
def check_recommended_attributes(self, dataset): ''' Feature type specific check of global recommended attributes. :param netCDF4.Dataset dataset: An open netCDF dataset ''' results = [] recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes') # Check time_coverage_duration and resolution for attr in ['time_coverage_duration', 'time_coverage_resolution']: attr_value = getattr(dataset, attr, '') try: parse_duration(attr_value) recommended_ctx.assert_true(True, '') # Score it True! except Exception: recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value)) results.append(recommended_ctx.to_result()) return results
[ "def", "check_recommended_attributes", "(", "self", ",", "dataset", ")", ":", "results", "=", "[", "]", "recommended_ctx", "=", "TestCtx", "(", "BaseCheck", ".", "MEDIUM", ",", "'Recommended global attributes'", ")", "# Check time_coverage_duration and resolution", "for...
Feature type specific check of global recommended attributes. :param netCDF4.Dataset dataset: An open netCDF dataset
[ "Feature", "type", "specific", "check", "of", "global", "recommended", "attributes", "." ]
python
train
abakan-zz/napi
napi/transformers.py
https://github.com/abakan-zz/napi/blob/314da65bd78e2c716b7efb6deaf3816d8f38f7fd/napi/transformers.py#L160-L210
def napi_and(values, **kwargs): """Perform element-wise logical *and* operation on arrays. If *values* contains a non-array object with truth_ value **False**, the outcome will be an array of **False**\s with suitable shape without arrays being evaluated. Non-array objects with truth value **True** are omitted. If array shapes do not match (after squeezing when enabled by user), :exc:`ValueError` is raised. This function uses :obj:`numpy.logical_and` or :obj:`numpy.all`.""" arrays = [] result = None shapes = set() for value in values: if isinstance(value, ndarray) and value.shape: arrays.append(value) shapes.add(value.shape) elif not value: result = value if len(shapes) > 1 and kwargs.get('sq', kwargs.get('squeeze', False)): shapes.clear() for i, a in enumerate(arrays): a = arrays[i] = a.squeeze() shapes.add(a.shape) if len(shapes) > 1: raise ValueError('array shape mismatch, even after squeezing') if len(shapes) > 1: raise ValueError('array shape mismatch') shape = shapes.pop() if shapes else None if result is not None: if shape: return numpy.zeros(shape, bool) else: return result elif arrays: sc = kwargs.get('sc', kwargs.get('shortcircuit', 0)) if sc and numpy.prod(shape) >= sc: return short_circuit_and(arrays, shape) elif len(arrays) == 2: return numpy.logical_and(*arrays) else: return numpy.all(arrays, 0) else: return value
[ "def", "napi_and", "(", "values", ",", "*", "*", "kwargs", ")", ":", "arrays", "=", "[", "]", "result", "=", "None", "shapes", "=", "set", "(", ")", "for", "value", "in", "values", ":", "if", "isinstance", "(", "value", ",", "ndarray", ")", "and", ...
Perform element-wise logical *and* operation on arrays. If *values* contains a non-array object with truth_ value **False**, the outcome will be an array of **False**\s with suitable shape without arrays being evaluated. Non-array objects with truth value **True** are omitted. If array shapes do not match (after squeezing when enabled by user), :exc:`ValueError` is raised. This function uses :obj:`numpy.logical_and` or :obj:`numpy.all`.
[ "Perform", "element", "-", "wise", "logical", "*", "and", "*", "operation", "on", "arrays", "." ]
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_kvlayer.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L249-L266
def get_kvlayer_stream_ids_by_doc_id(client, doc_id): '''Retrieve stream ids from :mod:`kvlayer`. Namely, it returns an iterator over all stream ids with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of str ''' if client is None: client = kvlayer.client() client.setup_namespace(STREAM_ITEM_TABLE_DEFS, STREAM_ITEM_VALUE_DEFS) doc_id_range = make_doc_id_range(doc_id) for k in client.scan_keys(STREAM_ITEMS_TABLE, doc_id_range): yield kvlayer_key_to_stream_id(k)
[ "def", "get_kvlayer_stream_ids_by_doc_id", "(", "client", ",", "doc_id", ")", ":", "if", "client", "is", "None", ":", "client", "=", "kvlayer", ".", "client", "(", ")", "client", ".", "setup_namespace", "(", "STREAM_ITEM_TABLE_DEFS", ",", "STREAM_ITEM_VALUE_DEFS",...
Retrieve stream ids from :mod:`kvlayer`. Namely, it returns an iterator over all stream ids with the given docid. The docid should be an md5 hash of the document's abs_url. :param client: kvlayer client object :type client: :class:`kvlayer.AbstractStorage` :param str doc_id: doc id of documents to retrieve :return: generator of str
[ "Retrieve", "stream", "ids", "from", ":", "mod", ":", "kvlayer", "." ]
python
test
sam-cox/pytides
pytides/tide.py
https://github.com/sam-cox/pytides/blob/63a2507299002f1979ea55a17a82561158d685f7/pytides/tide.py#L229-L239
def _partition(hours, partition = 3600.0): """ Partition a sorted list of numbers (or in this case hours). Arguments: hours -- sorted ndarray of hours. partition -- maximum partition length (default: 3600.0) """ partition = float(partition) relative = hours - hours[0] total_partitions = np.ceil(relative[-1] / partition + 10*np.finfo(np.float).eps).astype('int') return [hours[np.floor(np.divide(relative, partition)) == i] for i in range(total_partitions)]
[ "def", "_partition", "(", "hours", ",", "partition", "=", "3600.0", ")", ":", "partition", "=", "float", "(", "partition", ")", "relative", "=", "hours", "-", "hours", "[", "0", "]", "total_partitions", "=", "np", ".", "ceil", "(", "relative", "[", "-"...
Partition a sorted list of numbers (or in this case hours). Arguments: hours -- sorted ndarray of hours. partition -- maximum partition length (default: 3600.0)
[ "Partition", "a", "sorted", "list", "of", "numbers", "(", "or", "in", "this", "case", "hours", ")", ".", "Arguments", ":", "hours", "--", "sorted", "ndarray", "of", "hours", ".", "partition", "--", "maximum", "partition", "length", "(", "default", ":", "...
python
train
StellarCN/py-stellar-base
stellar_base/builder.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/builder.py#L91-L105
def append_op(self, operation): """Append an :class:`Operation <stellar_base.operation.Operation>` to the list of operations. Add the operation specified if it doesn't already exist in the list of operations of this :class:`Builder` instance. :param operation: The operation to append to the list of operations. :type operation: :class:`Operation` :return: This builder instance. """ if operation not in self.ops: self.ops.append(operation) return self
[ "def", "append_op", "(", "self", ",", "operation", ")", ":", "if", "operation", "not", "in", "self", ".", "ops", ":", "self", ".", "ops", ".", "append", "(", "operation", ")", "return", "self" ]
Append an :class:`Operation <stellar_base.operation.Operation>` to the list of operations. Add the operation specified if it doesn't already exist in the list of operations of this :class:`Builder` instance. :param operation: The operation to append to the list of operations. :type operation: :class:`Operation` :return: This builder instance.
[ "Append", "an", ":", "class", ":", "Operation", "<stellar_base", ".", "operation", ".", "Operation", ">", "to", "the", "list", "of", "operations", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/scoped_variable_list.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/scoped_variable_list.py#L185-L196
def apply_new_scoped_variable_default_value(self, path, new_default_value_str): """Applies the new default value of the scoped variable defined by path :param str path: The path identifying the edited variable :param str new_default_value_str: New default value as string """ data_port_id = self.get_list_store_row_from_cursor_selection()[self.ID_STORAGE_ID] try: if str(self.model.state.scoped_variables[data_port_id].default_value) != new_default_value_str: self.model.state.scoped_variables[data_port_id].default_value = new_default_value_str except (TypeError, AttributeError) as e: logger.error("Error while changing default value: {0}".format(e))
[ "def", "apply_new_scoped_variable_default_value", "(", "self", ",", "path", ",", "new_default_value_str", ")", ":", "data_port_id", "=", "self", ".", "get_list_store_row_from_cursor_selection", "(", ")", "[", "self", ".", "ID_STORAGE_ID", "]", "try", ":", "if", "str...
Applies the new default value of the scoped variable defined by path :param str path: The path identifying the edited variable :param str new_default_value_str: New default value as string
[ "Applies", "the", "new", "default", "value", "of", "the", "scoped", "variable", "defined", "by", "path" ]
python
train
pschmitt/pyteleloisirs
pyteleloisirs/pyteleloisirs.py
https://github.com/pschmitt/pyteleloisirs/blob/d63610fd3729862455ac42afca440469f8063fba/pyteleloisirs/pyteleloisirs.py#L128-L144
def get_remaining_time(program): ''' Get the remaining time in seconds of a program that is currently on. ''' now = datetime.datetime.now() program_start = program.get('start_time') program_end = program.get('end_time') if not program_start or not program_end: _LOGGER.error('Could not determine program start and/or end times.') _LOGGER.debug('Program data: %s', program) return if now > program_end: _LOGGER.error('The provided program has already ended.') _LOGGER.debug('Program data: %s', program) return 0 progress = now - program_start return progress.seconds
[ "def", "get_remaining_time", "(", "program", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "program_start", "=", "program", ".", "get", "(", "'start_time'", ")", "program_end", "=", "program", ".", "get", "(", "'end_time'", ")", ...
Get the remaining time in seconds of a program that is currently on.
[ "Get", "the", "remaining", "time", "in", "seconds", "of", "a", "program", "that", "is", "currently", "on", "." ]
python
train
python-visualization/folium
folium/features.py
https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/features.py#L469-L494
def process_data(self, data): """Convert an unknown data input into a geojson dictionary.""" if isinstance(data, dict): self.embed = True return data elif isinstance(data, str): if data.lower().startswith(('http:', 'ftp:', 'https:')): if not self.embed: self.embed_link = data return requests.get(data).json() elif data.lstrip()[0] in '[{': # This is a GeoJSON inline string self.embed = True return json.loads(data) else: # This is a filename if not self.embed: self.embed_link = data with open(data) as f: return json.loads(f.read()) elif hasattr(data, '__geo_interface__'): self.embed = True if hasattr(data, 'to_crs'): data = data.to_crs(epsg='4326') return json.loads(json.dumps(data.__geo_interface__)) else: raise ValueError('Cannot render objects with any missing geometries' ': {!r}'.format(data))
[ "def", "process_data", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "self", ".", "embed", "=", "True", "return", "data", "elif", "isinstance", "(", "data", ",", "str", ")", ":", "if", "data", ".", "low...
Convert an unknown data input into a geojson dictionary.
[ "Convert", "an", "unknown", "data", "input", "into", "a", "geojson", "dictionary", "." ]
python
train
BerkeleyAutomation/visualization
visualization/visualizer3d.py
https://github.com/BerkeleyAutomation/visualization/blob/f8d038cc65c78f841ef27f99fb2a638f44fa72b6/visualization/visualizer3d.py#L76-L106
def render(n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs): """Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer. """ v = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size, animate=(n_frames > 1), animate_axis=axis, max_frames=n_frames, **kwargs) if clf: Visualizer3D.clf() return v.saved_frames
[ "def", "render", "(", "n_frames", "=", "1", ",", "axis", "=", "np", ".", "array", "(", "[", "0.", ",", "0.", ",", "1.", "]", ")", ",", "clf", "=", "True", ",", "*", "*", "kwargs", ")", ":", "v", "=", "SceneViewer", "(", "Visualizer3D", ".", "...
Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer.
[ "Render", "frames", "from", "the", "viewer", "." ]
python
train
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L2387-L2397
def getActors(self): """Unpack a list of ``vtkActor`` objects from a ``vtkAssembly``.""" cl = vtk.vtkPropCollection() self.GetActors(cl) self.actors = [] cl.InitTraversal() for i in range(self.GetNumberOfPaths()): act = vtk.vtkActor.SafeDownCast(cl.GetNextProp()) if act.GetPickable(): self.actors.append(act) return self.actors
[ "def", "getActors", "(", "self", ")", ":", "cl", "=", "vtk", ".", "vtkPropCollection", "(", ")", "self", ".", "GetActors", "(", "cl", ")", "self", ".", "actors", "=", "[", "]", "cl", ".", "InitTraversal", "(", ")", "for", "i", "in", "range", "(", ...
Unpack a list of ``vtkActor`` objects from a ``vtkAssembly``.
[ "Unpack", "a", "list", "of", "vtkActor", "objects", "from", "a", "vtkAssembly", "." ]
python
train
Azure/azure-sdk-for-python
azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/operations/knowledgebase_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/operations/knowledgebase_operations.py#L285-L347
def update( self, kb_id, update_kb, custom_headers=None, raw=False, **operation_config): """Asynchronous operation to modify a knowledgebase. :param kb_id: Knowledgebase id. :type kb_id: str :param update_kb: Post body of the request. :type update_kb: ~azure.cognitiveservices.knowledge.qnamaker.models.UpdateKbOperationDTO :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: Operation or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.knowledge.qnamaker.models.Operation or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>` """ # Construct URL url = self.update.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), 'kbId': self._serialize.url("kb_id", kb_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: header_parameters.update(custom_headers) # Construct body body_content = self._serialize.body(update_kb, 'UpdateKbOperationDTO') # Construct and send request request = self._client.patch(url, query_parameters, header_parameters, body_content) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [202]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None header_dict = {} if response.status_code == 202: deserialized = self._deserialize('Operation', response) header_dict = { 'Location': 'str', } if raw: client_raw_response = 
ClientRawResponse(deserialized, response) client_raw_response.add_headers(header_dict) return client_raw_response return deserialized
[ "def", "update", "(", "self", ",", "kb_id", ",", "update_kb", ",", "custom_headers", "=", "None", ",", "raw", "=", "False", ",", "*", "*", "operation_config", ")", ":", "# Construct URL", "url", "=", "self", ".", "update", ".", "metadata", "[", "'url'", ...
Asynchronous operation to modify a knowledgebase. :param kb_id: Knowledgebase id. :type kb_id: str :param update_kb: Post body of the request. :type update_kb: ~azure.cognitiveservices.knowledge.qnamaker.models.UpdateKbOperationDTO :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: Operation or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.knowledge.qnamaker.models.Operation or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.knowledge.qnamaker.models.ErrorResponseException>`
[ "Asynchronous", "operation", "to", "modify", "a", "knowledgebase", "." ]
python
test
DocNow/twarc
twarc/decorators.py
https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/decorators.py#L81-L93
def catch_timeout(f): """ A decorator to handle read timeouts from Twitter. """ def new_f(self, *args, **kwargs): try: return f(self, *args, **kwargs) except (requests.exceptions.ReadTimeout, requests.packages.urllib3.exceptions.ReadTimeoutError) as e: log.warning("caught read timeout: %s", e) self.connect() return f(self, *args, **kwargs) return new_f
[ "def", "catch_timeout", "(", "f", ")", ":", "def", "new_f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "requests", "."...
A decorator to handle read timeouts from Twitter.
[ "A", "decorator", "to", "handle", "read", "timeouts", "from", "Twitter", "." ]
python
train
foremast/foremast
src/foremast/securitygroup/destroy_sg/destroy_sg.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/securitygroup/destroy_sg/destroy_sg.py#L27-L52
def destroy_sg(app='', env='', region='', **_): """Destroy Security Group. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): Region name, e.g. us-east-1. Returns: True upon successful completion. """ vpc = get_vpc_id(account=env, region=region) url = '{api}/securityGroups/{env}/{region}/{app}'.format(api=API_URL, env=env, region=region, app=app) payload = {'vpcId': vpc} security_group = requests.get(url, params=payload, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT) if not security_group: LOG.info('Nothing to delete.') else: LOG.info('Found Security Group in %(region)s: %(name)s', security_group) destroy_request = get_template('destroy/destroy_sg.json.j2', app=app, env=env, region=region, vpc=vpc) wait_for_task(destroy_request) return True
[ "def", "destroy_sg", "(", "app", "=", "''", ",", "env", "=", "''", ",", "region", "=", "''", ",", "*", "*", "_", ")", ":", "vpc", "=", "get_vpc_id", "(", "account", "=", "env", ",", "region", "=", "region", ")", "url", "=", "'{api}/securityGroups/{...
Destroy Security Group. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): Region name, e.g. us-east-1. Returns: True upon successful completion.
[ "Destroy", "Security", "Group", "." ]
python
train
Neurosim-lab/netpyne
netpyne/conversion/sonataImport.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/conversion/sonataImport.py#L103-L131
def fix_axon_peri(hobj): """Replace reconstructed axon with a stub :param hobj: hoc object """ for i,sec in enumerate(hobj.axon): hobj.axon[i] = None for i,sec in enumerate(hobj.all): if 'axon' in sec.name(): hobj.all[i] = None hobj.all = [sec for sec in hobj.all if sec is not None] hobj.axon = None #h.execute('create axon[2]', hobj) hobj.axon = [h.Section(name='axon[0]'), h.Section(name='axon[1]')] hobj.axonal = [] for sec in hobj.axon: sec.L = 30 sec.diam = 1 hobj.axonal.append(sec) hobj.all.append(sec) # need to remove this comment hobj.axon[0].connect(hobj.soma[0], 0.5, 0) hobj.axon[1].connect(hobj.axon[0], 1, 0) h.define_shape()
[ "def", "fix_axon_peri", "(", "hobj", ")", ":", "for", "i", ",", "sec", "in", "enumerate", "(", "hobj", ".", "axon", ")", ":", "hobj", ".", "axon", "[", "i", "]", "=", "None", "for", "i", ",", "sec", "in", "enumerate", "(", "hobj", ".", "all", "...
Replace reconstructed axon with a stub :param hobj: hoc object
[ "Replace", "reconstructed", "axon", "with", "a", "stub", ":", "param", "hobj", ":", "hoc", "object" ]
python
train
haikuginger/beekeeper
beekeeper/comms.py
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/comms.py#L152-L160
def mimetype(self): """ Get the Content-Type header from the response. Strip the ";charset=xxxxx" portion if necessary. If we can't find it, use the predefined format. """ if ';' in self.headers.get('Content-Type', ''): return self.headers['Content-Type'].split(';')[0] return self.headers.get('Content-Type', self.static_format)
[ "def", "mimetype", "(", "self", ")", ":", "if", "';'", "in", "self", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ":", "return", "self", ".", "headers", "[", "'Content-Type'", "]", ".", "split", "(", "';'", ")", "[", "0", "]",...
Get the Content-Type header from the response. Strip the ";charset=xxxxx" portion if necessary. If we can't find it, use the predefined format.
[ "Get", "the", "Content", "-", "Type", "header", "from", "the", "response", ".", "Strip", "the", ";", "charset", "=", "xxxxx", "portion", "if", "necessary", ".", "If", "we", "can", "t", "find", "it", "use", "the", "predefined", "format", "." ]
python
train
alfredodeza/remoto
remoto/connection.py
https://github.com/alfredodeza/remoto/blob/b7625e571a4b6c83f9589a1e9ad07354e42bf0d3/remoto/connection.py#L10-L48
def get(name, fallback='ssh'): """ Retrieve the matching backend class from a string. If no backend can be matched, it raises an error. >>> get('ssh') <class 'remoto.backends.BaseConnection'> >>> get() <class 'remoto.backends.BaseConnection'> >>> get('non-existent') <class 'remoto.backends.BaseConnection'> >>> get('non-existent', 'openshift') <class 'remoto.backends.openshift.OpenshiftConnection'> """ mapping = { 'ssh': ssh.SshConnection, 'oc': openshift.OpenshiftConnection, 'openshift': openshift.OpenshiftConnection, 'kubernetes': kubernetes.KubernetesConnection, 'k8s': kubernetes.KubernetesConnection, 'local': local.LocalConnection, 'popen': local.LocalConnection, 'localhost': local.LocalConnection, 'docker': docker.DockerConnection, 'podman': podman.PodmanConnection, } if not name: # fallsback to just plain local/ssh name = 'ssh' name = name.strip().lower() connection_class = mapping.get(name) if not connection_class: logger.warning('no connection backend found for: "%s"' % name) if fallback: logger.info('falling back to "%s"' % fallback) # this assumes that ``fallback`` is a valid mapping name return mapping.get(fallback) return connection_class
[ "def", "get", "(", "name", ",", "fallback", "=", "'ssh'", ")", ":", "mapping", "=", "{", "'ssh'", ":", "ssh", ".", "SshConnection", ",", "'oc'", ":", "openshift", ".", "OpenshiftConnection", ",", "'openshift'", ":", "openshift", ".", "OpenshiftConnection", ...
Retrieve the matching backend class from a string. If no backend can be matched, it raises an error. >>> get('ssh') <class 'remoto.backends.BaseConnection'> >>> get() <class 'remoto.backends.BaseConnection'> >>> get('non-existent') <class 'remoto.backends.BaseConnection'> >>> get('non-existent', 'openshift') <class 'remoto.backends.openshift.OpenshiftConnection'>
[ "Retrieve", "the", "matching", "backend", "class", "from", "a", "string", ".", "If", "no", "backend", "can", "be", "matched", "it", "raises", "an", "error", "." ]
python
train
CalebBell/thermo
thermo/eos_mix.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/eos_mix.py#L330-L353
def solve_T(self, P, V, quick=True): r'''Generic method to calculate `T` from a specified `P` and `V`. Provides SciPy's `newton` solver, and iterates to solve the general equation for `P`, recalculating `a_alpha` as a function of temperature using `a_alpha_and_derivatives` each iteration. Parameters ---------- P : float Pressure, [Pa] V : float Molar volume, [m^3/mol] quick : bool, optional Unimplemented, although it may be possible to derive explicit expressions as done for many pure-component EOS Returns ------- T : float Temperature, [K] ''' self.Tc = sum(self.Tcs)/self.N # -4 goes back from object, GCEOS return super(type(self).__mro__[-3], self).solve_T(P=P, V=V, quick=quick)
[ "def", "solve_T", "(", "self", ",", "P", ",", "V", ",", "quick", "=", "True", ")", ":", "self", ".", "Tc", "=", "sum", "(", "self", ".", "Tcs", ")", "/", "self", ".", "N", "# -4 goes back from object, GCEOS", "return", "super", "(", "type", "(", "s...
r'''Generic method to calculate `T` from a specified `P` and `V`. Provides SciPy's `newton` solver, and iterates to solve the general equation for `P`, recalculating `a_alpha` as a function of temperature using `a_alpha_and_derivatives` each iteration. Parameters ---------- P : float Pressure, [Pa] V : float Molar volume, [m^3/mol] quick : bool, optional Unimplemented, although it may be possible to derive explicit expressions as done for many pure-component EOS Returns ------- T : float Temperature, [K]
[ "r", "Generic", "method", "to", "calculate", "T", "from", "a", "specified", "P", "and", "V", ".", "Provides", "SciPy", "s", "newton", "solver", "and", "iterates", "to", "solve", "the", "general", "equation", "for", "P", "recalculating", "a_alpha", "as", "a...
python
valid
tmontaigu/pylas
pylas/lasreader.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L15-L21
def _raise_if_wrong_file_signature(stream): """ Reads the 4 first bytes of the stream to check that is LASF""" file_sig = stream.read(len(headers.LAS_FILE_SIGNATURE)) if file_sig != headers.LAS_FILE_SIGNATURE: raise errors.PylasError( "File Signature ({}) is not {}".format(file_sig, headers.LAS_FILE_SIGNATURE) )
[ "def", "_raise_if_wrong_file_signature", "(", "stream", ")", ":", "file_sig", "=", "stream", ".", "read", "(", "len", "(", "headers", ".", "LAS_FILE_SIGNATURE", ")", ")", "if", "file_sig", "!=", "headers", ".", "LAS_FILE_SIGNATURE", ":", "raise", "errors", "."...
Reads the 4 first bytes of the stream to check that is LASF
[ "Reads", "the", "4", "first", "bytes", "of", "the", "stream", "to", "check", "that", "is", "LASF" ]
python
test
ShawnClake/Apitax
apitax/ah/api/controllers/admins_controller.py
https://github.com/ShawnClake/Apitax/blob/2eb9c6990d4088b2503c7f13c2a76f8e59606e6d/apitax/ah/api/controllers/admins_controller.py#L54-L67
def system_status(): # noqa: E501 """Retrieve the system status Retrieve the system status # noqa: E501 :rtype: Response """ if(not hasAccess()): return redirectUnauthorized() body = State.config.serialize(["driver", "log", "log-file", "log-colorize"]) body.update({'debug': State.options.debug, 'sensitive': State.options.sensitive}) return Response(status=200, body=body)
[ "def", "system_status", "(", ")", ":", "# noqa: E501", "if", "(", "not", "hasAccess", "(", ")", ")", ":", "return", "redirectUnauthorized", "(", ")", "body", "=", "State", ".", "config", ".", "serialize", "(", "[", "\"driver\"", ",", "\"log\"", ",", "\"l...
Retrieve the system status Retrieve the system status # noqa: E501 :rtype: Response
[ "Retrieve", "the", "system", "status" ]
python
train
hydpy-dev/hydpy
hydpy/core/netcdftools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/netcdftools.py#L700-L709
def write(self) -> None: """Call method |NetCDFFile.write| of all handled |NetCDFFile| objects. """ if self.folders: init = hydpy.pub.timegrids.init timeunits = init.firstdate.to_cfunits('hours') timepoints = init.to_timepoints('hours') for folder in self.folders.values(): for file_ in folder.values(): file_.write(timeunits, timepoints)
[ "def", "write", "(", "self", ")", "->", "None", ":", "if", "self", ".", "folders", ":", "init", "=", "hydpy", ".", "pub", ".", "timegrids", ".", "init", "timeunits", "=", "init", ".", "firstdate", ".", "to_cfunits", "(", "'hours'", ")", "timepoints", ...
Call method |NetCDFFile.write| of all handled |NetCDFFile| objects.
[ "Call", "method", "|NetCDFFile", ".", "write|", "of", "all", "handled", "|NetCDFFile|", "objects", "." ]
python
train
allenai/allennlp
allennlp/common/checks.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/checks.py#L51-L71
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]: """ Disambiguates single GPU and multiple GPU settings for cuda_device param. """ def from_list(strings): if len(strings) > 1: return [int(d) for d in strings] elif len(strings) == 1: return int(strings[0]) else: return -1 if isinstance(cuda_device, str): return from_list(re.split(r',\s*', cuda_device)) elif isinstance(cuda_device, int): return cuda_device elif isinstance(cuda_device, list): return from_list(cuda_device) else: # TODO(brendanr): Determine why mypy can't tell that this matches the Union. return int(cuda_device)
[ "def", "parse_cuda_device", "(", "cuda_device", ":", "Union", "[", "str", ",", "int", ",", "List", "[", "int", "]", "]", ")", "->", "Union", "[", "int", ",", "List", "[", "int", "]", "]", ":", "def", "from_list", "(", "strings", ")", ":", "if", "...
Disambiguates single GPU and multiple GPU settings for cuda_device param.
[ "Disambiguates", "single", "GPU", "and", "multiple", "GPU", "settings", "for", "cuda_device", "param", "." ]
python
train
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L589-L605
def service_list(profile=None, **connection_args): ''' Return a list of available services (keystone services-list) CLI Example: .. code-block:: bash salt '*' keystone.service_list ''' kstone = auth(profile, **connection_args) ret = {} for service in kstone.services.list(): ret[service.name] = dict((value, getattr(service, value)) for value in dir(service) if not value.startswith('_') and isinstance(getattr(service, value), (six.string_types, dict, bool))) return ret
[ "def", "service_list", "(", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "ret", "=", "{", "}", "for", "service", "in", "kstone", ".", "services", ".", ...
Return a list of available services (keystone services-list) CLI Example: .. code-block:: bash salt '*' keystone.service_list
[ "Return", "a", "list", "of", "available", "services", "(", "keystone", "services", "-", "list", ")" ]
python
train
Arubacloud/pyArubaCloud
ArubaCloud/ReverseDns/ReverseDns.py
https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/ReverseDns/ReverseDns.py#L12-L21
def get(self, addresses): """ :type addresses: list[str] :param addresses: (list[str]) List of addresses to retrieve their reverse dns Retrieve the current configured ReverseDns entries :return: (list) List containing the current ReverseDns Addresses """ request = self._call(GetReverseDns.GetReverseDns, IPs=addresses) response = request.commit() return response['Value']
[ "def", "get", "(", "self", ",", "addresses", ")", ":", "request", "=", "self", ".", "_call", "(", "GetReverseDns", ".", "GetReverseDns", ",", "IPs", "=", "addresses", ")", "response", "=", "request", ".", "commit", "(", ")", "return", "response", "[", ...
:type addresses: list[str] :param addresses: (list[str]) List of addresses to retrieve their reverse dns Retrieve the current configured ReverseDns entries :return: (list) List containing the current ReverseDns Addresses
[ ":", "type", "addresses", ":", "list", "[", "str", "]", ":", "param", "addresses", ":", "(", "list", "[", "str", "]", ")", "List", "of", "addresses", "to", "retrieve", "their", "reverse", "dns", "Retrieve", "the", "current", "configured", "ReverseDns", "...
python
train
MakerReduxCorp/PLOD
PLOD/__init__.py
https://github.com/MakerReduxCorp/PLOD/blob/707502cd928e5be6bd5e46d7f6de7da0e188cf1e/PLOD/__init__.py#L195-L249
def upsert(self, key, value, entry): '''Update or Insert an entry into the list of dictionaries. If a dictionary in the list is found where key matches the value, then the FIRST matching list entry is replaced with entry else the entry is appended to the end of the list. The new entry is not examined in any way. It is, in fact, possible to upsert an entry that does not match the supplied key/value. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> entryA = {"name": "Willie", "age": 77} >>> myPLOD = PLOD(test) >>> print myPLOD.upsert("name", "Willie", entryA).returnString() [ {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]}, {age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]}, {age: 19, income: 29000, name: 'Bill' , wigs: None }, {age: 77, income: None , name: 'Willie', wigs: None } ] >>> entryB = {"name": "Joe", "age": 20, "income": 30, "wigs": [3, 2, 9]} >>> print myPLOD.upsert("name", "Joe", entryB).returnString() [ {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]}, {age: 20, income: 30, name: 'Joe' , wigs: [3, 2, 9]}, {age: 19, income: 29000, name: 'Bill' , wigs: None }, {age: 77, income: None , name: 'Willie', wigs: None } ] :param key: The dictionary key to examine. :param value: The value to search for as referenced by the key. :param entry: The replacement (or new) entry for the list. :returns: class ''' index=internal.get_index(self.table, key, self.EQUAL, value) if index is None: self.index_track.append(len(self.table)) self.table.append(entry) else: self.table[index]=entry return self
[ "def", "upsert", "(", "self", ",", "key", ",", "value", ",", "entry", ")", ":", "index", "=", "internal", ".", "get_index", "(", "self", ".", "table", ",", "key", ",", "self", ".", "EQUAL", ",", "value", ")", "if", "index", "is", "None", ":", "se...
Update or Insert an entry into the list of dictionaries. If a dictionary in the list is found where key matches the value, then the FIRST matching list entry is replaced with entry else the entry is appended to the end of the list. The new entry is not examined in any way. It is, in fact, possible to upsert an entry that does not match the supplied key/value. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> entryA = {"name": "Willie", "age": 77} >>> myPLOD = PLOD(test) >>> print myPLOD.upsert("name", "Willie", entryA).returnString() [ {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]}, {age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]}, {age: 19, income: 29000, name: 'Bill' , wigs: None }, {age: 77, income: None , name: 'Willie', wigs: None } ] >>> entryB = {"name": "Joe", "age": 20, "income": 30, "wigs": [3, 2, 9]} >>> print myPLOD.upsert("name", "Joe", entryB).returnString() [ {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 18, income: None , name: 'Larry' , wigs: [3, 2, 9]}, {age: 20, income: 30, name: 'Joe' , wigs: [3, 2, 9]}, {age: 19, income: 29000, name: 'Bill' , wigs: None }, {age: 77, income: None , name: 'Willie', wigs: None } ] :param key: The dictionary key to examine. :param value: The value to search for as referenced by the key. :param entry: The replacement (or new) entry for the list. :returns: class
[ "Update", "or", "Insert", "an", "entry", "into", "the", "list", "of", "dictionaries", "." ]
python
train
auth0/auth0-python
auth0/v3/management/users.py
https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/management/users.py#L184-L193
def get_guardian_enrollments(self, user_id): """Retrieves all Guardian enrollments. Args: user_id (str): The user_id of the user to retrieve See: https://auth0.com/docs/api/management/v2#!/Users/get_enrollments """ url = self._url('{}/enrollments'.format(user_id)) return self.client.get(url)
[ "def", "get_guardian_enrollments", "(", "self", ",", "user_id", ")", ":", "url", "=", "self", ".", "_url", "(", "'{}/enrollments'", ".", "format", "(", "user_id", ")", ")", "return", "self", ".", "client", ".", "get", "(", "url", ")" ]
Retrieves all Guardian enrollments. Args: user_id (str): The user_id of the user to retrieve See: https://auth0.com/docs/api/management/v2#!/Users/get_enrollments
[ "Retrieves", "all", "Guardian", "enrollments", "." ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_misc.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_misc.py#L314-L320
def cmd_devid(self, args): '''decode device IDs from parameters''' for p in self.mav_param.keys(): if p.startswith('COMPASS_DEV_ID'): mp_util.decode_devid(self.mav_param[p], p) if p.startswith('INS_') and p.endswith('_ID'): mp_util.decode_devid(self.mav_param[p], p)
[ "def", "cmd_devid", "(", "self", ",", "args", ")", ":", "for", "p", "in", "self", ".", "mav_param", ".", "keys", "(", ")", ":", "if", "p", ".", "startswith", "(", "'COMPASS_DEV_ID'", ")", ":", "mp_util", ".", "decode_devid", "(", "self", ".", "mav_pa...
decode device IDs from parameters
[ "decode", "device", "IDs", "from", "parameters" ]
python
train
adewes/blitzdb
blitzdb/backends/file/index.py
https://github.com/adewes/blitzdb/blob/4b459e0bcde9e1f6224dd4e3bea74194586864b0/blitzdb/backends/file/index.py#L193-L210
def save_to_data(self, in_place=False): """Save index to data structure. :param in_place: Do not copy index value to a new list object :type in_place: bool :return: Index data structure :rtype: list """ if in_place: return [ list(self._index.items()), list(self._undefined_keys.keys()) ] return ( [(key, values[:]) for key, values in self._index.items()], list(self._undefined_keys.keys()), )
[ "def", "save_to_data", "(", "self", ",", "in_place", "=", "False", ")", ":", "if", "in_place", ":", "return", "[", "list", "(", "self", ".", "_index", ".", "items", "(", ")", ")", ",", "list", "(", "self", ".", "_undefined_keys", ".", "keys", "(", ...
Save index to data structure. :param in_place: Do not copy index value to a new list object :type in_place: bool :return: Index data structure :rtype: list
[ "Save", "index", "to", "data", "structure", "." ]
python
train
delph-in/pydelphin
delphin/mrs/vpm.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/vpm.py#L151-L173
def _valmatch(vs, ss, op, varsort, semi, section): """ Return `True` if for every paired *v* and *s* from *vs* and *ss*: v <> s (subsumption or equality if *semi* is `None`) v == s (equality) s == '*' s == '!' and v == `None` s == '[xyz]' and varsort == 'xyz' """ if op in _EQUAL_OPS or semi is None: return all( s == v or # value equality (s == '*' and v is not None) or # non-null wildcard ( v is None and ( # value is null (any or with matching varsort) s == '!' or (s[0], s[-1], s[1:-1]) == ('[', ']', varsort) ) ) for v, s in zip(vs, ss) ) else: pass
[ "def", "_valmatch", "(", "vs", ",", "ss", ",", "op", ",", "varsort", ",", "semi", ",", "section", ")", ":", "if", "op", "in", "_EQUAL_OPS", "or", "semi", "is", "None", ":", "return", "all", "(", "s", "==", "v", "or", "# value equality", "(", "s", ...
Return `True` if for every paired *v* and *s* from *vs* and *ss*: v <> s (subsumption or equality if *semi* is `None`) v == s (equality) s == '*' s == '!' and v == `None` s == '[xyz]' and varsort == 'xyz'
[ "Return", "True", "if", "for", "every", "paired", "*", "v", "*", "and", "*", "s", "*", "from", "*", "vs", "*", "and", "*", "ss", "*", ":", "v", "<", ">", "s", "(", "subsumption", "or", "equality", "if", "*", "semi", "*", "is", "None", ")", "v...
python
train
spdx/tools-python
spdx/parsers/tagvaluebuilders.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvaluebuilders.py#L1062-L1072
def set_lic_id(self, doc, lic_id): """Adds a new extracted license to the document. Raises SPDXValueError if data format is incorrect. """ # FIXME: this state does not make sense self.reset_extr_lics() if validations.validate_extracted_lic_id(lic_id): doc.add_extr_lic(document.ExtractedLicense(lic_id)) return True else: raise SPDXValueError('ExtractedLicense::id')
[ "def", "set_lic_id", "(", "self", ",", "doc", ",", "lic_id", ")", ":", "# FIXME: this state does not make sense", "self", ".", "reset_extr_lics", "(", ")", "if", "validations", ".", "validate_extracted_lic_id", "(", "lic_id", ")", ":", "doc", ".", "add_extr_lic", ...
Adds a new extracted license to the document. Raises SPDXValueError if data format is incorrect.
[ "Adds", "a", "new", "extracted", "license", "to", "the", "document", ".", "Raises", "SPDXValueError", "if", "data", "format", "is", "incorrect", "." ]
python
valid
PythonicNinja/pydrill
pydrill/client/__init__.py
https://github.com/PythonicNinja/pydrill/blob/0713e78c84d44cd438018e4ba1588a8e242f78c4/pydrill/client/__init__.py#L68-L75
def plan(self, sql, timeout=10): """ :param sql: string :param timeout: int :return: pydrill.client.ResultQuery """ sql = 'explain plan for ' + sql return self.query(sql, timeout)
[ "def", "plan", "(", "self", ",", "sql", ",", "timeout", "=", "10", ")", ":", "sql", "=", "'explain plan for '", "+", "sql", "return", "self", ".", "query", "(", "sql", ",", "timeout", ")" ]
:param sql: string :param timeout: int :return: pydrill.client.ResultQuery
[ ":", "param", "sql", ":", "string", ":", "param", "timeout", ":", "int", ":", "return", ":", "pydrill", ".", "client", ".", "ResultQuery" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/block.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/block.py#L262-L304
def order_duplicate_volume(self, origin_volume_id, origin_snapshot_id=None, duplicate_size=None, duplicate_iops=None, duplicate_tier_level=None, duplicate_snapshot_size=None, hourly_billing_flag=False): """Places an order for a duplicate block volume. :param origin_volume_id: The ID of the origin volume to be duplicated :param origin_snapshot_id: Origin snapshot ID to use for duplication :param duplicate_size: Size/capacity for the duplicate volume :param duplicate_iops: The IOPS per GB for the duplicate volume :param duplicate_tier_level: Tier level for the duplicate volume :param duplicate_snapshot_size: Snapshot space size for the duplicate :param hourly_billing_flag: Billing type, monthly (False) or hourly (True), default to monthly. :return: Returns a SoftLayer_Container_Product_Order_Receipt """ block_mask = 'id,billingItem[location,hourlyFlag],snapshotCapacityGb,'\ 'storageType[keyName],capacityGb,originalVolumeSize,'\ 'provisionedIops,storageTierLevel,osType[keyName],'\ 'staasVersion,hasEncryptionAtRest' origin_volume = self.get_block_volume_details(origin_volume_id, mask=block_mask) if isinstance(utils.lookup(origin_volume, 'osType', 'keyName'), str): os_type = origin_volume['osType']['keyName'] else: raise exceptions.SoftLayerError( "Cannot find origin volume's os-type") order = storage_utils.prepare_duplicate_order_object( self, origin_volume, duplicate_iops, duplicate_tier_level, duplicate_size, duplicate_snapshot_size, 'block', hourly_billing_flag ) order['osFormatType'] = {'keyName': os_type} if origin_snapshot_id is not None: order['duplicateOriginSnapshotId'] = origin_snapshot_id return self.client.call('Product_Order', 'placeOrder', order)
[ "def", "order_duplicate_volume", "(", "self", ",", "origin_volume_id", ",", "origin_snapshot_id", "=", "None", ",", "duplicate_size", "=", "None", ",", "duplicate_iops", "=", "None", ",", "duplicate_tier_level", "=", "None", ",", "duplicate_snapshot_size", "=", "Non...
Places an order for a duplicate block volume. :param origin_volume_id: The ID of the origin volume to be duplicated :param origin_snapshot_id: Origin snapshot ID to use for duplication :param duplicate_size: Size/capacity for the duplicate volume :param duplicate_iops: The IOPS per GB for the duplicate volume :param duplicate_tier_level: Tier level for the duplicate volume :param duplicate_snapshot_size: Snapshot space size for the duplicate :param hourly_billing_flag: Billing type, monthly (False) or hourly (True), default to monthly. :return: Returns a SoftLayer_Container_Product_Order_Receipt
[ "Places", "an", "order", "for", "a", "duplicate", "block", "volume", "." ]
python
train
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3134-L3140
def lost_master_primary(self): """ Schedule an primary connection check which in turn can send a view change message """ self.primaries_disconnection_times[self.master_replica.instId] = time.perf_counter() self._schedule_view_change()
[ "def", "lost_master_primary", "(", "self", ")", ":", "self", ".", "primaries_disconnection_times", "[", "self", ".", "master_replica", ".", "instId", "]", "=", "time", ".", "perf_counter", "(", ")", "self", ".", "_schedule_view_change", "(", ")" ]
Schedule an primary connection check which in turn can send a view change message
[ "Schedule", "an", "primary", "connection", "check", "which", "in", "turn", "can", "send", "a", "view", "change", "message" ]
python
train
pazz/alot
alot/helper.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/helper.py#L440-L488
def mimewrap(path, filename=None, ctype=None): """Take the contents of the given path and wrap them into an email MIME part according to the content type. The content type is auto detected from the actual file contents and the file name if it is not given. :param path: the path to the file contents :type path: str :param filename: the file name to use in the generated MIME part :type filename: str or None :param ctype: the content type of the file contents in path :type ctype: str or None :returns: the message MIME part storing the data from path :rtype: subclasses of email.mime.base.MIMEBase """ with open(path, 'rb') as f: content = f.read() if not ctype: ctype = guess_mimetype(content) # libmagic < 5.12 incorrectly detects excel/powerpoint files as # 'application/msword' (see #179 and #186 in libmagic bugtracker) # This is a workaround, based on file extension, useful as long # as distributions still ship libmagic 5.11. if (ctype == 'application/msword' and not libmagic_version_at_least(513)): mimetype, _ = mimetypes.guess_type(path) if mimetype: ctype = mimetype maintype, subtype = ctype.split('/', 1) if maintype == 'text': part = MIMEText(content.decode(guess_encoding(content), 'replace'), _subtype=subtype, _charset='utf-8') elif maintype == 'image': part = MIMEImage(content, _subtype=subtype) elif maintype == 'audio': part = MIMEAudio(content, _subtype=subtype) else: part = MIMEBase(maintype, subtype) part.set_payload(content) # Encode the payload using Base64 email.encoders.encode_base64(part) # Set the filename parameter if not filename: filename = os.path.basename(path) part.add_header('Content-Disposition', 'attachment', filename=filename) return part
[ "def", "mimewrap", "(", "path", ",", "filename", "=", "None", ",", "ctype", "=", "None", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "if", "not", "ctype", ":", "ctype", "=...
Take the contents of the given path and wrap them into an email MIME part according to the content type. The content type is auto detected from the actual file contents and the file name if it is not given. :param path: the path to the file contents :type path: str :param filename: the file name to use in the generated MIME part :type filename: str or None :param ctype: the content type of the file contents in path :type ctype: str or None :returns: the message MIME part storing the data from path :rtype: subclasses of email.mime.base.MIMEBase
[ "Take", "the", "contents", "of", "the", "given", "path", "and", "wrap", "them", "into", "an", "email", "MIME", "part", "according", "to", "the", "content", "type", ".", "The", "content", "type", "is", "auto", "detected", "from", "the", "actual", "file", ...
python
train
horazont/aioxmpp
aioxmpp/security_layer.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/security_layer.py#L541-L549
def pin(self, hostname, x509): """ Pin an :class:`OpenSSL.crypto.X509` object `x509` for use with the given `hostname`. Which information exactly is used to identify the certificate depends :meth:`_x509_key`. """ key = self._x509_key(x509) self._storage.setdefault(hostname, set()).add(key)
[ "def", "pin", "(", "self", ",", "hostname", ",", "x509", ")", ":", "key", "=", "self", ".", "_x509_key", "(", "x509", ")", "self", ".", "_storage", ".", "setdefault", "(", "hostname", ",", "set", "(", ")", ")", ".", "add", "(", "key", ")" ]
Pin an :class:`OpenSSL.crypto.X509` object `x509` for use with the given `hostname`. Which information exactly is used to identify the certificate depends :meth:`_x509_key`.
[ "Pin", "an", ":", "class", ":", "OpenSSL", ".", "crypto", ".", "X509", "object", "x509", "for", "use", "with", "the", "given", "hostname", ".", "Which", "information", "exactly", "is", "used", "to", "identify", "the", "certificate", "depends", ":", "meth",...
python
train
smarie/python-autoclass
autoclass/autoprops_.py
https://github.com/smarie/python-autoclass/blob/097098776c69ebc87bc1aeda6997431b29bd583a/autoclass/autoprops_.py#L281-L338
def _get_setter_fun(object_type, # type: Type parameter, # type: Parameter private_property_name # type: str ): """ Utility method to find the overridden setter function for a given property, or generate a new one :param object_type: :param property_name: :param property_type: :param private_property_name: :return: """ # the property will have the same name than the constructor argument property_name = parameter.name overridden_setters = getmembers(object_type, _has_annotation(__SETTER_OVERRIDE_ANNOTATION, property_name)) if len(overridden_setters) > 0: # --check that we only have one if len(overridden_setters) > 1: raise DuplicateOverrideError('Setter is overridden more than once for attribute name : %s' % property_name) # --use the overridden setter setter_fun = overridden_setters[0][1] try: # python 2 setter_fun = setter_fun.im_func except AttributeError: pass # --find the parameter name and check the signature s = signature(setter_fun) p = [attribute_name for attribute_name, param in s.parameters.items() if attribute_name is not 'self'] if len(p) != 1: try: qname = setter_fun.__qualname__ except AttributeError: qname = setter_fun.__name__ raise IllegalSetterSignatureException('overridden setter must have only 1 non-self argument, found ' + '%s for function %s' '' % (len(s.parameters.items()) - 1, qname)) var_name = p[0] else: # --create the setter, equivalent of: # ** Dynamically compile a wrapper with correct argument name ** sig = Signature(parameters=[Parameter('self', kind=Parameter.POSITIONAL_OR_KEYWORD), parameter]) @with_signature(sig) def autoprops_generated_setter(self, **kwargs): setattr(self, private_property_name, kwargs.popitem()[1]) setter_fun = autoprops_generated_setter var_name = property_name return setter_fun, var_name
[ "def", "_get_setter_fun", "(", "object_type", ",", "# type: Type", "parameter", ",", "# type: Parameter", "private_property_name", "# type: str", ")", ":", "# the property will have the same name than the constructor argument", "property_name", "=", "parameter", ".", "name", "o...
Utility method to find the overridden setter function for a given property, or generate a new one :param object_type: :param property_name: :param property_type: :param private_property_name: :return:
[ "Utility", "method", "to", "find", "the", "overridden", "setter", "function", "for", "a", "given", "property", "or", "generate", "a", "new", "one" ]
python
train
pantsbuild/pants
src/python/pants/build_graph/build_graph.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_graph.py#L169-L180
def dependents_of(self, address): """Returns the addresses of the targets that depend on the target at `address`. This method asserts that the address given is actually in the BuildGraph. :API: public """ assert address in self._target_by_address, ( 'Cannot retrieve dependents of {address} because it is not in the BuildGraph.' .format(address=address) ) return self._target_dependees_by_address[address]
[ "def", "dependents_of", "(", "self", ",", "address", ")", ":", "assert", "address", "in", "self", ".", "_target_by_address", ",", "(", "'Cannot retrieve dependents of {address} because it is not in the BuildGraph.'", ".", "format", "(", "address", "=", "address", ")", ...
Returns the addresses of the targets that depend on the target at `address`. This method asserts that the address given is actually in the BuildGraph. :API: public
[ "Returns", "the", "addresses", "of", "the", "targets", "that", "depend", "on", "the", "target", "at", "address", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/stanzaprocessor.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/stanzaprocessor.py#L240-L275
def __try_handlers(self, handler_list, stanza, stanza_type = None): """ Search the handler list for handlers matching given stanza type and payload namespace. Run the handlers found ordering them by priority until the first one which returns `True`. :Parameters: - `handler_list`: list of available handlers - `stanza`: the stanza to handle - `stanza_type`: stanza type override (value of its "type" attribute) :return: result of the last handler or `False` if no handler was found. """ # pylint: disable=W0212 if stanza_type is None: stanza_type = stanza.stanza_type payload = stanza.get_all_payload() classes = [p.__class__ for p in payload] keys = [(p.__class__, p.handler_key) for p in payload] for handler in handler_list: type_filter = handler._pyxmpp_stanza_handled[1] class_filter = handler._pyxmpp_payload_class_handled extra_filter = handler._pyxmpp_payload_key if type_filter != stanza_type: continue if class_filter: if extra_filter is None and class_filter not in classes: continue if extra_filter and (class_filter, extra_filter) not in keys: continue response = handler(stanza) if self._process_handler_result(response): return True return False
[ "def", "__try_handlers", "(", "self", ",", "handler_list", ",", "stanza", ",", "stanza_type", "=", "None", ")", ":", "# pylint: disable=W0212", "if", "stanza_type", "is", "None", ":", "stanza_type", "=", "stanza", ".", "stanza_type", "payload", "=", "stanza", ...
Search the handler list for handlers matching given stanza type and payload namespace. Run the handlers found ordering them by priority until the first one which returns `True`. :Parameters: - `handler_list`: list of available handlers - `stanza`: the stanza to handle - `stanza_type`: stanza type override (value of its "type" attribute) :return: result of the last handler or `False` if no handler was found.
[ "Search", "the", "handler", "list", "for", "handlers", "matching", "given", "stanza", "type", "and", "payload", "namespace", ".", "Run", "the", "handlers", "found", "ordering", "them", "by", "priority", "until", "the", "first", "one", "which", "returns", "True...
python
valid
MolSSI-BSE/basis_set_exchange
basis_set_exchange/cli/bse_handlers.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/cli/bse_handlers.py#L75-L89
def _bse_cli_get_basis(args): '''Handles the get-basis subcommand''' return api.get_basis( name=args.basis, elements=args.elements, version=args.version, fmt=args.fmt, uncontract_general=args.unc_gen, uncontract_spdf=args.unc_spdf, uncontract_segmented=args.unc_seg, make_general=args.make_gen, optimize_general=args.opt_gen, data_dir=args.data_dir, header=not args.noheader)
[ "def", "_bse_cli_get_basis", "(", "args", ")", ":", "return", "api", ".", "get_basis", "(", "name", "=", "args", ".", "basis", ",", "elements", "=", "args", ".", "elements", ",", "version", "=", "args", ".", "version", ",", "fmt", "=", "args", ".", "...
Handles the get-basis subcommand
[ "Handles", "the", "get", "-", "basis", "subcommand" ]
python
train
bcbio/bcbio-nextgen
bcbio/cwl/hpc.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L61-L69
def _load_custom_config(run_config): """Load custom configuration input HOCON file for cromwell. """ from pyhocon import ConfigFactory, HOCONConverter, ConfigTree conf = ConfigFactory.parse_file(run_config) out = {} if "database" in conf: out["database"] = HOCONConverter.to_hocon(ConfigTree({"database": conf.get_config("database")})) return out
[ "def", "_load_custom_config", "(", "run_config", ")", ":", "from", "pyhocon", "import", "ConfigFactory", ",", "HOCONConverter", ",", "ConfigTree", "conf", "=", "ConfigFactory", ".", "parse_file", "(", "run_config", ")", "out", "=", "{", "}", "if", "\"database\""...
Load custom configuration input HOCON file for cromwell.
[ "Load", "custom", "configuration", "input", "HOCON", "file", "for", "cromwell", "." ]
python
train
etcher-be/emiz
emiz/weather/mission_weather/mission_weather.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mission_weather/mission_weather.py#L119-L130
def _gauss(mean: int, sigma: int) -> int: """ Creates a variation from a base value Args: mean: base value sigma: gaussian sigma Returns: random value """ return int(random.gauss(mean, sigma))
[ "def", "_gauss", "(", "mean", ":", "int", ",", "sigma", ":", "int", ")", "->", "int", ":", "return", "int", "(", "random", ".", "gauss", "(", "mean", ",", "sigma", ")", ")" ]
Creates a variation from a base value Args: mean: base value sigma: gaussian sigma Returns: random value
[ "Creates", "a", "variation", "from", "a", "base", "value" ]
python
train
apache/incubator-mxnet
example/ssd/symbol/common.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L21-L55
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \ stride=(1,1), act_type="relu", use_batchnorm=False): """ wrapper for a small Convolution group Parameters: ---------- from_layer : mx.symbol continue on which layer name : str base name of the new layers num_filter : int how many filters to use in Convolution layer kernel : tuple (int, int) kernel size (h, w) pad : tuple (int, int) padding size (h, w) stride : tuple (int, int) stride size (h, w) act_type : str activation type, can be relu... use_batchnorm : bool whether to use batch normalization Returns: ---------- (conv, relu) mx.Symbols """ conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \ stride=stride, num_filter=num_filter, name="{}_conv".format(name)) if use_batchnorm: conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name)) relu = mx.symbol.Activation(data=conv, act_type=act_type, \ name="{}_{}".format(name, act_type)) return relu
[ "def", "conv_act_layer", "(", "from_layer", ",", "name", ",", "num_filter", ",", "kernel", "=", "(", "1", ",", "1", ")", ",", "pad", "=", "(", "0", ",", "0", ")", ",", "stride", "=", "(", "1", ",", "1", ")", ",", "act_type", "=", "\"relu\"", ",...
wrapper for a small Convolution group Parameters: ---------- from_layer : mx.symbol continue on which layer name : str base name of the new layers num_filter : int how many filters to use in Convolution layer kernel : tuple (int, int) kernel size (h, w) pad : tuple (int, int) padding size (h, w) stride : tuple (int, int) stride size (h, w) act_type : str activation type, can be relu... use_batchnorm : bool whether to use batch normalization Returns: ---------- (conv, relu) mx.Symbols
[ "wrapper", "for", "a", "small", "Convolution", "group" ]
python
train
WhereSoftwareGoesToDie/pymarquise
marquise/marquise.py
https://github.com/WhereSoftwareGoesToDie/pymarquise/blob/67e52df70c50ed53ad315a64fea430a9567e2b1b/marquise/marquise.py#L64-L82
def close(self): """Close the Marquise context, ensuring data is flushed and spool files are closed. This should always be closed explicitly, as there's no guarantees that it will happen when the instance is deleted. """ if self.marquise_ctx is None: self.__debug("Marquise handle is already closed, will do nothing.") # Multiple close() calls are okay. return self.__debug("Shutting down Marquise handle spooling to %s and %s" % (self.spool_path_points, self.spool_path_contents)) # At the time of writing this always succeeds (returns 0). MARQUISE_SHUTDOWN(self.marquise_ctx) # Signal that our context is no longer valid. self.marquise_ctx = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "marquise_ctx", "is", "None", ":", "self", ".", "__debug", "(", "\"Marquise handle is already closed, will do nothing.\"", ")", "# Multiple close() calls are okay.", "return", "self", ".", "__debug", "(", "\"...
Close the Marquise context, ensuring data is flushed and spool files are closed. This should always be closed explicitly, as there's no guarantees that it will happen when the instance is deleted.
[ "Close", "the", "Marquise", "context", "ensuring", "data", "is", "flushed", "and", "spool", "files", "are", "closed", "." ]
python
train