Dataset schema (column, dtype, and observed length range or class count):

column              dtype           range / classes
nwo                 stringlengths   5 - 106
sha                 stringlengths   40 - 40
path                stringlengths   4 - 174
language            stringclasses   1 value
identifier          stringlengths   1 - 140
parameters          stringlengths   0 - 87.7k
argument_list       stringclasses   1 value
return_statement    stringlengths   0 - 426k
docstring           stringlengths   0 - 64.3k
docstring_summary   stringlengths   0 - 26.3k
docstring_tokens    list
function            stringlengths   18 - 4.83M
function_tokens     list
url                 stringlengths   83 - 304
ChineseGLUE/ChineseGLUE
1591b85cf5427c2ff60f718d359ecb71d2b44879
baselines/models/bert/run_classifier.py
python
DataProcessor.get_labels
(self)
Gets the list of labels for this data set.
Gets the list of labels for this data set.
[ "Gets", "the", "list", "of", "labels", "for", "this", "data", "set", "." ]
def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError()
[ "def", "get_labels", "(", "self", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/ChineseGLUE/ChineseGLUE/blob/1591b85cf5427c2ff60f718d359ecb71d2b44879/baselines/models/bert/run_classifier.py#L197-L199
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/ultimatelistctrl.py
python
UltimateListItemAttr.GetFont
(self)
return self._font
Returns the currently set item font.
Returns the currently set item font.
[ "Returns", "the", "currently", "set", "item", "font", "." ]
def GetFont(self): """ Returns the currently set item font. """ return self._font
[ "def", "GetFont", "(", "self", ")", ":", "return", "self", ".", "_font" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/ultimatelistctrl.py#L1308-L1311
pydoit/doit
cf7edfbe73fafebd1b2a6f1d3be8b69fde41383d
doc/samples/task_name.py
python
task_hello
()
return { 'actions': ['echo hello'] }
say hello
say hello
[ "say", "hello" ]
def task_hello(): """say hello""" return { 'actions': ['echo hello'] }
[ "def", "task_hello", "(", ")", ":", "return", "{", "'actions'", ":", "[", "'echo hello'", "]", "}" ]
https://github.com/pydoit/doit/blob/cf7edfbe73fafebd1b2a6f1d3be8b69fde41383d/doc/samples/task_name.py#L1-L5
pydata/xarray
9226c7ac87b3eb246f7a7e49f8f0f23d68951624
xarray/core/rolling.py
python
Coarsen.construct
( self, window_dim=None, keep_attrs=None, **window_dim_kwargs, )
Convert this Coarsen object to a DataArray or Dataset, where the coarsening dimension is split or reshaped to two new dimensions. Parameters ---------- window_dim: mapping A mapping from existing dimension name to new dimension names. The size of the second dimension will be the length of the coarsening window. keep_attrs: bool, optional Preserve attributes if True **window_dim_kwargs : {dim: new_name, ...} The keyword arguments form of ``window_dim``. Returns ------- Dataset or DataArray with reshaped dimensions Examples -------- >>> da = xr.DataArray(np.arange(24), dims="time") >>> da.coarsen(time=12).construct(time=("year", "month")) <xarray.DataArray (year: 2, month: 12)> array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]]) Dimensions without coordinates: year, month See Also -------- DataArrayRolling.construct DatasetRolling.construct
Convert this Coarsen object to a DataArray or Dataset, where the coarsening dimension is split or reshaped to two new dimensions.
[ "Convert", "this", "Coarsen", "object", "to", "a", "DataArray", "or", "Dataset", "where", "the", "coarsening", "dimension", "is", "split", "or", "reshaped", "to", "two", "new", "dimensions", "." ]
def construct( self, window_dim=None, keep_attrs=None, **window_dim_kwargs, ): """ Convert this Coarsen object to a DataArray or Dataset, where the coarsening dimension is split or reshaped to two new dimensions. Parameters ---------- window_dim: mapping A mapping from existing dimension name to new dimension names. The size of the second dimension will be the length of the coarsening window. keep_attrs: bool, optional Preserve attributes if True **window_dim_kwargs : {dim: new_name, ...} The keyword arguments form of ``window_dim``. Returns ------- Dataset or DataArray with reshaped dimensions Examples -------- >>> da = xr.DataArray(np.arange(24), dims="time") >>> da.coarsen(time=12).construct(time=("year", "month")) <xarray.DataArray (year: 2, month: 12)> array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]]) Dimensions without coordinates: year, month See Also -------- DataArrayRolling.construct DatasetRolling.construct """ from .dataarray import DataArray from .dataset import Dataset window_dim = either_dict_or_kwargs( window_dim, window_dim_kwargs, "Coarsen.construct" ) if not window_dim: raise ValueError( "Either window_dim or window_dim_kwargs need to be specified." ) bad_new_dims = tuple( win for win, dims in window_dim.items() if len(dims) != 2 or isinstance(dims, str) ) if bad_new_dims: raise ValueError( f"Please provide exactly two dimension names for the following coarsening dimensions: {bad_new_dims}" ) if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) missing_dims = set(window_dim) - set(self.windows) if missing_dims: raise ValueError( f"'window_dim' must contain entries for all dimensions to coarsen. Missing {missing_dims}" ) extra_windows = set(self.windows) - set(window_dim) if extra_windows: raise ValueError( f"'window_dim' includes dimensions that will not be coarsened: {extra_windows}" ) reshaped = Dataset() if isinstance(self.obj, DataArray): obj = self.obj._to_temp_dataset() else: obj = self.obj reshaped.attrs = obj.attrs if keep_attrs else {} for key, var in obj.variables.items(): reshaped_dims = tuple( itertools.chain(*[window_dim.get(dim, [dim]) for dim in list(var.dims)]) ) if reshaped_dims != var.dims: windows = {w: self.windows[w] for w in window_dim if w in var.dims} reshaped_var, _ = var.coarsen_reshape(windows, self.boundary, self.side) attrs = var.attrs if keep_attrs else {} reshaped[key] = (reshaped_dims, reshaped_var, attrs) else: reshaped[key] = var should_be_coords = set(window_dim) & set(self.obj.coords) result = reshaped.set_coords(should_be_coords) if isinstance(self.obj, DataArray): return self.obj._from_temp_dataset(result) else: return result
[ "def", "construct", "(", "self", ",", "window_dim", "=", "None", ",", "keep_attrs", "=", "None", ",", "*", "*", "window_dim_kwargs", ",", ")", ":", "from", ".", "dataarray", "import", "DataArray", "from", ".", "dataset", "import", "Dataset", "window_dim", ...
https://github.com/pydata/xarray/blob/9226c7ac87b3eb246f7a7e49f8f0f23d68951624/xarray/core/rolling.py#L814-L915
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/galaxy/selenium/navigates_galaxy.py
python
NavigatesGalaxy.local_storage
(self, key: str, value: Union[float, str])
Method decorator to modify localStorage for the scope of the supplied context.
Method decorator to modify localStorage for the scope of the supplied context.
[ "Method", "decorator", "to", "modify", "localStorage", "for", "the", "scope", "of", "the", "supplied", "context", "." ]
def local_storage(self, key: str, value: Union[float, str]): """Method decorator to modify localStorage for the scope of the supplied context.""" self.driver.execute_script(f'''window.localStorage.setItem("{key}", {value});''') try: yield finally: self.driver.execute_script(f'''window.localStorage.removeItem("{key}");''')
[ "def", "local_storage", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Union", "[", "float", ",", "str", "]", ")", ":", "self", ".", "driver", ".", "execute_script", "(", "f'''window.localStorage.setItem(\"{key}\", {value});'''", ")", "try", ":", "...
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/selenium/navigates_galaxy.py#L228-L234
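A usage sketch for the record above: the yield implies the method is wrapped with contextlib.contextmanager upstream, so callers scope a localStorage key to a block. The test name and key below are illustrative, not from the source.

def test_with_scoped_local_storage(navigates_galaxy):
    # assumes local_storage is registered as a context manager
    with navigates_galaxy.local_storage("beta-flag", 1):
        ...  # window.localStorage has "beta-flag" set for this block only
    # on exit the key is removed again, even if the block raised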
wistbean/learn_python3_spider
73c873f4845f4385f097e5057407d03dd37a117b
stackoverflow/venv/lib/python3.6/site-packages/twisted/conch/ssh/service.py
python
SSHService.serviceStarted
(self)
called when the service is active on the transport.
called when the service is active on the transport.
[ "called", "when", "the", "service", "is", "active", "on", "the", "transport", "." ]
def serviceStarted(self): """ called when the service is active on the transport. """
[ "def", "serviceStarted", "(", "self", ")", ":" ]
https://github.com/wistbean/learn_python3_spider/blob/73c873f4845f4385f097e5057407d03dd37a117b/stackoverflow/venv/lib/python3.6/site-packages/twisted/conch/ssh/service.py#L20-L23
kanzure/nanoengineer
874e4c9f8a9190f093625b267f9767e19f82e6c4
cad/src/graphics/display_styles/DnaCylinderChunks.py
python
DnaCylinderChunks.drawchunk
(self, glpane, chunk, memo, highlighted)
Draw chunk in glpane in the whole-chunk display mode represented by this ChunkDisplayMode subclass. Assume we're already in chunk's local coordinate system (i.e. do all drawing using atom coordinates in chunk.basepos, not chunk.atpos). If highlighted is true, draw it in hover-highlighted form (but note that it may have already been drawn in unhighlighted form in the same frame, so normally the highlighted form should augment or obscure the unhighlighted form). Draw it as unselected, whether or not chunk.picked is true. See also self.drawchunk_selection_frame. (The reason that's a separate method is to permit future drawing optimizations when a chunk is selected or deselected but does not otherwise change in appearance or position.) If this drawing requires info about chunk which it is useful to precompute (as an optimization), that info should be computed by our compute_memo method and will be passed as the memo argument (whose format and content is whatever self.compute_memo returns). That info must not depend on the highlighted variable or on whether the chunk is selected.
Draw chunk in glpane in the whole-chunk display mode represented by this ChunkDisplayMode subclass.
[ "Draw", "chunk", "in", "glpane", "in", "the", "whole", "-", "chunk", "display", "mode", "represented", "by", "this", "ChunkDisplayMode", "subclass", "." ]
def drawchunk(self, glpane, chunk, memo, highlighted): """ Draw chunk in glpane in the whole-chunk display mode represented by this ChunkDisplayMode subclass. Assume we're already in chunk's local coordinate system (i.e. do all drawing using atom coordinates in chunk.basepos, not chunk.atpos). If highlighted is true, draw it in hover-highlighted form (but note that it may have already been drawn in unhighlighted form in the same frame, so normally the highlighted form should augment or obscure the unhighlighted form). Draw it as unselected, whether or not chunk.picked is true. See also self.drawchunk_selection_frame. (The reason that's a separate method is to permit future drawing optimizations when a chunk is selected or deselected but does not otherwise change in appearance or position.) If this drawing requires info about chunk which it is useful to precompute (as an optimization), that info should be computed by our compute_memo method and will be passed as the memo argument (whose format and content is whatever self.compute_memo returns). That info must not depend on the highlighted variable or on whether the chunk is selected. """ # --------------------------------------------------------------------- if not memo: # nothing to render return if self.dnaExperimentalMode > 0: # experimental models is drawn in drawchunk_realtime return positions, colors, radii, \ arrows, struts_cylinders, base_cartoons = memo # render the axis cylinder if chunk.isAxisChunk() and \ positions: # fixed bug 2877 (exception when "positions" # is set to None) - piotr 080516 n_points = len(positions) if self.dnaStyleAxisShape > 0: # spherical ends if self.dnaStyleAxisEndingStyle == 4: drawsphere(colors[1], positions[1], radii[1], 2) drawsphere(colors[n_points - 2], positions[n_points - 2], radii[n_points - 2], 2) # set polycone parameters gleSetJoinStyle(TUBE_JN_ANGLE | TUBE_NORM_PATH_EDGE | TUBE_JN_CAP | TUBE_CONTOUR_CLOSED) # draw the polycone if self.dnaStyleAxisColor == 1 \ or self.dnaStyleAxisColor == 2 \ or self.dnaStyleAxisColor == 3: # render discrete colors drawpolycone_multicolor([0, 0, 0, -2], positions, colors, radii) else: drawpolycone(colors[1], positions, radii) elif chunk.isStrandChunk(): # strands, struts and bases gleSetJoinStyle(TUBE_JN_ANGLE | TUBE_NORM_PATH_EDGE | TUBE_JN_CAP | TUBE_CONTOUR_CLOSED) if positions: if self.dnaStyleStrandsColor == 1: # opacity value == -2 is a flag enabling # the "GL_COLOR_MATERIAL" mode, the # color argument is ignored and colors array # is used instead ### positions, colors, radii = self._make_discrete_polycone(positions, colors, radii) drawpolycone_multicolor([0, 0, 0, -2], positions, colors, radii) else: drawpolycone(colors[1], positions, radii) n_points = len(positions) # draw the ending spheres drawsphere( colors[1], positions[1], radii[1], 2) drawsphere( colors[n_points - 2], positions[n_points - 2], radii[n_points - 2], 2) # draw the arrows for color, pos, rad in arrows: drawpolycone(color, pos, rad) # render struts for color, pos1, pos2, rad in struts_cylinders: drawcylinder(color, pos1, pos2, rad, True) # render nucleotides if self.dnaStyleBasesShape > 0: for color, a1pos, a2pos, a3pos, normal, bname in base_cartoons: if a1pos: if self.dnaStyleBasesShape == 1: # sugar spheres drawsphere(color, a1pos, self.dnaStyleBasesScale, 2) elif self.dnaStyleBasesShape == 2: if a2pos: # draw a schematic 'cartoon' shape aposn = a1pos + 0.50 * (a2pos - a1pos) bposn = a1pos + 0.66 * (a2pos - a1pos) cposn = a1pos + 0.75 * (a2pos - a1pos) drawcylinder(color, a1pos, bposn, 0.20 * self.dnaStyleBasesScale, True) if bname == 'G' or \ bname == 'A': # draw two purine rings drawcylinder(color, aposn - 0.25 * self.dnaStyleBasesScale * normal, aposn + 0.25 * self.dnaStyleBasesScale * normal, 0.7 * self.dnaStyleBasesScale, True) drawcylinder(color, cposn - 0.25 * self.dnaStyleBasesScale * normal, cposn + 0.25 * self.dnaStyleBasesScale * normal, 0.9 * self.dnaStyleBasesScale, True) else: drawcylinder(color, bposn - 0.25 * self.dnaStyleBasesScale * normal, bposn + 0.25 * self.dnaStyleBasesScale * normal, 0.9 * self.dnaStyleBasesScale, True)
[ "def", "drawchunk", "(", "self", ",", "glpane", ",", "chunk", ",", "memo", ",", "highlighted", ")", ":", "# ---------------------------------------------------------------------", "if", "not", "memo", ":", "# nothing to render", "return", "if", "self", ".", "dnaExperi...
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/graphics/display_styles/DnaCylinderChunks.py#L987-L1132
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/_vendor/requests/utils.py
python
parse_dict_header
(value)
return result
Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict
Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict:
[ "Parse", "lists", "of", "key", "value", "pairs", "as", "described", "by", "RFC", "2068", "Section", "2", "and", "convert", "them", "into", "a", "python", "dict", ":" ]
def parse_dict_header(value): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict """ result = {} for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result
[ "def", "parse_dict_header", "(", "value", ")", ":", "result", "=", "{", "}", "for", "item", "in", "_parse_list_header", "(", "value", ")", ":", "if", "'='", "not", "in", "item", ":", "result", "[", "item", "]", "=", "None", "continue", "name", ",", "...
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/_vendor/requests/utils.py#L360-L391
NoGameNoLife00/mybolg
afe17ea5bfe405e33766e5682c43a4262232ee12
libs/wtforms/fields/core.py
python
Field.process_data
(self, value)
Process the Python data applied to this field and store the result. This will be called during form construction by the form's `kwargs` or `obj` argument. :param value: The python object containing the value to process.
Process the Python data applied to this field and store the result.
[ "Process", "the", "Python", "data", "applied", "to", "this", "field", "and", "store", "the", "result", "." ]
def process_data(self, value): """ Process the Python data applied to this field and store the result. This will be called during form construction by the form's `kwargs` or `obj` argument. :param value: The python object containing the value to process. """ self.data = value
[ "def", "process_data", "(", "self", ",", "value", ")", ":", "self", ".", "data", "=", "value" ]
https://github.com/NoGameNoLife00/mybolg/blob/afe17ea5bfe405e33766e5682c43a4262232ee12/libs/wtforms/fields/core.py#L293-L302
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/Django/django/contrib/gis/geos/coordseq.py
python
GEOSCoordSeq.setY
(self, index, value)
Set Y with the value at the given index.
Set Y with the value at the given index.
[ "Set", "Y", "with", "the", "value", "at", "the", "given", "index", "." ]
def setY(self, index, value): "Set Y with the value at the given index." self.setOrdinate(1, index, value)
[ "def", "setY", "(", "self", ",", "index", ",", "value", ")", ":", "self", ".", "setOrdinate", "(", "1", ",", "index", ",", "value", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/contrib/gis/geos/coordseq.py#L106-L108
4shadoww/hakkuframework
409a11fc3819d251f86faa3473439f8c19066a21
lib/pyparsing.py
python
ParseResults.insert
(self, index, insStr)
Inserts new element at location index in the list of parsed tokens. Similar to ``list.insert()``. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
Inserts new element at location index in the list of parsed tokens.
[ "Inserts", "new", "element", "at", "location", "index", "in", "the", "list", "of", "parsed", "tokens", "." ]
def insert(self, index, insStr): """ Inserts new element at location index in the list of parsed tokens. Similar to ``list.insert()``. Example:: print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] # use a parse action to insert the parse location in the front of the parsed results def insert_locn(locn, tokens): tokens.insert(0, locn) print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary for name, occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
[ "def", "insert", "(", "self", ",", "index", ",", "insStr", ")", ":", "self", ".", "__toklist", ".", "insert", "(", "index", ",", "insStr", ")", "# fixup indices in token dictionary", "for", "name", ",", "occurrences", "in", "self", ".", "__tokdict", ".", "...
https://github.com/4shadoww/hakkuframework/blob/409a11fc3819d251f86faa3473439f8c19066a21/lib/pyparsing.py#L779-L798
entropy1337/infernal-twin
10995cd03312e39a48ade0f114ebb0ae3a711bb8
Modules/build/reportlab/src/reportlab/graphics/charts/axes.py
python
sample5d
()
return drawing
Sample drawing, xvalue/yvalue axes, y connected at left of x.
Sample drawing, xvalue/yvalue axes, y connected at left of x.
[ "Sample", "drawing", "xvalue", "/", "yvalue", "axes", "y", "connected", "at", "left", "of", "x", "." ]
def sample5d(): "Sample drawing, xvalue/yvalue axes, y connected at left of x." drawing = Drawing(400, 200) data = [(10, 20, 30, 42)] xAxis = XValueAxis() xAxis.setPosition(50, 50, 300) xAxis.configure(data) yAxis = YValueAxis() yAxis.setPosition(50, 50, 125) yAxis.joinAxis = xAxis yAxis.joinAxisMode = 'left' yAxis.configure(data) drawing.add(xAxis) drawing.add(yAxis) return drawing
[ "def", "sample5d", "(", ")", ":", "drawing", "=", "Drawing", "(", "400", ",", "200", ")", "data", "=", "[", "(", "10", ",", "20", ",", "30", ",", "42", ")", "]", "xAxis", "=", "XValueAxis", "(", ")", "xAxis", ".", "setPosition", "(", "50", ",",...
https://github.com/entropy1337/infernal-twin/blob/10995cd03312e39a48ade0f114ebb0ae3a711bb8/Modules/build/reportlab/src/reportlab/graphics/charts/axes.py#L2177-L2191
allegroai/clearml-server
bc2c2ebbfdcc43f56520630eb004278642f27309
apiserver/bll/project/project_bll.py
python
ProjectBLL.calc_own_contents
(cls, company: str, project_ids: Sequence[str])
Returns the amount of task/models per requested project Use separate aggregation calls on Task/Model instead of lookup aggregation on projects in order not to hit memory limits on large tasks
Returns the amount of task/models per requested project Use separate aggregation calls on Task/Model instead of lookup aggregation on projects in order not to hit memory limits on large tasks
[ "Returns", "the", "amount", "of", "task", "/", "models", "per", "requested", "project", "Use", "separate", "aggregation", "calls", "on", "Task", "/", "Model", "instead", "of", "lookup", "aggregation", "on", "projects", "in", "order", "not", "to", "hit", "mem...
def calc_own_contents(cls, company: str, project_ids: Sequence[str]) -> Dict[str, dict]: """ Returns the amount of task/models per requested project Use separate aggregation calls on Task/Model instead of lookup aggregation on projects in order not to hit memory limits on large tasks """ if not project_ids: return {} pipeline = [ { "$match": { "company": {"$in": [None, "", company]}, "project": {"$in": project_ids}, } }, { "$project": {"project": 1} }, { "$group": { "_id": "$project", "count": {"$sum": 1}, } } ] def get_agrregate_res(cls_: Type[AttributedDocument]) -> dict: return { data["_id"]: data["count"] for data in cls_.aggregate(pipeline) } with TimingContext("mongo", "get_security_groups"): tasks = get_agrregate_res(Task) models = get_agrregate_res(Model) return { pid: { "own_tasks": tasks.get(pid, 0), "own_models": models.get(pid, 0), } for pid in project_ids }
[ "def", "calc_own_contents", "(", "cls", ",", "company", ":", "str", ",", "project_ids", ":", "Sequence", "[", "str", "]", ")", "->", "Dict", "[", "str", ",", "dict", "]", ":", "if", "not", "project_ids", ":", "return", "{", "}", "pipeline", "=", "[",...
https://github.com/allegroai/clearml-server/blob/bc2c2ebbfdcc43f56520630eb004278642f27309/apiserver/bll/project/project_bll.py#L677-L719
jab/bidict
9441bb2e8c40349f6353639ab10146b6aacacf5b
bidict/_frozenordered.py
python
FrozenOrderedBidict.values
(self)
return self._invm._fwdm.keys()
A set-like object providing a view on the contained values. See :meth:`bidict.BidictBase.values` for more info.
A set-like object providing a view on the contained values.
[ "A", "set", "-", "like", "object", "providing", "a", "view", "on", "the", "contained", "values", "." ]
def values(self) -> KeysView[VT]: """A set-like object providing a view on the contained values. See :meth:`bidict.BidictBase.values` for more info. """ return self._invm._fwdm.keys()
[ "def", "values", "(", "self", ")", "->", "KeysView", "[", "VT", "]", ":", "return", "self", ".", "_invm", ".", "_fwdm", ".", "keys", "(", ")" ]
https://github.com/jab/bidict/blob/9441bb2e8c40349f6353639ab10146b6aacacf5b/bidict/_frozenordered.py#L66-L71
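A small sketch of why this view is set-like: the values of a bidict are the keys of its inverse, so the method can hand back a KeysView. This assumes a bidict release that still ships FrozenOrderedBidict (later versions dropped it once frozenbidict preserved order itself).

from bidict import FrozenOrderedBidict

b = FrozenOrderedBidict({"one": 1, "two": 2})
vals = b.values()      # a KeysView over the inverse mapping
print(2 in vals)       # True: O(1) set-like membership
print(list(vals))      # [1, 2], insertion order preserved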
NVIDIA/semantic-segmentation
7726b144c2cc0b8e09c67eabb78f027efdf3f0fa
datasets/uniform.py
python
build_epoch
(imgs, centroids, num_classes, train)
return imgs_uniform
Generate an epoch of crops using uniform sampling. Needs to be called every epoch. Will not apply uniform sampling if not train or class uniform is off. Inputs: imgs - list of imgs centroids - list of class centroids num_classes - number of classes class_uniform_pct: % of uniform images in one epoch Outputs: imgs - list of images to use this epoch
Generate an epoch of crops using uniform sampling. Needs to be called every epoch. Will not apply uniform sampling if not train or class uniform is off.
[ "Generate", "an", "epoch", "of", "crops", "using", "uniform", "sampling", ".", "Needs", "to", "be", "called", "every", "epoch", ".", "Will", "not", "apply", "uniform", "sampling", "if", "not", "train", "or", "class", "uniform", "is", "off", "." ]
def build_epoch(imgs, centroids, num_classes, train): """ Generate an epoch of crops using uniform sampling. Needs to be called every epoch. Will not apply uniform sampling if not train or class uniform is off. Inputs: imgs - list of imgs centroids - list of class centroids num_classes - number of classes class_uniform_pct: % of uniform images in one epoch Outputs: imgs - list of images to use this epoch """ class_uniform_pct = cfg.DATASET.CLASS_UNIFORM_PCT if not (train and class_uniform_pct): return imgs logx.msg("Class Uniform Percentage: {}".format(str(class_uniform_pct))) num_epoch = int(len(imgs)) logx.msg('Class Uniform items per Epoch: {}'.format(str(num_epoch))) num_per_class = int((num_epoch * class_uniform_pct) / num_classes) class_uniform_count = num_per_class * num_classes num_rand = num_epoch - class_uniform_count # create random crops imgs_uniform = random_sampling(imgs, num_rand) # now add uniform sampling for class_id in range(num_classes): msg = "cls {} len {}".format(class_id, len(centroids[class_id])) logx.msg(msg) for class_id in range(num_classes): if cfg.DATASET.CLASS_UNIFORM_BIAS is not None: bias = cfg.DATASET.CLASS_UNIFORM_BIAS[class_id] num_per_class_biased = int(num_per_class * bias) else: num_per_class_biased = num_per_class centroid_len = len(centroids[class_id]) if centroid_len == 0: pass else: class_centroids = random_sampling(centroids[class_id], num_per_class_biased) imgs_uniform.extend(class_centroids) return imgs_uniform
[ "def", "build_epoch", "(", "imgs", ",", "centroids", ",", "num_classes", ",", "train", ")", ":", "class_uniform_pct", "=", "cfg", ".", "DATASET", ".", "CLASS_UNIFORM_PCT", "if", "not", "(", "train", "and", "class_uniform_pct", ")", ":", "return", "imgs", "lo...
https://github.com/NVIDIA/semantic-segmentation/blob/7726b144c2cc0b8e09c67eabb78f027efdf3f0fa/datasets/uniform.py#L278-L324
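To make the sampling split concrete, here is the arithmetic the function performs, with illustrative numbers (1000 images, 19 classes, and CLASS_UNIFORM_PCT = 0.5 are assumptions, not values from the source):

num_epoch = 1000          # len(imgs), illustrative
class_uniform_pct = 0.5   # cfg.DATASET.CLASS_UNIFORM_PCT, illustrative
num_classes = 19

num_per_class = int((num_epoch * class_uniform_pct) / num_classes)  # 26
class_uniform_count = num_per_class * num_classes                   # 494
num_rand = num_epoch - class_uniform_count                          # 506
# -> 506 random crops plus about 26 centroid-based crops per class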
gkrizek/bash-lambda-layer
703b0ade8174022d44779d823172ab7ac33a5505
bin/docutils/transforms/frontmatter.py
python
TitlePromoter.candidate_index
(self, node)
Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found.
Find and return the promotion candidate and its index.
[ "Find", "and", "return", "the", "promotion", "candidate", "and", "its", "index", "." ]
def candidate_index(self, node): """ Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found. """ index = node.first_child_not_matching_class( nodes.PreBibliographic) if index is None or len(node) > (index + 1) or \ not isinstance(node[index], nodes.section): return None, None else: return node[index], index
[ "def", "candidate_index", "(", "self", ",", "node", ")", ":", "index", "=", "node", ".", "first_child_not_matching_class", "(", "nodes", ".", "PreBibliographic", ")", "if", "index", "is", "None", "or", "len", "(", "node", ")", ">", "(", "index", "+", "1"...
https://github.com/gkrizek/bash-lambda-layer/blob/703b0ade8174022d44779d823172ab7ac33a5505/bin/docutils/transforms/frontmatter.py#L123-L135
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/integrals/meijerint.py
python
_mytype
(f, x)
Create a hashable entity describing the type of f.
Create a hashable entity describing the type of f.
[ "Create", "a", "hashable", "entity", "describing", "the", "type", "of", "f", "." ]
def _mytype(f, x): """ Create a hashable entity describing the type of f. """ if x not in f.free_symbols: return () elif f.is_Function: return (type(f),) else: types = [_mytype(a, x) for a in f.args] res = [] for t in types: res += list(t) res.sort() return tuple(res)
[ "def", "_mytype", "(", "f", ",", "x", ")", ":", "if", "x", "not", "in", "f", ".", "free_symbols", ":", "return", "(", ")", "elif", "f", ".", "is_Function", ":", "return", "(", "type", "(", "f", ")", ",", ")", "else", ":", "types", "=", "[", "...
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/integrals/meijerint.py#L299-L311
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/tkinter/tix.py
python
TList.active_clear
(self)
[]
def active_clear(self): self.tk.call(self._w, 'active', 'clear')
[ "def", "active_clear", "(", "self", ")", ":", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'active'", ",", "'clear'", ")" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/tkinter/tix.py#L1432-L1433
reviewboard/reviewboard
7395902e4c181bcd1d633f61105012ffb1d18e1b
reviewboard/scmtools/cvs.py
python
CVSClient.collapse_keywords
(self, data)
return re.sub(br'\$(%s):([^\$\n\r]*)\$' % b'|'.join(self.keywords), br'$\1$', data, flags=re.IGNORECASE)
Collapse CVS/RCS keywords in string. CVS allows for several keywords (such as ``$Id$`` and ``$Revision$``) to be expanded, though these keywords are limited to a fixed set (and associated aliases) and must be enabled per-file. When we cat a file on CVS, the keywords come back collapsed, but the diffs uploaded may have expanded keywords. We use this function to collapse them back down in order to be able to apply the patch. Args: data (bytes): The file contents. Return: bytes: The resulting file contents with all keywords collapsed.
Collapse CVS/RCS keywords in string.
[ "Collapse", "CVS", "/", "RCS", "keywords", "in", "string", "." ]
def collapse_keywords(self, data): """Collapse CVS/RCS keywords in string. CVS allows for several keywords (such as ``$Id$`` and ``$Revision$``) to be expanded, though these keywords are limited to a fixed set (and associated aliases) and must be enabled per-file. When we cat a file on CVS, the keywords come back collapsed, but the diffs uploaded may have expanded keywords. We use this function to collapse them back down in order to be able to apply the patch. Args: data (bytes): The file contents. Return: bytes: The resulting file contents with all keywords collapsed. """ return re.sub(br'\$(%s):([^\$\n\r]*)\$' % b'|'.join(self.keywords), br'$\1$', data, flags=re.IGNORECASE)
[ "def", "collapse_keywords", "(", "self", ",", "data", ")", ":", "return", "re", ".", "sub", "(", "br'\\$(%s):([^\\$\\n\\r]*)\\$'", "%", "b'|'", ".", "join", "(", "self", ".", "keywords", ")", ",", "br'$\\1$'", ",", "data", ",", "flags", "=", "re", ".", ...
https://github.com/reviewboard/reviewboard/blob/7395902e4c181bcd1d633f61105012ffb1d18e1b/reviewboard/scmtools/cvs.py#L694-L716
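The substitution is self-contained enough to demonstrate outside the class; the keyword list below is an assumed subset (the real self.keywords also carries aliases):

import re

keywords = [b'Id', b'Revision']  # illustrative subset of self.keywords
data = b'$Id: foo.c,v 1.2 2024/01/01 12:00:00 author Exp $\n'
collapsed = re.sub(br'\$(%s):([^\$\n\r]*)\$' % b'|'.join(keywords),
                   br'$\1$', data, flags=re.IGNORECASE)
print(collapsed)  # b'$Id$\n'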
epi052/recon-pipeline
7659658ec706ff7a523231ca5bf04ec464b5ae49
pipeline/models/db_manager.py
python
DBManager.get_endpoints_by_ip_or_hostname
(self, ip_or_host)
return endpoints
Simple helper that returns all Endpoints filtered by ip or hostname
Simple helper that returns all Endpoints filtered by ip or hostname
[ "Simple", "helper", "that", "returns", "all", "Endpoints", "filtered", "by", "ip", "or", "hostname" ]
def get_endpoints_by_ip_or_hostname(self, ip_or_host): """ Simple helper that returns all Endpoints filtered by ip or hostname """ endpoints = list() tmp_endpoints = self.session.query(Endpoint).filter(Endpoint.url.contains(ip_or_host)).all() for ep in tmp_endpoints: parsed_url = urlparse(ep.url) if parsed_url.hostname == ip_or_host: endpoints.append(ep) return endpoints
[ "def", "get_endpoints_by_ip_or_hostname", "(", "self", ",", "ip_or_host", ")", ":", "endpoints", "=", "list", "(", ")", "tmp_endpoints", "=", "self", ".", "session", ".", "query", "(", "Endpoint", ")", ".", "filter", "(", "Endpoint", ".", "url", ".", "cont...
https://github.com/epi052/recon-pipeline/blob/7659658ec706ff7a523231ca5bf04ec464b5ae49/pipeline/models/db_manager.py#L117-L128
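The second pass exists because the SQL contains() filter is a substring match; re-checking urlparse().hostname drops near-miss hosts. A standalone illustration with made-up URLs:

from urllib.parse import urlparse

urls = ["http://10.0.0.1/admin", "http://10.0.0.10/login"]
ip_or_host = "10.0.0.1"

# substring filtering alone would keep both URLs ("10.0.0.1" is a
# prefix of "10.0.0.10"); the hostname check keeps only the exact match
exact = [u for u in urls if urlparse(u).hostname == ip_or_host]
print(exact)  # ['http://10.0.0.1/admin']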
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/multiprocessing/popen_spawn_win32.py
python
Popen.wait
(self, timeout=None)
return self.returncode
[]
def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _winapi.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _winapi.WaitForSingleObject(int(self._handle), msecs) if res == _winapi.WAIT_OBJECT_0: code = _winapi.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode
[ "def", "wait", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "returncode", "is", "None", ":", "if", "timeout", "is", "None", ":", "msecs", "=", "_winapi", ".", "INFINITE", "else", ":", "msecs", "=", "max", "(", "0", ",", ...
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/multiprocessing/popen_spawn_win32.py#L101-L115
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/pathlib.py
python
Path.touch
(self, mode=0o666, exist_ok=True)
Create this file with the given access mode, if it doesn't exist.
Create this file with the given access mode, if it doesn't exist.
[ "Create", "this", "file", "with", "the", "given", "access", "mode", "if", "it", "doesn", "t", "exist", "." ]
def touch(self, mode=0o666, exist_ok=True): """ Create this file with the given access mode, if it doesn't exist. """ if self._closed: self._raise_closed() if exist_ok: # First try to bump modification time # Implementation note: GNU touch uses the UTIME_NOW option of # the utimensat() / futimens() functions. try: self._accessor.utime(self, None) except OSError: # Avoid exception chaining pass else: return flags = os.O_CREAT | os.O_WRONLY if not exist_ok: flags |= os.O_EXCL fd = self._raw_open(flags, mode) os.close(fd)
[ "def", "touch", "(", "self", ",", "mode", "=", "0o666", ",", "exist_ok", "=", "True", ")", ":", "if", "self", ".", "_closed", ":", "self", ".", "_raise_closed", "(", ")", "if", "exist_ok", ":", "# First try to bump modification time", "# Implementation note: G...
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/pathlib.py#L1228-L1249
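Stdlib usage of the method above, showing both branches of exist_ok:

from pathlib import Path

p = Path("example.txt")
p.touch()                # creates the file, or just bumps mtime if it exists
p.touch()                # fine: exist_ok=True is the default
p.touch(exist_ok=False)  # raises FileExistsError now that the file exists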
brython-dev/brython
9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3
www/src/Lib/email/message.py
python
Message.set_type
(self, type, header='Content-Type', requote=True)
Set the main type and subtype for the Content-Type header. type must be a string in the form "maintype/subtype", otherwise a ValueError is raised. This method replaces the Content-Type header, keeping all the parameters in place. If requote is False, this leaves the existing header's quoting as is. Otherwise, the parameters will be quoted (the default). An alternative header can be specified in the header argument. When the Content-Type header is set, we'll always also add a MIME-Version header.
Set the main type and subtype for the Content-Type header.
[ "Set", "the", "main", "type", "and", "subtype", "for", "the", "Content", "-", "Type", "header", "." ]
def set_type(self, type, header='Content-Type', requote=True): """Set the main type and subtype for the Content-Type header. type must be a string in the form "maintype/subtype", otherwise a ValueError is raised. This method replaces the Content-Type header, keeping all the parameters in place. If requote is False, this leaves the existing header's quoting as is. Otherwise, the parameters will be quoted (the default). An alternative header can be specified in the header argument. When the Content-Type header is set, we'll always also add a MIME-Version header. """ # BAW: should we be strict? if not type.count('/') == 1: raise ValueError # Set the Content-Type, you get a MIME-Version if header.lower() == 'content-type': del self['mime-version'] self['MIME-Version'] = '1.0' if header not in self: self[header] = type return params = self.get_params(header=header, unquote=requote) del self[header] self[header] = type # Skip the first param; it's the old type. for p, v in params[1:]: self.set_param(p, v, header, requote)
[ "def", "set_type", "(", "self", ",", "type", ",", "header", "=", "'Content-Type'", ",", "requote", "=", "True", ")", ":", "# BAW: should we be strict?", "if", "not", "type", ".", "count", "(", "'/'", ")", "==", "1", ":", "raise", "ValueError", "# Set the C...
https://github.com/brython-dev/brython/blob/9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3/www/src/Lib/email/message.py#L774-L804
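A runnable stdlib example of the behavior the docstring describes: existing parameters survive the type swap, and a MIME-Version header appears:

from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="utf-8"'
msg.set_type('text/html')

print(msg['Content-Type'])   # text/html; charset="utf-8"
print(msg['MIME-Version'])   # 1.0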
Pymol-Scripts/Pymol-script-repo
bcd7bb7812dc6db1595953dfa4471fa15fb68c77
modules/ADT/mglutil/math/transformation.py
python
UnitQuaternion.getAxisAndAngleDegres
(self)
return xyz, angle
Given a quaternion, compute axis and angle.
Given a quaternion, compute axis and angle.
[ "Given", "a", "quaternion", "compute", "axis", "and", "angle", "." ]
def getAxisAndAngleDegres(self): """Given a quaternion, compute axis and angle. """ theta = N.arccos(self.real) angle = 360*theta/N.pi xyz = self.pure/N.sin(theta) return xyz, angle
[ "def", "getAxisAndAngleDegres", "(", "self", ")", ":", "theta", "=", "N", ".", "arccos", "(", "self", ".", "real", ")", "angle", "=", "360", "*", "theta", "/", "N", ".", "pi", "xyz", "=", "self", ".", "pure", "/", "N", ".", "sin", "(", "theta", ...
https://github.com/Pymol-Scripts/Pymol-script-repo/blob/bcd7bb7812dc6db1595953dfa4471fa15fb68c77/modules/ADT/mglutil/math/transformation.py#L169-L175
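A worked check of the math: for a unit quaternion q = (cos(a/2), sin(a/2)*axis) encoding a rotation by angle a, arccos(real) recovers a/2, so 360*theta/pi converts a to degrees. Plain numpy stands in for the module's "N" alias:

import numpy as np  # stands in for the module's "N" alias

# unit quaternion for a 90-degree rotation about z
real = np.cos(np.pi / 4)
pure = np.sin(np.pi / 4) * np.array([0.0, 0.0, 1.0])

theta = np.arccos(real)        # pi/4, i.e. half the rotation angle
angle = 360 * theta / np.pi    # 90.0 degrees
xyz = pure / np.sin(theta)     # array([0., 0., 1.]), the rotation axis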
twisted/twisted
dee676b040dd38b847ea6fb112a712cb5e119490
src/twisted/web/microdom.py
python
getElementsByTagName
(iNode, name)
return matches
Return a list of all child elements of C{iNode} with a name matching C{name}. Note that this implementation does not conform to the DOM Level 1 Core specification because it may return C{iNode}. @param iNode: An element at which to begin searching. If C{iNode} has a name matching C{name}, it will be included in the result. @param name: A C{str} giving the name of the elements to return. @return: A C{list} of direct or indirect child elements of C{iNode} with the name C{name}. This may include C{iNode}.
Return a list of all child elements of C{iNode} with a name matching C{name}.
[ "Return", "a", "list", "of", "all", "child", "elements", "of", "C", "{", "iNode", "}", "with", "a", "name", "matching", "C", "{", "name", "}", "." ]
def getElementsByTagName(iNode, name): """ Return a list of all child elements of C{iNode} with a name matching C{name}. Note that this implementation does not conform to the DOM Level 1 Core specification because it may return C{iNode}. @param iNode: An element at which to begin searching. If C{iNode} has a name matching C{name}, it will be included in the result. @param name: A C{str} giving the name of the elements to return. @return: A C{list} of direct or indirect child elements of C{iNode} with the name C{name}. This may include C{iNode}. """ matches = [] matches_append = matches.append # faster lookup. don't do this at home slice = [iNode] while len(slice) > 0: c = slice.pop(0) if c.nodeName == name: matches_append(c) slice[:0] = c.childNodes return matches
[ "def", "getElementsByTagName", "(", "iNode", ",", "name", ")", ":", "matches", "=", "[", "]", "matches_append", "=", "matches", ".", "append", "# faster lookup. don't do this at home", "slice", "=", "[", "iNode", "]", "while", "len", "(", "slice", ")", ">", ...
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/web/microdom.py#L31-L55
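Despite the pop(0) queue idiom, splicing children at the front (slice[:0] = c.childNodes) makes this a pre-order depth-first walk, not breadth-first. A minimal stand-in (Node is a hypothetical stub, not the microdom class) shows the behavior:

class Node:
    def __init__(self, nodeName, childNodes=()):
        self.nodeName = nodeName
        self.childNodes = list(childNodes)

def get_elements_by_tag_name(iNode, name):
    matches = []
    pending = [iNode]
    while pending:
        c = pending.pop(0)
        if c.nodeName == name:
            matches.append(c)
        pending[:0] = c.childNodes   # children jump the queue: depth-first
    return matches

tree = Node("a", [Node("b", [Node("a")]), Node("a")])
print(len(get_elements_by_tag_name(tree, "a")))  # 3, including the root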
number5/cloud-init
19948dbaf40309355e1a2dbef116efb0ce66245c
cloudinit/config/schema.py
python
_get_property_doc
(schema: dict, prefix=" ")
return "\n\n".join(properties)
Return restructured text describing the supported schema properties.
Return restructured text describing the supported schema properties.
[ "Return", "restructured", "text", "describing", "the", "supported", "schema", "properties", "." ]
def _get_property_doc(schema: dict, prefix=" ") -> str: """Return restructured text describing the supported schema properties.""" new_prefix = prefix + " " properties = [] property_keys = [ schema.get("properties", {}), schema.get("patternProperties", {}), ] for props in property_keys: for prop_key, prop_config in props.items(): # Define prop_name and description for SCHEMA_PROPERTY_TMPL description = prop_config.get("description", "") # Define prop_name and description for SCHEMA_PROPERTY_TMPL label = prop_config.get("label", prop_key) properties.append( SCHEMA_PROPERTY_TMPL.format( prefix=prefix, prop_name=label, description=_parse_description(description, prefix), prop_type=_get_property_type(prop_config), ) ) items = prop_config.get("items") if items: if isinstance(items, list): for item in items: properties.append( _get_property_doc(item, prefix=new_prefix) ) elif isinstance(items, dict) and ( items.get("properties") or items.get("patternProperties") ): properties.append( SCHEMA_LIST_ITEM_TMPL.format( prefix=new_prefix, prop_name=label ) ) new_prefix += " " properties.append( _get_property_doc(items, prefix=new_prefix) ) if ( "properties" in prop_config or "patternProperties" in prop_config ): properties.append( _get_property_doc(prop_config, prefix=new_prefix) ) return "\n\n".join(properties)
[ "def", "_get_property_doc", "(", "schema", ":", "dict", ",", "prefix", "=", "\" \"", ")", "->", "str", ":", "new_prefix", "=", "prefix", "+", "\" \"", "properties", "=", "[", "]", "property_keys", "=", "[", "schema", ".", "get", "(", "\"properties\""...
https://github.com/number5/cloud-init/blob/19948dbaf40309355e1a2dbef116efb0ce66245c/cloudinit/config/schema.py#L452-L502
gem/oq-engine
1bdb88f3914e390abcbd285600bfd39477aae47c
openquake/hazardlib/gsim/can15/utils.py
python
get_equivalent_distance_inslab
(mag, repi, hslab)
return rjb, rrup
:param float mag: Magnitude :param repi: A :class:`numpy.ndarray` instance containing repi values :param float hslab: Depth of the slab
:param float mag: Magnitude :param repi: A :class:`numpy.ndarray` instance containing repi values :param float hslab: Depth of the slab
[ ":", "param", "float", "mag", ":", "Magnitude", ":", "param", "repi", ":", "A", ":", "class", ":", "numpy", ".", "ndarray", "instance", "containing", "repi", "values", ":", "param", "float", "hslab", ":", "Depth", "of", "the", "slab" ]
def get_equivalent_distance_inslab(mag, repi, hslab): """ :param float mag: Magnitude :param repi: A :class:`numpy.ndarray` instance containing repi values :param float hslab: Depth of the slab """ area = 10**(-3.225+0.89*mag) radius = (area / scipy.constants.pi)**0.5 rjb = np.max([repi-radius, np.zeros_like(repi)], axis=0) rrup = (rjb**2+hslab**2)**0.5 return rjb, rrup
[ "def", "get_equivalent_distance_inslab", "(", "mag", ",", "repi", ",", "hslab", ")", ":", "area", "=", "10", "**", "(", "-", "3.225", "+", "0.89", "*", "mag", ")", "radius", "=", "(", "area", "/", "scipy", ".", "constants", ".", "pi", ")", "**", "0...
https://github.com/gem/oq-engine/blob/1bdb88f3914e390abcbd285600bfd39477aae47c/openquake/hazardlib/gsim/can15/utils.py#L92-L105
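A worked numeric pass through the formulas (magnitude, depth, and distances below are illustrative): rupture area follows 10**(-3.225 + 0.89*mag), the equivalent radius is sqrt(area/pi), and rrup combines rjb with the slab depth:

import numpy as np
import scipy.constants

mag, hslab = 7.0, 50.0                  # illustrative inputs
repi = np.array([10.0, 100.0])

area = 10 ** (-3.225 + 0.89 * mag)      # ~1012 km^2 rupture area
radius = (area / scipy.constants.pi) ** 0.5                 # ~17.9 km
rjb = np.max([repi - radius, np.zeros_like(repi)], axis=0)  # [0.0, ~82.1]
rrup = (rjb ** 2 + hslab ** 2) ** 0.5                       # [50.0, ~96.1]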
hzy46/Deep-Learning-21-Examples
15c2d9edccad090cd67b033f24a43c544e5cba3e
chapter_7/preprocessing/vgg_preprocessing.py
python
_smallest_size_at_least
(height, width, target_height, target_width)
return new_height, new_width
Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: and int32 scalar tensor indicating the new width.
Computes new shape with the smallest side equal to `smallest_side`.
[ "Computes", "new", "shape", "with", "the", "smallest", "side", "equal", "to", "smallest_side", "." ]
def _smallest_size_at_least(height, width, target_height, target_width): """Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. smallest_side: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: and int32 scalar tensor indicating the new width. """ target_height = tf.convert_to_tensor(target_height, dtype=tf.int32) target_width = tf.convert_to_tensor(target_width, dtype=tf.int32) height = tf.to_float(height) width = tf.to_float(width) target_height = tf.to_float(target_height) target_width = tf.to_float(target_width) scale = tf.cond(tf.greater(target_height / height, target_width / width), lambda: target_height / height, lambda: target_width / width) new_height = tf.to_int32(tf.round(height * scale)) new_width = tf.to_int32(tf.round(width * scale)) return new_height, new_width
[ "def", "_smallest_size_at_least", "(", "height", ",", "width", ",", "target_height", ",", "target_width", ")", ":", "target_height", "=", "tf", ".", "convert_to_tensor", "(", "target_height", ",", "dtype", "=", "tf", ".", "int32", ")", "target_width", "=", "tf...
https://github.com/hzy46/Deep-Learning-21-Examples/blob/15c2d9edccad090cd67b033f24a43c544e5cba3e/chapter_7/preprocessing/vgg_preprocessing.py#L250-L279
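The TF1 graph ops reduce to a simple scalar rule; a pure-Python check with an assumed 640x480 input and 256x256 target:

height, width = 480.0, 640.0                # illustrative input size
target_height, target_width = 256.0, 256.0

# pick the larger ratio so both sides end up at or above their targets
scale = max(target_height / height, target_width / width)  # 0.5333...
new_height = round(height * scale)          # 256
new_width = round(width * scale)            # 341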
Azure/azure-devops-cli-extension
11334cd55806bef0b99c3bee5a438eed71e44037
azure-devops/azext_devops/devops_sdk/v5_1/licensing/licensing_client.py
python
LicensingClient.get_account_entitlements
(self, top=None, skip=None)
return self._deserialize('[AccountEntitlement]', self._unwrap_collection(response))
GetAccountEntitlements. [Preview API] Gets top (top) entitlements for users in the account from offset (skip) order by DateCreated ASC :param int top: number of accounts to return :param int skip: records to skip, null is interpreted as 0 :rtype: [AccountEntitlement]
GetAccountEntitlements. [Preview API] Gets top (top) entitlements for users in the account from offset (skip) order by DateCreated ASC :param int top: number of accounts to return :param int skip: records to skip, null is interpreted as 0 :rtype: [AccountEntitlement]
[ "GetAccountEntitlements", ".", "[", "Preview", "API", "]", "Gets", "top", "(", "top", ")", "entitlements", "for", "users", "in", "the", "account", "from", "offset", "(", "skip", ")", "order", "by", "DateCreated", "ASC", ":", "param", "int", "top", ":", "...
def get_account_entitlements(self, top=None, skip=None): """GetAccountEntitlements. [Preview API] Gets top (top) entitlements for users in the account from offset (skip) order by DateCreated ASC :param int top: number of accounts to return :param int skip: records to skip, null is interpreted as 0 :rtype: [AccountEntitlement] """ query_parameters = {} if top is not None: query_parameters['top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='ea37be6f-8cd7-48dd-983d-2b72d6e3da0f', version='5.1-preview.1', query_parameters=query_parameters) return self._deserialize('[AccountEntitlement]', self._unwrap_collection(response))
[ "def", "get_account_entitlements", "(", "self", ",", "top", "=", "None", ",", "skip", "=", "None", ")", ":", "query_parameters", "=", "{", "}", "if", "top", "is", "not", "None", ":", "query_parameters", "[", "'top'", "]", "=", "self", ".", "_serialize", ...
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_1/licensing/licensing_client.py#L119-L135
SecurityInnovation/PGPy
955d1669472b7b182571f686ee312435f93033e4
pgpy/pgp.py
python
PGPKey.decrypt
(self, message)
return decmsg
Decrypt a PGPMessage using this key. :param message: An encrypted :py:obj:`PGPMessage` :raises: :py:exc:`~errors.PGPError` if the key is not private, or protected but not unlocked. :raises: :py:exc:`~errors.PGPDecryptionError` if decryption fails for any other reason. :returns: A new :py:obj:`PGPMessage` with the decrypted contents of ``message``.
Decrypt a PGPMessage using this key.
[ "Decrypt", "a", "PGPMessage", "using", "this", "key", "." ]
def decrypt(self, message): """ Decrypt a PGPMessage using this key. :param message: An encrypted :py:obj:`PGPMessage` :raises: :py:exc:`~errors.PGPError` if the key is not private, or protected but not unlocked. :raises: :py:exc:`~errors.PGPDecryptionError` if decryption fails for any other reason. :returns: A new :py:obj:`PGPMessage` with the decrypted contents of ``message``. """ if not message.is_encrypted: warnings.warn("This message is not encrypted", stacklevel=3) return message if self.fingerprint.keyid not in message.encrypters: sks = set(self.subkeys) mis = set(message.encrypters) if sks & mis: skid = list(sks & mis)[0] return self.subkeys[skid].decrypt(message) raise PGPError("Cannot decrypt the provided message with this key") pkesk = next(pk for pk in message._sessionkeys if pk.pkalg == self.key_algorithm and pk.encrypter == self.fingerprint.keyid) alg, key = pkesk.decrypt_sk(self._key) # now that we have the symmetric cipher used and the key, we can decrypt the actual message decmsg = PGPMessage() decmsg.parse(message.message.decrypt(key, alg)) return decmsg
[ "def", "decrypt", "(", "self", ",", "message", ")", ":", "if", "not", "message", ".", "is_encrypted", ":", "warnings", ".", "warn", "(", "\"This message is not encrypted\"", ",", "stacklevel", "=", "3", ")", "return", "message", "if", "self", ".", "fingerpri...
https://github.com/SecurityInnovation/PGPy/blob/955d1669472b7b182571f686ee312435f93033e4/pgpy/pgp.py#L2473-L2502
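A hedged end-to-end sketch with pgpy; the file names and passphrase are placeholders, and from_file comes from the armored-loader classmethods shared by PGPKey and PGPMessage:

import pgpy

key, _ = pgpy.PGPKey.from_file("private_key.asc")   # placeholder path
enc = pgpy.PGPMessage.from_file("message.asc")      # placeholder path

with key.unlock("passphrase"):    # only needed if the key is protected
    decrypted = key.decrypt(enc)
print(decrypted.message)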
androguard/androguard
8d091cbb309c0c50bf239f805cc1e0931b8dcddc
androguard/core/bytecodes/apk.py
python
APK.get_android_manifest_xml
(self)
Return the parsed xml object which corresponds to the AndroidManifest.xml file :rtype: :class:`~lxml.etree.Element`
Return the parsed xml object which corresponds to the AndroidManifest.xml file
[ "Return", "the", "parsed", "xml", "object", "which", "corresponds", "to", "the", "AndroidManifest", ".", "xml", "file" ]
def get_android_manifest_xml(self): """ Return the parsed xml object which corresponds to the AndroidManifest.xml file :rtype: :class:`~lxml.etree.Element` """ try: return self.xml["AndroidManifest.xml"] except KeyError: return None
[ "def", "get_android_manifest_xml", "(", "self", ")", ":", "try", ":", "return", "self", ".", "xml", "[", "\"AndroidManifest.xml\"", "]", "except", "KeyError", ":", "return", "None" ]
https://github.com/androguard/androguard/blob/8d091cbb309c0c50bf239f805cc1e0931b8dcddc/androguard/core/bytecodes/apk.py#L1538-L1547
BichenWuUCB/squeezeDet
e7c0860eb1d141729cf02a2ec9cafc0cfb4a21aa
src/utils/util.py
python
bbox_transform
(bbox)
return out_box
convert a bbox of form [cx, cy, w, h] to [xmin, ymin, xmax, ymax]. Works for numpy array or list of tensors.
convert a bbox of form [cx, cy, w, h] to [xmin, ymin, xmax, ymax]. Works for numpy array or list of tensors.
[ "convert", "a", "bbox", "of", "form", "[", "cx", "cy", "w", "h", "]", "to", "[", "xmin", "ymin", "xmax", "ymax", "]", ".", "Works", "for", "numpy", "array", "or", "list", "of", "tensors", "." ]
def bbox_transform(bbox): """convert a bbox of form [cx, cy, w, h] to [xmin, ymin, xmax, ymax]. Works for numpy array or list of tensors. """ with tf.variable_scope('bbox_transform') as scope: cx, cy, w, h = bbox out_box = [[]]*4 out_box[0] = cx-w/2 out_box[1] = cy-h/2 out_box[2] = cx+w/2 out_box[3] = cy+h/2 return out_box
[ "def", "bbox_transform", "(", "bbox", ")", ":", "with", "tf", ".", "variable_scope", "(", "'bbox_transform'", ")", "as", "scope", ":", "cx", ",", "cy", ",", "w", ",", "h", "=", "bbox", "out_box", "=", "[", "[", "]", "]", "*", "4", "out_box", "[", ...
https://github.com/BichenWuUCB/squeezeDet/blob/e7c0860eb1d141729cf02a2ec9cafc0cfb4a21aa/src/utils/util.py#L167-L179
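The conversion is plain arithmetic once the tensors are stripped away; with an assumed box [cx, cy, w, h] = [10, 20, 4, 6]:

cx, cy, w, h = 10.0, 20.0, 4.0, 6.0

xmin, ymin = cx - w / 2, cy - h / 2   # (8.0, 17.0)
xmax, ymax = cx + w / 2, cy + h / 2   # (12.0, 23.0)
print([xmin, ymin, xmax, ymax])       # [8.0, 17.0, 12.0, 23.0]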
glouppe/info8006-introduction-to-ai
8360cc9a8c418559e402be1454ec885b6c1062d7
projects/project0/dfs.py
python
PacmanAgent.get_action
(self, state)
Given a pacman game state, returns a legal move. Arguments: ---------- - `state`: the current game state. See FAQ and class `pacman.GameState`. Return: ------- - A legal move as defined in `game.Directions`.
Given a pacman game state, returns a legal move.
[ "Given", "a", "pacman", "game", "state", "returns", "a", "legal", "move", "." ]
def get_action(self, state): """ Given a pacman game state, returns a legal move. Arguments: ---------- - `state`: the current game state. See FAQ and class `pacman.GameState`. Return: ------- - A legal move as defined in `game.Directions`. """ if not self.moves: self.moves = self.dfs(state) try: return self.moves.pop(0) except IndexError: return Directions.STOP
[ "def", "get_action", "(", "self", ",", "state", ")", ":", "if", "not", "self", ".", "moves", ":", "self", ".", "moves", "=", "self", ".", "dfs", "(", "state", ")", "try", ":", "return", "self", ".", "moves", ".", "pop", "(", "0", ")", "except", ...
https://github.com/glouppe/info8006-introduction-to-ai/blob/8360cc9a8c418559e402be1454ec885b6c1062d7/projects/project0/dfs.py#L36-L57
R-ArcGIS/r-bridge-install
18ebc2edb84655f6bc67b1f462d58162329f1fcd
rtools/rpath.py
python
r_pkg_path
()
return package_path
Package path search. Locations searched: - HKCU\\Software\\Esri\\ArcGISPro\\RintegrationProPackagePath - [MYDOCUMENTS]/R/win-library/[3-9].[0-9]/ - default for user R packages - [ArcGIS]/Resources/Rintegration/arcgisbinding
Package path search. Locations searched: - HKCU\\Software\\Esri\\ArcGISPro\\RintegrationProPackagePath - [MYDOCUMENTS]/R/win-library/[3-9].[0-9]/ - default for user R packages - [ArcGIS]/Resources/Rintegration/arcgisbinding
[ "Package", "path", "search", ".", "Locations", "searched", ":", "-", "HKCU", "\\\\", "Software", "\\\\", "Esri", "\\\\", "ArcGISPro", "\\\\", "RintegrationProPackagePath", "-", "[", "MYDOCUMENTS", "]", "/", "R", "/", "win", "-", "library", "/", "[", "3", "...
def r_pkg_path(): """ Package path search. Locations searched: - HKCU\\Software\\Esri\\ArcGISPro\\RintegrationProPackagePath - [MYDOCUMENTS]/R/win-library/[3-9].[0-9]/ - default for user R packages - [ArcGIS]/Resources/Rintegration/arcgisbinding """ package_path = None package_name = 'arcgisbinding' root_key = winreg.HKEY_CURRENT_USER reg_path = "SOFTWARE\\Esri\\ArcGISPro" package_key = 'RintegrationProPackagePath' pro_reg = None try: # find the key, 64- or 32-bit we want it all pro_reg = winreg.OpenKey(root_key, reg_path, 0, READ_ACCESS) except fnf_exception as error: handle_fnf(error) if pro_reg: try: # returns a tuple of (value, type) package_path_key = winreg.QueryValueEx(pro_reg, package_key) package_path_raw = package_path_key[0] if os.path.exists(package_path_raw): package_path = package_path_raw except fnf_exception as error: handle_fnf(error) # iterate over all known library path locations, # and check for our package in each. for lib_path in r_all_lib_paths(): possible_package_path = os.path.join(lib_path, package_name) if os.path.exists(possible_package_path): package_path = possible_package_path # we want the highest-priority library, stop here break # fallback -- <ArcGIS Install>/Rintegration/arcgisbinding if not package_path: import arcpy arc_install_dir = arcpy.GetInstallInfo()['InstallDir'] arc_package_dir = os.path.join( arc_install_dir, 'Rintegration', package_name) if os.path.exists(arc_package_dir): package_path = arc_package_dir return package_path
[ "def", "r_pkg_path", "(", ")", ":", "package_path", "=", "None", "package_name", "=", "'arcgisbinding'", "root_key", "=", "winreg", ".", "HKEY_CURRENT_USER", "reg_path", "=", "\"SOFTWARE\\\\Esri\\\\ArcGISPro\"", "package_key", "=", "'RintegrationProPackagePath'", "pro_reg...
https://github.com/R-ArcGIS/r-bridge-install/blob/18ebc2edb84655f6bc67b1f462d58162329f1fcd/rtools/rpath.py#L454-L503
datacenter/ACI
9240622e6be03047f48628deacb5450212bd7ebe
configuration-python/generic_code/apicPython/setDefaultSettingForPrivateNetwork.py
python
set_default_setting_for_private_network
(fv_ctx, **args)
Set Default Setting For Private Network
Set Default Setting For Private Network
[ "Set", "Default", "Setting", "For", "Private", "Network" ]
def set_default_setting_for_private_network(fv_ctx, **args): """Set Default Setting For Private Network""" args = args['optional_args'] if 'optional_args' in args.keys() else args if 'bgp_timer' in args and args['bgp_timer'] != '': fv_rsbgpctxpol = RsBgpCtxPol(fv_ctx, tnBgpCtxPolName='' if args['bgp_timer'] != None and args['bgp_timer'].lower() == 'none' else args['bgp_timer']) if 'ospf_timer' in args and args['ospf_timer'] != '': fv_rsospfctxpol = RsOspfCtxPol(fv_ctx, tnOspfCtxPolName='' if args['ospf_timer'] != None and args['ospf_timer'].lower() == 'none' else args['ospf_timer']) if 'end_point_retention_policy' in args and args['end_point_retention_policy'] != '': fv_rsctxtoepret = RsCtxToEpRet(fv_ctx, tnFvEpRetPolName='' if args['end_point_retention_policy'] != None and args['end_point_retention_policy'].lower() == 'none' else args['end_point_retention_policy']) if 'monitoring_policy' in args and args['monitoring_policy'] != '': fv_rsctxmonpol = RsCtxMonPol(fv_ctx, tnMonEPGPolName='' if args['monitoring_policy'] != None and args['monitoring_policy'].lower() == 'none' else args['monitoring_policy'])
[ "def", "set_default_setting_for_private_network", "(", "fv_ctx", ",", "*", "*", "args", ")", ":", "args", "=", "args", "[", "'optional_args'", "]", "if", "'optional_args'", "in", "args", ".", "keys", "(", ")", "else", "args", "if", "'bgp_timer'", "in", "args...
https://github.com/datacenter/ACI/blob/9240622e6be03047f48628deacb5450212bd7ebe/configuration-python/generic_code/apicPython/setDefaultSettingForPrivateNetwork.py#L24-L34
IntelAI/models
1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c
models/object_detection/pytorch/common/utils.py
python
setup_for_distributed
(is_master)
This function disables printing when not in master process
This function disables printing when not in master process
[ "This", "function", "disables", "printing", "when", "not", "in", "master", "process" ]
def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop("force", False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print
[ "def", "setup_for_distributed", "(", "is_master", ")", ":", "import", "builtins", "as", "__builtin__", "builtin_print", "=", "__builtin__", ".", "print", "def", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "force", "=", "kwargs", ".", "pop...
https://github.com/IntelAI/models/blob/1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c/models/object_detection/pytorch/common/utils.py#L251-L264
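The record above swaps the built-in print for a rank-aware wrapper, so only the master process writes output while any rank can still break through with the force keyword it pops. A minimal usage sketch, assuming the setup_for_distributed definition above is in scope:

# On a worker rank, ordinary prints are swallowed; force=True bypasses the guard.
setup_for_distributed(is_master=False)

print("per-step progress")               # silently dropped on workers
print("fatal: device lost", force=True)  # still reaches stdout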
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/lib/arraysetops.py
python
union1d
(ar1, ar2)
return unique(np.concatenate((ar1, ar2), axis=None))
Find the union of two arrays. Return the unique, sorted array of values that are in either of the two input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. They are flattened if they are not already 1D. Returns ------- union1d : ndarray Unique, sorted union of the input arrays. See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Examples -------- >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) To find the union of more than two arrays, use functools.reduce: >>> from functools import reduce >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([1, 2, 3, 4, 6])
Find the union of two arrays.
[ "Find", "the", "union", "of", "two", "arrays", "." ]
def union1d(ar1, ar2): """ Find the union of two arrays. Return the unique, sorted array of values that are in either of the two input arrays. Parameters ---------- ar1, ar2 : array_like Input arrays. They are flattened if they are not already 1D. Returns ------- union1d : ndarray Unique, sorted union of the input arrays. See Also -------- numpy.lib.arraysetops : Module with a number of other functions for performing set operations on arrays. Examples -------- >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) To find the union of more than two arrays, use functools.reduce: >>> from functools import reduce >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) array([1, 2, 3, 4, 6]) """ return unique(np.concatenate((ar1, ar2), axis=None))
[ "def", "union1d", "(", "ar1", ",", "ar2", ")", ":", "return", "unique", "(", "np", ".", "concatenate", "(", "(", "ar1", ",", "ar2", ")", ",", "axis", "=", "None", ")", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/lib/arraysetops.py#L707-L740
ShuangLI59/person_search
ef7d77a58a581825611e575010d9a3653b1ddf98
lib/rpn/proposal_target_layer.py
python
ProposalTargetLayer.setup
(self, bottom, top)
[]
def setup(self, bottom, top): layer_params = yaml.load(self.param_str) self._num_classes = layer_params['num_classes'] if 'bg_aux_label' in layer_params: self._bg_aux_label = layer_params['bg_aux_label'] else: self._bg_aux_label = 0 # sampled rois (0, x1, y1, x2, y2) top[0].reshape(1, 5) # labels top[1].reshape(1, 1) # bbox_targets top[2].reshape(1, self._num_classes * 4) # bbox_inside_weights top[3].reshape(1, self._num_classes * 4) # bbox_outside_weights top[4].reshape(1, self._num_classes * 4) # auxiliary label if len(top) > 5: top[5].reshape(1, 1) if DEBUG: self._count_buffer = np.zeros((1000, 3), dtype=np.int32) self._count_index = 0 self._count_buffer_full = False
[ "def", "setup", "(", "self", ",", "bottom", ",", "top", ")", ":", "layer_params", "=", "yaml", ".", "load", "(", "self", ".", "param_str", ")", "self", ".", "_num_classes", "=", "layer_params", "[", "'num_classes'", "]", "if", "'bg_aux_label'", "in", "la...
https://github.com/ShuangLI59/person_search/blob/ef7d77a58a581825611e575010d9a3653b1ddf98/lib/rpn/proposal_target_layer.py#L24-L49
zzzeek/sqlalchemy
fc5c54fcd4d868c2a4c7ac19668d72f506fe821e
lib/sqlalchemy/orm/mapper.py
python
validates
(*names, **kw)
return wrap
r"""Decorate a method as a 'validator' for one or more named properties. Designates a method as a validator, a method which receives the name of the attribute as well as a value to be assigned, or in the case of a collection, the value to be added to the collection. The function can then raise validation exceptions to halt the process from continuing (where Python's built-in ``ValueError`` and ``AssertionError`` exceptions are reasonable choices), or can modify or replace the value before proceeding. The function should otherwise return the given value. Note that a validator for a collection **cannot** issue a load of that collection within the validation routine - this usage raises an assertion to avoid recursion overflows. This is a reentrant condition which is not supported. :param \*names: list of attribute names to be validated. :param include_removes: if True, "remove" events will be sent as well - the validation function must accept an additional argument "is_remove" which will be a boolean. :param include_backrefs: defaults to ``True``; if ``False``, the validation function will not emit if the originator is an attribute event related via a backref. This can be used for bi-directional :func:`.validates` usage where only one validator should emit per attribute operation. .. versionadded:: 0.9.0 .. seealso:: :ref:`simple_validators` - usage examples for :func:`.validates`
r"""Decorate a method as a 'validator' for one or more named properties.
[ "r", "Decorate", "a", "method", "as", "a", "validator", "for", "one", "or", "more", "named", "properties", "." ]
def validates(*names, **kw): r"""Decorate a method as a 'validator' for one or more named properties. Designates a method as a validator, a method which receives the name of the attribute as well as a value to be assigned, or in the case of a collection, the value to be added to the collection. The function can then raise validation exceptions to halt the process from continuing (where Python's built-in ``ValueError`` and ``AssertionError`` exceptions are reasonable choices), or can modify or replace the value before proceeding. The function should otherwise return the given value. Note that a validator for a collection **cannot** issue a load of that collection within the validation routine - this usage raises an assertion to avoid recursion overflows. This is a reentrant condition which is not supported. :param \*names: list of attribute names to be validated. :param include_removes: if True, "remove" events will be sent as well - the validation function must accept an additional argument "is_remove" which will be a boolean. :param include_backrefs: defaults to ``True``; if ``False``, the validation function will not emit if the originator is an attribute event related via a backref. This can be used for bi-directional :func:`.validates` usage where only one validator should emit per attribute operation. .. versionadded:: 0.9.0 .. seealso:: :ref:`simple_validators` - usage examples for :func:`.validates` """ include_removes = kw.pop("include_removes", False) include_backrefs = kw.pop("include_backrefs", True) def wrap(fn): fn.__sa_validators__ = names fn.__sa_validation_opts__ = { "include_removes": include_removes, "include_backrefs": include_backrefs, } return fn return wrap
[ "def", "validates", "(", "*", "names", ",", "*", "*", "kw", ")", ":", "include_removes", "=", "kw", ".", "pop", "(", "\"include_removes\"", ",", "False", ")", "include_backrefs", "=", "kw", ".", "pop", "(", "\"include_backrefs\"", ",", "True", ")", "def"...
https://github.com/zzzeek/sqlalchemy/blob/fc5c54fcd4d868c2a4c7ac19668d72f506fe821e/lib/sqlalchemy/orm/mapper.py#L3547-L3593
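A minimal sketch of the decorator in use on a declarative model; the User class and its email rule below are illustrative, not part of SQLAlchemy:

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import validates

Base = declarative_base()

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    email = Column(String)

    @validates("email")
    def validate_email(self, key, value):
        # Raising aborts the assignment; returning the value accepts it.
        if "@" not in value:
            raise ValueError("invalid email address")
        return value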
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/Alfred-Time-Keeper/PyAl/Request/requests/packages/oauthlib/oauth2/draft25/__init__.py
python
UserAgentClient.parse_request_uri_response
(self, uri, state=None, scope=None)
return response
Parse the response URI fragment. If the resource owner grants the access request, the authorization server issues an access token and delivers it to the client by adding the following parameters to the fragment component of the redirection URI using the "application/x-www-form-urlencoded" format: access_token REQUIRED. The access token issued by the authorization server. token_type REQUIRED. The type of the token issued as described in `Section 7.1`_. Value is case insensitive. expires_in RECOMMENDED. The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. scope OPTIONAL, if identical to the scope requested by the client, otherwise REQUIRED. The scope of the access token as described by `Section 3.3`_. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. .. _`Section 7.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-7.1 .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3
Parse the response URI fragment.
[ "Parse", "the", "response", "URI", "fragment", "." ]
def parse_request_uri_response(self, uri, state=None, scope=None): """Parse the response URI fragment. If the resource owner grants the access request, the authorization server issues an access token and delivers it to the client by adding the following parameters to the fragment component of the redirection URI using the "application/x-www-form-urlencoded" format: access_token REQUIRED. The access token issued by the authorization server. token_type REQUIRED. The type of the token issued as described in `Section 7.1`_. Value is case insensitive. expires_in RECOMMENDED. The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. If omitted, the authorization server SHOULD provide the expiration time via other means or document the default value. scope OPTIONAL, if identical to the scope requested by the client, otherwise REQUIRED. The scope of the access token as described by `Section 3.3`_. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. .. _`Section 7.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-7.1 .. _`Section 3.3`: http://tools.ietf.org/html/draft-ietf-oauth-v2-28#section-3.3 """ response = parse_implicit_response(uri, state=state, scope=scope) self._populate_attributes(response) return response
[ "def", "parse_request_uri_response", "(", "self", ",", "uri", ",", "state", "=", "None", ",", "scope", "=", "None", ")", ":", "response", "=", "parse_implicit_response", "(", "uri", ",", "state", "=", "state", ",", "scope", "=", "scope", ")", "self", "."...
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/Alfred-Time-Keeper/PyAl/Request/requests/packages/oauthlib/oauth2/draft25/__init__.py#L334-L367
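A hedged sketch of parsing an implicit-grant redirect. The import path follows the vendored file above (modern oauthlib names this client MobileApplicationClient), and the client id, URI and token value are illustrative:

from oauthlib.oauth2.draft25 import UserAgentClient

uri = ("https://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA"
       "&token_type=bearer&expires_in=3600&state=xyz")

client = UserAgentClient("hypothetical_client_id")
client.parse_request_uri_response(uri, state="xyz")
# _populate_attributes should now have set client.access_token etc.
print(client.access_token)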
Trusted-AI/adversarial-robustness-toolbox
9fabffdbb92947efa1ecc5d825d634d30dfbaf29
art/attacks/evasion/pixel_threshold.py
python
DifferentialEvolutionSolver.x
(self)
return self._scale_parameters(self.population[0])
The best solution from the solver Returns ------- x : ndarray The best solution from the solver.
The best solution from the solver Returns ------- x : ndarray The best solution from the solver.
[ "The", "best", "solution", "from", "the", "solver", "Returns", "-------", "x", ":", "ndarray", "The", "best", "solution", "from", "the", "solver", "." ]
def x(self): """ The best solution from the solver Returns ------- x : ndarray The best solution from the solver. """ return self._scale_parameters(self.population[0])
[ "def", "x", "(", "self", ")", ":", "return", "self", ".", "_scale_parameters", "(", "self", ".", "population", "[", "0", "]", ")" ]
https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/9fabffdbb92947efa1ecc5d825d634d30dfbaf29/art/attacks/evasion/pixel_threshold.py#L1158-L1166
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/django/http/multipartparser.py
python
InterBoundaryIter.__init__
(self, stream, boundary)
[]
def __init__(self, stream, boundary): self._stream = stream self._boundary = boundary
[ "def", "__init__", "(", "self", ",", "stream", ",", "boundary", ")", ":", "self", ".", "_stream", "=", "stream", "self", ".", "_boundary", "=", "boundary" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/http/multipartparser.py#L465-L467
Abjad/abjad
d0646dfbe83db3dc5ab268f76a0950712b87b7fd
abjad/typedcollections.py
python
TypedTuple.__add__
(self, argument)
Adds typed tuple to ``argument``. Returns new typed tuple.
Adds typed tuple to ``argument``.
[ "Adds", "typed", "tuple", "to", "argument", "." ]
def __add__(self, argument): """ Adds typed tuple to ``argument``. Returns new typed tuple. """ if isinstance(argument, type(self)): items = argument._collection return new(self, items=self._collection[:] + items) elif isinstance(argument, type(self._collection)): items = argument[:] return new(self, items=self._collection[:] + items) raise NotImplementedError
[ "def", "__add__", "(", "self", ",", "argument", ")", ":", "if", "isinstance", "(", "argument", ",", "type", "(", "self", ")", ")", ":", "items", "=", "argument", ".", "_collection", "return", "new", "(", "self", ",", "items", "=", "self", ".", "_coll...
https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/typedcollections.py#L1057-L1070
entropy1337/infernal-twin
10995cd03312e39a48ade0f114ebb0ae3a711bb8
Modules/build/reportlab/build/lib.linux-i686-2.7/reportlab/lib/sequencer.py
python
Sequencer.__getitem__
(self, key)
Allows compact notation to support the format function. s['key'] gets current value, s['key+'] increments.
Allows compact notation to support the format function. s['key'] gets current value, s['key+'] increments.
[ "Allows", "compact", "notation", "to", "support", "the", "format", "function", ".", "s", "[", "key", "]", "gets", "current", "value", "s", "[", "key", "+", "]", "increments", "." ]
def __getitem__(self, key): """Allows compact notation to support the format function. s['key'] gets current value, s['key+'] increments.""" if key[-1:] == '+': counter = key[:-1] return self.nextf(counter) else: return self.thisf(key)
[ "def", "__getitem__", "(", "self", ",", "key", ")", ":", "if", "key", "[", "-", "1", ":", "]", "==", "'+'", ":", "counter", "=", "key", "[", ":", "-", "1", "]", "return", "self", ".", "nextf", "(", "counter", ")", "else", ":", "return", "self",...
https://github.com/entropy1337/infernal-twin/blob/10995cd03312e39a48ade0f114ebb0ae3a711bb8/Modules/build/reportlab/build/lib.linux-i686-2.7/reportlab/lib/sequencer.py#L230-L237
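A usage sketch of the compact notation; it assumes reportlab's default counters, which start at zero so the first '+' access formats to '1':

from reportlab.lib.sequencer import Sequencer

seq = Sequencer()
print('Figure %s' % seq['figure+'])     # 'Figure 1' -- trailing '+' increments
print('Figure %s' % seq['figure+'])     # 'Figure 2'
print('see Figure %s' % seq['figure'])  # 'see Figure 2' -- current value only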
mozilla/addons-server
cbfb29e5be99539c30248d70b93bb15e1c1bc9d7
src/olympia/amo/celery.py
python
process_failure_signal
( exception, traceback, sender, task_id, signal, args, kwargs, einfo, **kw )
Catch any task failure signals from within our worker processes and log them as exceptions, so they appear in Sentry and ordinary logging output.
Catch any task failure signals from within our worker processes and log them as exceptions, so they appear in Sentry and ordinary logging output.
[ "Catch", "any", "task", "failure", "signals", "from", "within", "our", "worker", "processes", "and", "log", "them", "as", "exceptions", "so", "they", "appear", "in", "Sentry", "and", "ordinary", "logging", "output", "." ]
def process_failure_signal( exception, traceback, sender, task_id, signal, args, kwargs, einfo, **kw ): """Catch any task failure signals from within our worker processes and log them as exceptions, so they appear in Sentry and ordinary logging output.""" exc_info = (type(exception), exception, traceback) log.error( 'Celery TASK exception: {0.__name__}: {1}'.format(*exc_info), exc_info=exc_info, extra={ 'data': { 'task_id': task_id, 'sender': sender, 'args': args, 'kwargs': kwargs, } }, )
[ "def", "process_failure_signal", "(", "exception", ",", "traceback", ",", "sender", ",", "task_id", ",", "signal", ",", "args", ",", "kwargs", ",", "einfo", ",", "*", "*", "kw", ")", ":", "exc_info", "=", "(", "type", "(", "exception", ")", ",", "excep...
https://github.com/mozilla/addons-server/blob/cbfb29e5be99539c30248d70b93bb15e1c1bc9d7/src/olympia/amo/celery.py#L78-L97
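The handler above is meant to be wired to Celery's task_failure signal, whose dispatch supplies exactly the keyword arguments in the signature; a minimal hook-up sketch, assuming the handler is importable:

from celery.signals import task_failure

# The dispatcher passes exception, traceback, sender, task_id, signal,
# args, kwargs and einfo as keywords, matching the handler above.
task_failure.connect(process_failure_signal)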
sabnzbd/sabnzbd
52d21e94d3cc6e30764a833fe2a256783d1a8931
sabnzbd/downloader.py
python
Downloader.decode
(self, article, raw_data: Optional[List[bytes]])
Decode article and check the status of the decoder and the assembler
Decode article and check the status of the decoder and the assembler
[ "Decode", "article", "and", "check", "the", "status", "of", "the", "decoder", "and", "the", "assembler" ]
def decode(self, article, raw_data: Optional[List[bytes]]): """Decode article and check the status of the decoder and the assembler """ # Article was requested and fetched, update article stats for the server sabnzbd.BPSMeter.register_server_article_tried(article.fetcher.id) # Handle broken articles directly if not raw_data: if not article.search_new_server(): sabnzbd.NzbQueue.register_article(article, success=False) article.nzf.nzo.increase_bad_articles_counter("missing_articles") return # Send to decoder-queue sabnzbd.Decoder.process(article, raw_data) # See if we need to delay because the queues are full logged = False while not self.shutdown and (sabnzbd.Decoder.queue_full() or sabnzbd.Assembler.queue_full()): if not logged: # Only log once, to not waste any CPU-cycles logging.debug( "Delaying - Decoder queue: %s - Assembler queue: %s", sabnzbd.Decoder.decoder_queue.qsize(), sabnzbd.Assembler.queue.qsize(), ) logged = True time.sleep(0.01)
[ "def", "decode", "(", "self", ",", "article", ",", "raw_data", ":", "Optional", "[", "List", "[", "bytes", "]", "]", ")", ":", "# Article was requested and fetched, update article stats for the server", "sabnzbd", ".", "BPSMeter", ".", "register_server_article_tried", ...
https://github.com/sabnzbd/sabnzbd/blob/52d21e94d3cc6e30764a833fe2a256783d1a8931/sabnzbd/downloader.py#L517-L545
10XGenomics/cellranger
a83c753ce641db6409a59ad817328354fbe7187e
lib/python/cellranger/io.py
python
decode_ascii_xml
(x)
Decode a string from 7-bit ASCII + XML into unicode.
Decode a string from 7-bit ASCII + XML into unicode.
[ "Decode", "a", "string", "from", "7", "-", "bit", "ASCII", "+", "XML", "into", "unicode", "." ]
def decode_ascii_xml(x): """Decode a string from 7-bit ASCII + XML into unicode. """ if isinstance(x, six.text_type): return x elif isinstance(x, six.binary_type): return make_utf8(HTMLParser.unescape.__func__(HTMLParser, x)) else: raise ValueError('Expected string type, got type %s' % str(type(x)))
[ "def", "decode_ascii_xml", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "six", ".", "text_type", ")", ":", "return", "x", "elif", "isinstance", "(", "x", ",", "six", ".", "binary_type", ")", ":", "return", "make_utf8", "(", "HTMLParser", ".",...
https://github.com/10XGenomics/cellranger/blob/a83c753ce641db6409a59ad817328354fbe7187e/lib/python/cellranger/io.py#L371-L379
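A behaviour sketch, assuming the decode_ascii_xml above is in scope; entity references in an ASCII byte string come back unescaped as text, while text input passes straight through:

print(decode_ascii_xml(b'Tom &amp; Jerry'))  # Tom & Jerry
print(decode_ascii_xml(u'already text'))     # already text
# Any non-string input raises ValueError('Expected string type, ...').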
BMW-InnovationLab/BMW-TensorFlow-Training-GUI
4f10d1f00f9ac312ca833e5b28fd0f8952cfee17
training_api/research/object_detection/predictors/convolutional_box_predictor.py
python
WeightSharedConvolutionalBoxPredictor.__init__
(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, depth, num_layers_before_predictor, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False)
Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower between box prediction and class prediction heads.
Constructor.
[ "Constructor", "." ]
def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, depth, num_layers_before_predictor, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower between box prediction and class prediction heads. """ super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._depth = depth self._num_layers_before_predictor = num_layers_before_predictor self._kernel_size = kernel_size self._apply_batch_norm = apply_batch_norm self._share_prediction_tower = share_prediction_tower
[ "def", "__init__", "(", "self", ",", "is_training", ",", "num_classes", ",", "box_prediction_head", ",", "class_prediction_head", ",", "other_heads", ",", "conv_hyperparams_fn", ",", "depth", ",", "num_layers_before_predictor", ",", "kernel_size", "=", "3", ",", "ap...
https://github.com/BMW-InnovationLab/BMW-TensorFlow-Training-GUI/blob/4f10d1f00f9ac312ca833e5b28fd0f8952cfee17/training_api/research/object_detection/predictors/convolutional_box_predictor.py#L195-L240
yt-project/yt
dc7b24f9b266703db4c843e329c6c8644d47b824
yt/data_objects/derived_quantities.py
python
BulkVelocity.count_values
(self, use_gas=True, use_particles=False, particle_type="nbody")
[]
def count_values(self, use_gas=True, use_particles=False, particle_type="nbody"): if use_particles and particle_type not in self.data_source.ds.particle_types: raise YTParticleTypeNotFound(particle_type, self.data_source.ds) # This is a list now self.num_vals = 0 if use_gas: self.num_vals += 4 if use_particles and "nbody" in self.data_source.ds.particle_types: self.num_vals += 4
[ "def", "count_values", "(", "self", ",", "use_gas", "=", "True", ",", "use_particles", "=", "False", ",", "particle_type", "=", "\"nbody\"", ")", ":", "if", "use_particles", "and", "particle_type", "not", "in", "self", ".", "data_source", ".", "ds", ".", "...
https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/data_objects/derived_quantities.py#L327-L335
google/clusterfuzz
f358af24f414daa17a3649b143e71ea71871ef59
src/appengine/handlers/cron/cleanup.py
python
mark_testcase_as_triaged_if_needed
(testcase, issue)
Mark testcase as triage complete if both testcase and associated issue are closed.
Mark testcase as triage complete if both testcase and associated issue are closed.
[ "Mark", "testcase", "as", "triage", "complete", "if", "both", "testcase", "and", "associated", "issue", "are", "closed", "." ]
def mark_testcase_as_triaged_if_needed(testcase, issue): """Mark testcase as triage complete if both testcase and associated issue are closed.""" # Check if testcase is open. If yes, bail out. if testcase.open: return # Check if there is an associated bug in open state. If yes, bail out. if issue: # Get latest issue object to ensure our update went through. issue = issue_tracker_utils.get_issue_for_testcase(testcase) if issue.is_open: return testcase.triaged = True testcase.put()
[ "def", "mark_testcase_as_triaged_if_needed", "(", "testcase", ",", "issue", ")", ":", "# Check if testcase is open. If yes, bail out.", "if", "testcase", ".", "open", ":", "return", "# Check if there is an associated bug in open state. If yes, bail out.", "if", "issue", ":", "#...
https://github.com/google/clusterfuzz/blob/f358af24f414daa17a3649b143e71ea71871ef59/src/appengine/handlers/cron/cleanup.py#L640-L655
chainer/chainer
e9da1423255c58c37be9733f51b158aa9b39dc93
chainer/variable.py
python
VariableNode.set_creator
(self, creator)
Sets a :class:`~chainer.Function` object that created this node. This method is equivalent to ``self.creator = creator``. A :class:`~chainer.FunctionNode` object can also be passed. Args: creator (Function or FunctionNode): Function that has created this variable.
Sets a :class:`~chainer.Function` object that created this node.
[ "Sets", "a", ":", "class", ":", "~chainer", ".", "Function", "object", "that", "created", "this", "node", "." ]
def set_creator(self, creator): """Sets a :class:`~chainer.Function` object that created this node. This method is equivalent to ``self.creator = creator``. A :class:`~chainer.FunctionNode` object can also be passed. Args: creator (Function or FunctionNode): Function that has created this variable. """ self.creator = creator
[ "def", "set_creator", "(", "self", ",", "creator", ")", ":", "self", ".", "creator", "=", "creator" ]
https://github.com/chainer/chainer/blob/e9da1423255c58c37be9733f51b158aa9b39dc93/chainer/variable.py#L407-L418
log2timeline/dfvfs
4ca7bf06b15cdc000297a7122a065f0ca71de544
dfvfs/path/path_spec.py
python
PathSpec.__eq__
(self, other)
return isinstance(other, PathSpec) and self.comparable == other.comparable
Determines if the path specification is equal to the other.
Determines if the path specification is equal to the other.
[ "Determines", "if", "the", "path", "specification", "is", "equal", "to", "the", "other", "." ]
def __eq__(self, other): """Determines if the path specification is equal to the other.""" return isinstance(other, PathSpec) and self.comparable == other.comparable
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "return", "isinstance", "(", "other", ",", "PathSpec", ")", "and", "self", ".", "comparable", "==", "other", ".", "comparable" ]
https://github.com/log2timeline/dfvfs/blob/4ca7bf06b15cdc000297a7122a065f0ca71de544/dfvfs/path/path_spec.py#L40-L42
yuanjunchai/IKC
2a846cf1194cd9bace08973d55ecd8fd3179fe48
codes/train_IKC.py
python
init_dist
(backend='nccl', **kwargs)
initialization for distributed training
initialization for distributed training
[ "initialization", "for", "distributed", "training" ]
def init_dist(backend='nccl', **kwargs): ''' initialization for distributed training''' # if mp.get_start_method(allow_none=True) is None: if mp.get_start_method(allow_none=True) != 'spawn': #Return the name of start method used for starting processes mp.set_start_method('spawn', force=True) ##'spawn' is the default on Windows rank = int(os.environ['RANK']) #system env process ranks num_gpus = torch.cuda.device_count() #Returns the number of GPUs available torch.cuda.set_device(rank % num_gpus) dist.init_process_group(backend=backend, **kwargs)
[ "def", "init_dist", "(", "backend", "=", "'nccl'", ",", "*", "*", "kwargs", ")", ":", "# if mp.get_start_method(allow_none=True) is None:", "if", "mp", ".", "get_start_method", "(", "allow_none", "=", "True", ")", "!=", "'spawn'", ":", "#Return the name of start met...
https://github.com/yuanjunchai/IKC/blob/2a846cf1194cd9bace08973d55ecd8fd3179fe48/codes/train_IKC.py#L18-L26
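init_dist relies on torch.distributed's environment-variable rendezvous, reading os.environ['RANK'] directly; a single-process launch sketch with illustrative values (assumes the definition above is importable and a CUDA device is visible):

import os

os.environ.setdefault('RANK', '0')
os.environ.setdefault('WORLD_SIZE', '1')
os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ.setdefault('MASTER_PORT', '29500')

init_dist(backend='nccl')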
kobayashi/s3monkey
cd59e6328fef94cac14f255a9755e2dd456ac302
s3monkey/pyfakefs/fake_filesystem.py
python
FakeFilesystem.splitdrive
(self, path)
return path[:0], path
Splits the path into the drive part and the rest of the path. Taken from the Windows-specific implementation in Python 3.5 and slightly adapted. Args: path: the full path to be split. Returns: A tuple of the drive part and the rest of the path, or of an empty string and the full path if drive letters are not supported or no drive is present.
Splits the path into the drive part and the rest of the path.
[ "Splits", "the", "path", "into", "the", "drive", "part", "and", "the", "rest", "of", "the", "path", "." ]
def splitdrive(self, path): """Splits the path into the drive part and the rest of the path. Taken from Windows specific implementation in Python 3.5 and slightly adapted. Args: path: the full path to be splitpath. Returns: A tuple of the drive part and the rest of the path, or of an empty string and the full path if drive letters are not supported or no drive is present. """ if sys.version_info >= (3, 6): path = os.fspath(path) if self.is_windows_fs: if len(path) >= 2: path = self.normcase(path) sep = self._path_separator(path) # UNC path handling is here since Python 2.7.8, # back-ported from Python 3 if sys.version_info >= (2, 7, 8): if (path[0:2] == sep * 2) and ( path[2:3] != sep): # UNC path handling - splits off the mount point # instead of the drive sep_index = path.find(sep, 2) if sep_index == -1: return path[:0], path sep_index2 = path.find(sep, sep_index + 1) if sep_index2 == sep_index + 1: return path[:0], path if sep_index2 == -1: sep_index2 = len(path) return path[:sep_index2], path[sep_index2:] if path[1:2] == self._matching_string(path, ':'): return path[:2], path[2:] return path[:0], path
[ "def", "splitdrive", "(", "self", ",", "path", ")", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "6", ")", ":", "path", "=", "os", ".", "fspath", "(", "path", ")", "if", "self", ".", "is_windows_fs", ":", "if", "len", "(", "path", ...
https://github.com/kobayashi/s3monkey/blob/cd59e6328fef94cac14f255a9755e2dd456ac302/s3monkey/pyfakefs/fake_filesystem.py#L1370-L1408
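A behaviour sketch on a Windows-flavoured fake filesystem. Treating is_windows_fs as a writable attribute is an assumption about this vendored copy, and the method normcases its input before splitting:

from pyfakefs import fake_filesystem

fs = fake_filesystem.FakeFilesystem()
fs.is_windows_fs = True
print(fs.splitdrive('C:/Users/me'))         # ('C:', '/Users/me'), modulo normcase
print(fs.splitdrive('//host/share/f.txt'))  # ('//host/share', '/f.txt') -- UNC mount split off
print(fs.splitdrive('no/drive/here'))       # ('', 'no/drive/here')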
dayorbyte/MongoAlchemy
e64ef0c87feff385637459707fe6090bd789e116
mongoalchemy/fields/sequence.py
python
SetField.unwrap
(self, value, session=None)
return set([self.item_type.unwrap(v, session=session) for v in value])
Unwraps the elements of ``value`` using ``SetField.item_type`` and returns them in a set
Unwraps the elements of ``value`` using ``SetField.item_type`` and returns them in a set
[ "Unwraps", "the", "elements", "of", "value", "using", "SetField", ".", "item_type", "and", "returns", "them", "in", "a", "set" ]
def unwrap(self, value, session=None): ''' Unwraps the elements of ``value`` using ``SetField.item_type`` and returns them in a set''' self.validate_unwrap(value) return set([self.item_type.unwrap(v, session=session) for v in value])
[ "def", "unwrap", "(", "self", ",", "value", ",", "session", "=", "None", ")", ":", "self", ".", "validate_unwrap", "(", "value", ")", "return", "set", "(", "[", "self", ".", "item_type", ".", "unwrap", "(", "v", ",", "session", "=", "session", ")", ...
https://github.com/dayorbyte/MongoAlchemy/blob/e64ef0c87feff385637459707fe6090bd789e116/mongoalchemy/fields/sequence.py#L235-L239
fortharris/Pcode
147962d160a834c219e12cb456abc130826468e4
rope/base/builtins.py
python
_CallContext.get_arguments
(self, argnames)
[]
def get_arguments(self, argnames): if self.args: return self.args.get_arguments(argnames)
[ "def", "get_arguments", "(", "self", ",", "argnames", ")", ":", "if", "self", ".", "args", ":", "return", "self", ".", "args", ".", "get_arguments", "(", "argnames", ")" ]
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/rope/base/builtins.py#L207-L209
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/mako/exceptions.py
python
text_error_template
(lookup=None)
return mako.template.Template(r""" <%page args="error=None, traceback=None"/> <%! from mako.exceptions import RichTraceback %>\ <% tback = RichTraceback(error=error, traceback=traceback) %>\ Traceback (most recent call last): % for (filename, lineno, function, line) in tback.traceback: File "${filename}", line ${lineno}, in ${function or '?'} ${line | trim} % endfor ${tback.errorname}: ${tback.message} """)
Provides a template that renders a stack trace in a similar format to the Python interpreter, substituting source template filenames, line numbers and code for that of the originating source template, as applicable.
Provides a template that renders a stack trace in a similar format to the Python interpreter, substituting source template filenames, line numbers and code for that of the originating source template, as applicable.
[ "Provides", "a", "template", "that", "renders", "a", "stack", "trace", "in", "a", "similar", "format", "to", "the", "Python", "interpreter", "substituting", "source", "template", "filenames", "line", "numbers", "and", "code", "for", "that", "of", "the", "origi...
def text_error_template(lookup=None): """Provides a template that renders a stack trace in a similar format to the Python interpreter, substituting source template filenames, line numbers and code for that of the originating source template, as applicable. """ import mako.template return mako.template.Template(r""" <%page args="error=None, traceback=None"/> <%! from mako.exceptions import RichTraceback %>\ <% tback = RichTraceback(error=error, traceback=traceback) %>\ Traceback (most recent call last): % for (filename, lineno, function, line) in tback.traceback: File "${filename}", line ${lineno}, in ${function or '?'} ${line | trim} % endfor ${tback.errorname}: ${tback.message} """)
[ "def", "text_error_template", "(", "lookup", "=", "None", ")", ":", "import", "mako", ".", "template", "return", "mako", ".", "template", ".", "Template", "(", "r\"\"\"\n<%page args=\"error=None, traceback=None\"/>\n<%!\n from mako.exceptions import RichTraceback\n%>\\\n<%\n...
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/mako/exceptions.py#L228-L250
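A usage sketch: with no explicit error or traceback passed, RichTraceback picks up the exception currently being handled, so the template is typically rendered inside an except block:

from mako.template import Template
from mako.exceptions import text_error_template

try:
    Template("${1 / 0}").render()
except Exception:
    # Plain-text traceback pointing at the failing template line.
    print(text_error_template().render())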
zh-plus/video-to-pose3D
c1e14af8d184f08d510826852da5a06c57d4a4ec
pose_trackers/lighttrack/lib/lib_kernel/lib_nms/nms_opr.py
python
NMSKeepCran.execute
(self, inputs, outputs)
inputs: list of (x0, y0, x1, y1, score)
inputs: list of (x0, y0, x1, y1, score)
[ "inputs", ":", "list", "of", "(", "x0", "y0", "x1", "y1", "score", ")" ]
def execute(self, inputs, outputs): """ inputs: list of (x0, y0, x1, y1, score)""" in_ = inputs[0].get_value() keep = gpu_nms(in_, thresh=self._iou_threshold) outputs[0].set_value(keep)
[ "def", "execute", "(", "self", ",", "inputs", ",", "outputs", ")", ":", "in_", "=", "inputs", "[", "0", "]", ".", "get_value", "(", ")", "keep", "=", "gpu_nms", "(", "in_", ",", "thresh", "=", "self", ".", "_iou_threshold", ")", "outputs", "[", "0"...
https://github.com/zh-plus/video-to-pose3D/blob/c1e14af8d184f08d510826852da5a06c57d4a4ec/pose_trackers/lighttrack/lib/lib_kernel/lib_nms/nms_opr.py#L19-L23
jpype-project/jpype
bbdca907d053f1e04e4dcd414d4ebce8f9da6313
jpype/_jproxy.py
python
_createJProxyDeferred
(cls, *intf)
return type("proxy.%s" % cls.__name__, (cls, _jpype._JProxy), members)
(internal) Create a proxy from a Python class with @JOverride notation on methods evaluated at first instantiation.
(internal) Create a proxy from a Python class with
[ "(", "internal", ")", "Create", "a", "proxy", "from", "a", "Python", "class", "with" ]
def _createJProxyDeferred(cls, *intf): """ (internal) Create a proxy from a Python class with @JOverride notation on methods evaluated at first instantiation. """ def new(tp, *args, **kwargs): # Attach a __jpype_interfaces__ attribute to this class if # one doesn't already exist. actualIntf = getattr(tp, "__jpype_interfaces__", None) if actualIntf is None: actualIntf = _prepareInterfaces(cls, intf) tp.__jpype_interfaces__ = actualIntf return _jpype._JProxy.__new__(tp, None, actualIntf) members = {'__new__': new} # Return the augmented class return type("proxy.%s" % cls.__name__, (cls, _jpype._JProxy), members)
[ "def", "_createJProxyDeferred", "(", "cls", ",", "*", "intf", ")", ":", "def", "new", "(", "tp", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Attach a __jpype_interfaces__ attribute to this class if", "# one doesn't already exist.", "actualIntf", "=", "...
https://github.com/jpype-project/jpype/blob/bbdca907d053f1e04e4dcd414d4ebce8f9da6313/jpype/_jproxy.py#L57-L73
Pyomo/pyomo
dbd4faee151084f343b893cc2b0c04cf2b76fd92
pyomo/contrib/pyros/uncertainty_sets.py
python
CardinalitySet.point_in_set
(self, point)
Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. Args: point: the point being checked for membership in the set
Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False.
[ "Calculates", "if", "supplied", "point", "is", "contained", "in", "the", "uncertainty", "set", ".", "Returns", "True", "or", "False", "." ]
def point_in_set(self, point): """ Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. Args: point: the point being checked for membership in the set """ cassis = [] for i in range(self.dim): if self.positive_deviation[i] > 0: cassis.append((point[i] - self.origin[i])/self.positive_deviation[i]) if sum(cassi for cassi in cassis) <= self.gamma and \ all(cassi >= 0 and cassi <= 1 for cassi in cassis): return True else: return False
[ "def", "point_in_set", "(", "self", ",", "point", ")", ":", "cassis", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "dim", ")", ":", "if", "self", ".", "positive_deviation", "[", "i", "]", ">", "0", ":", "cassis", ".", "append", "("...
https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/contrib/pyros/uncertainty_sets.py#L355-L372
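In symbols, the test above checks that the normalised deviations xi_i = (q_i - q_i^0) / sigma_i all lie in [0, 1] and sum to at most gamma. A hypothetical two-dimensional check; the constructor keywords are inferred from the attributes the method reads:

from pyomo.contrib.pyros.uncertainty_sets import CardinalitySet

cs = CardinalitySet(origin=[0, 0], positive_deviation=[2, 2], gamma=1)
print(cs.point_in_set((1.0, 1.0)))  # True:  xi = (0.5, 0.5), sum 1.0 <= gamma
print(cs.point_in_set((2.0, 2.0)))  # False: xi = (1.0, 1.0), sum 2.0 >  gamma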
freewizard/SublimeFormatSQL
88771e4b77ab186a09560b495294d11c29cd878c
sqlparse/sql.py
python
TokenList.group_tokens
(self, grp_cls, tokens, ignore_ws=False)
return grp
Replace tokens by an instance of *grp_cls*.
Replace tokens by an instance of *grp_cls*.
[ "Replace", "tokens", "by", "an", "instance", "of", "*", "grp_cls", "*", "." ]
def group_tokens(self, grp_cls, tokens, ignore_ws=False): """Replace tokens by an instance of *grp_cls*.""" idx = self.token_index(tokens[0]) if ignore_ws: while tokens and tokens[-1].is_whitespace(): tokens = tokens[:-1] for t in tokens: self.tokens.remove(t) grp = grp_cls(tokens) for token in tokens: token.parent = grp grp.parent = self self.tokens.insert(idx, grp) return grp
[ "def", "group_tokens", "(", "self", ",", "grp_cls", ",", "tokens", ",", "ignore_ws", "=", "False", ")", ":", "idx", "=", "self", ".", "token_index", "(", "tokens", "[", "0", "]", ")", "if", "ignore_ws", ":", "while", "tokens", "and", "tokens", "[", "...
https://github.com/freewizard/SublimeFormatSQL/blob/88771e4b77ab186a09560b495294d11c29cd878c/sqlparse/sql.py#L312-L325
hellohaptik/chatbot_ner
742104790170ae5b73c583c94db6786549337dc4
ner_v1/detectors/temporal/time/time_detection.py
python
TimeDetector._detect_restricted_24_hour_format
(self, time_list=None, original_list=None)
return time_list, original_list
Detects time in the following format format: <hour><separator><minutes><any character except digits and meridiems> where each part is in one of the formats given against them hour: hh (24 hour; 13 - 23, or 00) minute: m, mm separator: ":", ".", space Args: time_list (list): Optional, list to store dictionaries of detected time entities original_list (list): Optional, list to store corresponding substrings of given text which were detected as time entities Returns: A tuple of two lists with the first list containing the detected time entities and the second list containing their corresponding substrings in the given text.
Detects time in the following format
[ "Detects", "time", "in", "the", "following", "format" ]
def _detect_restricted_24_hour_format(self, time_list=None, original_list=None): """ Detects time in the following format format: <hour><separator><minutes><any character except digits and meridiems> where each part is in of one of the formats given against them hour: hh (24 hour; 13 - 23, or 00) minute: m, mm separator: ":", ".", space Args: time_list (list): Optional, list to store dictionaries of detected time entities original_list (list): Optional, list to store corresponding substrings of given text which were detected as time entities Returns: A tuple of two lists with first list containing the detected time entities and second list containing their corresponding substrings in the given text. """ if time_list is None: time_list = [] if original_list is None: original_list = [] patterns = re.findall(r'\D((00|1[3-9]?|2[0-3])[:.\s]([0-5][0-9]))[^am|pm|a.m|p.m|\d]', self.processed_text.lower()) for pattern in patterns: original = pattern[0] t1 = pattern[1] t2 = pattern[2] meridiem = self._get_meridiem(int(t1), int(t2)) time = { 'hh': int(t1), 'mm': int(t2), 'nn': meridiem } time_list.append(time) original_list.append(original) return time_list, original_list
[ "def", "_detect_restricted_24_hour_format", "(", "self", ",", "time_list", "=", "None", ",", "original_list", "=", "None", ")", ":", "if", "time_list", "is", "None", ":", "time_list", "=", "[", "]", "if", "original_list", "is", "None", ":", "original_list", ...
https://github.com/hellohaptik/chatbot_ner/blob/742104790170ae5b73c583c94db6786549337dc4/ner_v1/detectors/temporal/time/time_detection.py#L847-L885
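The core of the method is its regular expression; a stand-alone sketch of what it captures (findall returns one tuple of the three groups per match):

import re

pattern = r'\D((00|1[3-9]?|2[0-3])[:.\s]([0-5][0-9]))[^am|pm|a.m|p.m|\d]'
print(re.findall(pattern, ' meet at 17:45 sharp'))
# [('17:45', '17', '45')] -> hh=17, mm=45; the meridiem is derived afterwards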
har07/PySastrawi
01afc81c579bde14dcb41c33686b26af8afab121
src/Sastrawi/Morphology/Disambiguator/DisambiguatorPrefixRule20.py
python
DisambiguatorPrefixRule20.disambiguate
(self, word)
Disambiguate Prefix Rule 20 Rule 20 : pe{w|y}V -> pe-{w|y}V
Disambiguate Prefix Rule 20 Rule 20 : pe{w|y}V -> pe-{w|y}V
[ "Disambiguate", "Prefix", "Rule", "20", "Rule", "20", ":", "pe", "{", "w|y", "}", "V", "-", ">", "pe", "-", "{", "w|y", "}", "V" ]
def disambiguate(self, word): """Disambiguate Prefix Rule 20 Rule 20 : pe{w|y}V -> pe-{w|y}V """ matches = re.match(r'^pe([wy])([aiueo])(.*)$', word) if matches: return matches.group(1) + matches.group(2) + matches.group(3)
[ "def", "disambiguate", "(", "self", ",", "word", ")", ":", "matches", "=", "re", ".", "match", "(", "r'^pe([wy])([aiueo])(.*)$'", ",", "word", ")", "if", "matches", ":", "return", "matches", ".", "group", "(", "1", ")", "+", "matches", ".", "group", "(...
https://github.com/har07/PySastrawi/blob/01afc81c579bde14dcb41c33686b26af8afab121/src/Sastrawi/Morphology/Disambiguator/DisambiguatorPrefixRule20.py#L8-L14
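A behaviour sketch using the module path from the record above: Indonesian 'pewarna' ('dye', a pe-wV word) exposes the candidate root 'warna' ('colour'), while words outside the pe{w|y}V shape fall through to None:

from Sastrawi.Morphology.Disambiguator.DisambiguatorPrefixRule20 import DisambiguatorPrefixRule20

rule = DisambiguatorPrefixRule20()
print(rule.disambiguate('pewarna'))  # 'warna'
print(rule.disambiguate('penulis'))  # None -- 'n' is not w/y, the rule skips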
puremourning/vimspector
bc57b1dd14214cf3e3a476ef75e9dcb56cf0c76d
python3/vimspector/debug_session.py
python
DebugSession._StopDebugAdapter
( self, interactive = False, callback = None )
[]
def _StopDebugAdapter( self, interactive = False, callback = None ): arguments = {} def disconnect(): self._splash_screen = utils.DisplaySplash( self._api_prefix, self._splash_screen, "Shutting down debug adapter..." ) def handler( *args ): self._splash_screen = utils.HideSplash( self._api_prefix, self._splash_screen ) if callback: self._logger.debug( "Setting server exit handler before disconnect" ) assert not self._run_on_server_exit self._run_on_server_exit = callback vim.eval( 'vimspector#internal#{}#StopDebugSession()'.format( self._connection_type ) ) self._connection.DoRequest( handler, { 'command': 'disconnect', 'arguments': arguments, }, failure_handler = handler, timeout = 5000 ) if not interactive: disconnect() elif not self._server_capabilities.get( 'supportTerminateDebuggee' ): disconnect() elif not self._stackTraceView.AnyThreadsRunning(): disconnect() else: def handle_choice( choice ): if choice == 1: # yes arguments[ 'terminateDebuggee' ] = True elif choice == 2: # no arguments[ 'terminateDebuggee' ] = False elif choice <= 0: # Abort return # Else, use server default disconnect() utils.Confirm( self._api_prefix, "Terminate debuggee?", handle_choice, default_value = 3, options = [ '(Y)es', '(N)o', '(D)efault' ], keys = [ 'y', 'n', 'd' ] )
[ "def", "_StopDebugAdapter", "(", "self", ",", "interactive", "=", "False", ",", "callback", "=", "None", ")", ":", "arguments", "=", "{", "}", "def", "disconnect", "(", ")", ":", "self", ".", "_splash_screen", "=", "utils", ".", "DisplaySplash", "(", "se...
https://github.com/puremourning/vimspector/blob/bc57b1dd14214cf3e3a476ef75e9dcb56cf0c76d/python3/vimspector/debug_session.py#L1153-L1205
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/urllib/parse.py
python
SplitResult.geturl
(self)
return urlunsplit(self)
[]
def geturl(self): return urlunsplit(self)
[ "def", "geturl", "(", "self", ")", ":", "return", "urlunsplit", "(", "self", ")" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/urllib/parse.py#L245-L246
Pymol-Scripts/Pymol-script-repo
bcd7bb7812dc6db1595953dfa4471fa15fb68c77
modules/ADT/mglutil/gui/BasicWidgets/Tk/tablemaker.py
python
LUT.update_dottags
(self, ind1, ind2)
update tags of the dots when a dot or a shape added to/ (deleted from) the canvas
update tags of the dots when a dot or a shape added to/ (deleted from) the canvas
[ "update", "tags", "of", "the", "dots", "when", "a", "dot", "or", "a", "shape", "added", "to", "/", "(", "deleted", "from", ")", "the", "canvas" ]
def update_dottags(self, ind1, ind2): """update tags of the dots when a dot or a shape added to/ (deleted from) the canvas""" dot_ind =range(ind1, self.line_count) if len(dot_ind) == 0: return r_edge = None if ind2 > 0: dot_ind.reverse() if 'edge' in self.canvas.gettags('dot%d'%(dot_ind[0],)): self.canvas.itemconfigure('dot%d'%(dot_ind[0],), tags=('dot', 'dot%d'%(dot_ind[0]+ind2,),'edge')) dot_ind.pop(0) else: if 'edge' in self.canvas.gettags('dot%d'%(dot_ind[-1],)): r_edge = dot_ind.pop(-1) for i in dot_ind: self.canvas.itemconfigure('dot%d'%(i,), tags=('dot', 'dot%d'%(i+ind2,))) if r_edge: self.canvas.itemconfigure('dot%d'%(r_edge,), tags=('dot', 'dot%d'%(r_edge+ind2,),'edge'))
[ "def", "update_dottags", "(", "self", ",", "ind1", ",", "ind2", ")", ":", "dot_ind", "=", "range", "(", "ind1", ",", "self", ".", "line_count", ")", "if", "len", "(", "dot_ind", ")", "==", "0", ":", "return", "r_edge", "=", "None", "if", "ind2", ">...
https://github.com/Pymol-Scripts/Pymol-script-repo/blob/bcd7bb7812dc6db1595953dfa4471fa15fb68c77/modules/ADT/mglutil/gui/BasicWidgets/Tk/tablemaker.py#L1717-L1737
pgjones/hypercorn
633137a5ce63db73e4c03fe70a2d0268d8ccfa20
src/hypercorn/config.py
python
Config.from_toml
(cls: Type["Config"], filename: FilePath)
return cls.from_mapping(data)
Load the configuration values from a TOML formatted file. This allows configuration to be loaded like so .. code-block:: python Config.from_toml('config.toml') Arguments: filename: The filename which gives the path to the file.
Load the configuration values from a TOML formatted file.
[ "Load", "the", "configuration", "values", "from", "a", "TOML", "formatted", "file", "." ]
def from_toml(cls: Type["Config"], filename: FilePath) -> "Config": """Load the configuration values from a TOML formatted file. This allows configuration to be loaded like so .. code-block:: python Config.from_toml('config.toml') Arguments: filename: The filename which gives the path to the file. """ file_path = os.fspath(filename) with open(file_path) as file_: data = toml.load(file_) return cls.from_mapping(data)
[ "def", "from_toml", "(", "cls", ":", "Type", "[", "\"Config\"", "]", ",", "filename", ":", "FilePath", ")", "->", "\"Config\"", ":", "file_path", "=", "os", ".", "fspath", "(", "filename", ")", "with", "open", "(", "file_path", ")", "as", "file_", ":",...
https://github.com/pgjones/hypercorn/blob/633137a5ce63db73e4c03fe70a2d0268d8ccfa20/src/hypercorn/config.py#L341-L356
IntelAI/models
1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c
models/language_modeling/tensorflow/bert_large/training/bfloat16/modeling.py
python
get_assignment_map_from_checkpoint
(tvars, init_checkpoint, task="Pretraining")
return (assignment_map, initialized_variable_names)
Compute the union of the current variables and checkpoint variables.
Compute the union of the current variables and checkpoint variables.
[ "Compute", "the", "union", "of", "the", "current", "variables", "and", "checkpoint", "variables", "." ]
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, task="Pretraining"): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} map1, map2 = get_remaps(task) name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var name = apply_remaps(name, map1, map2) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) assignment_map = collections.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) if name not in name_to_variable: continue assignment_map[name] = name_to_variable[name] ivar = name_to_variable[name] initialized_variable_names[ivar.name] = 1 initialized_variable_names[ivar.name + ":0"] = 1 # Check if all model vars are loaded from Checkpoint check_model_validity(tvars, assignment_map) #for name, var in assignment_map.items(): # print(name, "--->", var) return (assignment_map, initialized_variable_names)
[ "def", "get_assignment_map_from_checkpoint", "(", "tvars", ",", "init_checkpoint", ",", "task", "=", "\"Pretraining\"", ")", ":", "assignment_map", "=", "{", "}", "initialized_variable_names", "=", "{", "}", "map1", ",", "map2", "=", "get_remaps", "(", "task", "...
https://github.com/IntelAI/models/blob/1d7a53ccfad3e6f0e7378c9e3c8840895d63df8c/models/language_modeling/tensorflow/bert_large/training/bfloat16/modeling.py#L387-L421
OpenEIT/OpenEIT
0448694e8092361ae5ccb45fba81dee543a6244b
OpenEIT/backend/bluetooth/Adafruit_BluefruitLE/interfaces/device.py
python
Device.discover
(self, service_uuids, char_uuids, timeout_sec=30)
Wait up to timeout_sec for the specified services and characteristics to be discovered on the device. If the timeout is exceeded without discovering the services and characteristics then an exception is thrown.
Wait up to timeout_sec for the specified services and characteristics to be discovered on the device. If the timeout is exceeded without discovering the services and characteristics then an exception is thrown.
[ "Wait", "up", "to", "timeout_sec", "for", "the", "specified", "services", "and", "characteristics", "to", "be", "discovered", "on", "the", "device", ".", "If", "the", "timeout", "is", "exceeded", "without", "discovering", "the", "services", "and", "characteristi...
def discover(self, service_uuids, char_uuids, timeout_sec=30): """Wait up to timeout_sec for the specified services and characteristics to be discovered on the device. If the timeout is exceeded without discovering the services and characteristics then an exception is thrown. """ raise NotImplementedError
[ "def", "discover", "(", "self", ",", "service_uuids", ",", "char_uuids", ",", "timeout_sec", "=", "30", ")", ":", "raise", "NotImplementedError" ]
https://github.com/OpenEIT/OpenEIT/blob/0448694e8092361ae5ccb45fba81dee543a6244b/OpenEIT/backend/bluetooth/Adafruit_BluefruitLE/interfaces/device.py#L49-L54
avocado-framework/avocado
1f9b3192e8ba47d029c33fe21266bd113d17811f
avocado/utils/vmimage.py
python
Image.__init__
(self, name, url, version, arch, build, checksum, algorithm, cache_dir, snapshot_dir=None)
Creates an instance of Image class. :param name: Name of image. :type name: str :param url: The url where the image can be fetched from. :type url: str :param version: Version of image. :type version: int :param arch: Architecture of the system image. :type arch: str :param build: Build of the system image. :type build: str :param checksum: Hash of the system image to match after download. :type checksum: str :param algorithm: Hash type, used when the checksum is provided. :type algorithm: str :param cache_dir: Local system path where the base images will be held. :type cache_dir: str or iterable :param snapshot_dir: Local system path where the snapshot images will be held. Defaults to cache_dir if none is given. :type snapshot_dir: str
Creates an instance of Image class.
[ "Creates", "an", "instance", "of", "Image", "class", "." ]
def __init__(self, name, url, version, arch, build, checksum, algorithm, cache_dir, snapshot_dir=None): """ Creates an instance of Image class. :param name: Name of image. :type name: str :param url: The url where the image can be fetched from. :type url: str :param version: Version of image. :type version: int :param arch: Architecture of the system image. :type arch: str :param build: Build of the system image. :type build: str :param checksum: Hash of the system image to match after download. :type checksum: str :param algorithm: Hash type, used when the checksum is provided. :type algorithm: str :param cache_dir: Local system path where the base images will be held. :type cache_dir: str or iterable :param snapshot_dir: Local system path where the snapshot images will be held. Defaults to cache_dir if none is given. :type snapshot_dir: str """ self.name = name self.url = url self.version = version self.arch = arch self.build = build self.checksum = checksum self.algorithm = algorithm self.cache_dir = cache_dir self.snapshot_dir = snapshot_dir self._path = None self._base_image = None
[ "def", "__init__", "(", "self", ",", "name", ",", "url", ",", "version", ",", "arch", ",", "build", ",", "checksum", ",", "algorithm", ",", "cache_dir", ",", "snapshot_dir", "=", "None", ")", ":", "self", ".", "name", "=", "name", "self", ".", "url",...
https://github.com/avocado-framework/avocado/blob/1f9b3192e8ba47d029c33fe21266bd113d17811f/avocado/utils/vmimage.py#L439-L475
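A hedged construction example; all values are illustrative, only the signature above is taken from the source:

from avocado.utils import vmimage

image = vmimage.Image(
    name="Fedora",
    url="https://example.org/images/fedora.qcow2",  # placeholder URL
    version=35,
    arch="x86_64",
    build="1.2",
    checksum=None,       # no hash verification in this sketch
    algorithm=None,
    cache_dir="/tmp/vmimage-cache",
)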
mozilla/pontoon
d26999eea57902a30b5c15e9b77277fe7e76a60f
pontoon/sync/vcs/repositories.py
python
get_svn_env
()
Return an environment dict for running SVN in.
Return an environment dict for running SVN in.
[ "Return", "an", "environment", "dict", "for", "running", "SVN", "in", "." ]
def get_svn_env(): """Return an environment dict for running SVN in.""" if settings.SVN_LD_LIBRARY_PATH: env = os.environ.copy() env["LD_LIBRARY_PATH"] = ( settings.SVN_LD_LIBRARY_PATH + ":" + env["LD_LIBRARY_PATH"] ) return env else: return None
[ "def", "get_svn_env", "(", ")", ":", "if", "settings", ".", "SVN_LD_LIBRARY_PATH", ":", "env", "=", "os", ".", "environ", ".", "copy", "(", ")", "env", "[", "\"LD_LIBRARY_PATH\"", "]", "=", "(", "settings", ".", "SVN_LD_LIBRARY_PATH", "+", "\":\"", "+", ...
https://github.com/mozilla/pontoon/blob/d26999eea57902a30b5c15e9b77277fe7e76a60f/pontoon/sync/vcs/repositories.py#L287-L296
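The returned mapping is meant to be handed straight to subprocess (env=None makes subprocess inherit the current environment); note the concatenation assumes LD_LIBRARY_PATH is already present in os.environ. A usage sketch, with the import path inferred from the file path above:

import subprocess
from pontoon.sync.vcs.repositories import get_svn_env

# None is a valid env for subprocess and keeps the parent environment.
subprocess.check_call(["svn", "info", "/path/to/checkout"], env=get_svn_env())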
urwid/urwid
e2423b5069f51d318ea1ac0f355a0efe5448f7eb
urwid/decoration.py
python
simplify_valign
(valign_type, valign_amount)
return valign_type
Recombine (valign_type, valign_amount) into a valign value. Inverse of normalize_valign.
Recombine (valign_type, valign_amount) into a valign value. Inverse of normalize_valign.
[ "Recombine", "(", "valign_type", "valign_amount", ")", "into", "a", "valign", "value", ".", "Inverse", "of", "normalize_valign", "." ]
def simplify_valign(valign_type, valign_amount): """ Recombine (valign_type, valign_amount) into a valign value. Inverse of normalize_valign. """ if valign_type == RELATIVE: return (valign_type, valign_amount) return valign_type
[ "def", "simplify_valign", "(", "valign_type", ",", "valign_amount", ")", ":", "if", "valign_type", "==", "RELATIVE", ":", "return", "(", "valign_type", ",", "valign_amount", ")", "return", "valign_type" ]
https://github.com/urwid/urwid/blob/e2423b5069f51d318ea1ac0f355a0efe5448f7eb/urwid/decoration.py#L991-L998
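A self-contained behavioural check; the value of urwid's RELATIVE constant is assumed here to be the string 'relative':

RELATIVE = 'relative'  # assumption about urwid's constant

def simplify_valign(valign_type, valign_amount):
    if valign_type == RELATIVE:
        return (valign_type, valign_amount)
    return valign_type

print(simplify_valign('relative', 30))  # ('relative', 30): relative alignments keep their amount
print(simplify_valign('top', None))     # 'top': the amount is dropped for absolute types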
khanhnamle1994/natural-language-processing
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
assignment1/.env/lib/python2.7/site-packages/jinja2/filters.py
python
do_sort
(environment, value, reverse=False, case_sensitive=False, attribute=None)
return sorted(value, key=sort_func, reverse=reverse)
Sort an iterable. By default it sorts ascending; if you pass it true as the first argument it will reverse the sorting. If the iterable is made of strings, the third parameter can be used to control the case sensitivity of the comparison, which is disabled by default. .. sourcecode:: jinja {% for item in iterable|sort %} ... {% endfor %} It is also possible to sort by an attribute (for example to sort by the date of an object) by specifying the `attribute` parameter: .. sourcecode:: jinja {% for item in iterable|sort(attribute='date') %} ... {% endfor %} .. versionchanged:: 2.6 The `attribute` parameter was added.
Sort an iterable. By default it sorts ascending; if you pass it true as the first argument it will reverse the sorting.
[ "Sort", "an", "iterable", ".", "By", "default", "it", "sorts", "ascending", ";", "if", "you", "pass", "it", "true", "as", "the", "first", "argument", "it", "will", "reverse", "the", "sorting", "." ]
def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None): """Sort an iterable. By default it sorts ascending; if you pass it true as the first argument it will reverse the sorting. If the iterable is made of strings, the third parameter can be used to control the case sensitivity of the comparison, which is disabled by default. .. sourcecode:: jinja {% for item in iterable|sort %} ... {% endfor %} It is also possible to sort by an attribute (for example to sort by the date of an object) by specifying the `attribute` parameter: .. sourcecode:: jinja {% for item in iterable|sort(attribute='date') %} ... {% endfor %} .. versionchanged:: 2.6 The `attribute` parameter was added. """ if not case_sensitive: def sort_func(item): if isinstance(item, string_types): item = item.lower() return item else: sort_func = None if attribute is not None: getter = make_attrgetter(environment, attribute) def sort_func(item, processor=sort_func or (lambda x: x)): return processor(getter(item)) return sorted(value, key=sort_func, reverse=reverse)
[ "def", "do_sort", "(", "environment", ",", "value", ",", "reverse", "=", "False", ",", "case_sensitive", "=", "False", ",", "attribute", "=", "None", ")", ":", "if", "not", "case_sensitive", ":", "def", "sort_func", "(", "item", ")", ":", "if", "isinstan...
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/jinja2/filters.py#L227-L265
Geovation/tiler
1de83ef55ec9da8593064b319e37afb88043d0ea
tiler/tiler-scripts/globalmaptiles.py
python
GlobalGeodetic.Resolution
(self, zoom )
return 180 / 256.0 / 2**zoom
Resolution (arc/pixel) for given zoom level (measured at Equator)
Resolution (arc/pixel) for given zoom level (measured at Equator)
[ "Resolution", "(", "arc", "/", "pixel", ")", "for", "given", "zoom", "level", "(", "measured", "at", "Equator", ")" ]
def Resolution(self, zoom ): "Resolution (arc/pixel) for given zoom level (measured at Equator)" return 180 / 256.0 / 2**zoom
[ "def", "Resolution", "(", "self", ",", "zoom", ")", ":", "return", "180", "/", "256.0", "/", "2", "**", "zoom" ]
https://github.com/Geovation/tiler/blob/1de83ef55ec9da8593064b319e37afb88043d0ea/tiler/tiler-scripts/globalmaptiles.py#L333-L336
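The arithmetic at a few zoom levels (each zoom step halves the resolution):

for zoom in (0, 1, 10):
    print(zoom, 180 / 256.0 / 2**zoom)
# 0 0.703125
# 1 0.3515625
# 10 0.0006866455078125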
bhoov/exbert
d27b6236aa51b185f7d3fed904f25cabe3baeb1a
server/transformers/src/transformers/tokenization_openai.py
python
OpenAIGPTTokenizer.save_vocabulary
(self, save_directory)
return vocab_file, merge_file
Save the vocabulary and special tokens file to a directory. Args: save_directory (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved.
Save the vocabulary and special tokens file to a directory.
[ "Save", "the", "vocabulary", "and", "special", "tokens", "file", "to", "a", "directory", "." ]
def save_vocabulary(self, save_directory): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (:obj:`str`): The directory in which to save the vocabulary. Returns: :obj:`Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"]) merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES["merges_file"]) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file
[ "def", "save_vocabulary", "(", "self", ",", "save_directory", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "save_directory", ")", ":", "logger", ".", "error", "(", "\"Vocabulary path ({}) should be a directory\"", ".", "format", "(", "save_direct...
https://github.com/bhoov/exbert/blob/d27b6236aa51b185f7d3fed904f25cabe3baeb1a/server/transformers/src/transformers/tokenization_openai.py#L202-L235
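A usage sketch matching this version's signature (model name and directory are illustrative; the target directory must already exist, per the isdir check above):

from transformers import OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
vocab_file, merge_file = tokenizer.save_vocabulary("./saved_vocab")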
bitcoin-core/HWI
6871946c2176f2f9777b6ac8f0614d96d99bfa0e
hwilib/devices/trezorlib/firmware.py
python
header_digest
(header: c.Container, hash_function: Callable = blake2s)
return hash_function(header_bytes).digest()
[]
def header_digest(header: c.Container, hash_function: Callable = blake2s) -> bytes: stripped_header = header.copy() stripped_header.sigmask = 0 stripped_header.signature = b"\0" * 64 stripped_header.v1_key_indexes = [0, 0, 0] stripped_header.v1_signatures = [b"\0" * 64] * 3 if header.magic == b"TRZV": header_type = VendorHeader else: header_type = FirmwareHeader header_bytes = header_type.build(stripped_header) return hash_function(header_bytes).digest()
[ "def", "header_digest", "(", "header", ":", "c", ".", "Container", ",", "hash_function", ":", "Callable", "=", "blake2s", ")", "->", "bytes", ":", "stripped_header", "=", "header", ".", "copy", "(", ")", "stripped_header", ".", "sigmask", "=", "0", "stripp...
https://github.com/bitcoin-core/HWI/blob/6871946c2176f2f9777b6ac8f0614d96d99bfa0e/hwilib/devices/trezorlib/firmware.py#L325-L336
aker-gateway/Aker
c9559c72ff7522e1f3fb142a31c80421acc69b41
pyte/screens.py
python
Screen.debug
(self, *args, **kwargs)
Endpoint for unrecognized escape sequences. By default it is a noop.
Endpoint for unrecognized escape sequences.
[ "Endpoint", "for", "unrecognized", "escape", "sequences", "." ]
def debug(self, *args, **kwargs): """Endpoint for unrecognized escape sequences. By default it is a noop. """
[ "def", "debug", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":" ]
https://github.com/aker-gateway/Aker/blob/c9559c72ff7522e1f3fb142a31c80421acc69b41/pyte/screens.py#L1012-L1016
espnet/espnet
ea411f3f627b8f101c211e107d0ff7053344ac80
espnet/nets/pytorch_backend/transducer/rnn_encoder.py
python
RNN.__init__
( self, idim: int, rnn_type: str, elayers: int, eunits: int, eprojs: int, dropout_rate: float, aux_output_layers: List = [], )
Initialize RNN module.
Initialize RNN module.
[ "Initialize", "RNN", "module", "." ]
def __init__( self, idim: int, rnn_type: str, elayers: int, eunits: int, eprojs: int, dropout_rate: float, aux_output_layers: List = [], ): """Initialize RNN module.""" super().__init__() bidir = rnn_type[0] == "b" for i in range(elayers): if i == 0: input_dim = idim else: input_dim = eunits rnn_layer = torch.nn.LSTM if "lstm" in rnn_type else torch.nn.GRU rnn = rnn_layer( input_dim, eunits, num_layers=1, bidirectional=bidir, batch_first=True ) setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn) self.dropout = torch.nn.Dropout(p=dropout_rate) self.elayers = elayers self.eunits = eunits self.eprojs = eprojs self.rnn_type = rnn_type self.bidir = bidir self.l_last = torch.nn.Linear(eunits, eprojs) self.aux_output_layers = aux_output_layers
[ "def", "__init__", "(", "self", ",", "idim", ":", "int", ",", "rnn_type", ":", "str", ",", "elayers", ":", "int", ",", "eunits", ":", "int", ",", "eprojs", ":", "int", ",", "dropout_rate", ":", "float", ",", "aux_output_layers", ":", "List", "=", "["...
https://github.com/espnet/espnet/blob/ea411f3f627b8f101c211e107d0ff7053344ac80/espnet/nets/pytorch_backend/transducer/rnn_encoder.py#L181-L219
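Constructing the encoder (values illustrative; import path inferred from the file path; a leading 'b' in rnn_type selects bidirectional, and 'lstm'/'gru' selects the cell):

from espnet.nets.pytorch_backend.transducer.rnn_encoder import RNN

enc = RNN(
    idim=80,           # e.g. 80-dim filterbank features
    rnn_type="blstm",  # bidirectional LSTM
    elayers=4,
    eunits=320,
    eprojs=320,
    dropout_rate=0.1,
)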
hyperspy/hyperspy
1ffb3fab33e607045a37f30c1463350b72617e10
hyperspy/misc/ipython_tools.py
python
get_ipython
()
return ip
Get the global InteractiveShell instance. Returns None if no InteractiveShell instance is registered.
Get the global InteractiveShell instance.
[ "Get", "the", "global", "InteractiveShell", "instance", "." ]
def get_ipython(): """Get the global InteractiveShell instance. Returns None if no InteractiveShell instance is registered. """ if is_it_running_from_ipython is False: return None import IPython ipy_version = Version(IPython.__version__) if ipy_version < Version("0.11"): from IPython import ipapi ip = ipapi.get() elif ipy_version < Version("1.0"): from IPython.core import ipapi ip = ipapi.get() else: ip = IPython.get_ipython() return ip
[ "def", "get_ipython", "(", ")", ":", "if", "is_it_running_from_ipython", "is", "False", ":", "return", "None", "import", "IPython", "ipy_version", "=", "Version", "(", "IPython", ".", "__version__", ")", "if", "ipy_version", "<", "Version", "(", "\"0.11\"", ")...
https://github.com/hyperspy/hyperspy/blob/1ffb3fab33e607045a37f30c1463350b72617e10/hyperspy/misc/ipython_tools.py#L26-L43
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1beta1_certificate_signing_request_spec.py
python
V1beta1CertificateSigningRequestSpec.__repr__
(self)
return self.to_str()
For `print` and `pprint`
For `print` and `pprint`
[ "For", "print", "and", "pprint" ]
def __repr__(self): """For `print` and `pprint`""" return self.to_str()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "to_str", "(", ")" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1beta1_certificate_signing_request_spec.py#L278-L280
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/plex/media_player.py
python
PlexMediaPlayer.media_series_title
(self)
return self.session.media_series_title
Return the title of the series of current playing media.
Return the title of the series of current playing media.
[ "Return", "the", "title", "of", "the", "series", "of", "current", "playing", "media", "." ]
def media_series_title(self): """Return the title of the series of current playing media.""" return self.session.media_series_title
[ "def", "media_series_title", "(", "self", ")", ":", "return", "self", ".", "session", ".", "media_series_title" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/plex/media_player.py#L377-L379
puremourning/vimspector
bc57b1dd14214cf3e3a476ef75e9dcb56cf0c76d
python3/vimspector/vendor/cpuinfo.py
python
_get_cpu_info_from_sysinfo_v1
()
Returns the CPU info gathered from sysinfo. Returns {} if sysinfo is not found.
Returns the CPU info gathered from sysinfo. Returns {} if sysinfo is not found.
[ "Returns", "the", "CPU", "info", "gathered", "from", "sysinfo", ".", "Returns", "{}", "if", "sysinfo", "is", "not", "found", "." ]
def _get_cpu_info_from_sysinfo_v1(): ''' Returns the CPU info gathered from sysinfo. Returns {} if sysinfo is not found. ''' g_trace.header('Tying to get info from sysinfo version 1 ...') try: # Just return {} if there is no sysinfo if not DataSource.has_sysinfo(): g_trace.fail('Failed to find sysinfo. Skipping ...') return {} # If sysinfo fails return {} returncode, output = DataSource.sysinfo_cpu() if output == None or returncode != 0: g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...') return {} # Various fields vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ') processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip() cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size') stepping = int(output.split(', stepping ')[1].split(',')[0].strip()) model = int(output.split(', model ')[1].split(',')[0].strip()) family = int(output.split(', family ')[1].split(',')[0].strip()) # Flags flags = [] for line in output.split('\n'): if line.startswith('\t\t'): for flag in line.strip().lower().split(): flags.append(flag) flags.sort() # Convert from GHz/MHz string to Hz hz_advertised, scale = _parse_cpu_brand_string(processor_brand) hz_actual = hz_advertised info = { 'vendor_id_raw' : vendor_id, 'brand_raw' : processor_brand, 'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale), 'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale), 'hz_advertised' : _hz_short_to_full(hz_advertised, scale), 'hz_actual' : _hz_short_to_full(hz_actual, scale), 'l2_cache_size' : _to_friendly_bytes(cache_size), 'stepping' : stepping, 'model' : model, 'family' : family, 'flags' : flags } info = _filter_dict_keys_with_empty_values(info) g_trace.success() return info except Exception as err: g_trace.fail(err) #raise # NOTE: To have this throw on error, uncomment this line return {}
[ "def", "_get_cpu_info_from_sysinfo_v1", "(", ")", ":", "g_trace", ".", "header", "(", "'Tying to get info from sysinfo version 1 ...'", ")", "try", ":", "# Just return {} if there is no sysinfo", "if", "not", "DataSource", ".", "has_sysinfo", "(", ")", ":", "g_trace", "...
https://github.com/puremourning/vimspector/blob/bc57b1dd14214cf3e3a476ef75e9dcb56cf0c76d/python3/vimspector/vendor/cpuinfo.py#L2199-L2262
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/physics/quantum/state.py
python
StateBase._pretty_brackets
(self, height, use_unicode=True)
return brackets
[]
def _pretty_brackets(self, height, use_unicode=True): # Return pretty printed brackets for the state # Ideally, this could be done by pform.parens but it does not support the angled < and > # Setup for unicode vs ascii if use_unicode: lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode slash, bslash, vert = u('\u2571'), u('\u2572'), u('\u2502') else: lbracket, rbracket = self.lbracket, self.rbracket slash, bslash, vert = '/', '\\', '|' # If height is 1, just return brackets if height == 1: return stringPict(lbracket), stringPict(rbracket) # Make height even height += (height % 2) brackets = [] for bracket in lbracket, rbracket: # Create left bracket if bracket in set([_lbracket, _lbracket_ucode]): bracket_args = [ ' ' * (height//2 - i - 1) + slash for i in range(height // 2)] bracket_args.extend( [ ' ' * i + bslash for i in range(height // 2)]) # Create right bracket elif bracket in set([_rbracket, _rbracket_ucode]): bracket_args = [ ' ' * i + bslash for i in range(height // 2)] bracket_args.extend([ ' ' * ( height//2 - i - 1) + slash for i in range(height // 2)]) # Create straight bracket elif bracket in set([_straight_bracket, _straight_bracket_ucode]): bracket_args = [vert for i in range(height)] else: raise ValueError(bracket) brackets.append( stringPict('\n'.join(bracket_args), baseline=height//2)) return brackets
[ "def", "_pretty_brackets", "(", "self", ",", "height", ",", "use_unicode", "=", "True", ")", ":", "# Return pretty printed brackets for the state", "# Ideally, this could be done by pform.parens but it does not support the angled < and >", "# Setup for unicode vs ascii", "if", "use_u...
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/physics/quantum/state.py#L129-L167
OpenMDAO/OpenMDAO-Framework
f2e37b7de3edeaaeb2d251b375917adec059db9b
openmdao.lib/src/openmdao/lib/drivers/doedriver.py
python
DOEdriver.execute
(self)
Generate and evaluate cases.
Generate and evaluate cases.
[ "Generate", "and", "evaluate", "cases", "." ]
def execute(self): """Generate and evaluate cases.""" self.set_inputs(self._get_cases()) self._csv_file = None try: super(DOEdriver, self).execute() finally: if self._csv_file is not None: self._csv_file.close()
[ "def", "execute", "(", "self", ")", ":", "self", ".", "set_inputs", "(", "self", ".", "_get_cases", "(", ")", ")", "self", ".", "_csv_file", "=", "None", "try", ":", "super", "(", "DOEdriver", ",", "self", ")", ".", "execute", "(", ")", "finally", ...
https://github.com/OpenMDAO/OpenMDAO-Framework/blob/f2e37b7de3edeaaeb2d251b375917adec059db9b/openmdao.lib/src/openmdao/lib/drivers/doedriver.py#L83-L91
bbc/brave
88d4454412ee5acfa5ecf2ac5bc8cf75766c7be5
brave/overlays/overlay.py
python
Overlay.summarise
(self, for_config_file=False)
return s
[]
def summarise(self, for_config_file=False): s = super().summarise(for_config_file) s['source'] = self.source.uid if self.source else None return s
[ "def", "summarise", "(", "self", ",", "for_config_file", "=", "False", ")", ":", "s", "=", "super", "(", ")", ".", "summarise", "(", "for_config_file", ")", "s", "[", "'source'", "]", "=", "self", ".", "source", ".", "uid", "if", "self", ".", "source...
https://github.com/bbc/brave/blob/88d4454412ee5acfa5ecf2ac5bc8cf75766c7be5/brave/overlays/overlay.py#L27-L30
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/urllib3/util/retry.py
python
Retry.from_int
(cls, retries, redirect=True, default=None)
return new_retries
Backwards-compatibility for the old retries format.
Backwards-compatibility for the old retries format.
[ "Backwards", "-", "compatibility", "for", "the", "old", "retries", "format", "." ]
def from_int(cls, retries, redirect=True, default=None): """ Backwards-compatibility for the old retries format.""" if retries is None: retries = default if default is not None else cls.DEFAULT if isinstance(retries, Retry): return retries redirect = bool(redirect) and None new_retries = cls(retries, redirect=redirect) log.debug("Converted retries value: %r -> %r", retries, new_retries) return new_retries
[ "def", "from_int", "(", "cls", ",", "retries", ",", "redirect", "=", "True", ",", "default", "=", "None", ")", ":", "if", "retries", "is", "None", ":", "retries", "=", "default", "if", "default", "is", "not", "None", "else", "cls", ".", "DEFAULT", "i...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/urllib3/util/retry.py#L200-L211
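This is what lets urllib3 accept a plain integer (or None) wherever a Retry is expected:

from urllib3.util.retry import Retry

Retry.from_int(3)               # equivalent to Retry(total=3)
Retry.from_int(None)            # falls back to Retry.DEFAULT
Retry.from_int(Retry(total=5))  # an existing Retry passes through unchanged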
googlefonts/fontmake
d09c860a0093392c4960c9a97d18cddb87fdcc83
Lib/fontmake/ttfautohint.py
python
ttfautohint
(in_file, out_file, args=None, **kwargs)
Thin wrapper around the ttfautohint command line tool. Can take in command line arguments directly as a string, or spelled out as Python keyword arguments.
Thin wrapper around the ttfautohint command line tool.
[ "Thin", "wrapper", "around", "the", "ttfautohint", "command", "line", "tool", "." ]
def ttfautohint(in_file, out_file, args=None, **kwargs): """Thin wrapper around the ttfautohint command line tool. Can take in command line arguments directly as a string, or spelled out as Python keyword arguments. """ arg_list = ["ttfautohint"] file_args = [in_file, out_file] if args is not None: if kwargs: raise TypeError("Should not provide both cmd args and kwargs.") try: rv = subprocess.call(arg_list + args.split() + file_args) except OSError as e: raise FontmakeError( "Could not launch ttfautohint (is it installed?)", in_file ) from e if rv != 0: raise TTFAError(rv, in_file) return boolean_options = ( "debug", "composites", "dehint", "help", "ignore_restrictions", "detailed_info", "no_info", "adjust_subglyphs", "symbol", "ttfa_table", "verbose", "version", "windows_compatibility", ) other_options = ( "default_script", "fallback_script", "family_suffix", "hinting_limit", "fallback_stem_width", "hinting_range_min", "control_file", "hinting_range_max", "strong_stem_width", "increase_x_height", "x_height_snapping_exceptions", ) for option in boolean_options: if kwargs.pop(option, False): arg_list.append("--" + option.replace("_", "-")) for option in other_options: arg = kwargs.pop(option, None) if arg is not None: arg_list.append("--{}={}".format(option.replace("_", "-"), arg)) if kwargs: raise TypeError("Unexpected argument(s): " + ", ".join(kwargs.keys())) rv = subprocess.call(arg_list + file_args) if rv != 0: raise TTFAError(rv, in_file)
[ "def", "ttfautohint", "(", "in_file", ",", "out_file", ",", "args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "arg_list", "=", "[", "\"ttfautohint\"", "]", "file_args", "=", "[", "in_file", ",", "out_file", "]", "if", "args", "is", "not", "None",...
https://github.com/googlefonts/fontmake/blob/d09c860a0093392c4960c9a97d18cddb87fdcc83/Lib/fontmake/ttfautohint.py#L21-L87
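The two calling styles from the docstring (file names are placeholders; mixing args with kwargs raises TypeError, per the code above):

from fontmake.ttfautohint import ttfautohint

# as a raw command-line string ...
ttfautohint("MyFont.ttf", "MyFont-hinted.ttf", args="--hinting-range-max 48")
# ... or as keyword arguments, which become --hinting-range-max=48
ttfautohint("MyFont.ttf", "MyFont-hinted.ttf", hinting_range_max=48)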
GDQuest/blender-power-sequencer
0af17abcc1f7c049bcbe48550d428561ad6f80a0
operators/gap_remove.py
python
POWER_SEQUENCER_OT_gap_remove.find_gap_frame
(self, context, frame, sorted_sequences)
return gap_frame
Finds and returns the frame at which the gap starts. Takes a list of sequences sorted by frame_final_start.
Finds and returns the frame at which the gap starts. Takes a list of sequences sorted by frame_final_start.
[ "Finds", "and", "returns", "the", "frame", "at", "which", "the", "gap", "starts", ".", "Takes", "a", "list", "of", "sequences", "sorted", "by", "frame_final_start", "." ]
def find_gap_frame(self, context, frame, sorted_sequences): """ Finds and returns the frame at which the gap starts. Takes a list of sequences sorted by frame_final_start. """ strips_start = min(sorted_sequences, key=attrgetter("frame_final_start")).frame_final_start strips_end = max(sorted_sequences, key=attrgetter("frame_final_end")).frame_final_end gap_frame = -1 if strips_start > frame: strips_before_frame_start = [s for s in context.sequences if s.frame_final_end <= frame] frame_target = 0 if strips_before_frame_start: frame_target = max( strips_before_frame_start, key=attrgetter("frame_final_end") ).frame_final_end gap_frame = frame_target if frame_target < strips_start else frame else: gap_frame = strips_end return gap_frame
[ "def", "find_gap_frame", "(", "self", ",", "context", ",", "frame", ",", "sorted_sequences", ")", ":", "strips_start", "=", "min", "(", "sorted_sequences", ",", "key", "=", "attrgetter", "(", "\"frame_final_start\"", ")", ")", ".", "frame_final_start", "strips_e...
https://github.com/GDQuest/blender-power-sequencer/blob/0af17abcc1f7c049bcbe48550d428561ad6f80a0/operators/gap_remove.py#L96-L115
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/pydoc.py
python
Doc.getdocloc
(self, object)
return docloc
Return the location of module docs or None
Return the location of module docs or None
[ "Return", "the", "location", "of", "module", "docs", "or", "None" ]
def getdocloc(self, object): """Return the location of module docs or None""" try: file = inspect.getabsfile(object) except TypeError: file = '(built-in)' docloc = os.environ.get("PYTHONDOCS", "http://docs.python.org/library") basedir = os.path.join(sys.exec_prefix, "lib", "python"+sys.version[0:3]) if (isinstance(object, type(os)) and (object.__name__ in ('errno', 'exceptions', 'gc', 'imp', 'marshal', 'posix', 'signal', 'sys', 'thread', 'zipimport') or (file.startswith(basedir) and not file.startswith(os.path.join(basedir, 'site-packages')))) and object.__name__ not in ('xml.etree', 'test.pydoc_mod')): if docloc.startswith("http://"): docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__) else: docloc = os.path.join(docloc, object.__name__ + ".html") else: docloc = None return docloc
[ "def", "getdocloc", "(", "self", ",", "object", ")", ":", "try", ":", "file", "=", "inspect", ".", "getabsfile", "(", "object", ")", "except", "TypeError", ":", "file", "=", "'(built-in)'", "docloc", "=", "os", ".", "environ", ".", "get", "(", "\"PYTHO...
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/pydoc.py#L345-L370
Stiivi/bubbles
b3c9332b8a9534655bd77821586e45a5086b1ddc
bubbles/backends/sql/objects.py
python
default_store
(url=None, connectable=None, schema=None)
return store
Gets a default store for a connectable or URL. If the store does not exist, one is created and added to the shared default store pool.
Gets a default store for a connectable or URL. If the store does not exist, one is created and added to the shared default store pool.
[ "Gets", "a", "default", "store", "for", "a", "connectable", "or", "URL", ".", "If", "the", "store", "does", "not", "exist", "one", "is", "created", "and", "added", "to", "the", "shared", "default", "store", "pool", "." ]
def default_store(url=None, connectable=None, schema=None): """Gets a default store for a connectable or URL. If the store does not exist, one is created and added to the shared default store pool.""" if url and connectable: raise ArgumentError("Only one of URL or connectable should be " \ "specified, not both") if url: try: store = _default_stores[url] except KeyError: store = SQLDataStore(url=url, schema=schema) _default_stores[url] = store _default_stores[store.connectable] = store else: try: store = _default_stores[connectable] except KeyError: store = SQLDataStore(connectable=connectable) _default_stores[store.connectable] = store return store
[ "def", "default_store", "(", "url", "=", "None", ",", "connectable", "=", "None", ",", "schema", "=", "None", ")", ":", "if", "url", "and", "connectable", ":", "raise", "ArgumentError", "(", "\"Only one of URL or connectable should be \"", "\"specified, not both\"",...
https://github.com/Stiivi/bubbles/blob/b3c9332b8a9534655bd77821586e45a5086b1ddc/bubbles/backends/sql/objects.py#L154-L176
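A sketch of the pooling behaviour (URL illustrative; import path inferred from the file path):

from bubbles.backends.sql.objects import default_store

store_a = default_store(url="sqlite:///data.sqlite")
store_b = default_store(url="sqlite:///data.sqlite")
assert store_a is store_b  # second call is served from the shared pool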
python-acoustics/python-acoustics
af72e7f88003f0bba06934ea38c98e8993c4a6c6
acoustics/building.py
python
stc_curve
(tl)
return top_curve
Calculate the Sound Transmission Class (STC) curve from a NumPy array `tl` with third octave data between 125 Hz and 4 kHz. :param tl: Transmission Loss
Calculate the Sound Transmission Class (STC) curve from a NumPy array `tl` with third octave data between 125 Hz and 4 kHz.
[ "Calculate", "the", "Sound", "Transmission", "Class", "(", "STC", ")", "curve", "from", "a", "NumPy", "array", "tl", "with", "third", "octave", "data", "between", "125", "Hz", "and", "4", "kHz", "." ]
def stc_curve(tl): """ Calculate the Sound Transmission Class (STC) curve from a NumPy array `tl` with third octave data between 125 Hz and 4 kHz. :param tl: Transmission Loss """ ref_curve = np.array([0, 3, 6, 9, 12, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20]) top_curve = ref_curve res_sum = 0 while True: diff = tl - top_curve residuals = np.clip(diff, np.min(diff), 0) res_sum = np.sum(residuals) if res_sum < -32: if np.any(residuals > -8): top_curve -= 1 break top_curve += 1 return top_curve
[ "def", "stc_curve", "(", "tl", ")", ":", "ref_curve", "=", "np", ".", "array", "(", "[", "0", ",", "3", ",", "6", ",", "9", ",", "12", ",", "15", ",", "16", ",", "17", ",", "18", ",", "19", ",", "20", ",", "20", ",", "20", ",", "20", ",...
https://github.com/python-acoustics/python-acoustics/blob/af72e7f88003f0bba06934ea38c98e8993c4a6c6/acoustics/building.py#L63-L82
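Calling it with 16 made-up third-octave TL values (125 Hz to 4 kHz); the fitted contour has the same 16-band layout, and the STC rating is conventionally read off the contour at 500 Hz (index 6 here):

import numpy as np
from acoustics.building import stc_curve

tl = np.array([18, 21, 25, 28, 31, 34, 36, 38, 40, 41, 42, 43, 44, 45, 45, 45])
curve = stc_curve(tl)  # shifted reference contour fitted to the measured TL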
CouchPotato/CouchPotatoV1
135b3331d1b88ef645e29b76f2d4cc4a732c9232
library/sqlalchemy/engine/base.py
python
ResultProxy.last_inserted_params
(self)
return self.context.last_inserted_params()
Return ``last_inserted_params()`` from the underlying ExecutionContext. See ExecutionContext for details.
Return ``last_inserted_params()`` from the underlying ExecutionContext.
[ "Return", "last_inserted_params", "()", "from", "the", "underlying", "ExecutionContext", "." ]
def last_inserted_params(self): """Return ``last_inserted_params()`` from the underlying ExecutionContext. See ExecutionContext for details. """ return self.context.last_inserted_params()
[ "def", "last_inserted_params", "(", "self", ")", ":", "return", "self", ".", "context", ".", "last_inserted_params", "(", ")" ]
https://github.com/CouchPotato/CouchPotatoV1/blob/135b3331d1b88ef645e29b76f2d4cc4a732c9232/library/sqlalchemy/engine/base.py#L2404-L2411
serverdensity/sd-agent
66f0031b6be369c28e69414eb6172b5685a5110e
checks/libs/wmi/counter_type.py
python
calculate_perf_counter_counter
(previous, current, property_name)
return (n1 - n0) / ((d1 - d0) / f)
PERF_COUNTER_COUNTER https://technet.microsoft.com/en-us/library/cc740048(v=ws.10).aspx
PERF_COUNTER_COUNTER
[ "PERF_COUNTER_COUNTER" ]
def calculate_perf_counter_counter(previous, current, property_name): """ PERF_COUNTER_COUNTER https://technet.microsoft.com/en-us/library/cc740048(v=ws.10).aspx """ n0 = previous[property_name] n1 = current[property_name] d0 = previous["Timestamp_Sys100NS"] d1 = current["Timestamp_Sys100NS"] f = current["Frequency_Sys100NS"] if n0 is None or n1 is None: return return (n1 - n0) / ((d1 - d0) / f)
[ "def", "calculate_perf_counter_counter", "(", "previous", ",", "current", ",", "property_name", ")", ":", "n0", "=", "previous", "[", "property_name", "]", "n1", "=", "current", "[", "property_name", "]", "d0", "=", "previous", "[", "\"Timestamp_Sys100NS\"", "]"...
https://github.com/serverdensity/sd-agent/blob/66f0031b6be369c28e69414eb6172b5685a5110e/checks/libs/wmi/counter_type.py#L123-L138
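A worked example (property name and samples are made up): Timestamp_Sys100NS counts 100 ns ticks, so dividing the tick delta by Frequency_Sys100NS yields elapsed seconds:

previous = {"RequestsTotal": 1000, "Timestamp_Sys100NS": 0}
current = {
    "RequestsTotal": 1500,
    "Timestamp_Sys100NS": 10**7,  # one second later
    "Frequency_Sys100NS": 10**7,
}
# (1500 - 1000) / ((10**7 - 0) / 10**7) = 500.0 requests per second
rate = calculate_perf_counter_counter(previous, current, "RequestsTotal")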
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/PyAMF-0.6.1/pyamf/amf3.py
python
DataOutput.writeBoolean
(self, value)
Writes a Boolean value. @type value: C{bool} @param value: A C{Boolean} value determining which byte is written. If the parameter is C{True}, C{1} is written; if C{False}, C{0} is written. @raise ValueError: Non-boolean value found.
Writes a Boolean value.
[ "Writes", "a", "Boolean", "value", "." ]
def writeBoolean(self, value): """ Writes a Boolean value. @type value: C{bool} @param value: A C{Boolean} value determining which byte is written. If the parameter is C{True}, C{1} is written; if C{False}, C{0} is written. @raise ValueError: Non-boolean value found. """ if not isinstance(value, bool): raise ValueError("Non-boolean value found") if value is True: self.stream.write_uchar(1) else: self.stream.write_uchar(0)
[ "def", "writeBoolean", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "ValueError", "(", "\"Non-boolean value found\"", ")", "if", "value", "is", "True", ":", "self", ".", "stream", ".", "wri...
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/PyAMF-0.6.1/pyamf/amf3.py#L173-L190
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/multiprocessing/synchronize.py
python
SemLock._make_name
()
return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand))
[]
def _make_name(): return '%s-%s' % (process.current_process()._config['semprefix'], next(SemLock._rand))
[ "def", "_make_name", "(", ")", ":", "return", "'%s-%s'", "%", "(", "process", ".", "current_process", "(", ")", ".", "_config", "[", "'semprefix'", "]", ",", "next", "(", "SemLock", ".", "_rand", ")", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/multiprocessing/synchronize.py#L115-L117
PaddlePaddle/Research
2da0bd6c72d60e9df403aff23a7802779561c4a1
ST_DM/GenRegion/src/generate/gen/regionalg.py
python
Region.__get_linkss
(self)
return ret
Desc: Decompose the region into link lists Args: regions : list of region Return: list of list of Link, links are all the links in a region Raise: None
Desc: Decompose the region into link lists Args: regions : list of region Return: list of list of Link, links are all the links in a region Raise: None
[ "Desc", ":", "Decompose", "the", "region", "into", "link", "lists", "Args", ":", "regions", ":", "list", "of", "region", "Return", ":", "list", "of", "list", "of", "Link", "links", "are", "all", "the", "links", "in", "a", "region", "Raise", ":", "None" ]
def __get_linkss(self): """ Desc: Decompose the region into link lists Args: regions : list of region Return: list of list of Link, links are all the links in a region Raise: None """ links = self.__trans_points_to_links(self.points) ret = [links] for hole in self.holes: links = self.__trans_points_to_links(hole) ret.append(links) return ret
[ "def", "__get_linkss", "(", "self", ")", ":", "links", "=", "self", ".", "__trans_points_to_links", "(", "self", ".", "points", ")", "ret", "=", "[", "links", "]", "for", "hole", "in", "self", ".", "holes", ":", "links", "=", "self", ".", "__trans_poin...
https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/ST_DM/GenRegion/src/generate/gen/regionalg.py#L277-L292
lululxvi/deepxde
730c97282636e86c845ce2ba3253482f2178469e
deepxde/model.py
python
Model._outputs_losses
(self, training, inputs, targets, auxiliary_vars)
return utils.to_numpy(outs)
[]
def _outputs_losses(self, training, inputs, targets, auxiliary_vars): if backend_name == "tensorflow.compat.v1": feed_dict = self.net.feed_dict(training, inputs, targets, auxiliary_vars) return self.sess.run(self.outputs_losses, feed_dict=feed_dict) if backend_name == "tensorflow": outs = self.outputs_losses(training, inputs, targets, auxiliary_vars) elif backend_name == "pytorch": # TODO: auxiliary_vars self.net.requires_grad_(requires_grad=False) outs = self.outputs_losses(training, inputs, targets) self.net.requires_grad_() return utils.to_numpy(outs)
[ "def", "_outputs_losses", "(", "self", ",", "training", ",", "inputs", ",", "targets", ",", "auxiliary_vars", ")", ":", "if", "backend_name", "==", "\"tensorflow.compat.v1\"", ":", "feed_dict", "=", "self", ".", "net", ".", "feed_dict", "(", "training", ",", ...
https://github.com/lululxvi/deepxde/blob/730c97282636e86c845ce2ba3253482f2178469e/deepxde/model.py#L275-L286
riffnshred/nhl-led-scoreboard
14baa7f0691ca507e4c6f7f2ec02e50ccd1ed9e1
src/boards/team_summary.py
python
TeamSummary.__init__
(self, data, matrix,sleepEvent)
TODO: Need to move the Previous/Next game info in the data section. I think loading it in the data section and then taking that info here would make sense
TODO: Need to move the Previous/Next game info in the data section. I think loading it in the data section and then taking that info here would make sense
[ "TODO", ":", "Need", "to", "move", "the", "Previous", "/", "Next", "game", "info", "in", "the", "data", "section", ".", "I", "think", "loading", "it", "in", "the", "data", "section", "and", "then", "taking", "that", "info", "here", "would", "make", "se...
def __init__(self, data, matrix,sleepEvent): ''' TODO: Need to move the Previous/Next game info in the data section. I think loading it in the data section and then taking that info here would make sense ''' self.data = data self.teams_info = data.teams_info self.preferred_teams = data.pref_teams self.matrix = matrix self.team_colors = data.config.team_colors self.font = data.config.layout.font self.layout = data.config.config.layout.get_board_layout('team_summary') self.sleepEvent = sleepEvent self.sleepEvent.clear()
[ "def", "__init__", "(", "self", ",", "data", ",", "matrix", ",", "sleepEvent", ")", ":", "self", ".", "data", "=", "data", "self", ".", "teams_info", "=", "data", ".", "teams_info", "self", ".", "preferred_teams", "=", "data", ".", "pref_teams", "self", ...
https://github.com/riffnshred/nhl-led-scoreboard/blob/14baa7f0691ca507e4c6f7f2ec02e50ccd1ed9e1/src/boards/team_summary.py#L15-L31
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/_pydecimal.py
python
Context.subtract
(self, a, b)
Return the difference between the two operands. >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')) Decimal('0.23') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30')) Decimal('0.00') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07')) Decimal('-0.77') >>> ExtendedContext.subtract(8, 5) Decimal('3') >>> ExtendedContext.subtract(Decimal(8), 5) Decimal('3') >>> ExtendedContext.subtract(8, Decimal(5)) Decimal('3')
Return the difference between the two operands.
[ "Return", "the", "difference", "between", "the", "two", "operands", "." ]
def subtract(self, a, b): """Return the difference between the two operands. >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')) Decimal('0.23') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30')) Decimal('0.00') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07')) Decimal('-0.77') >>> ExtendedContext.subtract(8, 5) Decimal('3') >>> ExtendedContext.subtract(Decimal(8), 5) Decimal('3') >>> ExtendedContext.subtract(8, Decimal(5)) Decimal('3') """ a = _convert_other(a, raiseit=True) r = a.__sub__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r
[ "def", "subtract", "(", "self", ",", "a", ",", "b", ")", ":", "a", "=", "_convert_other", "(", "a", ",", "raiseit", "=", "True", ")", "r", "=", "a", ".", "__sub__", "(", "b", ",", "context", "=", "self", ")", "if", "r", "is", "NotImplemented", ...
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/_pydecimal.py#L5489-L5510
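The stdlib decimal module exposes the same API, so the doctests can be reproduced directly (a second operand that cannot be converted raises TypeError, as in the code above):

from decimal import Decimal, ExtendedContext

print(ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')))  # 0.23
print(ExtendedContext.subtract(8, 5))                             # 3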
robcarver17/pysystemtrade
b0385705b7135c52d39cb6d2400feece881bcca9
sysexecution/orders/broker_orders.py
python
brokerOrder.log_with_attributes
(self, log)
return new_log
Returns a new log object with broker_order attributes added :param log: logger :return: log
Returns a new log object with broker_order attributes added
[ "Returns", "a", "new", "log", "object", "with", "broker_order", "attributes", "added" ]
def log_with_attributes(self, log): """ Returns a new log object with broker_order attributes added :param log: logger :return: log """ broker_order = self new_log = log.setup( strategy_name=broker_order.strategy_name, instrument_code=broker_order.instrument_code, contract_order_id=object_to_none(broker_order.parent, no_parent), broker_order_id=object_to_none(broker_order.order_id, no_order_id), ) return new_log
[ "def", "log_with_attributes", "(", "self", ",", "log", ")", ":", "broker_order", "=", "self", "new_log", "=", "log", ".", "setup", "(", "strategy_name", "=", "broker_order", ".", "strategy_name", ",", "instrument_code", "=", "broker_order", ".", "instrument_code...
https://github.com/robcarver17/pysystemtrade/blob/b0385705b7135c52d39cb6d2400feece881bcca9/sysexecution/orders/broker_orders.py#L313-L328