nwo
stringlengths
5
106
sha
stringlengths
40
40
path
stringlengths
4
174
language
stringclasses
1 value
identifier
stringlengths
1
140
parameters
stringlengths
0
87.7k
argument_list
stringclasses
1 value
return_statement
stringlengths
0
426k
docstring
stringlengths
0
64.3k
docstring_summary
stringlengths
0
26.3k
docstring_tokens
list
function
stringlengths
18
4.83M
function_tokens
list
url
stringlengths
83
304
yongzhuo/Keras-TextClassification
640e3f44f90d9d8046546f7e1a93a29ebe5c8d30
keras_textclassification/m03_CharCNN/graph_yoon_kim.py
python
highway_keras
(x)
return high_way
[]
def highway_keras(x): # writter by my own # paper; Highway Network(http://arxiv.org/abs/1505.00387). # 公式 # 1. s = sigmoid(Wx + b) # 2. z = s * relu(Wx + b) + (1 - s) * x # x shape : [N * time_depth, sum(filters)] # Table 1. CIFAR-10 test set accuracy of convolutional highway networks with # rectified linear activation and sigmoid gates. # For comparison, results reported by Romero et al. (2014) # using maxout networks are also shown. # Fitnets were trained using a two step training procedure using soft targets from the trained Teacher network, # which was trained using backpropagation. We trained all highway networks directly using backpropagation. # * indicates networks which were trained only on a set of 40K out of 50K examples in the training set. # Figure 2. Visualization of certain internals of the blocks in the best 50 hidden layer highway networks trained on MNIST # (top row) and CIFAR-100 (bottom row). The first hidden layer is a plain layer which changes the dimensionality of the representation to 50. Each of # the 49 highway layers (y-axis) consists of 50 blocks (x-axis). # The first column shows the transform gate biases, which were initialized to -2 and -4 respectively. # In the second column the mean output of the transform gate over 10,000 training examples is depicted. # The third and forth columns show the output of the transform gates and # the block outputs for a single random training sample. gate_transform = Dense(units=K.int_shape(x)[1], activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', bias_initializer=keras.initializers.Constant(value=-2))(x) gate_cross = 1 - gate_transform block_state = Dense(units=K.int_shape(x)[1], activation='relu', use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zero')(x) high_way = gate_transform * block_state + gate_cross * x return high_way
[ "def", "highway_keras", "(", "x", ")", ":", "# writter by my own", "# paper; Highway Network(http://arxiv.org/abs/1505.00387).", "# 公式", "# 1. s = sigmoid(Wx + b)", "# 2. z = s * relu(Wx + b) + (1 - s) * x", "# x shape : [N * time_depth, sum(filters)]", "# Table 1. CIFAR-10 test set accuracy...
https://github.com/yongzhuo/Keras-TextClassification/blob/640e3f44f90d9d8046546f7e1a93a29ebe5c8d30/keras_textclassification/m03_CharCNN/graph_yoon_kim.py#L90-L129
tensorflow/models
6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3
research/seq_flow_lite/models/sgnn/sgnn.py
python
sgnn
(texts, hash_seed, ngram_size)
return tf.cast(tf.concat(projection_layer, -1), tf.float32)
Projects the string text to float features. It first generasts N ngrams of the tokens from given text, then projects each ngram tensor with a partion of the seeds. Args: texts: a string tensor, in shape of [batch_size]. hash_seed: a list of integers, in shape of [projection_size]. ngram_size: max size of ngram to generate features. Returns: A float tensor that projects ngrams to the space represented by hash_seed, in shape of [batch_size, projection_size].
Projects the string text to float features.
[ "Projects", "the", "string", "text", "to", "float", "features", "." ]
def sgnn(texts, hash_seed, ngram_size): """Projects the string text to float features. It first generasts N ngrams of the tokens from given text, then projects each ngram tensor with a partion of the seeds. Args: texts: a string tensor, in shape of [batch_size]. hash_seed: a list of integers, in shape of [projection_size]. ngram_size: max size of ngram to generate features. Returns: A float tensor that projects ngrams to the space represented by hash_seed, in shape of [batch_size, projection_size]. """ projection_size = len(hash_seed) partition_size = int(projection_size / ((ngram_size + 1) * ngram_size / 2)) if partition_size == 0: raise ValueError( 'projection size %d is not enough for %d ngram partitions' % (projection_size, ngram_size)) indices = [int(i * (i + 1) / 2) * partition_size for i in range(ngram_size)] indices.append(projection_size) projection_layer = [] tokens = preprocess(texts) for i in range(ngram_size): ngram = get_ngrams(tokens, i + 1) projection = fused_project(ngram, hash_seed[indices[i]:indices[i + 1]], 0x7FFFFFFF) projection_layer.append(projection) return tf.cast(tf.concat(projection_layer, -1), tf.float32)
[ "def", "sgnn", "(", "texts", ",", "hash_seed", ",", "ngram_size", ")", ":", "projection_size", "=", "len", "(", "hash_seed", ")", "partition_size", "=", "int", "(", "projection_size", "/", "(", "(", "ngram_size", "+", "1", ")", "*", "ngram_size", "/", "2...
https://github.com/tensorflow/models/blob/6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3/research/seq_flow_lite/models/sgnn/sgnn.py#L146-L178
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
static/paddlex/cv/transforms/seg_transforms.py
python
ResizeByShort.__init__
(self, short_size=800, max_size=1333)
[]
def __init__(self, short_size=800, max_size=1333): self.max_size = int(max_size) if not isinstance(short_size, int): raise TypeError( "Type of short_size is invalid. Must be Integer, now is {}". format(type(short_size))) self.short_size = short_size if not (isinstance(self.max_size, int)): raise TypeError("max_size: input type is invalid.")
[ "def", "__init__", "(", "self", ",", "short_size", "=", "800", ",", "max_size", "=", "1333", ")", ":", "self", ".", "max_size", "=", "int", "(", "max_size", ")", "if", "not", "isinstance", "(", "short_size", ",", "int", ")", ":", "raise", "TypeError", ...
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/static/paddlex/cv/transforms/seg_transforms.py#L445-L453
F8LEFT/DecLLVM
d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c
python/idaapi.py
python
udt_member_t.set_baseclass
(self, *args)
return _idaapi.udt_member_t_set_baseclass(self, *args)
set_baseclass(self)
set_baseclass(self)
[ "set_baseclass", "(", "self", ")" ]
def set_baseclass(self, *args): """ set_baseclass(self) """ return _idaapi.udt_member_t_set_baseclass(self, *args)
[ "def", "set_baseclass", "(", "self", ",", "*", "args", ")", ":", "return", "_idaapi", ".", "udt_member_t_set_baseclass", "(", "self", ",", "*", "args", ")" ]
https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idaapi.py#L31515-L31519
Hrabal/TemPy
03b9f22239118f322ba66084b15090133405ae9c
tempy/widgets/tempytable.py
python
TempyTable.pop_row
(self, idr=None, tags=False)
return row if tags else [cell.childs[0] for cell in row]
Pops a row, default the last
Pops a row, default the last
[ "Pops", "a", "row", "default", "the", "last" ]
def pop_row(self, idr=None, tags=False): """Pops a row, default the last""" idr = idr if idr is not None else len(self.body) - 1 row = self.body.pop(idr) return row if tags else [cell.childs[0] for cell in row]
[ "def", "pop_row", "(", "self", ",", "idr", "=", "None", ",", "tags", "=", "False", ")", ":", "idr", "=", "idr", "if", "idr", "is", "not", "None", "else", "len", "(", "self", ".", "body", ")", "-", "1", "row", "=", "self", ".", "body", ".", "p...
https://github.com/Hrabal/TemPy/blob/03b9f22239118f322ba66084b15090133405ae9c/tempy/widgets/tempytable.py#L106-L110
IronLanguages/ironpython3
7a7bb2a872eeab0d1009fc8a6e24dca43f65b693
Src/StdLib/Lib/encodings/cp865.py
python
IncrementalDecoder.decode
(self, input, final=False)
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
[]
def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0]
[ "def", "decode", "(", "self", ",", "input", ",", "final", "=", "False", ")", ":", "return", "codecs", ".", "charmap_decode", "(", "input", ",", "self", ".", "errors", ",", "decoding_table", ")", "[", "0", "]" ]
https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/encodings/cp865.py#L22-L23
cantools/cantools
8d86d61bc010f328cf414150331fecfd4b6f4dc3
cantools/database/can/formats/arxml.py
python
SystemLoader._get_arxml_children
(self, base_elems, children_location)
return base_elems
Locate a set of ElementTree child nodes at a given location. This is a method that retrieves a list of ElementTree nodes that match a given ARXML location. An ARXML location is a list of strings that specify the nesting order of the XML tag names; potential references for entries are preceeded by an '&': If a sub-element exhibits the specified name, it is used directly and if there is a sub-node called '{child_tag_name}-REF', it is assumed to contain an ARXML reference. This reference is then resolved and the remaining location specification is relative to the result of that resolution. If a location atom is preceeded by '*', then multiple sub-elements are possible. The '&' and '*' qualifiers may be combined. Example: .. code:: text # Return all frame triggerings in any physical channel of a # CAN cluster, where each conditional, each the physical # channel and its individual frame triggerings can be # references loader._get_arxml_children(can_cluster, [ 'CAN-CLUSTER-VARIANTS', '*&CAN-CLUSTER-CONDITIONAL', 'PHYSICAL-CHANNELS', '*&CAN-PHYSICAL-CHANNEL', 'FRAME-TRIGGERINGS', '*&CAN-FRAME-TRIGGERING' ])
Locate a set of ElementTree child nodes at a given location.
[ "Locate", "a", "set", "of", "ElementTree", "child", "nodes", "at", "a", "given", "location", "." ]
def _get_arxml_children(self, base_elems, children_location): """Locate a set of ElementTree child nodes at a given location. This is a method that retrieves a list of ElementTree nodes that match a given ARXML location. An ARXML location is a list of strings that specify the nesting order of the XML tag names; potential references for entries are preceeded by an '&': If a sub-element exhibits the specified name, it is used directly and if there is a sub-node called '{child_tag_name}-REF', it is assumed to contain an ARXML reference. This reference is then resolved and the remaining location specification is relative to the result of that resolution. If a location atom is preceeded by '*', then multiple sub-elements are possible. The '&' and '*' qualifiers may be combined. Example: .. code:: text # Return all frame triggerings in any physical channel of a # CAN cluster, where each conditional, each the physical # channel and its individual frame triggerings can be # references loader._get_arxml_children(can_cluster, [ 'CAN-CLUSTER-VARIANTS', '*&CAN-CLUSTER-CONDITIONAL', 'PHYSICAL-CHANNELS', '*&CAN-PHYSICAL-CHANNEL', 'FRAME-TRIGGERINGS', '*&CAN-FRAME-TRIGGERING' ]) """ if base_elems is None: raise ValueError( 'Cannot retrieve a child element of a non-existing node!') # make sure that the children_location is a list. for convenience we # also allow it to be a string. In this case we take it that a # direct child node needs to be found. if isinstance(children_location, str): children_location = [ children_location ] # make sure that the base elements are iterable. for # convenience we also allow it to be an individiual node. if type(base_elems).__name__ == 'Element': base_elems = [base_elems] for child_tag_name in children_location: if len(base_elems) == 0: return [] # the base elements left are the empty set... 
# handle the set and reference specifiers of the current # sub-location allow_references = '&' in child_tag_name[:2] is_nodeset = '*' in child_tag_name[:2] if allow_references: child_tag_name = child_tag_name[1:] if is_nodeset: child_tag_name = child_tag_name[1:] # traverse the specified path one level deeper result = [] for base_elem in base_elems: local_result = [] for child_elem in base_elem: ctt = f'{{{self.xml_namespace}}}{child_tag_name}' cttr = f'{{{self.xml_namespace}}}{child_tag_name}-REF' if child_elem.tag == ctt: local_result.append(child_elem) elif child_elem.tag == cttr: tmp = self._follow_arxml_reference( base_elem=base_elem, arxml_path=child_elem.text, dest_tag_name=child_elem.attrib.get('DEST'), refbase_name=child_elem.attrib.get('BASE')) if tmp is None: raise ValueError(f'Encountered dangling reference ' f'{child_tag_name}-REF: ' f'{child_elem.text}') local_result.append(tmp) if not is_nodeset and len(local_result) > 1: raise ValueError(f'Encountered a a non-unique child node ' f'of type {child_tag_name} which ought to ' f'be unique') result.extend(local_result) base_elems = result return base_elems
[ "def", "_get_arxml_children", "(", "self", ",", "base_elems", ",", "children_location", ")", ":", "if", "base_elems", "is", "None", ":", "raise", "ValueError", "(", "'Cannot retrieve a child element of a non-existing node!'", ")", "# make sure that the children_location is a ...
https://github.com/cantools/cantools/blob/8d86d61bc010f328cf414150331fecfd4b6f4dc3/cantools/database/can/formats/arxml.py#L1560-L1662
datafolklabs/cement
2d44d2c1821bda6bdfcfe605d244dc2dfb0b19a6
cement/cli/contrib/jinja2/debug.py
python
make_traceback
(exc_info, source_hint=None)
return translate_exception(exc_info, initial_skip)
Creates a processed traceback object from the exc_info.
Creates a processed traceback object from the exc_info.
[ "Creates", "a", "processed", "traceback", "object", "from", "the", "exc_info", "." ]
def make_traceback(exc_info, source_hint=None): """Creates a processed traceback object from the exc_info.""" exc_type, exc_value, tb = exc_info if isinstance(exc_value, TemplateSyntaxError): exc_info = translate_syntax_error(exc_value, source_hint) initial_skip = 0 else: initial_skip = 1 return translate_exception(exc_info, initial_skip)
[ "def", "make_traceback", "(", "exc_info", ",", "source_hint", "=", "None", ")", ":", "exc_type", ",", "exc_value", ",", "tb", "=", "exc_info", "if", "isinstance", "(", "exc_value", ",", "TemplateSyntaxError", ")", ":", "exc_info", "=", "translate_syntax_error", ...
https://github.com/datafolklabs/cement/blob/2d44d2c1821bda6bdfcfe605d244dc2dfb0b19a6/cement/cli/contrib/jinja2/debug.py#L132-L140
Matheus-Garbelini/sweyntooth_bluetooth_low_energy_attacks
40c985b9a9ff1189ddf278462440b120cf96b196
libs/scapy/contrib/isotp.py
python
ISOTPSocketImplementation.check_recv
(self)
return not self.rx_queue.empty()
Implementation for SelectableObject
Implementation for SelectableObject
[ "Implementation", "for", "SelectableObject" ]
def check_recv(self): """Implementation for SelectableObject""" return not self.rx_queue.empty()
[ "def", "check_recv", "(", "self", ")", ":", "return", "not", "self", ".", "rx_queue", ".", "empty", "(", ")" ]
https://github.com/Matheus-Garbelini/sweyntooth_bluetooth_low_energy_attacks/blob/40c985b9a9ff1189ddf278462440b120cf96b196/libs/scapy/contrib/isotp.py#L1506-L1508
implus/PytorchInsight
2864528f8b83f52c3df76f7c3804aa468b91e5cf
classification/models/imagenet/resnet_se.py
python
Bottleneck.__init__
(self, inplanes, planes, stride=1, downsample=None)
[]
def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = conv1x1(inplanes, planes) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = conv3x3(planes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = conv1x1(planes, planes * self.expansion) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.se = SELayer(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride
[ "def", "__init__", "(", "self", ",", "inplanes", ",", "planes", ",", "stride", "=", "1", ",", "downsample", "=", "None", ")", ":", "super", "(", "Bottleneck", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "conv1", "=", "conv1x1", "(", "...
https://github.com/implus/PytorchInsight/blob/2864528f8b83f52c3df76f7c3804aa468b91e5cf/classification/models/imagenet/resnet_se.py#L70-L81
wwqgtxx/wwqLyParse
33136508e52821babd9294fdecffbdf02d73a6fc
wwqLyParse/lib/flask_lib/werkzeug/wsgi.py
python
get_current_url
(environ, root_only=False, strip_querystring=False, host_only=False, trusted_hosts=None)
return uri_to_iri(''.join(tmp))
A handy helper function that recreates the full URL as IRI for the current request or parts of it. Here's an example: >>> from werkzeug.test import create_environ >>> env = create_environ("/?param=foo", "http://localhost/script") >>> get_current_url(env) 'http://localhost/script/?param=foo' >>> get_current_url(env, root_only=True) 'http://localhost/script/' >>> get_current_url(env, host_only=True) 'http://localhost/' >>> get_current_url(env, strip_querystring=True) 'http://localhost/script/' This optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. Note that the string returned might contain unicode characters as the representation is an IRI not an URI. If you need an ASCII only representation you can use the :func:`~werkzeug.urls.iri_to_uri` function: >>> from werkzeug.urls import iri_to_uri >>> iri_to_uri(get_current_url(env)) 'http://localhost/script/?param=foo' :param environ: the WSGI environment to get the current URL from. :param root_only: set `True` if you only want the root URL. :param strip_querystring: set to `True` if you don't want the querystring. :param host_only: set to `True` if the host URL should be returned. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information.
A handy helper function that recreates the full URL as IRI for the current request or parts of it. Here's an example:
[ "A", "handy", "helper", "function", "that", "recreates", "the", "full", "URL", "as", "IRI", "for", "the", "current", "request", "or", "parts", "of", "it", ".", "Here", "s", "an", "example", ":" ]
def get_current_url(environ, root_only=False, strip_querystring=False, host_only=False, trusted_hosts=None): """A handy helper function that recreates the full URL as IRI for the current request or parts of it. Here's an example: >>> from werkzeug.test import create_environ >>> env = create_environ("/?param=foo", "http://localhost/script") >>> get_current_url(env) 'http://localhost/script/?param=foo' >>> get_current_url(env, root_only=True) 'http://localhost/script/' >>> get_current_url(env, host_only=True) 'http://localhost/' >>> get_current_url(env, strip_querystring=True) 'http://localhost/script/' This optionally it verifies that the host is in a list of trusted hosts. If the host is not in there it will raise a :exc:`~werkzeug.exceptions.SecurityError`. Note that the string returned might contain unicode characters as the representation is an IRI not an URI. If you need an ASCII only representation you can use the :func:`~werkzeug.urls.iri_to_uri` function: >>> from werkzeug.urls import iri_to_uri >>> iri_to_uri(get_current_url(env)) 'http://localhost/script/?param=foo' :param environ: the WSGI environment to get the current URL from. :param root_only: set `True` if you only want the root URL. :param strip_querystring: set to `True` if you don't want the querystring. :param host_only: set to `True` if the host URL should be returned. :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` for more information. """ tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)] cat = tmp.append if host_only: return uri_to_iri(''.join(tmp) + '/') cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/')) cat('/') if not root_only: cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/'))) if not strip_querystring: qs = get_query_string(environ) if qs: cat('?' + qs) return uri_to_iri(''.join(tmp))
[ "def", "get_current_url", "(", "environ", ",", "root_only", "=", "False", ",", "strip_querystring", "=", "False", ",", "host_only", "=", "False", ",", "trusted_hosts", "=", "None", ")", ":", "tmp", "=", "[", "environ", "[", "'wsgi.url_scheme'", "]", ",", "...
https://github.com/wwqgtxx/wwqLyParse/blob/33136508e52821babd9294fdecffbdf02d73a6fc/wwqLyParse/lib/flask_lib/werkzeug/wsgi.py#L51-L99
awslabs/gluon-ts
066ec3b7f47aa4ee4c061a28f35db7edbad05a98
src/gluonts/nursery/sagemaker_sdk/model.py
python
GluonTSPredictor.__init__
( self, endpoint_name: str, sagemaker_session: session.Session = None )
Initialize an ``GluonTSPredictor``. Parameters ---------- endpoint_name: The name of the endpoint to perform inference on. sagemaker_session : Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain.
Initialize an ``GluonTSPredictor``.
[ "Initialize", "an", "GluonTSPredictor", "." ]
def __init__( self, endpoint_name: str, sagemaker_session: session.Session = None ): """Initialize an ``GluonTSPredictor``. Parameters ---------- endpoint_name: The name of the endpoint to perform inference on. sagemaker_session : Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain. """ # TODO: implement custom data serializer and deserializer: convert between gluonts dataset and bytes # Use the default functions from MXNet (they handle more than we need # (e.g: np.ndarrays), but that should be fine) super(GluonTSPredictor, self).__init__( endpoint_name, sagemaker_session, json_serializer, # change this json_deserializer, # change this )
[ "def", "__init__", "(", "self", ",", "endpoint_name", ":", "str", ",", "sagemaker_session", ":", "session", ".", "Session", "=", "None", ")", ":", "# TODO: implement custom data serializer and deserializer: convert between gluonts dataset and bytes", "# Use the default function...
https://github.com/awslabs/gluon-ts/blob/066ec3b7f47aa4ee4c061a28f35db7edbad05a98/src/gluonts/nursery/sagemaker_sdk/model.py#L40-L63
mdiazcl/fuzzbunch-debian
2b76c2249ade83a389ae3badb12a1bd09901fd2c
windows/Resources/Python/Core/Lib/lib-tk/ttk.py
python
Treeview.selection
(self, selop=None, items=None)
return self.tk.call(self._w, 'selection', selop, items)
If selop is not specified, returns selected items.
If selop is not specified, returns selected items.
[ "If", "selop", "is", "not", "specified", "returns", "selected", "items", "." ]
def selection(self, selop=None, items=None): """If selop is not specified, returns selected items.""" return self.tk.call(self._w, 'selection', selop, items)
[ "def", "selection", "(", "self", ",", "selop", "=", "None", ",", "items", "=", "None", ")", ":", "return", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'selection'", ",", "selop", ",", "items", ")" ]
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/lib-tk/ttk.py#L1264-L1266
ifwe/digsby
f5fe00244744aa131e07f09348d10563f3d8fa99
digsby/src/gui/infobox/emailpanels.py
python
EmailList.OnMouseOut
(self, event)
Unselects the email and hide links on mouse out of list.
Unselects the email and hide links on mouse out of list.
[ "Unselects", "the", "email", "and", "hide", "links", "on", "mouse", "out", "of", "list", "." ]
def OnMouseOut(self, event): 'Unselects the email and hide links on mouse out of list.' if not self.linkage.Rect.Contains(event.Position) and self.Selection != -1: self.RefreshLine(self.Selection) self.Selection = -1 self.linkage.Show(False)
[ "def", "OnMouseOut", "(", "self", ",", "event", ")", ":", "if", "not", "self", ".", "linkage", ".", "Rect", ".", "Contains", "(", "event", ".", "Position", ")", "and", "self", ".", "Selection", "!=", "-", "1", ":", "self", ".", "RefreshLine", "(", ...
https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/src/gui/infobox/emailpanels.py#L306-L312
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/logging/__init__.py
python
Handler.createLock
(self)
Acquire a thread lock for serializing access to the underlying I/O.
Acquire a thread lock for serializing access to the underlying I/O.
[ "Acquire", "a", "thread", "lock", "for", "serializing", "access", "to", "the", "underlying", "I", "/", "O", "." ]
def createLock(self): """ Acquire a thread lock for serializing access to the underlying I/O. """ if thread: self.lock = threading.RLock() else: self.lock = None
[ "def", "createLock", "(", "self", ")", ":", "if", "thread", ":", "self", ".", "lock", "=", "threading", ".", "RLock", "(", ")", "else", ":", "self", ".", "lock", "=", "None" ]
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/logging/__init__.py#L705-L712
bbc/brave
88d4454412ee5acfa5ecf2ac5bc8cf75766c7be5
brave/outputs/file.py
python
FileOutput.set_pipeline_state
(self, new_state)
return super().set_pipeline_state(new_state)
[]
def set_pipeline_state(self, new_state): sent_eos = False # If this is ending the file creation (identified by moving to READY or NULL) # we must send an EOS so that the file is completed correctly. if (new_state == Gst.State.READY or new_state == Gst.State.NULL): for encoder_name in ['video_encoder', 'audio_encoder']: if hasattr(self, encoder_name): encoder = getattr(self, encoder_name) encoder_state = encoder.get_state(0).state if encoder_state in [Gst.State.PAUSED, Gst.State.PLAYING]: if encoder.send_event(Gst.Event.new_eos()): self.logger.debug('Successfully send EOS event to the ' + encoder_name) sent_eos = True else: self.logger.warning('Failed to send EOS event to the %s' % encoder_name) # If we've sent an EOS, allow that to propogate the pipeline. # (Separate code will then catch the EOS successful message and cause a state change.) # Otherwise, lets go ahead and set the state of the pipeline. if sent_eos: return return super().set_pipeline_state(new_state)
[ "def", "set_pipeline_state", "(", "self", ",", "new_state", ")", ":", "sent_eos", "=", "False", "# If this is ending the file creation (identified by moving to READY or NULL)", "# we must send an EOS so that the file is completed correctly.", "if", "(", "new_state", "==", "Gst", ...
https://github.com/bbc/brave/blob/88d4454412ee5acfa5ecf2ac5bc8cf75766c7be5/brave/outputs/file.py#L53-L76
artefactual/archivematica
4f4605453d5a8796f6a739fa9664921bdb3418f2
src/dashboard/src/fpr/utils.py
python
determine_what_replaces_model_instance
(model, instance)
return replaces
Determine what object, if any, will be replaced by creating a new revision.
Determine what object, if any, will be replaced by creating a new revision.
[ "Determine", "what", "object", "if", "any", "will", "be", "replaced", "by", "creating", "a", "new", "revision", "." ]
def determine_what_replaces_model_instance(model, instance): """Determine what object, if any, will be replaced by creating a new revision.""" if instance: # if replacing the latest version or base on old version if instance.enabled: replaces = model.objects.get(pk=instance.pk) else: replaces = get_current_revision_using_ancestor(model, instance.uuid) else: replaces = None return replaces
[ "def", "determine_what_replaces_model_instance", "(", "model", ",", "instance", ")", ":", "if", "instance", ":", "# if replacing the latest version or base on old version", "if", "instance", ".", "enabled", ":", "replaces", "=", "model", ".", "objects", ".", "get", "(...
https://github.com/artefactual/archivematica/blob/4f4605453d5a8796f6a739fa9664921bdb3418f2/src/dashboard/src/fpr/utils.py#L65-L77
proycon/pynlpl
7707f69a91caaa6cde037f0d0379f1d42500a68b
pynlpl/formats/folia.py
python
AbstractElement.setparents
(self)
Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`
Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`
[ "Correct", "all", "parent", "relations", "for", "elements", "within", "the", "scop", ".", "There", "is", "sually", "no", "need", "to", "call", "this", "directly", "invoked", "implicitly", "by", ":", "meth", ":", "copy" ]
def setparents(self): """Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`""" for c in self: if isinstance(c, AbstractElement): c.parent = self c.setparents()
[ "def", "setparents", "(", "self", ")", ":", "for", "c", "in", "self", ":", "if", "isinstance", "(", "c", ",", "AbstractElement", ")", ":", "c", ".", "parent", "=", "self", "c", ".", "setparents", "(", ")" ]
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1273-L1278
materialsproject/pymatgen
8128f3062a334a2edd240e4062b5b9bdd1ae6f58
pymatgen/io/vasp/outputs.py
python
VolumetricData.write_file
(self, file_name, vasp4_compatible=False)
Write the VolumetricData object to a vasp compatible file. Args: file_name (str): Path to a file vasp4_compatible (bool): True if the format is vasp4 compatible
Write the VolumetricData object to a vasp compatible file.
[ "Write", "the", "VolumetricData", "object", "to", "a", "vasp", "compatible", "file", "." ]
def write_file(self, file_name, vasp4_compatible=False): """ Write the VolumetricData object to a vasp compatible file. Args: file_name (str): Path to a file vasp4_compatible (bool): True if the format is vasp4 compatible """ def _print_fortran_float(f): """ Fortran codes print floats with a leading zero in scientific notation. When writing CHGCAR files, we adopt this convention to ensure written CHGCAR files are byte-to-byte identical to their input files as far as possible. :param f: float :return: str """ s = f"{f:.10E}" if f >= 0: return "0." + s[0] + s[2:12] + "E" + f"{int(s[13:]) + 1:+03}" return "-." + s[1] + s[3:13] + "E" + f"{int(s[14:]) + 1:+03}" with zopen(file_name, "wt") as f: p = Poscar(self.structure) # use original name if it's been set (e.g. from Chgcar) comment = getattr(self, "name", p.comment) lines = comment + "\n" lines += " 1.00000000000000\n" latt = self.structure.lattice.matrix lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :]) lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :]) lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :]) if not vasp4_compatible: lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n" lines += "".join(["%6d" % x for x in p.natoms]) + "\n" lines += "Direct\n" for site in self.structure: lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords) lines += " \n" f.write(lines) a = self.dim def write_spin(data_type): lines = [] count = 0 f.write(f" {a[0]} {a[1]} {a[2]}\n") for (k, j, i) in itertools.product(list(range(a[2])), list(range(a[1])), list(range(a[0]))): lines.append(_print_fortran_float(self.data[data_type][i, j, k])) count += 1 if count % 5 == 0: f.write(" " + "".join(lines) + "\n") lines = [] else: lines.append(" ") if count % 5 != 0: f.write(" " + "".join(lines) + " \n") f.write("".join(self.data_aug.get(data_type, []))) write_spin("total") if self.is_spin_polarized and self.is_soc: write_spin("diff_x") write_spin("diff_y") write_spin("diff_z") elif self.is_spin_polarized: write_spin("diff")
[ "def", "write_file", "(", "self", ",", "file_name", ",", "vasp4_compatible", "=", "False", ")", ":", "def", "_print_fortran_float", "(", "f", ")", ":", "\"\"\"\n Fortran codes print floats with a leading zero in scientific\n notation. When writing CHGCAR fil...
https://github.com/materialsproject/pymatgen/blob/8128f3062a334a2edd240e4062b5b9bdd1ae6f58/pymatgen/io/vasp/outputs.py#L3618-L3685
sametmax/Django--an-app-at-a-time
99eddf12ead76e6dfbeb09ce0bae61e282e22f8a
ignore_this_directory/django/views/generic/list.py
python
MultipleObjectMixin.get_context_data
(self, *, object_list=None, **kwargs)
return super().get_context_data(**context)
Get the context for this view.
Get the context for this view.
[ "Get", "the", "context", "for", "this", "view", "." ]
def get_context_data(self, *, object_list=None, **kwargs):
    """Get the context for this view."""
    queryset = object_list if object_list is not None else self.object_list
    page_size = self.get_paginate_by(queryset)
    context_object_name = self.get_context_object_name(queryset)
    if page_size:
        paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
    else:
        paginator, page, is_paginated = None, None, False
    context = {
        'paginator': paginator,
        'page_obj': page,
        'is_paginated': is_paginated,
        'object_list': queryset,
    }
    if context_object_name is not None:
        context[context_object_name] = queryset
    context.update(kwargs)
    return super().get_context_data(**context)
[ "def", "get_context_data", "(", "self", ",", "*", ",", "object_list", "=", "None", ",", "*", "*", "kwargs", ")", ":", "queryset", "=", "object_list", "if", "object_list", "is", "not", "None", "else", "self", ".", "object_list", "page_size", "=", "self", ...
https://github.com/sametmax/Django--an-app-at-a-time/blob/99eddf12ead76e6dfbeb09ce0bae61e282e22f8a/ignore_this_directory/django/views/generic/list.py#L113-L136
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_service.py
python
OCService.update
(self)
return self._replace_content(self.kind, self.config.name, self.user_svc.yaml_dict)
create a service
create a service
[ "create", "a", "service" ]
def update(self):
    '''create a service '''
    # Propagate the cluster-internal IPs from the live service into the
    # user definition so the replace call does not clobber them.
    live = self.service
    self.user_svc.add_cluster_ip(live.get('spec.clusterIP'))
    self.user_svc.add_portal_ip(live.get('spec.portalIP'))
    return self._replace_content(self.kind, self.config.name, self.user_svc.yaml_dict)
[ "def", "update", "(", "self", ")", ":", "# Need to copy over the portalIP and the serviceIP settings", "self", ".", "user_svc", ".", "add_cluster_ip", "(", "self", ".", "service", ".", "get", "(", "'spec.clusterIP'", ")", ")", "self", ".", "user_svc", ".", "add_po...
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_service.py#L1794-L1800
Sarcasm/compdb
62b4c0f6daa0654256a0ae9036d59cd5026f280f
compdb/backend/json.py
python
JSONCompilationDatabase.probe_directory
(cls, directory)
return super(JSONCompilationDatabase, cls).probe_directory(directory)
Automatically create a CompilationDatabase from build directory.
Automatically create a CompilationDatabase from build directory.
[ "Automatically", "create", "a", "CompilationDatabase", "from", "build", "directory", "." ]
def probe_directory(cls, directory):
    """Automatically create a CompilationDatabase from build directory."""
    candidate = os.path.join(directory, 'compile_commands.json')
    if not os.path.exists(candidate):
        # No JSON database here; let the base class try other strategies.
        return super(JSONCompilationDatabase, cls).probe_directory(directory)
    return cls(candidate)
[ "def", "probe_directory", "(", "cls", ",", "directory", ")", ":", "db_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'compile_commands.json'", ")", "if", "os", ".", "path", ".", "exists", "(", "db_path", ")", ":", "return", "cls", ...
https://github.com/Sarcasm/compdb/blob/62b4c0f6daa0654256a0ae9036d59cd5026f280f/compdb/backend/json.py#L19-L24
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/difflib.py
python
unified_diff
(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n')
r""" Compare two sequences of lines; generate the delta as a unified diff. Unified diffs are a compact way of showing line changes and a few lines of context. The number of context lines is set by 'n' which defaults to three. By default, the diff control lines (those with ---, +++, or @@) are created with a trailing newline. This is helpful so that inputs created from file.readlines() result in diffs that are suitable for file.writelines() since both the inputs and outputs have trailing newlines. For inputs that do not have trailing newlines, set the lineterm argument to "" so that the output will be uniformly newline free. The unidiff format normally has a header for filenames and modification times. Any or all of these may be specified using strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification times are normally expressed in the ISO 8601 format. Example: >>> for line in unified_diff('one two three four'.split(), ... 'zero one tree four'.split(), 'Original', 'Current', ... '2005-01-26 23:30:50', '2010-04-02 10:20:52', ... lineterm=''): ... print(line) # doctest: +NORMALIZE_WHITESPACE --- Original 2005-01-26 23:30:50 +++ Current 2010-04-02 10:20:52 @@ -1,4 +1,4 @@ +zero one -two -three +tree four
r""" Compare two sequences of lines; generate the delta as a unified diff.
[ "r", "Compare", "two", "sequences", "of", "lines", ";", "generate", "the", "delta", "as", "a", "unified", "diff", "." ]
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline, matching inputs created from
    file.readlines().  For inputs without trailing newlines, set
    'lineterm' to "" so the output is uniformly newline free.

    The unidiff format normally has a header for filenames and
    modification times; any or all may be specified via 'fromfile',
    'tofile', 'fromfiledate', and 'tofiledate' (ISO 8601 recommended).

    Example:

    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print(line)                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """
    header_pending = True
    matcher = SequenceMatcher(None, a, b)
    for group in matcher.get_grouped_opcodes(n):
        if header_pending:
            # The file header is written once, just before the first hunk.
            header_pending = False
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '+++ {}{}{}'.format(tofile, todate, lineterm)
        first_op, last_op = group[0], group[-1]
        range1 = _format_range_unified(first_op[1], last_op[2])
        range2 = _format_range_unified(first_op[3], last_op[4])
        yield '@@ -{} +{} @@{}'.format(range1, range2, lineterm)
        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
            else:
                if tag in ('replace', 'delete'):
                    for line in a[i1:i2]:
                        yield '-' + line
                if tag in ('replace', 'insert'):
                    for line in b[j1:j2]:
                        yield '+' + line
[ "def", "unified_diff", "(", "a", ",", "b", ",", "fromfile", "=", "''", ",", "tofile", "=", "''", ",", "fromfiledate", "=", "''", ",", "tofiledate", "=", "''", ",", "n", "=", "3", ",", "lineterm", "=", "'\\n'", ")", ":", "started", "=", "False", "...
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/difflib.py#L1140-L1205
BillBillBillBill/Tickeys-linux
2df31b8665004c58a5d4ab05277f245267d96364
tickeys/kivy/core/image/__init__.py
python
ImageData.iterate_mipmaps
(self)
Iterate over all mipmap images available. .. versionadded:: 1.0.7
Iterate over all mipmap images available.
[ "Iterate", "over", "all", "mipmap", "images", "available", "." ]
def iterate_mipmaps(self):
    '''Iterate over all mipmap images available.

    .. versionadded:: 1.0.7
    '''
    mipmaps = self.mipmaps
    for level in range(len(mipmaps)):
        entry = mipmaps.get(level, None)
        if entry is None:
            # Levels must be contiguous from 0; a gap means corrupt data.
            raise Exception('Invalid mipmap level, found empty one')
        yield level, entry[0], entry[1], entry[2], entry[3]
[ "def", "iterate_mipmaps", "(", "self", ")", ":", "mm", "=", "self", ".", "mipmaps", "for", "x", "in", "range", "(", "len", "(", "mm", ")", ")", ":", "item", "=", "mm", ".", "get", "(", "x", ",", "None", ")", "if", "item", "is", "None", ":", "...
https://github.com/BillBillBillBill/Tickeys-linux/blob/2df31b8665004c58a5d4ab05277f245267d96364/tickeys/kivy/core/image/__init__.py#L168-L178
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/setuptools/command/install.py
python
install._called_from_setup
(run_frame)
return ( caller_module == 'distutils.dist' and info.function == 'run_commands' )
Attempt to detect whether run() was called from setup() or by another command. If called by setup(), the parent caller will be the 'run_command' method in 'distutils.dist', and *its* caller will be the 'run_commands' method. If called any other way, the immediate caller *might* be 'run_command', but it won't have been called by 'run_commands'. Return True in that case or if a call stack is unavailable. Return False otherwise.
Attempt to detect whether run() was called from setup() or by another command. If called by setup(), the parent caller will be the 'run_command' method in 'distutils.dist', and *its* caller will be the 'run_commands' method. If called any other way, the immediate caller *might* be 'run_command', but it won't have been called by 'run_commands'. Return True in that case or if a call stack is unavailable. Return False otherwise.
[ "Attempt", "to", "detect", "whether", "run", "()", "was", "called", "from", "setup", "()", "or", "by", "another", "command", ".", "If", "called", "by", "setup", "()", "the", "parent", "caller", "will", "be", "the", "run_command", "method", "in", "distutils...
def _called_from_setup(run_frame):
    """
    Attempt to detect whether run() was called from setup() or by another
    command.  If called by setup(), the parent caller will be the
    'run_command' method in 'distutils.dist', and *its* caller will be
    the 'run_commands' method.  If called any other way, the
    immediate caller *might* be 'run_command', but it won't have been
    called by 'run_commands'. Return True in that case or if a call
    stack is unavailable. Return False otherwise.
    """
    if run_frame is None:
        # No frame objects (e.g. IronPython without -X:Frames): assume the
        # setup() path so installation can proceed, but warn the user.
        msg = "Call stack not available. bdist_* commands may fail."
        warnings.warn(msg)
        if platform.python_implementation() == 'IronPython':
            msg = "For best results, pass -X:Frames to enable call stack."
            warnings.warn(msg)
        return True
    # Index [2] is two frames above run(); this offset is sensitive to the
    # exact call layout described in the docstring.
    res = inspect.getouterframes(run_frame)[2]
    caller, = res[:1]
    info = inspect.getframeinfo(caller)
    caller_module = caller.f_globals.get('__name__', '')
    return (
        caller_module == 'distutils.dist'
        and info.function == 'run_commands'
    )
[ "def", "_called_from_setup", "(", "run_frame", ")", ":", "if", "run_frame", "is", "None", ":", "msg", "=", "\"Call stack not available. bdist_* commands may fail.\"", "warnings", ".", "warn", "(", "msg", ")", "if", "platform", ".", "python_implementation", "(", ")",...
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/setuptools/command/install.py#L70-L94
SFDO-Tooling/CumulusCI
825ae1f122b25dc41761c52a4ddfa1938d2a4b6e
cumulusci/core/config/project_config.py
python
BaseProjectConfig.construct_subproject_config
(self, **kwargs)
return self.__class__( self.universal_config_obj, included_sources=self.included_sources, **kwargs )
Construct another project config for an external source
Construct another project config for an external source
[ "Construct", "another", "project", "config", "for", "an", "external", "source" ]
def construct_subproject_config(self, **kwargs):
    """Construct another project config for an external source"""
    # Use the runtime class so subclasses produce instances of themselves,
    # sharing the universal config and the included-sources registry.
    project_cls = self.__class__
    return project_cls(
        self.universal_config_obj,
        included_sources=self.included_sources,
        **kwargs,
    )
[ "def", "construct_subproject_config", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "universal_config_obj", ",", "included_sources", "=", "self", ".", "included_sources", ",", "*", "*", "kwargs", ")" ]
https://github.com/SFDO-Tooling/CumulusCI/blob/825ae1f122b25dc41761c52a4ddfa1938d2a4b6e/cumulusci/core/config/project_config.py#L572-L576
ask/carrot
5889a25cd2e274642071c9bba39772f4b3e3d9da
carrot/backends/base.py
python
BaseBackend.consume
(self, *args, **kwargs)
Iterate over the declared consumers.
Iterate over the declared consumers.
[ "Iterate", "over", "the", "declared", "consumers", "." ]
def consume(self, *args, **kwargs):
    """Iterate over the declared consumers."""
    # Abstract hook: concrete backends implement the actual consume loop.
[ "def", "consume", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pass" ]
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/base.py#L129-L131
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/windows/x86/psutil/_pslinux.py
python
Process.open_files
(self)
return retlist
[]
def open_files(self):
    descriptors = os.listdir("/proc/%s/fd" % self.pid)
    results = []
    saw_enoent = False
    for fd in descriptors:
        link = "/proc/%s/fd/%s" % (self.pid, fd)
        try:
            target = os.readlink(link)
        except OSError as err:
            if err.errno in (errno.ENOENT, errno.ESRCH):
                # fd vanished between listdir() and readlink()
                saw_enoent = True
            elif err.errno == errno.EINVAL:
                # not a link
                pass
            else:
                raise
            continue
        # Only absolute paths can be regular files; relative targets
        # (pipes, sockets, ...) are skipped.
        if target.startswith('/') and isfile_strict(target):
            results.append(_common.popenfile(target, int(fd)))
    if saw_enoent:
        # raise NSP if the process disappeared on us
        os.stat('/proc/%s' % self.pid)
    return results
[ "def", "open_files", "(", "self", ")", ":", "retlist", "=", "[", "]", "files", "=", "os", ".", "listdir", "(", "\"/proc/%s/fd\"", "%", "self", ".", "pid", ")", "hit_enoent", "=", "False", "for", "fd", "in", "files", ":", "file", "=", "\"/proc/%s/fd/%s\...
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /scripts/sshbackdoors/backdoors/shell/pupy/pupy/packages/windows/x86/psutil/_pslinux.py#L1145-L1174
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/CentrifyVault/Integrations/CentrifyVault/CentrifyVault.py
python
Client.get_token_request
(self)
return token_response
Sends token request :rtype ``str`` :return: bearer token
Sends token request
[ "Sends", "token", "request" ]
def get_token_request(self):
    """
    Request a bearer token from the Centrify OAuth2 token endpoint.

    :rtype: ``str``
    :return: bearer token response
    :raises Exception: when the endpoint returns an empty response
    """
    full_url = f'{self._base_url}/oauth2/token/{self.app_id}'
    response = self._http_request(
        method='POST',
        full_url=full_url,
        url_suffix='',
        data=self.payload,
        headers={'X-CENTRIFY-NATIVE-CLIENT': 'true'},
    )
    if not response:
        raise Exception(
            'Authorization Error: User has no authorization to create a token.'
            ' Please make sure you entered the credentials correctly.'
        )
    return response
[ "def", "get_token_request", "(", "self", ")", ":", "urlSuffix", "=", "'/oauth2/token/'", "+", "self", ".", "app_id", "fullUrl", "=", "f'{self._base_url}{urlSuffix}'", "body", "=", "self", ".", "payload", "headers", "=", "{", "'X-CENTRIFY-NATIVE-CLIENT'", ":", "'tr...
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/CentrifyVault/Integrations/CentrifyVault/CentrifyVault.py#L68-L87
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/physics/quantum/matrixutils.py
python
_scipy_sparse_tensor_product
(*product)
return sparse.csr_matrix(answer)
scipy.sparse version of tensor product of multiple arguments.
scipy.sparse version of tensor product of multiple arguments.
[ "scipy", ".", "sparse", "version", "of", "tensor", "product", "of", "multiple", "arguments", "." ]
def _scipy_sparse_tensor_product(*product):
    """scipy.sparse version of tensor product of multiple arguments."""
    if not sparse:
        raise ImportError
    result = product[0]
    for factor in product[1:]:
        result = sparse.kron(result, factor)
    # The final matrices will just be multiplied, so csr is a good final
    # sparse format.
    return sparse.csr_matrix(result)
[ "def", "_scipy_sparse_tensor_product", "(", "*", "product", ")", ":", "if", "not", "sparse", ":", "raise", "ImportError", "answer", "=", "product", "[", "0", "]", "for", "item", "in", "product", "[", "1", ":", "]", ":", "answer", "=", "sparse", ".", "k...
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/physics/quantum/matrixutils.py#L231-L240
gitpython-developers/GitPython
fac603789d66c0fd7c26e75debb41b06136c5026
git/config.py
python
GitConfigParser.items_all
(self, section_name: str)
return rv.items_all()
:return: list((option, [values...]), ...) pairs of all items in the given section
:return: list((option, [values...]), ...) pairs of all items in the given section
[ ":", "return", ":", "list", "((", "option", "[", "values", "...", "]", ")", "...", ")", "pairs", "of", "all", "items", "in", "the", "given", "section" ]
def items_all(self, section_name: str) -> List[Tuple[str, List[str]]]:
    """:return: list((option, [values...]), ...) pairs of all items in the given section"""
    merged = _OMD(self._defaults)
    for option, values in self._sections[section_name].items_all():
        if option == '__name__':
            continue
        # Skip entries whose values are identical to the inherited
        # defaults already present in the merged mapping.
        if option in merged and merged.getall(option) == values:
            continue
        for value in values:
            merged.add(option, value)
    return merged.items_all()
[ "def", "items_all", "(", "self", ",", "section_name", ":", "str", ")", "->", "List", "[", "Tuple", "[", "str", ",", "List", "[", "str", "]", "]", "]", ":", "rv", "=", "_OMD", "(", "self", ".", "_defaults", ")", "for", "k", ",", "vs", "in", "sel...
https://github.com/gitpython-developers/GitPython/blob/fac603789d66c0fd7c26e75debb41b06136c5026/git/config.py#L643-L657
fooying/3102
0faee38c30b2e24154f41e68457cfd8f7a61c040
thirdparty/dns/resolver.py
python
Cache._maybe_clean
(self)
Clean the cache if it's time to do so.
Clean the cache if it's time to do so.
[ "Clean", "the", "cache", "if", "it", "s", "time", "to", "do", "so", "." ]
def _maybe_clean(self):
    """Clean the cache if it's time to do so."""
    now = time.time()
    if self.next_cleaning > now:
        return
    # Collect first, then delete: the dict must not change size while
    # being iterated.
    expired = [key for (key, value) in self.data.iteritems()
               if value.expiration <= now]
    for key in expired:
        del self.data[key]
    self.next_cleaning = time.time() + self.cleaning_interval
[ "def", "_maybe_clean", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "self", ".", "next_cleaning", "<=", "now", ":", "keys_to_delete", "=", "[", "]", "for", "(", "k", ",", "v", ")", "in", "self", ".", "data", ".", "iteri...
https://github.com/fooying/3102/blob/0faee38c30b2e24154f41e68457cfd8f7a61c040/thirdparty/dns/resolver.py#L227-L239
lxtGH/OctaveConv_pytorch
079f7da29d55c2eeed8985d33f0b2f765d7a469e
libs/nn/resnet_sk.py
python
conv1x1
(in_planes, out_planes, stride=1)
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
1x1 convolution
1x1 convolution
[ "1x1", "convolution" ]
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                     stride=stride, bias=False)
    return conv
[ "def", "conv1x1", "(", "in_planes", ",", "out_planes", ",", "stride", "=", "1", ")", ":", "return", "nn", ".", "Conv2d", "(", "in_planes", ",", "out_planes", ",", "kernel_size", "=", "1", ",", "stride", "=", "stride", ",", "bias", "=", "False", ")" ]
https://github.com/lxtGH/OctaveConv_pytorch/blob/079f7da29d55c2eeed8985d33f0b2f765d7a469e/libs/nn/resnet_sk.py#L17-L19
PyLops/pylops
33eb807c6f429dd2efe697627c0d3955328af81f
pylops/waveeqprocessing/wavedecomposition.py
python
UpDownComposition3D
( nt, nr, dt, dr, rho, vel, nffts=(None, None, None), critical=100.0, ntaper=10, scaling=1.0, backend="numpy", dtype="complex128", )
return UDop
r"""3D Up-down wavefield composition. Apply multi-component seismic wavefield composition from its up- and down-going constituents. The input model required by the operator should be created by flattening the separated wavefields of size :math:`\lbrack n_{r_y} \times n_{r_x} \times n_t \rbrack` concatenated along the first spatial axis. Similarly, the data is also a flattened concatenation of pressure and vertical particle velocity wavefields. Parameters ---------- nt : :obj:`int` Number of samples along the time axis nr : :obj:`tuple` Number of samples along the receiver axes dt : :obj:`float` Sampling along the time axis dr : :obj:`tuple` Samplings along the receiver array rho : :obj:`float` Density along the receiver array (must be constant) vel : :obj:`float` Velocity along the receiver array (must be constant) nffts : :obj:`tuple`, optional Number of samples along the wavenumbers and frequency axes (for the wavenumbers axes the same order as ``nr`` and ``dr`` must be followed) critical : :obj:`float`, optional Percentage of angles to retain in obliquity factor. For example, if ``critical=100`` only angles below the critical angle :math:`\sqrt{k_y^2 + k_x^2} < \frac{\omega}{vel}` will be retained ntaper : :obj:`float`, optional Number of samples of taper applied to obliquity factor around critical angle scaling : :obj:`float`, optional Scaling to apply to the operator (see Notes for more details) backend : :obj:`str`, optional Backend used for creation of obliquity factor operator (``numpy`` or ``cupy``) dtype : :obj:`str`, optional Type of elements in input array. 
Returns ------- UDop : :obj:`pylops.LinearOperator` Up-down wavefield composition operator See Also -------- UpDownComposition2D: 2D Wavefield composition WavefieldDecomposition: Wavefield decomposition Notes ----- Multi-component seismic data (:math:`p(y, x, t)` and :math:`v_z(y, x, t)`) can be synthesized in the frequency-wavenumber domain as the superposition of the up- and downgoing constituents of the pressure wavefield (:math:`p^-(y, x, t)` and :math:`p^+(y, x, t)`) as described :class:`pylops.waveeqprocessing.UpDownComposition2D`. Here the vertical wavenumber :math:`k_z` is defined as :math:`k_z=\sqrt{\omega^2/c^2 - k_y^2 - k_x^2}`.
r"""3D Up-down wavefield composition.
[ "r", "3D", "Up", "-", "down", "wavefield", "composition", "." ]
def UpDownComposition3D(
    nt,
    nr,
    dt,
    dr,
    rho,
    vel,
    nffts=(None, None, None),
    critical=100.0,
    ntaper=10,
    scaling=1.0,
    backend="numpy",
    dtype="complex128",
):
    r"""3D Up-down wavefield composition.

    Apply multi-component seismic wavefield composition from its up- and
    down-going constituents. The input model required by the operator
    should be created by flattening the separated wavefields of size
    :math:`\lbrack n_{r_y} \times n_{r_x} \times n_t \rbrack`
    concatenated along the first spatial axis. Similarly, the data is
    also a flattened concatenation of pressure and vertical particle
    velocity wavefields.

    Parameters
    ----------
    nt : :obj:`int`
        Number of samples along the time axis
    nr : :obj:`tuple`
        Number of samples along the receiver axes
    dt : :obj:`float`
        Sampling along the time axis
    dr : :obj:`tuple`
        Samplings along the receiver array
    rho : :obj:`float`
        Density along the receiver array (must be constant)
    vel : :obj:`float`
        Velocity along the receiver array (must be constant)
    nffts : :obj:`tuple`, optional
        Number of samples along the wavenumbers and frequency axes (for
        the wavenumbers axes the same order as ``nr`` and ``dr`` must be
        followed)
    critical : :obj:`float`, optional
        Percentage of angles to retain in obliquity factor. For example,
        if ``critical=100`` only angles below the critical angle
        :math:`\sqrt{k_y^2 + k_x^2} < \frac{\omega}{vel}` will be retained
    ntaper : :obj:`float`, optional
        Number of samples of taper applied to obliquity factor around
        critical angle
    scaling : :obj:`float`, optional
        Scaling to apply to the operator (see Notes for more details)
    backend : :obj:`str`, optional
        Backend used for creation of obliquity factor operator
        (``numpy`` or ``cupy``)
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    UDop : :obj:`pylops.LinearOperator`
        Up-down wavefield composition operator

    See Also
    --------
    UpDownComposition2D: 2D Wavefield composition
    WavefieldDecomposition: Wavefield decomposition

    Notes
    -----
    Multi-component seismic data (:math:`p(y, x, t)` and
    :math:`v_z(y, x, t)`) can be synthesized in the frequency-wavenumber
    domain as the superposition of the up- and downgoing constituents of
    the pressure wavefield (:math:`p^-(y, x, t)` and :math:`p^+(y, x, t)`)
    as described :class:`pylops.waveeqprocessing.UpDownComposition2D`.
    Here the vertical wavenumber :math:`k_z` is defined as
    :math:`k_z=\sqrt{\omega^2/c^2 - k_y^2 - k_x^2}`.
    """
    # Default any unspecified FFT size to the corresponding data size
    # (receiver axes from nr, time axis from nt).
    nffts = (
        int(nffts[0]) if nffts[0] is not None else nr[0],
        int(nffts[1]) if nffts[1] is not None else nr[1],
        int(nffts[2]) if nffts[2] is not None else nt,
    )
    # create obliquity operator
    FFTop, OBLop = _obliquity3D(
        nt,
        nr,
        dt,
        dr,
        rho,
        vel,
        nffts=nffts,
        critical=critical,
        ntaper=ntaper,
        composition=True,
        backend=backend,
        dtype=dtype,
    )
    # create up-down modelling operator: forward FFT of both model
    # components, 2x2 block mixing (identity pair for the pressure path,
    # +/- obliquity for the velocity path), inverse FFT of both outputs
    # with `scaling` applied to the second (velocity) output.
    UDop = (
        BlockDiag([FFTop.H, scaling * FFTop.H])
        * Block(
            [
                [
                    Identity(nffts[0] * nffts[1] * nffts[2], dtype=dtype),
                    Identity(nffts[0] * nffts[1] * nffts[2], dtype=dtype),
                ],
                [OBLop, -OBLop],
            ]
        )
        * BlockDiag([FFTop, FFTop])
    )
    return UDop
[ "def", "UpDownComposition3D", "(", "nt", ",", "nr", ",", "dt", ",", "dr", ",", "rho", ",", "vel", ",", "nffts", "=", "(", "None", ",", "None", ",", "None", ")", ",", "critical", "=", "100.0", ",", "ntaper", "=", "10", ",", "scaling", "=", "1.0", ...
https://github.com/PyLops/pylops/blob/33eb807c6f429dd2efe697627c0d3955328af81f/pylops/waveeqprocessing/wavedecomposition.py#L510-L625
maurosoria/dirsearch
b83e68c8fdf360ab06be670d7b92b263262ee5b1
thirdparty/jinja2/compiler.py
python
UndeclaredNameVisitor.visit_Block
(self, node: nodes.Block)
Stop visiting a blocks.
Stop visiting a blocks.
[ "Stop", "visiting", "a", "blocks", "." ]
def visit_Block(self, node: nodes.Block) -> None:
    """Stop visiting at blocks.

    Intentionally empty: by not descending into the node, names used
    inside a ``{% block %}`` are excluded from this visitor's collection.
    NOTE(review): presumably because blocks are analysed separately —
    confirm against the block-handling code in this module.
    """
[ "def", "visit_Block", "(", "self", ",", "node", ":", "nodes", ".", "Block", ")", "->", "None", ":" ]
https://github.com/maurosoria/dirsearch/blob/b83e68c8fdf360ab06be670d7b92b263262ee5b1/thirdparty/jinja2/compiler.py#L289-L290
Qirky/Troop
529c5eb14e456f683e6d23fd4adcddc8446aa115
src/OSC3.py
python
OSCStreamingServer.start
(self)
Start the server thread.
Start the server thread.
[ "Start", "the", "server", "thread", "." ]
def start(self):
    """Start the server thread.

    Runs ``serve_forever`` in a background daemon thread so the server
    does not keep the interpreter alive on shutdown.
    """
    # Thread(daemon=True) replaces the deprecated setDaemon() call
    # (deprecated since Python 3.10); behavior is identical.
    self._server_thread = threading.Thread(
        target=self.serve_forever, daemon=True
    )
    self._server_thread.start()
[ "def", "start", "(", "self", ")", ":", "self", ".", "_server_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "serve_forever", ")", "self", ".", "_server_thread", ".", "setDaemon", "(", "True", ")", "self", ".", "_server_thread", ...
https://github.com/Qirky/Troop/blob/529c5eb14e456f683e6d23fd4adcddc8446aa115/src/OSC3.py#L2661-L2665
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/api/v2010/account/incoming_phone_number/mobile.py
python
MobileInstance.status_callback_method
(self)
return self._properties['status_callback_method']
:returns: The HTTP method we use to call status_callback :rtype: unicode
:returns: The HTTP method we use to call status_callback :rtype: unicode
[ ":", "returns", ":", "The", "HTTP", "method", "we", "use", "to", "call", "status_callback", ":", "rtype", ":", "unicode" ]
def status_callback_method(self):
    """
    :returns: The HTTP method we use to call status_callback
    :rtype: unicode
    """
    properties = self._properties
    return properties['status_callback_method']
[ "def", "status_callback_method", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'status_callback_method'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/incoming_phone_number/mobile.py#L500-L505
jaychsu/algorithm
87dac5456b74a515dd97507ac68e9b8588066a04
leetcode/388_longest_absolute_file_path.py
python
Solution.lengthLongestPath
(self, path)
return ans
:type path: str :rtype: int
:type path: str :rtype: int
[ ":", "type", "path", ":", "str", ":", "rtype", ":", "int" ]
def lengthLongestPath(self, path):
    """
    :type path: str
    :rtype: int
    """
    if not path:
        return 0
    longest = 0
    # prefix_len[d] = length of the joined path (with '/' separators)
    # of the most recent chain of directories down to depth d.
    prefix_len = {0: 0}
    for entry in path.split('\n'):
        stripped = entry.lstrip('\t')
        depth = len(entry) - len(stripped)
        if '.' in stripped:
            # A file: candidate full-path length is prefix plus name.
            longest = max(longest, prefix_len[depth] + len(stripped))
        else:
            # A directory: extend the prefix for the next depth (+1 for
            # the '/' separator).
            prefix_len[depth + 1] = prefix_len[depth] + len(stripped) + 1
    return longest
[ "def", "lengthLongestPath", "(", "self", ",", "path", ")", ":", "ans", "=", "0", "if", "not", "path", ":", "return", "ans", "dep2size", "=", "{", "0", ":", "0", "}", "for", "line", "in", "path", ".", "split", "(", "'\\n'", ")", ":", "name", "=", ...
https://github.com/jaychsu/algorithm/blob/87dac5456b74a515dd97507ac68e9b8588066a04/leetcode/388_longest_absolute_file_path.py#L29-L50
rll/rllab
ba78e4c16dc492982e648f117875b22af3965579
rllab/spaces/base.py
python
Space.flat_dim
(self)
The dimension of the flattened vector of the tensor representation
The dimension of the flattened vector of the tensor representation
[ "The", "dimension", "of", "the", "flattened", "vector", "of", "the", "tensor", "representation" ]
def flat_dim(self): """ The dimension of the flattened vector of the tensor representation """ raise NotImplementedError
[ "def", "flat_dim", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/rll/rllab/blob/ba78e4c16dc492982e648f117875b22af3965579/rllab/spaces/base.py#L37-L41
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/locale.py
python
setlocale
(category, locale=None)
return _setlocale(category, locale)
Set the locale for the given category. The locale can be a string, an iterable of two strings (language code and encoding), or None. Iterables are converted to strings using the locale aliasing engine. Locale strings are passed directly to the C lib. category may be given as one of the LC_* values.
Set the locale for the given category. The locale can be a string, an iterable of two strings (language code and encoding), or None.
[ "Set", "the", "locale", "for", "the", "given", "category", ".", "The", "locale", "can", "be", "a", "string", "an", "iterable", "of", "two", "strings", "(", "language", "code", "and", "encoding", ")", "or", "None", "." ]
def setlocale(category, locale=None):
    """ Set the locale for the given category.  The locale can be
        a string, an iterable of two strings (language code and encoding),
        or None.

        Iterables are converted to strings using the locale aliasing
        engine.  Locale strings are passed directly to the C lib.

        category may be given as one of the LC_* values.
    """
    # Exact type check on purpose (not isinstance): mirrors the historic
    # behavior of converting anything that is not literally a str.
    if locale and type(locale) is not str:
        # convert iterable (language code, encoding) to a locale string
        locale = normalize(_build_localename(locale))
    return _setlocale(category, locale)
[ "def", "setlocale", "(", "category", ",", "locale", "=", "None", ")", ":", "if", "locale", "and", "type", "(", "locale", ")", "is", "not", "type", "(", "\"\"", ")", ":", "# convert to string", "locale", "=", "normalize", "(", "_build_localename", "(", "l...
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/locale.py#L532-L547
NoGameNoLife00/mybolg
afe17ea5bfe405e33766e5682c43a4262232ee12
libs/wtforms/form.py
python
FormMeta.__call__
(cls, *args, **kwargs)
return type.__call__(cls, *args, **kwargs)
Construct a new `Form` instance. Creates the `_unbound_fields` list and the internal `_wtforms_meta` subclass of the class Meta in order to allow a proper inheritance hierarchy.
Construct a new `Form` instance.
[ "Construct", "a", "new", "Form", "instance", "." ]
def __call__(cls, *args, **kwargs): """ Construct a new `Form` instance. Creates the `_unbound_fields` list and the internal `_wtforms_meta` subclass of the class Meta in order to allow a proper inheritance hierarchy. """ if cls._unbound_fields is None: fields = [] for name in dir(cls): if not name.startswith('_'): unbound_field = getattr(cls, name) if hasattr(unbound_field, '_formfield'): fields.append((name, unbound_field)) # We keep the name as the second element of the sort # to ensure a stable sort. fields.sort(key=lambda x: (x[1].creation_counter, x[0])) cls._unbound_fields = fields # Create a subclass of the 'class Meta' using all the ancestors. if cls._wtforms_meta is None: bases = [] for mro_class in cls.__mro__: if 'Meta' in mro_class.__dict__: bases.append(mro_class.Meta) cls._wtforms_meta = type('Meta', tuple(bases), {}) return type.__call__(cls, *args, **kwargs)
[ "def", "__call__", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "_unbound_fields", "is", "None", ":", "fields", "=", "[", "]", "for", "name", "in", "dir", "(", "cls", ")", ":", "if", "not", "name", ".", "st...
https://github.com/NoGameNoLife00/mybolg/blob/afe17ea5bfe405e33766e5682c43a4262232ee12/libs/wtforms/form.py#L185-L212
smart-mobile-software/gitstack
d9fee8f414f202143eb6e620529e8e5539a2af56
python/Lib/email/iterators.py
python
body_line_iterator
(msg, decode=False)
Iterate over the parts, returning string payloads line-by-line. Optional decode (default False) is passed through to .get_payload().
Iterate over the parts, returning string payloads line-by-line.
[ "Iterate", "over", "the", "parts", "returning", "string", "payloads", "line", "-", "by", "-", "line", "." ]
def body_line_iterator(msg, decode=False): """Iterate over the parts, returning string payloads line-by-line. Optional decode (default False) is passed through to .get_payload(). """ for subpart in msg.walk(): payload = subpart.get_payload(decode=decode) if isinstance(payload, basestring): for line in StringIO(payload): yield line
[ "def", "body_line_iterator", "(", "msg", ",", "decode", "=", "False", ")", ":", "for", "subpart", "in", "msg", ".", "walk", "(", ")", ":", "payload", "=", "subpart", ".", "get_payload", "(", "decode", "=", "decode", ")", "if", "isinstance", "(", "paylo...
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/email/iterators.py#L35-L44
jaraco/inflect
98e19e3eb3ad28f9fa882baaad01674c44b59952
inflect.py
python
joinstem
(cutpoint: Optional[int] = 0, words: Optional[Iterable[str]] = None)
return enclose("|".join(w[:cutpoint] for w in words))
join stem of each word in words into a string for regex each word is truncated at cutpoint cutpoint is usually negative indicating the number of letters to remove from the end of each word e.g. joinstem(-2, ["ephemeris", "iris", ".*itis"]) returns (?:ephemer|ir|.*it)
join stem of each word in words into a string for regex each word is truncated at cutpoint cutpoint is usually negative indicating the number of letters to remove from the end of each word
[ "join", "stem", "of", "each", "word", "in", "words", "into", "a", "string", "for", "regex", "each", "word", "is", "truncated", "at", "cutpoint", "cutpoint", "is", "usually", "negative", "indicating", "the", "number", "of", "letters", "to", "remove", "from", ...
def joinstem(cutpoint: Optional[int] = 0, words: Optional[Iterable[str]] = None) -> str: """ join stem of each word in words into a string for regex each word is truncated at cutpoint cutpoint is usually negative indicating the number of letters to remove from the end of each word e.g. joinstem(-2, ["ephemeris", "iris", ".*itis"]) returns (?:ephemer|ir|.*it) """ if words is None: words = "" return enclose("|".join(w[:cutpoint] for w in words))
[ "def", "joinstem", "(", "cutpoint", ":", "Optional", "[", "int", "]", "=", "0", ",", "words", ":", "Optional", "[", "Iterable", "[", "str", "]", "]", "=", "None", ")", "->", "str", ":", "if", "words", "is", "None", ":", "words", "=", "\"\"", "ret...
https://github.com/jaraco/inflect/blob/98e19e3eb3ad28f9fa882baaad01674c44b59952/inflect.py#L96-L110
coursera/dataduct
83aea17c1b1abd376270bc8fd4a180ce09181cc5
dataduct/s3/s3_path.py
python
S3Path.append
(self, new_key, is_directory=False)
Appends new key to the current key Args: new_key (str): Key for the S3 path is_directory (bool): Is the specified S3 path a directory
Appends new key to the current key
[ "Appends", "new", "key", "to", "the", "current", "key" ]
def append(self, new_key, is_directory=False): """Appends new key to the current key Args: new_key (str): Key for the S3 path is_directory (bool): Is the specified S3 path a directory """ assert self.is_directory or self.key is None, \ 'Can only append to path that is directory' # If new key is list we want to flatten it out if isinstance(new_key, list): new_key = join(*new_key) # Remove duplicate, leading, and trailing '/' new_key = [a for a in new_key.split("/") if a != ''] # AWS prevents us from using periods in paths # Substitute them with '_' if is_directory: directory_path = new_key file_name = '' else: directory_path = new_key[:-1] file_name = new_key[-1] # Remove periods new_key = [sub(r'\.', '_', a) for a in directory_path] new_key.append(file_name) new_key = join(*new_key) if self.key: self.key = join(self.key, new_key) else: self.key = new_key self.is_directory = is_directory
[ "def", "append", "(", "self", ",", "new_key", ",", "is_directory", "=", "False", ")", ":", "assert", "self", ".", "is_directory", "or", "self", ".", "key", "is", "None", ",", "'Can only append to path that is directory'", "# If new key is list we want to flatten it ou...
https://github.com/coursera/dataduct/blob/83aea17c1b1abd376270bc8fd4a180ce09181cc5/dataduct/s3/s3_path.py#L63-L99
joelgrus/data-science-from-scratch
d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1
first-edition/code-python3/databases.py
python
Table.limit
(self, num_rows=None)
return limit_table
return only the first num_rows rows
return only the first num_rows rows
[ "return", "only", "the", "first", "num_rows", "rows" ]
def limit(self, num_rows=None): """return only the first num_rows rows""" limit_table = Table(self.columns) limit_table.rows = (self.rows[:num_rows] if num_rows is not None else self.rows) return limit_table
[ "def", "limit", "(", "self", ",", "num_rows", "=", "None", ")", ":", "limit_table", "=", "Table", "(", "self", ".", "columns", ")", "limit_table", ".", "rows", "=", "(", "self", ".", "rows", "[", ":", "num_rows", "]", "if", "num_rows", "is", "not", ...
https://github.com/joelgrus/data-science-from-scratch/blob/d5d0f117f41b3ccab3b07f1ee1fa21cfcf69afa1/first-edition/code-python3/databases.py#L55-L61
scipy/scipy
e0a749f01e79046642ccfdc419edbf9e7ca141ad
scipy/integrate/_quadrature.py
python
_cached_roots_legendre
(n)
return _cached_roots_legendre.cache[n]
Cache roots_legendre results to speed up calls of the fixed_quad function.
Cache roots_legendre results to speed up calls of the fixed_quad function.
[ "Cache", "roots_legendre", "results", "to", "speed", "up", "calls", "of", "the", "fixed_quad", "function", "." ]
def _cached_roots_legendre(n): """ Cache roots_legendre results to speed up calls of the fixed_quad function. """ if n in _cached_roots_legendre.cache: return _cached_roots_legendre.cache[n] _cached_roots_legendre.cache[n] = roots_legendre(n) return _cached_roots_legendre.cache[n]
[ "def", "_cached_roots_legendre", "(", "n", ")", ":", "if", "n", "in", "_cached_roots_legendre", ".", "cache", ":", "return", "_cached_roots_legendre", ".", "cache", "[", "n", "]", "_cached_roots_legendre", ".", "cache", "[", "n", "]", "=", "roots_legendre", "(...
https://github.com/scipy/scipy/blob/e0a749f01e79046642ccfdc419edbf9e7ca141ad/scipy/integrate/_quadrature.py#L68-L77
pinterest/mysql_utils
7ab237699b85de8b503b09f36e0309ac807689fe
mysql_backup_csv.py
python
mysql_backup_csv.mysql_backup_one_partition
(self, table_tuple, tmp_dir_db, conn)
Back up a single partition of a single table Args: table_tuple - the table_tuple (db, partition name, partition number) to be backed up tmp_dir_db - temporary storage used for all tables in the db conn - a connection the the mysql instance
Back up a single partition of a single table
[ "Back", "up", "a", "single", "partition", "of", "a", "single", "table" ]
def mysql_backup_one_partition(self, table_tuple, tmp_dir_db, conn): """ Back up a single partition of a single table Args: table_tuple - the table_tuple (db, partition name, partition number) to be backed up tmp_dir_db - temporary storage used for all tables in the db conn - a connection the the mysql instance """ proc_id = multiprocessing.current_process().name (_, data_path, _) = backup.get_csv_backup_paths(self.instance, *table_tuple[0].split('.'), date=self.datestamp, partition_number=table_tuple[2]) log.debug('{proc_id}: {tbl} partition {p} dump to {path} started' ''.format(proc_id=proc_id, tbl=table_tuple[0], p=table_tuple[2], path=data_path)) self.upload_schema(*table_tuple[0].split('.'), tmp_dir_db=tmp_dir_db) fifo = os.path.join(tmp_dir_db, '{tbl}{part}'.format(tbl=table_tuple[0].split('.')[1], part=table_tuple[2])) procs = dict() try: # giant try so we can try to clean things up in case of errors self.create_fifo(fifo) # Start creating processes procs['cat'] = subprocess.Popen(['cat', fifo], stdout=subprocess.PIPE) procs['nullescape'] = subprocess.Popen(['nullescape'], stdin=procs['cat'].stdout, stdout=subprocess.PIPE) procs['lzop'] = subprocess.Popen(['lzop'], stdin=procs['nullescape'].stdout, stdout=subprocess.PIPE) # Start dump query return_value = set() query_thread = threading.Thread(target=self.run_dump_query, args=(table_tuple, fifo, conn, procs['cat'], return_value)) query_thread.daemon = True query_thread.start() # And run the upload safe_uploader.safe_upload(precursor_procs=procs, stdin=procs['lzop'].stdout, bucket=self.upload_bucket, key=data_path, check_func=self.check_dump_success, check_arg=return_value) os.remove(fifo) log.debug('{proc_id}: {tbl} partition {p} clean up complete' ''.format(proc_id=proc_id, tbl=table_tuple[0], p=table_tuple[2])) except: log.debug('{}: in exception handling for failed table ' 'upload'.format(proc_id)) if os.path.exists(fifo): self.cleanup_fifo(fifo) raise
[ "def", "mysql_backup_one_partition", "(", "self", ",", "table_tuple", ",", "tmp_dir_db", ",", "conn", ")", ":", "proc_id", "=", "multiprocessing", ".", "current_process", "(", ")", ".", "name", "(", "_", ",", "data_path", ",", "_", ")", "=", "backup", ".",...
https://github.com/pinterest/mysql_utils/blob/7ab237699b85de8b503b09f36e0309ac807689fe/mysql_backup_csv.py#L294-L358
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/django/views/generic/dates.py
python
MonthMixin._get_current_month
(self, date)
return date.replace(day=1)
Return the start date of the previous interval.
Return the start date of the previous interval.
[ "Return", "the", "start", "date", "of", "the", "previous", "interval", "." ]
def _get_current_month(self, date): """ Return the start date of the previous interval. """ return date.replace(day=1)
[ "def", "_get_current_month", "(", "self", ",", "date", ")", ":", "return", "date", ".", "replace", "(", "day", "=", "1", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/django/views/generic/dates.py#L124-L128
msracver/Deep-Exemplar-based-Colorization
964026106bb51171a3c58be58d5c117e2f62bb4c
colorization_subnet/lib/functional.py
python
crop
(img, i, j, h, w)
return img.crop((j, i, j + w, i + h))
Crop the given PIL Image. Args: img (PIL Image): Image to be cropped. i: Upper pixel coordinate. j: Left pixel coordinate. h: Height of the cropped image. w: Width of the cropped image. Returns: PIL Image: Cropped image.
Crop the given PIL Image.
[ "Crop", "the", "given", "PIL", "Image", "." ]
def crop(img, i, j, h, w): """Crop the given PIL Image. Args: img (PIL Image): Image to be cropped. i: Upper pixel coordinate. j: Left pixel coordinate. h: Height of the cropped image. w: Width of the cropped image. Returns: PIL Image: Cropped image. """ if not _is_pil_image(img): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) return img.crop((j, i, j + w, i + h))
[ "def", "crop", "(", "img", ",", "i", ",", "j", ",", "h", ",", "w", ")", ":", "if", "not", "_is_pil_image", "(", "img", ")", ":", "raise", "TypeError", "(", "'img should be PIL Image. Got {}'", ".", "format", "(", "type", "(", "img", ")", ")", ")", ...
https://github.com/msracver/Deep-Exemplar-based-Colorization/blob/964026106bb51171a3c58be58d5c117e2f62bb4c/colorization_subnet/lib/functional.py#L266-L282
djblets/djblets
0496e1ec49e43d43d776768c9fc5b6f8af56ec2c
djblets/registries/registry.py
python
Registry.populated
(self)
return self._populated
Whether or not the registry is populated. Returns: bool: Whether or not the registry is populated.
Whether or not the registry is populated.
[ "Whether", "or", "not", "the", "registry", "is", "populated", "." ]
def populated(self): """Whether or not the registry is populated. Returns: bool: Whether or not the registry is populated. """ return self._populated
[ "def", "populated", "(", "self", ")", ":", "return", "self", ".", "_populated" ]
https://github.com/djblets/djblets/blob/0496e1ec49e43d43d776768c9fc5b6f8af56ec2c/djblets/registries/registry.py#L95-L101
ChineseGLUE/ChineseGLUE
1591b85cf5427c2ff60f718d359ecb71d2b44879
baselines/models/xlnet/classifier_utils.py
python
convert_single_example_for_inews
(ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example)
return feature
[]
def convert_single_example_for_inews(ex_index, tokens_a, tokens_b, label_map, max_seq_length, tokenizer, example): if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for two [SEP] & one [CLS] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for one [SEP] & one [CLS] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:max_seq_length - 2] tokens = [] segment_ids = [] for token in tokens_a: tokens.append(token) segment_ids.append(SEG_ID_A) tokens.append(SEP_ID) segment_ids.append(SEG_ID_A) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(SEG_ID_B) tokens.append(SEP_ID) segment_ids.append(SEG_ID_B) tokens.append(CLS_ID) segment_ids.append(SEG_ID_CLS) input_ids = tokens # The mask has 0 for real tokens and 1 for padding tokens. Only real # tokens are attended to. input_mask = [0] * len(input_ids) # Zero-pad up to the sequence length. if len(input_ids) < max_seq_length: delta_len = max_seq_length - len(input_ids) input_ids = [0] * delta_len + input_ids input_mask = [1] * delta_len + input_mask segment_ids = [SEG_ID_PAD] * delta_len + segment_ids assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if label_map is not None: label_id = label_map[example.label] else: label_id = example.label if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: {} (id = {})".format(example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id) return feature
[ "def", "convert_single_example_for_inews", "(", "ex_index", ",", "tokens_a", ",", "tokens_b", ",", "label_map", ",", "max_seq_length", ",", "tokenizer", ",", "example", ")", ":", "if", "tokens_b", ":", "# Modifies `tokens_a` and `tokens_b` in place so that the total", "# ...
https://github.com/ChineseGLUE/ChineseGLUE/blob/1591b85cf5427c2ff60f718d359ecb71d2b44879/baselines/models/xlnet/classifier_utils.py#L147-L211
onnx/onnx-tensorflow
6194294c9f2f1c9270a614f6ae5078f2095587b7
onnx_tf/backend.py
python
TensorflowBackend.prepare
(cls, model, device='CPU', strict=True, logging_level='INFO', auto_cast=False, **kwargs)
return cls.onnx_model_to_tensorflow_rep(model, strict, **kwargs)
Prepare an ONNX model for Tensorflow Backend. This function converts an ONNX model to an internel representation of the computational graph called TensorflowRep and returns the converted representation. :param model: The ONNX model to be converted. :param device: The device to execute this model on. It can be either CPU (default) or CUDA. :param strict: Whether to enforce semantic equivalence between the original model and the converted tensorflow model, defaults to True (yes, enforce semantic equivalence). Changing to False is strongly discouraged. Currently, the strict flag only affects the behavior of MaxPool and AveragePool ops. :param logging_level: The logging level, default is INFO. Change it to DEBUG to see more conversion details or to WARNING to see less :param auto_cast: Whether to auto cast data types that might lose precision for the tensors with types not natively supported by Tensorflow, default is False :returns: A TensorflowRep class object representing the ONNX model
Prepare an ONNX model for Tensorflow Backend.
[ "Prepare", "an", "ONNX", "model", "for", "Tensorflow", "Backend", "." ]
def prepare(cls, model, device='CPU', strict=True, logging_level='INFO', auto_cast=False, **kwargs): """Prepare an ONNX model for Tensorflow Backend. This function converts an ONNX model to an internel representation of the computational graph called TensorflowRep and returns the converted representation. :param model: The ONNX model to be converted. :param device: The device to execute this model on. It can be either CPU (default) or CUDA. :param strict: Whether to enforce semantic equivalence between the original model and the converted tensorflow model, defaults to True (yes, enforce semantic equivalence). Changing to False is strongly discouraged. Currently, the strict flag only affects the behavior of MaxPool and AveragePool ops. :param logging_level: The logging level, default is INFO. Change it to DEBUG to see more conversion details or to WARNING to see less :param auto_cast: Whether to auto cast data types that might lose precision for the tensors with types not natively supported by Tensorflow, default is False :returns: A TensorflowRep class object representing the ONNX model """ super(TensorflowBackend, cls).prepare(model, device, **kwargs) common.logger.setLevel(logging_level) common.logger.handlers[0].setLevel(logging_level) common.sys_config.auto_cast = auto_cast common.sys_config.device = device return cls.onnx_model_to_tensorflow_rep(model, strict, **kwargs)
[ "def", "prepare", "(", "cls", ",", "model", ",", "device", "=", "'CPU'", ",", "strict", "=", "True", ",", "logging_level", "=", "'INFO'", ",", "auto_cast", "=", "False", ",", "*", "*", "kwargs", ")", ":", "super", "(", "TensorflowBackend", ",", "cls", ...
https://github.com/onnx/onnx-tensorflow/blob/6194294c9f2f1c9270a614f6ae5078f2095587b7/onnx_tf/backend.py#L41-L73
kumar-shridhar/PyTorch-BayesianCNN
d93bad543c3226cd0fe05c0cb0ba033c41b3caa6
Mixtures/gmm.py
python
GaussianMixture.__p_k
(self, x, mu, var)
return prefactor * exponent
Returns a tensor with dimensions (n, k, 1) indicating the likelihood of data belonging to the k-th Gaussian. args: x: torch.Tensor (n, k, d) mu: torch.Tensor (1, k, d) var: torch.Tensor (1, k, d) returns: p_k: torch.Tensor (n, k, 1)
Returns a tensor with dimensions (n, k, 1) indicating the likelihood of data belonging to the k-th Gaussian. args: x: torch.Tensor (n, k, d) mu: torch.Tensor (1, k, d) var: torch.Tensor (1, k, d) returns: p_k: torch.Tensor (n, k, 1)
[ "Returns", "a", "tensor", "with", "dimensions", "(", "n", "k", "1", ")", "indicating", "the", "likelihood", "of", "data", "belonging", "to", "the", "k", "-", "th", "Gaussian", ".", "args", ":", "x", ":", "torch", ".", "Tensor", "(", "n", "k", "d", ...
def __p_k(self, x, mu, var): """ Returns a tensor with dimensions (n, k, 1) indicating the likelihood of data belonging to the k-th Gaussian. args: x: torch.Tensor (n, k, d) mu: torch.Tensor (1, k, d) var: torch.Tensor (1, k, d) returns: p_k: torch.Tensor (n, k, 1) """ # (1, k, d) --> (n, k, d) mu = mu.expand(x.size(0), self.n_components, self.n_features) var = var.expand(x.size(0), self.n_components, self.n_features) # (n, k, d) --> (n, k, 1) exponent = torch.exp(-.5 * torch.sum((x - mu) * (x - mu) / var, 2, keepdim=True)) # (n, k, d) --> (n, k, 1) prefactor = torch.rsqrt(((2. * pi) ** self.n_features) * torch.prod(var, dim=2, keepdim=True) + self.eps) return prefactor * exponent
[ "def", "__p_k", "(", "self", ",", "x", ",", "mu", ",", "var", ")", ":", "# (1, k, d) --> (n, k, d)", "mu", "=", "mu", ".", "expand", "(", "x", ".", "size", "(", "0", ")", ",", "self", ".", "n_components", ",", "self", ".", "n_features", ")", "var",...
https://github.com/kumar-shridhar/PyTorch-BayesianCNN/blob/d93bad543c3226cd0fe05c0cb0ba033c41b3caa6/Mixtures/gmm.py#L176-L196
google-research/language
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
language/canine/bert_optimization.py
python
AdamWeightDecayOptimizer.__init__
(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=None, name="AdamWeightDecayOptimizer")
Constructs a AdamWeightDecayOptimizer.
Constructs a AdamWeightDecayOptimizer.
[ "Constructs", "a", "AdamWeightDecayOptimizer", "." ]
def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-6, exclude_from_weight_decay=None, name="AdamWeightDecayOptimizer"): """Constructs a AdamWeightDecayOptimizer.""" super(AdamWeightDecayOptimizer, self).__init__(False, name) self.learning_rate = learning_rate self.weight_decay_rate = weight_decay_rate self.beta_1 = beta_1 self.beta_2 = beta_2 self.epsilon = epsilon self.exclude_from_weight_decay = exclude_from_weight_decay
[ "def", "__init__", "(", "self", ",", "learning_rate", ",", "weight_decay_rate", "=", "0.0", ",", "beta_1", "=", "0.9", ",", "beta_2", "=", "0.999", ",", "epsilon", "=", "1e-6", ",", "exclude_from_weight_decay", "=", "None", ",", "name", "=", "\"AdamWeightDec...
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/canine/bert_optimization.py#L89-L105
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/asyncio/windows_events.py
python
_BaseWaitHandleFuture.set_exception
(self, exception)
[]
def set_exception(self, exception): self._unregister_wait() super().set_exception(exception)
[ "def", "set_exception", "(", "self", ",", "exception", ")", ":", "self", ".", "_unregister_wait", "(", ")", "super", "(", ")", ".", "set_exception", "(", "exception", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/asyncio/windows_events.py#L156-L158
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/plot/plot3d/tachyon.py
python
FCylinder.__init__
(self, base, apex, radius, texture)
r""" Create a finite cylinder object. EXAMPLES:: sage: from sage.plot.plot3d.tachyon import FCylinder sage: fc = FCylinder((0,0,0),(1,1,1),.1,'s') sage: fc.str() '\n fcylinder base 0.0 0.0 0.0 apex 1.0 1.0 1.0 rad 0.1 s\n '
r""" Create a finite cylinder object.
[ "r", "Create", "a", "finite", "cylinder", "object", "." ]
def __init__(self, base, apex, radius, texture): r""" Create a finite cylinder object. EXAMPLES:: sage: from sage.plot.plot3d.tachyon import FCylinder sage: fc = FCylinder((0,0,0),(1,1,1),.1,'s') sage: fc.str() '\n fcylinder base 0.0 0.0 0.0 apex 1.0 1.0 1.0 rad 0.1 s\n ' """ x, y, z = base self._center = (float(x), float(y), float(z)) x, y, z = apex self._axis = (float(x), float(y), float(z)) self._radius = float(radius) self._texture = texture
[ "def", "__init__", "(", "self", ",", "base", ",", "apex", ",", "radius", ",", "texture", ")", ":", "x", ",", "y", ",", "z", "=", "base", "self", ".", "_center", "=", "(", "float", "(", "x", ")", ",", "float", "(", "y", ")", ",", "float", "(",...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/plot/plot3d/tachyon.py#L1449-L1465
zachwill/flask-engine
7c8ad4bfe36382a8c9286d873ec7b785715832a4
libs/werkzeug/routing.py
python
RuleFactory.get_rules
(self, map)
Subclasses of `RuleFactory` have to override this method and return an iterable of rules.
Subclasses of `RuleFactory` have to override this method and return an iterable of rules.
[ "Subclasses", "of", "RuleFactory", "have", "to", "override", "this", "method", "and", "return", "an", "iterable", "of", "rules", "." ]
def get_rules(self, map): """Subclasses of `RuleFactory` have to override this method and return an iterable of rules.""" raise NotImplementedError()
[ "def", "get_rules", "(", "self", ",", "map", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/zachwill/flask-engine/blob/7c8ad4bfe36382a8c9286d873ec7b785715832a4/libs/werkzeug/routing.py#L225-L228
fonttools/fonttools
892322aaff6a89bea5927379ec06bc0da3dfb7df
Lib/fontTools/ttLib/ttFont.py
python
_TTGlyphSet.__init__
(self, ttFont, glyphs, glyphType)
Construct a new glyphset. Args: font (TTFont): The font object (used to get metrics). glyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects. glyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``.
Construct a new glyphset.
[ "Construct", "a", "new", "glyphset", "." ]
def __init__(self, ttFont, glyphs, glyphType): """Construct a new glyphset. Args: font (TTFont): The font object (used to get metrics). glyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects. glyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``. """ self._glyphs = glyphs self._hmtx = ttFont['hmtx'] self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None self._glyphType = glyphType
[ "def", "__init__", "(", "self", ",", "ttFont", ",", "glyphs", ",", "glyphType", ")", ":", "self", ".", "_glyphs", "=", "glyphs", "self", ".", "_hmtx", "=", "ttFont", "[", "'hmtx'", "]", "self", ".", "_vmtx", "=", "ttFont", "[", "'vmtx'", "]", "if", ...
https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/ttLib/ttFont.py#L717-L728
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/pip-7.1.2-py3.3.egg/pip/_vendor/lockfile/pidlockfile.py
python
PIDLockFile.is_locked
(self)
return os.path.exists(self.path)
Test if the lock is currently held. The lock is held if the PID file for this lock exists.
Test if the lock is currently held.
[ "Test", "if", "the", "lock", "is", "currently", "held", "." ]
def is_locked(self): """ Test if the lock is currently held. The lock is held if the PID file for this lock exists. """ return os.path.exists(self.path)
[ "def", "is_locked", "(", "self", ")", ":", "return", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pip-7.1.2-py3.3.egg/pip/_vendor/lockfile/pidlockfile.py#L50-L56
PythonJS/PythonJS
591a80afd8233fb715493591db2b68f1748558d9
pythonjs/lib/python2.7/StringIO.py
python
StringIO.seek
(self, pos, mode = 0)
Set the file's current position. The mode argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file's end). There is no return value.
Set the file's current position.
[ "Set", "the", "file", "s", "current", "position", "." ]
def seek(self, pos, mode = 0): """Set the file's current position. The mode argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file's end). There is no return value. """ _complain_ifclosed(self.closed) if self.buflist: self.buf += ''.join(self.buflist) self.buflist = [] if mode == 1: pos += self.pos elif mode == 2: pos += self.len self.pos = max(0, pos)
[ "def", "seek", "(", "self", ",", "pos", ",", "mode", "=", "0", ")", ":", "_complain_ifclosed", "(", "self", ".", "closed", ")", "if", "self", ".", "buflist", ":", "self", ".", "buf", "+=", "''", ".", "join", "(", "self", ".", "buflist", ")", "sel...
https://github.com/PythonJS/PythonJS/blob/591a80afd8233fb715493591db2b68f1748558d9/pythonjs/lib/python2.7/StringIO.py#L95-L112
sebastien/cuisine
f6f70268ef1361db66815383017f7c8969002154
src/cuisine.py
python
stringify
( value )
Turns the given value in a user-friendly string that can be displayed
Turns the given value in a user-friendly string that can be displayed
[ "Turns", "the", "given", "value", "in", "a", "user", "-", "friendly", "string", "that", "can", "be", "displayed" ]
def stringify( value ): """Turns the given value in a user-friendly string that can be displayed""" if type(value) in (str, unicode, bytes) and len(value) > STRINGIFY_MAXSTRING: return "{0}...".format(value[0:STRINGIFY_MAXSTRING]) elif type(value) in (list, tuple) and len(value) > 10: return"[{0},...]".format(", ".join([stringify(_) for _ in value[0:STRINGIFY_MAXLISTSTRING]])) else: return str(value)
[ "def", "stringify", "(", "value", ")", ":", "if", "type", "(", "value", ")", "in", "(", "str", ",", "unicode", ",", "bytes", ")", "and", "len", "(", "value", ")", ">", "STRINGIFY_MAXSTRING", ":", "return", "\"{0}...\"", ".", "format", "(", "value", "...
https://github.com/sebastien/cuisine/blob/f6f70268ef1361db66815383017f7c8969002154/src/cuisine.py#L130-L137
freedombox/FreedomBox
335a7f92cc08f27981f838a7cddfc67740598e54
plinth/glib.py
python
_run
()
Connect to D-Bus and run main loop.
Connect to D-Bus and run main loop.
[ "Connect", "to", "D", "-", "Bus", "and", "run", "main", "loop", "." ]
def _run(): """Connect to D-Bus and run main loop.""" logger.info('Started new thread for glib main loop.') # Initialize all modules that use glib main loop dbus.init() network.init() global _main_loop _main_loop = glib.MainLoop() _main_loop.run() _main_loop = None logger.info('Glib main loop thread exited.')
[ "def", "_run", "(", ")", ":", "logger", ".", "info", "(", "'Started new thread for glib main loop.'", ")", "# Initialize all modules that use glib main loop", "dbus", ".", "init", "(", ")", "network", ".", "init", "(", ")", "global", "_main_loop", "_main_loop", "=",...
https://github.com/freedombox/FreedomBox/blob/335a7f92cc08f27981f838a7cddfc67740598e54/plinth/glib.py#L34-L47
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/colors.py
python
LinearSegmentedColormap.from_list
(name, colors, N=256, gamma=1.0)
return LinearSegmentedColormap(name, cdict, N, gamma)
Make a linear segmented colormap with *name* from a sequence of *colors* which evenly transitions from colors[0] at val=0 to colors[-1] at val=1. *N* is the number of rgb quantization levels. Alternatively, a list of (value, color) tuples can be given to divide the range unevenly.
Make a linear segmented colormap with *name* from a sequence of *colors* which evenly transitions from colors[0] at val=0 to colors[-1] at val=1. *N* is the number of rgb quantization levels. Alternatively, a list of (value, color) tuples can be given to divide the range unevenly.
[ "Make", "a", "linear", "segmented", "colormap", "with", "*", "name", "*", "from", "a", "sequence", "of", "*", "colors", "*", "which", "evenly", "transitions", "from", "colors", "[", "0", "]", "at", "val", "=", "0", "to", "colors", "[", "-", "1", "]",...
def from_list(name, colors, N=256, gamma=1.0): """ Make a linear segmented colormap with *name* from a sequence of *colors* which evenly transitions from colors[0] at val=0 to colors[-1] at val=1. *N* is the number of rgb quantization levels. Alternatively, a list of (value, color) tuples can be given to divide the range unevenly. """ if not cbook.iterable(colors): raise ValueError('colors must be iterable') if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \ not cbook.is_string_like(colors[0]): # List of value, color pairs vals, colors = zip(*colors) else: vals = np.linspace(0., 1., len(colors)) cdict = dict(red=[], green=[], blue=[], alpha=[]) for val, color in zip(vals, colors): r, g, b, a = colorConverter.to_rgba(color) cdict['red'].append((val, r, r)) cdict['green'].append((val, g, g)) cdict['blue'].append((val, b, b)) cdict['alpha'].append((val, a, a)) return LinearSegmentedColormap(name, cdict, N, gamma)
[ "def", "from_list", "(", "name", ",", "colors", ",", "N", "=", "256", ",", "gamma", "=", "1.0", ")", ":", "if", "not", "cbook", ".", "iterable", "(", "colors", ")", ":", "raise", "ValueError", "(", "'colors must be iterable'", ")", "if", "cbook", ".", ...
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/colors.py#L744-L772
ipython/ipython
c0abea7a6dfe52c1f74c9d0387d4accadba7cc14
IPython/core/history.py
python
HistoryAccessor.init_db
(self)
Connect to the database, and create tables if necessary.
Connect to the database, and create tables if necessary.
[ "Connect", "to", "the", "database", "and", "create", "tables", "if", "necessary", "." ]
def init_db(self): """Connect to the database, and create tables if necessary.""" if not self.enabled: self.db = DummyDB() return # use detect_types so that timestamps return datetime objects kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) kwargs.update(self.connection_options) self.db = sqlite3.connect(str(self.hist_file), **kwargs) with self.db: self.db.execute( """CREATE TABLE IF NOT EXISTS sessions (session integer primary key autoincrement, start timestamp, end timestamp, num_cmds integer, remark text)""" ) self.db.execute( """CREATE TABLE IF NOT EXISTS history (session integer, line integer, source text, source_raw text, PRIMARY KEY (session, line))""" ) # Output history is optional, but ensure the table's there so it can be # enabled later. self.db.execute( """CREATE TABLE IF NOT EXISTS output_history (session integer, line integer, output text, PRIMARY KEY (session, line))""" ) # success! reset corrupt db count self._corrupt_db_counter = 0
[ "def", "init_db", "(", "self", ")", ":", "if", "not", "self", ".", "enabled", ":", "self", ".", "db", "=", "DummyDB", "(", ")", "return", "# use detect_types so that timestamps return datetime objects", "kwargs", "=", "dict", "(", "detect_types", "=", "sqlite3",...
https://github.com/ipython/ipython/blob/c0abea7a6dfe52c1f74c9d0387d4accadba7cc14/IPython/core/history.py#L235-L264
AwesomeTTS/awesometts-anki-addon
c7c2c94479b610b9767ec44cdbb825002bc0c2b7
awesometts/gui/common.py
python
key_event_combo
(event)
return key + sum(flag for flag in key_event_combo.MOD_FLAGS if modifiers & flag)
Given a key event, returns an integer representing the combination of keys that was pressed or released. Certain keys are blacklisted (see BLACKLIST) and key_event_combo() will return None if it sees these keys in the primary key() slot for an event. When used by themselves or exclusively with modifiers, these keys cause various problems: gibberish strings returned from QKeySequence#toString() and in menus, inability to capture the keystroke because the window manager does not forward it to Qt, ambiguous shortcuts where order would matter (e.g. Ctrl + Alt would produce a different numerical value than Alt + Ctrl, because the key codes for Alt and Ctrl are different from the modifier flag codes for Alt and Ctrl), and clashes with input navigation.
Given a key event, returns an integer representing the combination of keys that was pressed or released.
[ "Given", "a", "key", "event", "returns", "an", "integer", "representing", "the", "combination", "of", "keys", "that", "was", "pressed", "or", "released", "." ]
def key_event_combo(event): """ Given a key event, returns an integer representing the combination of keys that was pressed or released. Certain keys are blacklisted (see BLACKLIST) and key_event_combo() will return None if it sees these keys in the primary key() slot for an event. When used by themselves or exclusively with modifiers, these keys cause various problems: gibberish strings returned from QKeySequence#toString() and in menus, inability to capture the keystroke because the window manager does not forward it to Qt, ambiguous shortcuts where order would matter (e.g. Ctrl + Alt would produce a different numerical value than Alt + Ctrl, because the key codes for Alt and Ctrl are different from the modifier flag codes for Alt and Ctrl), and clashes with input navigation. """ key = event.key() if key < 32 or key in key_event_combo.BLACKLIST: return None modifiers = event.modifiers() return key + sum(flag for flag in key_event_combo.MOD_FLAGS if modifiers & flag)
[ "def", "key_event_combo", "(", "event", ")", ":", "key", "=", "event", ".", "key", "(", ")", "if", "key", "<", "32", "or", "key", "in", "key_event_combo", ".", "BLACKLIST", ":", "return", "None", "modifiers", "=", "event", ".", "modifiers", "(", ")", ...
https://github.com/AwesomeTTS/awesometts-anki-addon/blob/c7c2c94479b610b9767ec44cdbb825002bc0c2b7/awesometts/gui/common.py#L41-L65
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py
python
DependencyFinder.__init__
(self, locator=None)
Initialise an instance, using the specified locator to locate distributions.
Initialise an instance, using the specified locator to locate distributions.
[ "Initialise", "an", "instance", "using", "the", "specified", "locator", "to", "locate", "distributions", "." ]
def __init__(self, locator=None): """ Initialise an instance, using the specified locator to locate distributions. """ self.locator = locator or default_locator self.scheme = get_scheme(self.locator.scheme)
[ "def", "__init__", "(", "self", ",", "locator", "=", "None", ")", ":", "self", ".", "locator", "=", "locator", "or", "default_locator", "self", ".", "scheme", "=", "get_scheme", "(", "self", ".", "locator", ".", "scheme", ")" ]
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py#L1035-L1041
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/tkinter/__init__.py
python
Text.insert
(self, index, chars, *args)
Insert CHARS before the characters at INDEX. An additional tag can be given in ARGS. Additional CHARS and tags can follow in ARGS.
Insert CHARS before the characters at INDEX. An additional tag can be given in ARGS. Additional CHARS and tags can follow in ARGS.
[ "Insert", "CHARS", "before", "the", "characters", "at", "INDEX", ".", "An", "additional", "tag", "can", "be", "given", "in", "ARGS", ".", "Additional", "CHARS", "and", "tags", "can", "follow", "in", "ARGS", "." ]
def insert(self, index, chars, *args): """Insert CHARS before the characters at INDEX. An additional tag can be given in ARGS. Additional CHARS and tags can follow in ARGS.""" self.tk.call((self._w, 'insert', index, chars) + args)
[ "def", "insert", "(", "self", ",", "index", ",", "chars", ",", "*", "args", ")", ":", "self", ".", "tk", ".", "call", "(", "(", "self", ".", "_w", ",", "'insert'", ",", "index", ",", "chars", ")", "+", "args", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/tkinter/__init__.py#L3269-L3272
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/binhex.py
python
_Hqxdecoderengine.close
(self)
[]
def close(self): self.ifp.close()
[ "def", "close", "(", "self", ")", ":", "self", ".", "ifp", ".", "close", "(", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/binhex.py#L289-L290
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/utils/text.py
python
BuildShellCmd
(template, *args)
return template % args
Build a safe shell command line from the given arguments. This function will check all arguments in the args list so that they are valid shell parameters (i.e. they don't contain shell metacharacters). If everything is ok, it will return the result of template % args. @type template: str @param template: the string holding the template for the string formatting @rtype: str @return: the expanded command line
Build a safe shell command line from the given arguments.
[ "Build", "a", "safe", "shell", "command", "line", "from", "the", "given", "arguments", "." ]
def BuildShellCmd(template, *args): """Build a safe shell command line from the given arguments. This function will check all arguments in the args list so that they are valid shell parameters (i.e. they don't contain shell metacharacters). If everything is ok, it will return the result of template % args. @type template: str @param template: the string holding the template for the string formatting @rtype: str @return: the expanded command line """ for word in args: if not IsValidShellParam(word): raise errors.ProgrammerError("Shell argument '%s' contains" " invalid characters" % word) return template % args
[ "def", "BuildShellCmd", "(", "template", ",", "*", "args", ")", ":", "for", "word", "in", "args", ":", "if", "not", "IsValidShellParam", "(", "word", ")", ":", "raise", "errors", ".", "ProgrammerError", "(", "\"Shell argument '%s' contains\"", "\" invalid charac...
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/utils/text.py#L610-L629
googleapis/python-dialogflow
e48ea001b7c8a4a5c1fe4b162bad49ea397458e9
google/cloud/dialogflow_v2beta1/services/documents/client.py
python
DocumentsClient.list_documents
( self, request: Union[document.ListDocumentsRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), )
return response
r"""Returns the list of all documents of the knowledge base. Note: The ``projects.agent.knowledgeBases.documents`` resource is deprecated; only use ``projects.knowledgeBases.documents``. Args: request (Union[google.cloud.dialogflow_v2beta1.types.ListDocumentsRequest, dict]): The request object. Request message for [Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments]. parent (str): Required. The knowledge base to list all documents for. Format: ``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2beta1.services.documents.pagers.ListDocumentsPager: Response message for [Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments]. Iterating over this object will yield results and resolve additional pages automatically.
r"""Returns the list of all documents of the knowledge base.
[ "r", "Returns", "the", "list", "of", "all", "documents", "of", "the", "knowledge", "base", "." ]
def list_documents( self, request: Union[document.ListDocumentsRequest, dict] = None, *, parent: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListDocumentsPager: r"""Returns the list of all documents of the knowledge base. Note: The ``projects.agent.knowledgeBases.documents`` resource is deprecated; only use ``projects.knowledgeBases.documents``. Args: request (Union[google.cloud.dialogflow_v2beta1.types.ListDocumentsRequest, dict]): The request object. Request message for [Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments]. parent (str): Required. The knowledge base to list all documents for. Format: ``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dialogflow_v2beta1.services.documents.pagers.ListDocumentsPager: Response message for [Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a document.ListDocumentsRequest. 
# There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, document.ListDocumentsRequest): request = document.ListDocumentsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if parent is not None: request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_documents] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListDocumentsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response
[ "def", "list_documents", "(", "self", ",", "request", ":", "Union", "[", "document", ".", "ListDocumentsRequest", ",", "dict", "]", "=", "None", ",", "*", ",", "parent", ":", "str", "=", "None", ",", "retry", ":", "OptionalRetry", "=", "gapic_v1", ".", ...
https://github.com/googleapis/python-dialogflow/blob/e48ea001b7c8a4a5c1fe4b162bad49ea397458e9/google/cloud/dialogflow_v2beta1/services/documents/client.py#L367-L449
eirannejad/pyRevit
49c0b7eb54eb343458ce1365425e6552d0c47d44
site-packages/sqlalchemy/engine/base.py
python
Connection.execution_options
(self, **opt)
return c
r""" Set non-SQL options for the connection which take effect during execution. The method returns a copy of this :class:`.Connection` which references the same underlying DBAPI connection, but also defines the given execution options which will take effect for a call to :meth:`execute`. As the new :class:`.Connection` references the same underlying resource, it's usually a good idea to ensure that the copies will be discarded immediately, which is implicit if used as in:: result = connection.execution_options(stream_results=True).\ execute(stmt) Note that any key/value can be passed to :meth:`.Connection.execution_options`, and it will be stored in the ``_execution_options`` dictionary of the :class:`.Connection`. It is suitable for usage by end-user schemes to communicate with event listeners, for example. The keywords that are currently recognized by SQLAlchemy itself include all those listed under :meth:`.Executable.execution_options`, as well as others that are specific to :class:`.Connection`. :param autocommit: Available on: Connection, statement. When True, a COMMIT will be invoked after execution when executed in 'autocommit' mode, i.e. when an explicit transaction is not begun on the connection. Note that DBAPI connections by default are always in a transaction - SQLAlchemy uses rules applied to different kinds of statements to determine if COMMIT will be invoked in order to provide its "autocommit" feature. Typically, all INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements have autocommit behavior enabled; SELECT constructs do not. Use this option when invoking a SELECT or other specific SQL construct where COMMIT is desired (typically when calling stored procedures and such), and an explicit transaction is not in progress. :param compiled_cache: Available on: Connection. A dictionary where :class:`.Compiled` objects will be cached when the :class:`.Connection` compiles a clause expression into a :class:`.Compiled` object. 
It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. The format of this dictionary is not guaranteed to stay the same in future releases. Note that the ORM makes use of its own "compiled" caches for some operations, including flush operations. The caching used by the ORM internally supersedes a cache dictionary specified here. :param isolation_level: Available on: :class:`.Connection`. Set the transaction isolation level for the lifespan of this :class:`.Connection` object (*not* the underlying DBAPI connection, for which the level is reset to its original setting upon termination of this :class:`.Connection` object). Valid values include those string values accepted by the :paramref:`.create_engine.isolation_level` parameter passed to :func:`.create_engine`. These levels are semi-database specific; see individual dialect documentation for valid levels. Note that this option necessarily affects the underlying DBAPI connection for the lifespan of the originating :class:`.Connection`, and is not per-execution. This setting is not removed until the underlying DBAPI connection is returned to the connection pool, i.e. the :meth:`.Connection.close` method is called. .. warning:: The ``isolation_level`` execution option should **not** be used when a transaction is already established, that is, the :meth:`.Connection.begin` method or similar has been called. A database cannot change the isolation level on a transaction in progress, and different DBAPIs and/or SQLAlchemy dialects may implicitly roll back or commit the transaction, or not affect the connection at all. .. versionchanged:: 0.9.9 A warning is emitted when the ``isolation_level`` execution option is used after a transaction has been started with :meth:`.Connection.begin` or similar. .. 
note:: The ``isolation_level`` execution option is implicitly reset if the :class:`.Connection` is invalidated, e.g. via the :meth:`.Connection.invalidate` method, or if a disconnection error occurs. The new connection produced after the invalidation will not have the isolation level re-applied to it automatically. .. seealso:: :paramref:`.create_engine.isolation_level` - set per :class:`.Engine` isolation level :meth:`.Connection.get_isolation_level` - view current level :ref:`SQLite Transaction Isolation <sqlite_isolation_level>` :ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>` :ref:`MySQL Transaction Isolation <mysql_isolation_level>` :ref:`SQL Server Transaction Isolation <mssql_isolation_level>` :ref:`session_transaction_isolation` - for the ORM :param no_parameters: When ``True``, if the final parameter list or dictionary is totally empty, will invoke the statement on the cursor as ``cursor.execute(statement)``, not passing the parameter collection at all. Some DBAPIs such as psycopg2 and mysql-python consider percent signs as significant only when parameters are present; this option allows code to generate SQL containing percent signs (and possibly other characters) that is neutral regarding whether it's executed by the DBAPI or piped into a script that's later invoked by command line tools. .. versionadded:: 0.7.6 :param stream_results: Available on: Connection, statement. Indicate to the dialect that results should be "streamed" and not pre-buffered, if possible. This is a limitation of many DBAPIs. The flag is currently understood only by the psycopg2, mysqldb and pymysql dialects. :param schema_translate_map: Available on: Connection, Engine. 
A dictionary mapping schema names to schema names, that will be applied to the :paramref:`.Table.schema` element of each :class:`.Table` encountered when SQL or DDL expression elements are compiled into strings; the resulting schema name will be converted based on presence in the map of the original name. .. versionadded:: 1.1 .. seealso:: :ref:`schema_translating`
r""" Set non-SQL options for the connection which take effect during execution.
[ "r", "Set", "non", "-", "SQL", "options", "for", "the", "connection", "which", "take", "effect", "during", "execution", "." ]
def execution_options(self, **opt): r""" Set non-SQL options for the connection which take effect during execution. The method returns a copy of this :class:`.Connection` which references the same underlying DBAPI connection, but also defines the given execution options which will take effect for a call to :meth:`execute`. As the new :class:`.Connection` references the same underlying resource, it's usually a good idea to ensure that the copies will be discarded immediately, which is implicit if used as in:: result = connection.execution_options(stream_results=True).\ execute(stmt) Note that any key/value can be passed to :meth:`.Connection.execution_options`, and it will be stored in the ``_execution_options`` dictionary of the :class:`.Connection`. It is suitable for usage by end-user schemes to communicate with event listeners, for example. The keywords that are currently recognized by SQLAlchemy itself include all those listed under :meth:`.Executable.execution_options`, as well as others that are specific to :class:`.Connection`. :param autocommit: Available on: Connection, statement. When True, a COMMIT will be invoked after execution when executed in 'autocommit' mode, i.e. when an explicit transaction is not begun on the connection. Note that DBAPI connections by default are always in a transaction - SQLAlchemy uses rules applied to different kinds of statements to determine if COMMIT will be invoked in order to provide its "autocommit" feature. Typically, all INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements have autocommit behavior enabled; SELECT constructs do not. Use this option when invoking a SELECT or other specific SQL construct where COMMIT is desired (typically when calling stored procedures and such), and an explicit transaction is not in progress. :param compiled_cache: Available on: Connection. 
A dictionary where :class:`.Compiled` objects will be cached when the :class:`.Connection` compiles a clause expression into a :class:`.Compiled` object. It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. The format of this dictionary is not guaranteed to stay the same in future releases. Note that the ORM makes use of its own "compiled" caches for some operations, including flush operations. The caching used by the ORM internally supersedes a cache dictionary specified here. :param isolation_level: Available on: :class:`.Connection`. Set the transaction isolation level for the lifespan of this :class:`.Connection` object (*not* the underlying DBAPI connection, for which the level is reset to its original setting upon termination of this :class:`.Connection` object). Valid values include those string values accepted by the :paramref:`.create_engine.isolation_level` parameter passed to :func:`.create_engine`. These levels are semi-database specific; see individual dialect documentation for valid levels. Note that this option necessarily affects the underlying DBAPI connection for the lifespan of the originating :class:`.Connection`, and is not per-execution. This setting is not removed until the underlying DBAPI connection is returned to the connection pool, i.e. the :meth:`.Connection.close` method is called. .. warning:: The ``isolation_level`` execution option should **not** be used when a transaction is already established, that is, the :meth:`.Connection.begin` method or similar has been called. A database cannot change the isolation level on a transaction in progress, and different DBAPIs and/or SQLAlchemy dialects may implicitly roll back or commit the transaction, or not affect the connection at all. .. 
versionchanged:: 0.9.9 A warning is emitted when the ``isolation_level`` execution option is used after a transaction has been started with :meth:`.Connection.begin` or similar. .. note:: The ``isolation_level`` execution option is implicitly reset if the :class:`.Connection` is invalidated, e.g. via the :meth:`.Connection.invalidate` method, or if a disconnection error occurs. The new connection produced after the invalidation will not have the isolation level re-applied to it automatically. .. seealso:: :paramref:`.create_engine.isolation_level` - set per :class:`.Engine` isolation level :meth:`.Connection.get_isolation_level` - view current level :ref:`SQLite Transaction Isolation <sqlite_isolation_level>` :ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>` :ref:`MySQL Transaction Isolation <mysql_isolation_level>` :ref:`SQL Server Transaction Isolation <mssql_isolation_level>` :ref:`session_transaction_isolation` - for the ORM :param no_parameters: When ``True``, if the final parameter list or dictionary is totally empty, will invoke the statement on the cursor as ``cursor.execute(statement)``, not passing the parameter collection at all. Some DBAPIs such as psycopg2 and mysql-python consider percent signs as significant only when parameters are present; this option allows code to generate SQL containing percent signs (and possibly other characters) that is neutral regarding whether it's executed by the DBAPI or piped into a script that's later invoked by command line tools. .. versionadded:: 0.7.6 :param stream_results: Available on: Connection, statement. Indicate to the dialect that results should be "streamed" and not pre-buffered, if possible. This is a limitation of many DBAPIs. The flag is currently understood only by the psycopg2, mysqldb and pymysql dialects. :param schema_translate_map: Available on: Connection, Engine. 
A dictionary mapping schema names to schema names, that will be applied to the :paramref:`.Table.schema` element of each :class:`.Table` encountered when SQL or DDL expression elements are compiled into strings; the resulting schema name will be converted based on presence in the map of the original name. .. versionadded:: 1.1 .. seealso:: :ref:`schema_translating` """ c = self._clone() c._execution_options = c._execution_options.union(opt) if self._has_events or self.engine._has_events: self.dispatch.set_connection_execution_options(c, opt) self.dialect.set_connection_execution_options(c, opt) return c
[ "def", "execution_options", "(", "self", ",", "*", "*", "opt", ")", ":", "c", "=", "self", ".", "_clone", "(", ")", "c", ".", "_execution_options", "=", "c", ".", "_execution_options", ".", "union", "(", "opt", ")", "if", "self", ".", "_has_events", ...
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/sqlalchemy/engine/base.py#L167-L319
brendano/tweetmotif
1b0b1e3a941745cd5a26eba01f554688b7c4b27e
everything_else/djfrontend/django-1.0.2/db/backends/__init__.py
python
BaseDatabaseOperations.lookup_cast
(self, lookup_type)
return "%s"
Returns the string to use in a query when performing lookups ("contains", "like", etc). The resulting string should contain a '%s' placeholder for the column being searched against.
Returns the string to use in a query when performing lookups ("contains", "like", etc). The resulting string should contain a '%s' placeholder for the column being searched against.
[ "Returns", "the", "string", "to", "use", "in", "a", "query", "when", "performing", "lookups", "(", "contains", "like", "etc", ")", ".", "The", "resulting", "string", "should", "contain", "a", "%s", "placeholder", "for", "the", "column", "being", "searched", ...
def lookup_cast(self, lookup_type): """ Returns the string to use in a query when performing lookups ("contains", "like", etc). The resulting string should contain a '%s' placeholder for the column being searched against. """ return "%s"
[ "def", "lookup_cast", "(", "self", ",", "lookup_type", ")", ":", "return", "\"%s\"" ]
https://github.com/brendano/tweetmotif/blob/1b0b1e3a941745cd5a26eba01f554688b7c4b27e/everything_else/djfrontend/django-1.0.2/db/backends/__init__.py#L185-L191
dropbox/dropbox-sdk-python
015437429be224732990041164a21a0501235db1
dropbox/sharing.py
python
ViewerInfoPolicy.is_disabled
(self)
return self._tag == 'disabled'
Check if the union tag is ``disabled``. :rtype: bool
Check if the union tag is ``disabled``.
[ "Check", "if", "the", "union", "tag", "is", "disabled", "." ]
def is_disabled(self): """ Check if the union tag is ``disabled``. :rtype: bool """ return self._tag == 'disabled'
[ "def", "is_disabled", "(", "self", ")", ":", "return", "self", ".", "_tag", "==", "'disabled'" ]
https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/sharing.py#L10846-L10852
stephenmcd/mezzanine
e38ffc69f732000ce44b7ed5c9d0516d258b8af2
mezzanine/blog/management/base.py
python
BaseImporterCommand.add_post
( self, title=None, content=None, old_url=None, pub_date=None, tags=None, categories=None, comments=None, )
return self.posts[-1]
Adds a post to the post list for processing. - ``title`` and ``content`` are strings for the post. - ``old_url`` is a string that a redirect will be created for. - ``pub_date`` is assumed to be a ``datetime`` object. - ``tags`` and ``categories`` are sequences of strings. - ``comments`` is a sequence of dicts - each dict should be the return value of ``add_comment``.
Adds a post to the post list for processing.
[ "Adds", "a", "post", "to", "the", "post", "list", "for", "processing", "." ]
def add_post( self, title=None, content=None, old_url=None, pub_date=None, tags=None, categories=None, comments=None, ): """ Adds a post to the post list for processing. - ``title`` and ``content`` are strings for the post. - ``old_url`` is a string that a redirect will be created for. - ``pub_date`` is assumed to be a ``datetime`` object. - ``tags`` and ``categories`` are sequences of strings. - ``comments`` is a sequence of dicts - each dict should be the return value of ``add_comment``. """ if not title: title = strip_tags(content).split(". ")[0] title = decode_entities(title) if categories is None: categories = [] if tags is None: tags = [] if comments is None: comments = [] self.posts.append( { "title": force_text(title), "publish_date": pub_date, "content": force_text(content), "categories": categories, "tags": tags, "comments": comments, "old_url": old_url, } ) return self.posts[-1]
[ "def", "add_post", "(", "self", ",", "title", "=", "None", ",", "content", "=", "None", ",", "old_url", "=", "None", ",", "pub_date", "=", "None", ",", "tags", "=", "None", ",", "categories", "=", "None", ",", "comments", "=", "None", ",", ")", ":"...
https://github.com/stephenmcd/mezzanine/blob/e38ffc69f732000ce44b7ed5c9d0516d258b8af2/mezzanine/blog/management/base.py#L62-L102
oilshell/oil
94388e7d44a9ad879b12615f6203b38596b5a2d3
Python-2.7.13/Lib/lib-tk/turtle.py
python
TPen.isvisible
(self)
return self._shown
Return True if the Turtle is shown, False if it's hidden. No argument. Example (for a Turtle instance named turtle): >>> turtle.hideturtle() >>> print turtle.isvisible(): False
Return True if the Turtle is shown, False if it's hidden.
[ "Return", "True", "if", "the", "Turtle", "is", "shown", "False", "if", "it", "s", "hidden", "." ]
def isvisible(self): """Return True if the Turtle is shown, False if it's hidden. No argument. Example (for a Turtle instance named turtle): >>> turtle.hideturtle() >>> print turtle.isvisible(): False """ return self._shown
[ "def", "isvisible", "(", "self", ")", ":", "return", "self", ".", "_shown" ]
https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/lib-tk/turtle.py#L2239-L2249
DLR-RM/stable-baselines3
e9a8979022d7005560d43b7a9c1dc1ba85f7989a
stable_baselines3/common/monitor.py
python
ResultsWriter.close
(self)
Close the file handler
Close the file handler
[ "Close", "the", "file", "handler" ]
def close(self) -> None: """ Close the file handler """ self.file_handler.close()
[ "def", "close", "(", "self", ")", "->", "None", ":", "self", ".", "file_handler", ".", "close", "(", ")" ]
https://github.com/DLR-RM/stable-baselines3/blob/e9a8979022d7005560d43b7a9c1dc1ba85f7989a/stable_baselines3/common/monitor.py#L198-L202
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/linux_packages/mxnet.py
python
GetMXNetVersion
(vm)
return stdout.strip()
Returns the version of MXNet installed on the vm. Args: vm: the target vm on which to check the MXNet version Returns: installed python MXNet version as a string
Returns the version of MXNet installed on the vm.
[ "Returns", "the", "version", "of", "MXNet", "installed", "on", "the", "vm", "." ]
def GetMXNetVersion(vm): """Returns the version of MXNet installed on the vm. Args: vm: the target vm on which to check the MXNet version Returns: installed python MXNet version as a string """ stdout, _ = vm.RemoteCommand( ('echo -e "import mxnet\nprint(mxnet.__version__)" | {0} python' .format(GetEnvironmentVars(vm))) ) return stdout.strip()
[ "def", "GetMXNetVersion", "(", "vm", ")", ":", "stdout", ",", "_", "=", "vm", ".", "RemoteCommand", "(", "(", "'echo -e \"import mxnet\\nprint(mxnet.__version__)\" | {0} python'", ".", "format", "(", "GetEnvironmentVars", "(", "vm", ")", ")", ")", ")", "return", ...
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/mxnet.py#L45-L58
wagtail/wagtail
ba8207a5d82c8a1de8f5f9693a7cd07421762999
wagtail/admin/ui/tables.py
python
Column.get_cell
(self, instance)
return Column.Cell(self, instance)
Return an object encapsulating this column and an item of data, which we can use for rendering a table cell into a template
Return an object encapsulating this column and an item of data, which we can use for rendering a table cell into a template
[ "Return", "an", "object", "encapsulating", "this", "column", "and", "an", "item", "of", "data", "which", "we", "can", "use", "for", "rendering", "a", "table", "cell", "into", "a", "template" ]
def get_cell(self, instance): """ Return an object encapsulating this column and an item of data, which we can use for rendering a table cell into a template """ return Column.Cell(self, instance)
[ "def", "get_cell", "(", "self", ",", "instance", ")", ":", "return", "Column", ".", "Cell", "(", "self", ",", "instance", ")" ]
https://github.com/wagtail/wagtail/blob/ba8207a5d82c8a1de8f5f9693a7cd07421762999/wagtail/admin/ui/tables.py#L108-L113
eriklindernoren/ML-From-Scratch
a2806c6732eee8d27762edd6d864e0c179d8e9e8
mlfromscratch/deep_learning/neural_network.py
python
NeuralNetwork.predict
(self, X)
return self._forward_pass(X, training=False)
Use the trained model to predict labels of X
Use the trained model to predict labels of X
[ "Use", "the", "trained", "model", "to", "predict", "labels", "of", "X" ]
def predict(self, X): """ Use the trained model to predict labels of X """ return self._forward_pass(X, training=False)
[ "def", "predict", "(", "self", ",", "X", ")", ":", "return", "self", ".", "_forward_pass", "(", "X", ",", "training", "=", "False", ")" ]
https://github.com/eriklindernoren/ML-From-Scratch/blob/a2806c6732eee8d27762edd6d864e0c179d8e9e8/mlfromscratch/deep_learning/neural_network.py#L121-L123
emcconville/wand
03682680c351645f16c3b8ea23bde79fbb270305
wand/color.py
python
Color.blue_int8
(self)
return max(0, min(255, int(255.0 * self.blue)))
(:class:`numbers.Integral`) Blue as 8bit integer which is a common style. From 0 to 255. .. versionadded:: 0.3.0
(:class:`numbers.Integral`) Blue as 8bit integer which is a common style. From 0 to 255.
[ "(", ":", "class", ":", "numbers", ".", "Integral", ")", "Blue", "as", "8bit", "integer", "which", "is", "a", "common", "style", ".", "From", "0", "to", "255", "." ]
def blue_int8(self): """(:class:`numbers.Integral`) Blue as 8bit integer which is a common style. From 0 to 255. .. versionadded:: 0.3.0 """ return max(0, min(255, int(255.0 * self.blue)))
[ "def", "blue_int8", "(", "self", ")", ":", "return", "max", "(", "0", ",", "min", "(", "255", ",", "int", "(", "255.0", "*", "self", ".", "blue", ")", ")", ")" ]
https://github.com/emcconville/wand/blob/03682680c351645f16c3b8ea23bde79fbb270305/wand/color.py#L418-L425
aiidateam/aiida-core
c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2
aiida/manage/manager.py
python
Manager.get_communicator
(self)
return self._communicator
Return the communicator :return: a global communicator instance
Return the communicator
[ "Return", "the", "communicator" ]
def get_communicator(self) -> 'RmqThreadCommunicator': """Return the communicator :return: a global communicator instance """ if self._communicator is None: self._communicator = self.create_communicator() return self._communicator
[ "def", "get_communicator", "(", "self", ")", "->", "'RmqThreadCommunicator'", ":", "if", "self", ".", "_communicator", "is", "None", ":", "self", ".", "_communicator", "=", "self", ".", "create_communicator", "(", ")", "return", "self", ".", "_communicator" ]
https://github.com/aiidateam/aiida-core/blob/c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2/aiida/manage/manager.py#L228-L237
jaungiers/LSTM-Neural-Network-for-Time-Series-Prediction
da44411c91135b64c02eb60da3c7f574c7c4c253
core/data_processor.py
python
DataLoader._next_window
(self, i, seq_len, normalise)
return x, y
Generates the next data window from the given index location i
Generates the next data window from the given index location i
[ "Generates", "the", "next", "data", "window", "from", "the", "given", "index", "location", "i" ]
def _next_window(self, i, seq_len, normalise): '''Generates the next data window from the given index location i''' window = self.data_train[i:i+seq_len] window = self.normalise_windows(window, single_window=True)[0] if normalise else window x = window[:-1] y = window[-1, [0]] return x, y
[ "def", "_next_window", "(", "self", ",", "i", ",", "seq_len", ",", "normalise", ")", ":", "window", "=", "self", ".", "data_train", "[", "i", ":", "i", "+", "seq_len", "]", "window", "=", "self", ".", "normalise_windows", "(", "window", ",", "single_wi...
https://github.com/jaungiers/LSTM-Neural-Network-for-Time-Series-Prediction/blob/da44411c91135b64c02eb60da3c7f574c7c4c253/core/data_processor.py#L65-L71
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/gaussian_process/_gpc.py
python
_BinaryGaussianProcessClassifierLaplace.predict
(self, X)
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X, values are from ``classes_``
Perform classification on an array of test vectors X.
[ "Perform", "classification", "on", "an", "array", "of", "test", "vectors", "X", "." ]
def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X, values are from ``classes_`` """ check_is_fitted(self) # As discussed on Section 3.4.2 of GPML, for making hard binary # decisions, it is enough to compute the MAP of the posterior and # pass it through the link function K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 return np.where(f_star > 0, self.classes_[1], self.classes_[0])
[ "def", "predict", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ")", "# As discussed on Section 3.4.2 of GPML, for making hard binary", "# decisions, it is enough to compute the MAP of the posterior and", "# pass it through the link function", "K_star", "=", "sel...
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/gaussian_process/_gpc.py#L265-L286
Calysto/calysto_scheme
15bf81987870bcae1264e5a0a06feb9a8ee12b8b
calysto_scheme/src/Scheme.py
python
string_is__q
(s1, s2)
return s1 == s2
[]
def string_is__q(s1, s2): return s1 == s2
[ "def", "string_is__q", "(", "s1", ",", "s2", ")", ":", "return", "s1", "==", "s2" ]
https://github.com/Calysto/calysto_scheme/blob/15bf81987870bcae1264e5a0a06feb9a8ee12b8b/calysto_scheme/src/Scheme.py#L818-L819
frescobaldi/frescobaldi
301cc977fc4ba7caa3df9e4bf905212ad5d06912
frescobaldi_app/svgview/view.py
python
View.initJavaScript
(self)
return self._initJavaScript
Return a string containing all JavaScript to run in a page.
Return a string containing all JavaScript to run in a page.
[ "Return", "a", "string", "containing", "all", "JavaScript", "to", "run", "in", "a", "page", "." ]
def initJavaScript(self): """Return a string containing all JavaScript to run in a page.""" try: return self._initJavaScript except AttributeError: js = [] qwebchannel_js = QFile(':/qtwebchannel/qwebchannel.js') qwebchannel_js.open(QIODevice.ReadOnly) js.append(bytes(qwebchannel_js.readAll()).decode('utf-8')) js.append("new QWebChannel(qt.webChannelTransport, function (channel) {\n" " window.pyLinks = channel.objects.pyLinks;\n" "});\n") js.append(getJsScript('pointandclick.js')) # for now only editable in dev (git) or when the user explicitly allows experimental features if app.is_git_controlled() or QSettings().value("experimental-features", False, bool): js.append(getJsScript('editsvg.js')) self._initJavaScript = '\n'.join(js) return self._initJavaScript
[ "def", "initJavaScript", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_initJavaScript", "except", "AttributeError", ":", "js", "=", "[", "]", "qwebchannel_js", "=", "QFile", "(", "':/qtwebchannel/qwebchannel.js'", ")", "qwebchannel_js", ".", "open"...
https://github.com/frescobaldi/frescobaldi/blob/301cc977fc4ba7caa3df9e4bf905212ad5d06912/frescobaldi_app/svgview/view.py#L110-L127
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/aui/auibook.py
python
AuiNotebook.GetPageCount
(self)
return self._tabs.GetPageCount()
Returns the number of pages in the notebook.
Returns the number of pages in the notebook.
[ "Returns", "the", "number", "of", "pages", "in", "the", "notebook", "." ]
def GetPageCount(self): """ Returns the number of pages in the notebook. """ return self._tabs.GetPageCount()
[ "def", "GetPageCount", "(", "self", ")", ":", "return", "self", ".", "_tabs", ".", "GetPageCount", "(", ")" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/aui/auibook.py#L4427-L4430
inkandswitch/livebook
93c8d467734787366ad084fc3566bf5cbe249c51
public/pypyjs/modules/numpy/fft/fftpack.py
python
irfft
(a, n=None, axis=-1, norm=None)
return output * (1 / (sqrt(n) if unitary else n))
Compute the inverse of the n-point DFT for real input. This function computes the inverse of the one-dimensional *n*-point discrete Fourier Transform of real input computed by `rfft`. In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical accuracy. (See Notes below for why ``len(a)`` is necessary here.) The input is expected to be in the form returned by `rfft`, i.e. the real zero-frequency term followed by the complex positive frequency terms in order of increasing frequency. Since the discrete Fourier Transform of real input is Hermitian-symmetric, the negative frequency terms are taken to be the complex conjugates of the corresponding positive frequency terms. Parameters ---------- a : array_like The input array. n : int, optional Length of the transformed axis of the output. For `n` output points, ``n//2+1`` input points are necessary. If the input is longer than this, it is cropped. If it is shorter than this, it is padded with zeros. If `n` is not given, it is determined from the length of the input along the axis specified by `axis`. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. The length of the transformed axis is `n`, or, if `n` is not given, ``2*(m-1)`` where ``m`` is the length of the transformed axis of the input. To get an odd number of output points, `n` must be specified. Raises ------ IndexError If `axis` is larger than the last axis of `a`. See Also -------- numpy.fft : For definition of the DFT and conventions used. rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. fft : The one-dimensional FFT. irfft2 : The inverse of the two-dimensional FFT of real input. 
irfftn : The inverse of the *n*-dimensional FFT of real input. Notes ----- Returns the real valued `n`-point inverse discrete Fourier transform of `a`, where `a` contains the non-negative frequency terms of a Hermitian-symmetric sequence. `n` is the length of the result, not the input. If you specify an `n` such that `a` must be zero-padded or truncated, the extra/removed values will be added/removed at high frequencies. One can thus resample a series to `m` points via Fourier interpolation by: ``a_resamp = irfft(rfft(a), m)``. Examples -------- >>> np.fft.ifft([1, -1j, -1, 1j]) array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) >>> np.fft.irfft([1, -1j, -1]) array([ 0., 1., 0., 0.]) Notice how the last term in the input to the ordinary `ifft` is the complex conjugate of the second term, and the output has zero imaginary part everywhere. When calling `irfft`, the negative frequencies are not specified, and the output array is purely real.
Compute the inverse of the n-point DFT for real input.
[ "Compute", "the", "inverse", "of", "the", "n", "-", "point", "DFT", "for", "real", "input", "." ]
def irfft(a, n=None, axis=-1, norm=None): """ Compute the inverse of the n-point DFT for real input. This function computes the inverse of the one-dimensional *n*-point discrete Fourier Transform of real input computed by `rfft`. In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical accuracy. (See Notes below for why ``len(a)`` is necessary here.) The input is expected to be in the form returned by `rfft`, i.e. the real zero-frequency term followed by the complex positive frequency terms in order of increasing frequency. Since the discrete Fourier Transform of real input is Hermitian-symmetric, the negative frequency terms are taken to be the complex conjugates of the corresponding positive frequency terms. Parameters ---------- a : array_like The input array. n : int, optional Length of the transformed axis of the output. For `n` output points, ``n//2+1`` input points are necessary. If the input is longer than this, it is cropped. If it is shorter than this, it is padded with zeros. If `n` is not given, it is determined from the length of the input along the axis specified by `axis`. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. The length of the transformed axis is `n`, or, if `n` is not given, ``2*(m-1)`` where ``m`` is the length of the transformed axis of the input. To get an odd number of output points, `n` must be specified. Raises ------ IndexError If `axis` is larger than the last axis of `a`. See Also -------- numpy.fft : For definition of the DFT and conventions used. rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. fft : The one-dimensional FFT. 
irfft2 : The inverse of the two-dimensional FFT of real input. irfftn : The inverse of the *n*-dimensional FFT of real input. Notes ----- Returns the real valued `n`-point inverse discrete Fourier transform of `a`, where `a` contains the non-negative frequency terms of a Hermitian-symmetric sequence. `n` is the length of the result, not the input. If you specify an `n` such that `a` must be zero-padded or truncated, the extra/removed values will be added/removed at high frequencies. One can thus resample a series to `m` points via Fourier interpolation by: ``a_resamp = irfft(rfft(a), m)``. Examples -------- >>> np.fft.ifft([1, -1j, -1, 1j]) array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) >>> np.fft.irfft([1, -1j, -1]) array([ 0., 1., 0., 0.]) Notice how the last term in the input to the ordinary `ifft` is the complex conjugate of the second term, and the output has zero imaginary part everywhere. When calling `irfft`, the negative frequencies are not specified, and the output array is purely real. """ # The copy may be required for multithreading. a = array(a, copy=True, dtype=complex) if n is None: n = (a.shape[axis] - 1) * 2 unitary = _unitary(norm) output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb, _real_fft_cache) return output * (1 / (sqrt(n) if unitary else n))
[ "def", "irfft", "(", "a", ",", "n", "=", "None", ",", "axis", "=", "-", "1", ",", "norm", "=", "None", ")", ":", "# The copy may be required for multithreading.", "a", "=", "array", "(", "a", ",", "copy", "=", "True", ",", "dtype", "=", "complex", ")...
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/numpy/fft/fftpack.py#L370-L456
creatorrr/pyAIML
bda3c8d85e8e0f5fa2d0133fca59a2d4e0baf641
WordSub.py
python
WordSub.sub
(self, text)
return self._regex.sub(self, text)
Translate text, returns the modified text.
Translate text, returns the modified text.
[ "Translate", "text", "returns", "the", "modified", "text", "." ]
def sub(self, text): """Translate text, returns the modified text.""" if self._regexIsDirty: self._update_regex() return self._regex.sub(self, text)
[ "def", "sub", "(", "self", ",", "text", ")", ":", "if", "self", ".", "_regexIsDirty", ":", "self", ".", "_update_regex", "(", ")", "return", "self", ".", "_regex", ".", "sub", "(", "self", ",", "text", ")" ]
https://github.com/creatorrr/pyAIML/blob/bda3c8d85e8e0f5fa2d0133fca59a2d4e0baf641/WordSub.py#L78-L82
fkie/multimaster_fkie
3d23df29d25d71a75c66bbd3cc6e9cbb255724d8
fkie_master_discovery/src/fkie_master_discovery/interface_finder.py
python
get_refresh_service
(masteruri, wait=True, check_host=True)
return _get_service(masteruri, 'refresh', wait, check_host)
Search in services of ROS master for a service with name ending by `refresh` and returns his name, if it runs on the local host. Returns empty list if no service was found and `wait` is ``False``. :param masteruri: the URI of the ROS master :type masteruri: str :param wait: check every second for the service :type wait: boo :param check_host: check for eqaul hostname of topic provider and master uri. :type check_host: bool :return: the list with names of the services ending with `refresh` :rtype: list of strings
Search in services of ROS master for a service with name ending by `refresh` and returns his name, if it runs on the local host. Returns empty list if no service was found and `wait` is ``False``.
[ "Search", "in", "services", "of", "ROS", "master", "for", "a", "service", "with", "name", "ending", "by", "refresh", "and", "returns", "his", "name", "if", "it", "runs", "on", "the", "local", "host", ".", "Returns", "empty", "list", "if", "no", "service"...
def get_refresh_service(masteruri, wait=True, check_host=True): ''' Search in services of ROS master for a service with name ending by `refresh` and returns his name, if it runs on the local host. Returns empty list if no service was found and `wait` is ``False``. :param masteruri: the URI of the ROS master :type masteruri: str :param wait: check every second for the service :type wait: boo :param check_host: check for eqaul hostname of topic provider and master uri. :type check_host: bool :return: the list with names of the services ending with `refresh` :rtype: list of strings ''' return _get_service(masteruri, 'refresh', wait, check_host)
[ "def", "get_refresh_service", "(", "masteruri", ",", "wait", "=", "True", ",", "check_host", "=", "True", ")", ":", "return", "_get_service", "(", "masteruri", ",", "'refresh'", ",", "wait", ",", "check_host", ")" ]
https://github.com/fkie/multimaster_fkie/blob/3d23df29d25d71a75c66bbd3cc6e9cbb255724d8/fkie_master_discovery/src/fkie_master_discovery/interface_finder.py#L184-L206
algorhythms/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
140 Word Break II.py
python
Solution.build_result
(self, dp, cur_index, cur_sentence, result)
dfs recursive from right to left
dfs recursive
[ "dfs", "recursive" ]
def build_result(self, dp, cur_index, cur_sentence, result): """ dfs recursive from right to left """ # reached, build the result from cur_sentence if cur_index == 0: result.append(" ".join(cur_sentence)) return # dfs for prefix in dp[cur_index]: cur_sentence.appendleft(prefix) self.build_result(dp, cur_index - len(prefix), cur_sentence, result) cur_sentence.popleft()
[ "def", "build_result", "(", "self", ",", "dp", ",", "cur_index", ",", "cur_sentence", ",", "result", ")", ":", "# reached, build the result from cur_sentence", "if", "cur_index", "==", "0", ":", "result", ".", "append", "(", "\" \"", ".", "join", "(", "cur_sen...
https://github.com/algorhythms/LeetCode/blob/3fb14aeea62a960442e47dfde9f964c7ffce32be/140 Word Break II.py#L64-L79
clinton-hall/nzbToMedia
27669389216902d1085660167e7bda0bd8527ecf
libs/common/beetsplug/replaygain.py
python
Bs1770gainBackend.parse_tool_output
(self, text, path_list, is_album)
return out
Given the output from bs1770gain, parse the text and return a list of dictionaries containing information about each analyzed file.
Given the output from bs1770gain, parse the text and return a list of dictionaries containing information about each analyzed file.
[ "Given", "the", "output", "from", "bs1770gain", "parse", "the", "text", "and", "return", "a", "list", "of", "dictionaries", "containing", "information", "about", "each", "analyzed", "file", "." ]
def parse_tool_output(self, text, path_list, is_album): """Given the output from bs1770gain, parse the text and return a list of dictionaries containing information about each analyzed file. """ per_file_gain = {} album_gain = {} # mutable variable so it can be set from handlers parser = xml.parsers.expat.ParserCreate(encoding='utf-8') state = {'file': None, 'gain': None, 'peak': None} def start_element_handler(name, attrs): if name == u'track': state['file'] = bytestring_path(attrs[u'file']) if state['file'] in per_file_gain: raise ReplayGainError( u'duplicate filename in bs1770gain output') elif name == u'integrated': state['gain'] = float(attrs[u'lu']) elif name == u'sample-peak': state['peak'] = float(attrs[u'factor']) def end_element_handler(name): if name == u'track': if state['gain'] is None or state['peak'] is None: raise ReplayGainError(u'could not parse gain or peak from ' 'the output of bs1770gain') per_file_gain[state['file']] = Gain(state['gain'], state['peak']) state['gain'] = state['peak'] = None elif name == u'summary': if state['gain'] is None or state['peak'] is None: raise ReplayGainError(u'could not parse gain or peak from ' 'the output of bs1770gain') album_gain["album"] = Gain(state['gain'], state['peak']) state['gain'] = state['peak'] = None parser.StartElementHandler = start_element_handler parser.EndElementHandler = end_element_handler parser.Parse(text, True) if len(per_file_gain) != len(path_list): raise ReplayGainError( u'the number of results returned by bs1770gain does not match ' 'the number of files passed to it') # bs1770gain does not return the analysis results in the order that # files are passed on the command line, because it is sorting the files # internally. We must recover the order from the filenames themselves. 
try: out = [per_file_gain[os.path.basename(p)] for p in path_list] except KeyError: raise ReplayGainError( u'unrecognized filename in bs1770gain output ' '(bs1770gain can only deal with utf-8 file names)') if is_album: out.append(album_gain["album"]) return out
[ "def", "parse_tool_output", "(", "self", ",", "text", ",", "path_list", ",", "is_album", ")", ":", "per_file_gain", "=", "{", "}", "album_gain", "=", "{", "}", "# mutable variable so it can be set from handlers", "parser", "=", "xml", ".", "parsers", ".", "expat...
https://github.com/clinton-hall/nzbToMedia/blob/27669389216902d1085660167e7bda0bd8527ecf/libs/common/beetsplug/replaygain.py#L217-L272
gevent/gevent
ae2cb5aeb2aea8987efcb90a4c50ca4e1ee12c31
src/gevent/_patcher.py
python
_collect_stdlib_gevent_modules
()
return result
Return a map from standard library name to imported gevent module that provides the same API. Optional modules are skipped if they cannot be imported.
Return a map from standard library name to imported gevent module that provides the same API.
[ "Return", "a", "map", "from", "standard", "library", "name", "to", "imported", "gevent", "module", "that", "provides", "the", "same", "API", "." ]
def _collect_stdlib_gevent_modules(): """ Return a map from standard library name to imported gevent module that provides the same API. Optional modules are skipped if they cannot be imported. """ result = {} for gevent_name, stdlib_name in iteritems(MAPPING): try: result[stdlib_name] = importlib.import_module(gevent_name) except ImportError: if stdlib_name in OPTIONAL_STDLIB_MODULES: continue raise return result
[ "def", "_collect_stdlib_gevent_modules", "(", ")", ":", "result", "=", "{", "}", "for", "gevent_name", ",", "stdlib_name", "in", "iteritems", "(", "MAPPING", ")", ":", "try", ":", "result", "[", "stdlib_name", "]", "=", "importlib", ".", "import_module", "("...
https://github.com/gevent/gevent/blob/ae2cb5aeb2aea8987efcb90a4c50ca4e1ee12c31/src/gevent/_patcher.py#L47-L63
Drakkar-Software/OctoBot
c80ed2270e5d085994213955c0f56b9e3b70b476
octobot/task_manager.py
python
TaskManager._create_new_asyncio_main_loop
(self)
[]
def _create_new_asyncio_main_loop(self): self.async_loop = asyncio.new_event_loop() self.async_loop.set_debug(constants.FORCE_ASYNCIO_DEBUG_OPTION) self.async_loop.set_exception_handler(self._loop_exception_handler) asyncio.set_event_loop(self.async_loop) self.current_loop_thread = threading.Thread(target=self.async_loop.run_forever, name=f"{self.get_name()} new asyncio main loop") self.current_loop_thread.start()
[ "def", "_create_new_asyncio_main_loop", "(", "self", ")", ":", "self", ".", "async_loop", "=", "asyncio", ".", "new_event_loop", "(", ")", "self", ".", "async_loop", ".", "set_debug", "(", "constants", ".", "FORCE_ASYNCIO_DEBUG_OPTION", ")", "self", ".", "async_...
https://github.com/Drakkar-Software/OctoBot/blob/c80ed2270e5d085994213955c0f56b9e3b70b476/octobot/task_manager.py#L112-L119
DataBiosphere/toil
2e148eee2114ece8dcc3ec8a83f36333266ece0d
src/toil/common.py
python
ToilMetrics._containerRunning
(containerName)
return result
[]
def _containerRunning(containerName): try: result = subprocess.check_output(["docker", "inspect", "-f", "'{{.State.Running}}'", containerName]).decode('utf-8') == "true" except subprocess.CalledProcessError: result = False return result
[ "def", "_containerRunning", "(", "containerName", ")", ":", "try", ":", "result", "=", "subprocess", ".", "check_output", "(", "[", "\"docker\"", ",", "\"inspect\"", ",", "\"-f\"", ",", "\"'{{.State.Running}}'\"", ",", "containerName", "]", ")", ".", "decode", ...
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/common.py#L1386-L1392
mongodb/mongo-python-driver
c760f900f2e4109a247c2ffc8ad3549362007772
pymongo/message.py
python
_query
(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx=None)
return _query_uncompressed(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts)
Get a **query** message.
Get a **query** message.
[ "Get", "a", "**", "query", "**", "message", "." ]
def _query(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx=None): """Get a **query** message.""" if ctx: return _query_compressed(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx) return _query_uncompressed(options, collection_name, num_to_skip, num_to_return, query, field_selector, opts)
[ "def", "_query", "(", "options", ",", "collection_name", ",", "num_to_skip", ",", "num_to_return", ",", "query", ",", "field_selector", ",", "opts", ",", "ctx", "=", "None", ")", ":", "if", "ctx", ":", "return", "_query_compressed", "(", "options", ",", "c...
https://github.com/mongodb/mongo-python-driver/blob/c760f900f2e4109a247c2ffc8ad3549362007772/pymongo/message.py#L646-L654
grnet/synnefo
d06ec8c7871092131cdaabf6b03ed0b504c93e43
ci/utils.py
python
SynnefoCI.setup_fabric
(self)
Setup fabric environment
Setup fabric environment
[ "Setup", "fabric", "environment" ]
def setup_fabric(self): """Setup fabric environment""" self.logger.info("Setup fabric parameters..") fabric.env.user = self.read_temp_config('server_user') fabric.env.host_string = self.read_temp_config('server_ip') fabric.env.port = int(self.read_temp_config('server_port')) fabric.env.password = self.read_temp_config('server_passwd') fabric.env.connection_attempts = 10 fabric.env.shell = "/bin/bash -c" fabric.env.disable_known_hosts = True fabric.env.output_prefix = None no_agent = SSH_NO_AGENT or self.get_config( 'Global', 'no_agent', False, 'boolean') fabric.env.no_agent = no_agent forward_agent = self.get_config( 'Global', 'forward_agent', False, 'boolean') fabric.env.forward_agent = forward_agent
[ "def", "setup_fabric", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Setup fabric parameters..\"", ")", "fabric", ".", "env", ".", "user", "=", "self", ".", "read_temp_config", "(", "'server_user'", ")", "fabric", ".", "env", ".", "hos...
https://github.com/grnet/synnefo/blob/d06ec8c7871092131cdaabf6b03ed0b504c93e43/ci/utils.py#L763-L781
ialbert/biostar-central
2dc7bd30691a50b2da9c2833ba354056bc686afa
biostar/forum/auth.py
python
is_suspended
(user)
return False
[]
def is_suspended(user): if user.is_authenticated and user.profile.state in (Profile.BANNED, Profile.SUSPENDED, Profile.SPAMMER): return True return False
[ "def", "is_suspended", "(", "user", ")", ":", "if", "user", ".", "is_authenticated", "and", "user", ".", "profile", ".", "state", "in", "(", "Profile", ".", "BANNED", ",", "Profile", ".", "SUSPENDED", ",", "Profile", ".", "SPAMMER", ")", ":", "return", ...
https://github.com/ialbert/biostar-central/blob/2dc7bd30691a50b2da9c2833ba354056bc686afa/biostar/forum/auth.py#L345-L349
numba/numba
bf480b9e0da858a65508c2b17759a72ee6a44c51
numba/core/ir_utils.py
python
enforce_no_phis
(func_ir)
Enforce there being no ir.Expr.phi nodes in the IR.
Enforce there being no ir.Expr.phi nodes in the IR.
[ "Enforce", "there", "being", "no", "ir", ".", "Expr", ".", "phi", "nodes", "in", "the", "IR", "." ]
def enforce_no_phis(func_ir): """ Enforce there being no ir.Expr.phi nodes in the IR. """ for blk in func_ir.blocks.values(): phis = [x for x in blk.find_exprs(op='phi')] if phis: msg = "Illegal IR, phi found at: %s" % phis[0] raise CompilerError(msg, loc=phis[0].loc)
[ "def", "enforce_no_phis", "(", "func_ir", ")", ":", "for", "blk", "in", "func_ir", ".", "blocks", ".", "values", "(", ")", ":", "phis", "=", "[", "x", "for", "x", "in", "blk", ".", "find_exprs", "(", "op", "=", "'phi'", ")", "]", "if", "phis", ":...
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/ir_utils.py#L2201-L2209
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/PIL/ContainerIO.py
python
ContainerIO.read
(self, n=0)
return self.fh.read(n)
Read data. :param n: Number of bytes to read. If omitted or zero, read until end of region. :returns: An 8-bit string.
Read data.
[ "Read", "data", "." ]
def read(self, n=0): """ Read data. :param n: Number of bytes to read. If omitted or zero, read until end of region. :returns: An 8-bit string. """ if n: n = min(n, self.length - self.pos) else: n = self.length - self.pos if not n: # EOF return "" self.pos = self.pos + n return self.fh.read(n)
[ "def", "read", "(", "self", ",", "n", "=", "0", ")", ":", "if", "n", ":", "n", "=", "min", "(", "n", ",", "self", ".", "length", "-", "self", ".", "pos", ")", "else", ":", "n", "=", "self", ".", "length", "-", "self", ".", "pos", "if", "n...
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/PIL/ContainerIO.py#L73-L88
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
ext/pyparsing.py
python
ParserElement.setDebugActions
(self, startAction, successAction, exceptionAction)
return self
Enable display of debugging messages while doing pattern matching.
Enable display of debugging messages while doing pattern matching.
[ "Enable", "display", "of", "debugging", "messages", "while", "doing", "pattern", "matching", "." ]
def setDebugActions(self, startAction, successAction, exceptionAction): """ Enable display of debugging messages while doing pattern matching. """ self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) self.debug = True return self
[ "def", "setDebugActions", "(", "self", ",", "startAction", ",", "successAction", ",", "exceptionAction", ")", ":", "self", ".", "debugActions", "=", "(", "startAction", "or", "_defaultStartDebugAction", ",", "successAction", "or", "_defaultSuccessDebugAction", ",", ...
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/pyparsing.py#L2492-L2500