repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
ubc/ubcpi
ubcpi/serialize.py
https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/serialize.py#L310-L348
def serialize_to_xml(root, block):
    """
    Serialize the Peer Instruction XBlock's content to XML.

    Args:
        block (PeerInstructionXBlock): The peer instruction block to serialize.
        root (etree.Element): The XML root node to update.

    Returns:
        etree.Element
    """
    root.tag = 'ubcpi'

    # Optional attributes: only emitted when the corresponding value is set.
    rationale_size = block.rationale_size
    if rationale_size is not None:
        if rationale_size.get('min'):
            root.set('rationale_size_min', unicode(rationale_size.get('min')))
        if rationale_size.get('max'):
            root.set('rationale_size_max', unicode(rationale_size['max']))

    algo = block.algo
    if algo:
        if algo.get('name'):
            root.set('algorithm', algo.get('name'))
        if algo.get('num_responses'):
            root.set('num_responses', unicode(algo.get('num_responses')))

    # Child elements: display name, question (text plus optional image),
    # answer options, and seeded rationales.
    etree.SubElement(root, 'display_name').text = block.display_name

    question = etree.SubElement(root, 'question')
    etree.SubElement(question, 'text').text = block.question_text['text']
    serialize_image(block.question_text, question)

    serialize_options(etree.SubElement(root, 'options'), block)
    serialize_seeds(etree.SubElement(root, 'seeds'), block)
[ "def", "serialize_to_xml", "(", "root", ",", "block", ")", ":", "root", ".", "tag", "=", "'ubcpi'", "if", "block", ".", "rationale_size", "is", "not", "None", ":", "if", "block", ".", "rationale_size", ".", "get", "(", "'min'", ")", ":", "root", ".", ...
Serialize the Peer Instruction XBlock's content to XML. Args: block (PeerInstructionXBlock): The peer instruction block to serialize. root (etree.Element): The XML root node to update. Returns: etree.Element
[ "Serialize", "the", "Peer", "Instruction", "XBlock", "s", "content", "to", "XML", "." ]
python
train
33.153846
floydhub/floyd-cli
floyd/client/base.py
https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/base.py#L30-L72
def request(self, method, url, params=None, data=None, files=None, json=None,
            timeout=5, headers=None, skip_auth=False):
    """
    Execute an HTTP request against the Floyd server using the requests library.

    :param method: HTTP verb, e.g. ``'GET'`` or ``'POST'``
    :param url: path appended to ``self.base_url``
    :param params: query-string parameters
    :param data: form-encoded request body
    :param files: multipart file payload
    :param json: JSON request body
    :param timeout: request timeout in seconds
    :param headers: extra headers merged on top of the defaults
    :param skip_auth: if True, do not attach the Authorization header
    :return: the ``requests.Response`` object (after status validation)
    """
    request_url = self.base_url + url
    floyd_logger.debug("Starting request to url: %s with params: %s, data: %s",
                       request_url, params, data)
    request_headers = {'x-floydhub-cli-version': get_cli_version()}

    # Auth headers if present. Fix: the original accepted skip_auth but
    # ignored it, so the Authorization header was always attached.
    if self.auth_header and not skip_auth:
        request_headers["Authorization"] = self.auth_header

    # Add any additional headers
    if headers:
        request_headers.update(headers)

    try:
        response = requests.request(method,
                                    request_url,
                                    params=params,
                                    data=data,
                                    json=json,
                                    headers=request_headers,
                                    files=files,
                                    timeout=timeout)
    except requests.exceptions.ConnectionError as exception:
        floyd_logger.debug("Exception: %s", exception, exc_info=True)
        sys.exit("Cannot connect to the Floyd server. Check your internet connection.")
    except requests.exceptions.Timeout as exception:
        floyd_logger.debug("Exception: %s", exception, exc_info=True)
        sys.exit("Connection to FloydHub server timed out. Please retry or check your internet connection.")

    floyd_logger.debug("Response Content: %s, Headers: %s" % (response.content, response.headers))
    self.check_response_status(response)
    return response
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "data", "=", "None", ",", "files", "=", "None", ",", "json", "=", "None", ",", "timeout", "=", "5", ",", "headers", "=", "None", ",", "skip_auth", "=", "Fa...
Execute the request using requests library
[ "Execute", "the", "request", "using", "requests", "library" ]
python
train
42.930233
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L3609-L3646
def make_2d_block_raster_mask(query_shape, memory_flange): """Creates a mask for 2d block raster scan. The query mask can look to the left, top left, top, and top right, but not to the right. Inside the query, we have the standard raster scan masking. Args: query_shape: A tuple of ints (query_height, query_width) memory_flange: A tuple of ints (memory_flange_height, memory_flange_width) Returns: A tensor of shape query_size, memory_size """ # mask inside the query block query_triangle = common_layers.ones_matrix_band_part( np.prod(query_shape), np.prod(query_shape), -1, 0) split_query_masks = tf.split(query_triangle, query_shape[0], axis=1) # adding mask for left and right mask_pieces = [ tf.concat( # pylint: disable=g-complex-comprehension [tf.ones([np.prod(query_shape), memory_flange[1]]), split_query_masks[i], tf.zeros([np.prod(query_shape), memory_flange[1]])], axis=1) for i in range(query_shape[0]) ] # adding mask for top final_mask = tf.concat( [ tf.ones([ np.prod(query_shape), (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0] ]), tf.concat(mask_pieces, axis=1) ], axis=1) # 0.0 is visible location, 1.0 is masked. return 1. - final_mask
[ "def", "make_2d_block_raster_mask", "(", "query_shape", ",", "memory_flange", ")", ":", "# mask inside the query block", "query_triangle", "=", "common_layers", ".", "ones_matrix_band_part", "(", "np", ".", "prod", "(", "query_shape", ")", ",", "np", ".", "prod", "(...
Creates a mask for 2d block raster scan. The query mask can look to the left, top left, top, and top right, but not to the right. Inside the query, we have the standard raster scan masking. Args: query_shape: A tuple of ints (query_height, query_width) memory_flange: A tuple of ints (memory_flange_height, memory_flange_width) Returns: A tensor of shape query_size, memory_size
[ "Creates", "a", "mask", "for", "2d", "block", "raster", "scan", "." ]
python
train
34.631579
jwodder/javaproperties
javaproperties/xmlprops.py
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L114-L130
def dumps_xml(props, comment=None, sort_keys=False):
    """
    Convert a series ``props`` of key-value pairs to a text string containing
    an XML properties document.  The document will include a doctype
    declaration but not an XML declaration.

    :param props: A mapping or iterable of ``(key, value)`` pairs to
        serialize.  All keys and values in ``props`` must be text strings.
        If ``sort_keys`` is `False`, the entries are output in iteration
        order.
    :param comment: if non-`None`, ``comment`` will be output as a
        ``<comment>`` element before the ``<entry>`` elements
    :type comment: text string or `None`
    :param bool sort_keys: if true, the elements of ``props`` are sorted
        lexicographically by key in the output
    :rtype: text string
    """
    # Collect each streamed line followed by its newline, then join once.
    pieces = []
    for line in _stream_xml(props, comment, sort_keys):
        pieces.append(line)
        pieces.append('\n')
    return ''.join(pieces)
[ "def", "dumps_xml", "(", "props", ",", "comment", "=", "None", ",", "sort_keys", "=", "False", ")", ":", "return", "''", ".", "join", "(", "s", "+", "'\\n'", "for", "s", "in", "_stream_xml", "(", "props", ",", "comment", ",", "sort_keys", ")", ")" ]
Convert a series ``props`` of key-value pairs to a text string containing an XML properties document. The document will include a doctype declaration but not an XML declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to serialize. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :rtype: text string
[ "Convert", "a", "series", "props", "of", "key", "-", "value", "pairs", "to", "a", "text", "string", "containing", "an", "XML", "properties", "document", ".", "The", "document", "will", "include", "a", "doctype", "declaration", "but", "not", "an", "XML", "d...
python
train
50.529412
tradenity/python-sdk
tradenity/resources/state.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/state.py#L443-L463
def get_state_by_id(cls, state_id, **kwargs):
    """Find State

    Return single instance of State by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_state_by_id(state_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str state_id: ID of state to return (required)
    :return: State
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths delegate to the same helper; the async
    # path hands back the request thread, the sync path the resolved data.
    return cls._get_state_by_id_with_http_info(state_id, **kwargs)
[ "def", "get_state_by_id", "(", "cls", ",", "state_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_state_by_id_with_http_inf...
Find State Return single instance of State by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_state_by_id(state_id, async=True) >>> result = thread.get() :param async bool :param str state_id: ID of state to return (required) :return: State If the method is called asynchronously, returns the request thread.
[ "Find", "State" ]
python
train
39.333333
BreakingBytes/simkit
simkit/core/calculations.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculations.py#L41-L56
def register(self, new_calc, *args, **kwargs):
    """
    Register calculations and meta data.

    * ``dependencies`` - list of prerequisite calculations
    * ``always_calc`` - ``True`` if calculation ignores thresholds
    * ``frequency`` - frequency of calculation in intervals or units of time

    :param new_calc: register new calculation
    """
    # Positional meta arguments map onto self.meta_names in declaration order.
    kwargs.update(zip(self.meta_names, args))
    # dependencies should be a list of other calculations; wrap a lone
    # string so downstream code can always iterate it.
    deps = kwargs['dependencies']
    if isinstance(deps, basestring):
        kwargs['dependencies'] = [deps]
    # call super method, now meta can be passed as args or kwargs.
    super(CalcRegistry, self).register(new_calc, **kwargs)
[ "def", "register", "(", "self", ",", "new_calc", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# dependencies should be a list of other calculations", "if", ...
Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation
[ "Register", "calculations", "and", "meta", "data", "." ]
python
train
45.875
Parsely/birding
src/birding/shelf.py
https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/shelf.py#L97-L102
def unpack(self, key, value):
    """Unpack a ``(value, freshness)`` pair and return the value if fresh.

    :raises KeyError: when the freshness stamp fails ``self.is_fresh``.
    """
    payload, freshness = value
    if self.is_fresh(freshness):
        return payload
    raise KeyError('{} (stale)'.format(key))
[ "def", "unpack", "(", "self", ",", "key", ",", "value", ")", ":", "value", ",", "freshness", "=", "value", "if", "not", "self", ".", "is_fresh", "(", "freshness", ")", ":", "raise", "KeyError", "(", "'{} (stale)'", ".", "format", "(", "key", ")", ")"...
Unpack and return value only if it is fresh.
[ "Unpack", "and", "return", "value", "only", "if", "it", "is", "fresh", "." ]
python
train
38.5
chrissimpkins/crypto
lib/crypto/library/hash.py
https://github.com/chrissimpkins/crypto/blob/6b95fa81b26312e46f02557dca0b5f5c898a76fd/lib/crypto/library/hash.py#L10-L14
def generate_hash(filepath):
    """Public function that reads a local file and generates a SHA256 hash digest for it"""
    # Read the file as binary, then delegate digest computation.
    binary_data = FileReader(filepath).read_bin()
    return _calculate_sha256(binary_data)
[ "def", "generate_hash", "(", "filepath", ")", ":", "fr", "=", "FileReader", "(", "filepath", ")", "data", "=", "fr", ".", "read_bin", "(", ")", "return", "_calculate_sha256", "(", "data", ")" ]
Public function that reads a local file and generates a SHA256 hash digest for it
[ "Public", "function", "that", "reads", "a", "local", "file", "and", "generates", "a", "SHA256", "hash", "digest", "for", "it" ]
python
train
41.2
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L4941-L4967
def _from_dict(cls, _dict):
    """Initialize a SpeechRecognitionResult object from a json dictionary."""
    args = {}
    # 'final' is the wire name; 'final_results' is accepted as an alias.
    if 'final' not in _dict and 'final_results' not in _dict:
        raise ValueError(
            'Required property \'final\' not present in SpeechRecognitionResult JSON'
        )
    args['final_results'] = _dict.get('final') or _dict.get('final_results')
    if 'alternatives' not in _dict:
        raise ValueError(
            'Required property \'alternatives\' not present in SpeechRecognitionResult JSON'
        )
    args['alternatives'] = [
        SpeechRecognitionAlternative._from_dict(x)
        for x in (_dict.get('alternatives'))
    ]
    # Optional properties.
    if 'keywords_result' in _dict:
        args['keywords_result'] = _dict.get('keywords_result')
    if 'word_alternatives' in _dict:
        args['word_alternatives'] = [
            WordAlternativeResults._from_dict(x)
            for x in (_dict.get('word_alternatives'))
        ]
    return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'final'", "in", "_dict", "or", "'final_results'", "in", "_dict", ":", "args", "[", "'final_results'", "]", "=", "_dict", ".", "get", "(", "'final'", ")", "or", "_d...
Initialize a SpeechRecognitionResult object from a json dictionary.
[ "Initialize", "a", "SpeechRecognitionResult", "object", "from", "a", "json", "dictionary", "." ]
python
train
40.851852
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L135-L146
def to_json_str(self):
  """Convert data to json string representation.

  Returns:
    json representation as string.

  Raises:
    Whatever ``json.dumps`` raised; the error is logged before it
    propagates.
  """
  _json = self.to_json()
  try:
    return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
  # Narrowed from a bare ``except:`` so BaseException subclasses such as
  # KeyboardInterrupt/SystemExit are not intercepted just for logging.
  except Exception:
    logging.exception("Could not serialize JSON: %r", _json)
    raise
[ "def", "to_json_str", "(", "self", ")", ":", "_json", "=", "self", ".", "to_json", "(", ")", "try", ":", "return", "json", ".", "dumps", "(", "_json", ",", "sort_keys", "=", "True", ",", "cls", "=", "JsonEncoder", ")", "except", ":", "logging", ".", ...
Convert data to json string representation. Returns: json representation as string.
[ "Convert", "data", "to", "json", "string", "representation", "." ]
python
train
25.666667
tensorflow/lucid
lucid/optvis/objectives.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L133-L161
def neuron(layer_name, channel_n, x=None, y=None, batch=None):
  """Visualize a single neuron of a single channel.

  Defaults to the center neuron. When width and height are even numbers, we
  choose the neuron in the bottom right of the center 2x2 neurons.

  Odd width & height:               Even width & height:

  +---+---+---+                     +---+---+---+---+
  |   |   |   |                     |   |   |   |   |
  +---+---+---+                     +---+---+---+---+
  |   | X |   |                     |   |   |   |   |
  +---+---+---+                     +---+---+---+---+
  |   |   |   |                     |   |   | X |   |
  +---+---+---+                     +---+---+---+---+
                                    |   |   |   |   |
                                    +---+---+---+---+
  """
  def inner(T):
    layer = T(layer_name)
    shape = tf.shape(layer)
    # Integer floor-division picks the center (bottom-right of the center
    # 2x2 for even extents) when no explicit coordinate was given.
    row = x if x is not None else shape[1] // 2
    col = y if y is not None else shape[2] // 2
    if batch is None:
      return layer[:, row, col, channel_n]
    return layer[batch, row, col, channel_n]
  return inner
[ "def", "neuron", "(", "layer_name", ",", "channel_n", ",", "x", "=", "None", ",", "y", "=", "None", ",", "batch", "=", "None", ")", ":", "def", "inner", "(", "T", ")", ":", "layer", "=", "T", "(", "layer_name", ")", "shape", "=", "tf", ".", "sh...
Visualize a single neuron of a single channel. Defaults to the center neuron. When width and height are even numbers, we choose the neuron in the bottom right of the center 2x2 neurons. Odd width & height: Even width & height: +---+---+---+ +---+---+---+---+ | | | | | | | | | +---+---+---+ +---+---+---+---+ | | X | | | | | | | +---+---+---+ +---+---+---+---+ | | | | | | | X | | +---+---+---+ +---+---+---+---+ | | | | | +---+---+---+---+
[ "Visualize", "a", "single", "neuron", "of", "a", "single", "channel", "." ]
python
train
36.931034
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py#L900-L920
def _encode_auth(auth): """ A function compatible with Python 2.3-3.3 that will encode auth from a URL suitable for an HTTP header. >>> str(_encode_auth('username%3Apassword')) 'dXNlcm5hbWU6cGFzc3dvcmQ=' Long auth strings should not cause a newline to be inserted. >>> long_auth = 'username:' + 'password'*10 >>> chr(10) in str(_encode_auth(long_auth)) False """ auth_s = unquote(auth) # convert to bytes auth_bytes = auth_s.encode() # use the legacy interface for Python 2.3 support encoded_bytes = base64.encodestring(auth_bytes) # convert back to a string encoded = encoded_bytes.decode() # strip the trailing carriage return return encoded.replace('\n','')
[ "def", "_encode_auth", "(", "auth", ")", ":", "auth_s", "=", "unquote", "(", "auth", ")", "# convert to bytes", "auth_bytes", "=", "auth_s", ".", "encode", "(", ")", "# use the legacy interface for Python 2.3 support", "encoded_bytes", "=", "base64", ".", "encodestr...
A function compatible with Python 2.3-3.3 that will encode auth from a URL suitable for an HTTP header. >>> str(_encode_auth('username%3Apassword')) 'dXNlcm5hbWU6cGFzc3dvcmQ=' Long auth strings should not cause a newline to be inserted. >>> long_auth = 'username:' + 'password'*10 >>> chr(10) in str(_encode_auth(long_auth)) False
[ "A", "function", "compatible", "with", "Python", "2", ".", "3", "-", "3", ".", "3", "that", "will", "encode", "auth", "from", "a", "URL", "suitable", "for", "an", "HTTP", "header", ".", ">>>", "str", "(", "_encode_auth", "(", "username%3Apassword", "))",...
python
test
34.142857
grundic/yagocd
yagocd/session.py
https://github.com/grundic/yagocd/blob/4c75336ae6f107c8723d37b15e52169151822127/yagocd/session.py#L69-L84
def server_version(self):
    """
    Special method for getting server version.

    Because of different behaviour on different versions of server,
    we have to pass different headers to the endpoints. This method
    requests the version from server and caches it in internal variable,
    so other resources could use it.

    :return: server version parsed from `about` page.
    """
    cached = self.__server_version
    if cached is None:
        # Imported lazily to avoid a circular import at module load time.
        from yagocd.resources.info import InfoManager
        cached = InfoManager(self).version
        self.__server_version = cached
    return cached
[ "def", "server_version", "(", "self", ")", ":", "if", "self", ".", "__server_version", "is", "None", ":", "from", "yagocd", ".", "resources", ".", "info", "import", "InfoManager", "self", ".", "__server_version", "=", "InfoManager", "(", "self", ")", ".", ...
Special method for getting server version. Because of different behaviour on different versions of server, we have to pass different headers to the endpoints. This method requests the version from server and caches it in internal variable, so other resources could use it. :return: server version parsed from `about` page.
[ "Special", "method", "for", "getting", "server", "version", "." ]
python
train
37.9375
exhuma/python-cluster
cluster/method/hierarchical.py
https://github.com/exhuma/python-cluster/blob/4c0ac14d9beafcd51f0d849151514083c296402f/cluster/method/hierarchical.py#L120-L189
def cluster(self, matrix=None, level=None, sequence=None):
    """
    Perform hierarchical clustering.

    :param matrix: The 2D list that is currently under processing. The
        matrix contains the distances of each item with each other
    :param level: The current level of clustering
    :param sequence: The sequence number of the clustering
    """
    logger.info("Performing cluster()")
    if matrix is None:
        # create level 0, first iteration (sequence)
        level = 0
        sequence = 0
        matrix = []

    # if the matrix only has two rows left, we are done
    linkage = partial(self.linkage, distance_function=self.distance)
    initial_element_count = len(self._data)
    # Repeatedly merge the two closest items until only one pair remains.
    # An empty matrix means "first iteration", so the loop always runs once.
    while len(matrix) > 2 or matrix == []:
        # Rebuild the item-to-item distance matrix for the current data.
        item_item_matrix = Matrix(self._data, linkage, True, 0)
        item_item_matrix.genmatrix(self.num_processes)
        matrix = item_item_matrix.matrix
        smallestpair = None
        mindistance = None
        rowindex = 0  # keep track of where we are in the matrix
        # find the minimum distance
        for row in matrix:
            cellindex = 0  # keep track of where we are in the matrix
            for cell in row:
                # if we are not on the diagonal (which is always 0)
                # and if this cell represents a new minimum...
                # NOTE(review): a stored mindistance of exactly 0 is falsy,
                # which forces cell_lt_mdist to False — presumably harmless
                # because smallestpair is already set by then; confirm.
                cell_lt_mdist = cell < mindistance if mindistance else False
                if ((rowindex != cellindex) and
                        (cell_lt_mdist or smallestpair is None)):
                    smallestpair = (rowindex, cellindex)
                    mindistance = cell
                cellindex += 1
            rowindex += 1

        sequence += 1
        level = matrix[smallestpair[1]][smallestpair[0]]
        cluster = Cluster(level, self._data[smallestpair[0]],
                          self._data[smallestpair[1]])

        # maintain the data, by combining the the two most similar items
        # in the list we use the min and max functions to ensure the
        # integrity of the data. imagine: if we first remove the item
        # with the smaller index, all the rest of the items shift down by
        # one. So the next index will be wrong. We could simply adjust the
        # value of the second "remove" call, but we don't know the order
        # in which they come. The max and min approach clarifies that
        # NOTE(review): list.remove() deletes the first *equal* element,
        # which may differ from the indexed one if duplicates exist —
        # verify that equal items are interchangeable here.
        self._data.remove(self._data[max(
            smallestpair[0], smallestpair[1])])  # remove item 1
        self._data.remove(self._data[min(
            smallestpair[0], smallestpair[1])])  # remove item 2
        self._data.append(cluster)  # append item 1 and 2 combined
        self.publish_progress(initial_element_count, len(self._data))

    # all the data is in one single cluster. We return that and stop
    self.__cluster_created = True
    logger.info("Call to cluster() is complete")
    return
[ "def", "cluster", "(", "self", ",", "matrix", "=", "None", ",", "level", "=", "None", ",", "sequence", "=", "None", ")", ":", "logger", ".", "info", "(", "\"Performing cluster()\"", ")", "if", "matrix", "is", "None", ":", "# create level 0, first iteration (...
Perform hierarchical clustering. :param matrix: The 2D list that is currently under processing. The matrix contains the distances of each item with each other :param level: The current level of clustering :param sequence: The sequence number of the clustering
[ "Perform", "hierarchical", "clustering", "." ]
python
train
45.928571
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L661-L686
def tenant_delete(tenant_id=None, name=None, profile=None, **connection_args):
    '''
    Delete a tenant (keystone tenant-delete)

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.tenant_delete name=demo
    '''
    kstone = auth(profile, **connection_args)
    if name:
        # Resolve the tenant id from its name; fall back to any id passed in.
        matches = (t.id for t in getattr(kstone, _TENANTS, None).list()
                   if t.name == name)
        tenant_id = next(matches, tenant_id)
    if not tenant_id:
        return {'Error': 'Unable to resolve tenant id'}
    getattr(kstone, _TENANTS, None).delete(tenant_id)
    ret = 'Tenant ID {0} deleted'.format(tenant_id)
    if name:
        ret += ' ({0})'.format(name)
    return ret
[ "def", "tenant_delete", "(", "tenant_id", "=", "None", ",", "name", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "if", "name", ":", ...
Delete a tenant (keystone tenant-delete) CLI Examples: .. code-block:: bash salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.tenant_delete name=demo
[ "Delete", "a", "tenant", "(", "keystone", "tenant", "-", "delete", ")" ]
python
train
32.038462
zhanglab/psamm
psamm/commands/primarypairs.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/commands/primarypairs.py#L215-L236
def _parse_weights(weight_args, default_weight=0.6): """Parse list of weight assignments.""" weights_dict = {} r_group_weight = default_weight for weight_arg in weight_args: for weight_assignment in weight_arg.split(','): if '=' not in weight_assignment: raise ValueError( 'Invalid weight assignment: {}'.format(weight_assignment)) key, value = weight_assignment.split('=', 1) value = float(value) if key == 'R': r_group_weight = value elif key == '*': default_weight = value elif hasattr(Atom, key): weights_dict[Atom(key)] = value else: raise ValueError('Invalid element: {}'.format(key)) return weights_dict, r_group_weight, default_weight
[ "def", "_parse_weights", "(", "weight_args", ",", "default_weight", "=", "0.6", ")", ":", "weights_dict", "=", "{", "}", "r_group_weight", "=", "default_weight", "for", "weight_arg", "in", "weight_args", ":", "for", "weight_assignment", "in", "weight_arg", ".", ...
Parse list of weight assignments.
[ "Parse", "list", "of", "weight", "assignments", "." ]
python
train
37.954545
kata198/AdvancedHTMLParser
AdvancedHTMLParser/utils.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/utils.py#L24-L41
def stripIEConditionals(contents, addHtmlIfMissing=True):
    '''
    stripIEConditionals - Strips Internet Explorer conditional statements.

    @param contents <str> - Contents String

    @param addHtmlIfMissing <bool> - Since these normally encompass the
        "html" element, optionally add it back if missing.

    @return <str> - Contents with all IE conditional comments removed.
    '''
    allMatches = IE_CONDITIONAL_PATTERN.findall(contents)

    if not allMatches:
        return contents

    for match in allMatches:
        contents = contents.replace(match, '')

    # Fix: honor addHtmlIfMissing — the original accepted the flag but
    # always re-added the start tag regardless of its value.
    if addHtmlIfMissing and \
            END_HTML.match(contents) and not START_HTML.match(contents):
        contents = addStartTag(contents, '<html>')

    return contents
[ "def", "stripIEConditionals", "(", "contents", ",", "addHtmlIfMissing", "=", "True", ")", ":", "allMatches", "=", "IE_CONDITIONAL_PATTERN", ".", "findall", "(", "contents", ")", "if", "not", "allMatches", ":", "return", "contents", "for", "match", "in", "allMatc...
stripIEConditionals - Strips Internet Explorer conditional statements. @param contents <str> - Contents String @param addHtmlIfMissing <bool> - Since these normally encompass the "html" element, optionally add it back if missing.
[ "stripIEConditionals", "-", "Strips", "Internet", "Explorer", "conditional", "statements", "." ]
python
train
35.222222
pywbem/pywbem
attic/cim_provider2.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/attic/cim_provider2.py#L936-L950
def filter_instance(self, inst, plist):
    """Remove properties from an instance that aren't in the PropertyList

    inst -- The pywbem.CIMInstance

    plist -- The property List, or None.  The list items must be all
        lowercase.
    """
    if plist is None:
        # No PropertyList means all properties are kept.
        return
    # Fix: iterate over a snapshot of the keys.  Deleting entries while
    # iterating the live key view raises RuntimeError on Python 3.
    for pname in list(inst.properties.keys()):
        if pname.lower() not in plist and pname:
            # Never drop key properties that appear in the instance path.
            if inst.path is not None and pname in inst.path.keybindings:
                continue
            del inst.properties[pname]
[ "def", "filter_instance", "(", "self", ",", "inst", ",", "plist", ")", ":", "if", "plist", "is", "not", "None", ":", "for", "pname", "in", "inst", ".", "properties", ".", "keys", "(", ")", ":", "if", "pname", ".", "lower", "(", ")", "not", "in", ...
Remove properties from an instance that aren't in the PropertyList inst -- The pywbem.CIMInstance plist -- The property List, or None. The list items must be all lowercase.
[ "Remove", "properties", "from", "an", "instance", "that", "aren", "t", "in", "the", "PropertyList" ]
python
train
36.666667
pybel/pybel-tools
src/pybel_tools/summary/provenance.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/summary/provenance.py#L43-L55
def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:
    """Prepare a citation data dictionary from a graph.

    :return: A dictionary of dictionaries {citation type: {(source, target): citation reference}
    """
    # Citation type -> (source, target) -> set of stripped references.
    results = defaultdict(lambda: defaultdict(set))

    for source, target, data in graph.edges(data=True):
        if CITATION not in data:
            continue
        citation = data[CITATION]
        results[citation[CITATION_TYPE]][source, target].add(
            citation[CITATION_REFERENCE].strip())

    return dict(results)
[ "def", "_generate_citation_dict", "(", "graph", ":", "BELGraph", ")", "->", "Mapping", "[", "str", ",", "Mapping", "[", "Tuple", "[", "BaseEntity", ",", "BaseEntity", "]", ",", "str", "]", "]", ":", "results", "=", "defaultdict", "(", "lambda", ":", "def...
Prepare a citation data dictionary from a graph. :return: A dictionary of dictionaries {citation type: {(source, target): citation reference}
[ "Prepare", "a", "citation", "data", "dictionary", "from", "a", "graph", "." ]
python
valid
41.230769
assamite/creamas
creamas/ds.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/ds.py#L65-L94
async def run_node(menv, log_folder): """Run :class:`~creamas.mp.MultiEnvironment` until its manager's :meth:`~aiomas.subproc.Manager.stop` is called. :param menv: :class:`~creamas.mp.MultiEnvironment` to wait for. :param str log_folder: Logging folder to be passed down to :meth:`~creamas.mp.MultiEnvironment.destroy` after :meth:`stop` is called. This method will block the current thread until the manager's :meth:`~creamas.mp.MultiEnvManager.stop` is called. After the stop-message is received, multi-environment is destroyed. The method is intended to be used in :class:`~creamas.ds.DistributedEnvironment` scripts which spawn multi-environments on different nodes. That is, using this function in the script will block the script's further execution until the simulation has run its course and the nodes need to be destroyed. Calling :meth:`~creamas.ds.DistributedEnvironment.destroy` will automatically call each node manager's :meth:`stop` and therefore release the script. """ try: await menv.manager.stop_received except KeyboardInterrupt: pass finally: ret = await menv.destroy(log_folder, as_coro=True) return ret
[ "async", "def", "run_node", "(", "menv", ",", "log_folder", ")", ":", "try", ":", "await", "menv", ".", "manager", ".", "stop_received", "except", "KeyboardInterrupt", ":", "pass", "finally", ":", "ret", "=", "await", "menv", ".", "destroy", "(", "log_fold...
Run :class:`~creamas.mp.MultiEnvironment` until its manager's :meth:`~aiomas.subproc.Manager.stop` is called. :param menv: :class:`~creamas.mp.MultiEnvironment` to wait for. :param str log_folder: Logging folder to be passed down to :meth:`~creamas.mp.MultiEnvironment.destroy` after :meth:`stop` is called. This method will block the current thread until the manager's :meth:`~creamas.mp.MultiEnvManager.stop` is called. After the stop-message is received, multi-environment is destroyed. The method is intended to be used in :class:`~creamas.ds.DistributedEnvironment` scripts which spawn multi-environments on different nodes. That is, using this function in the script will block the script's further execution until the simulation has run its course and the nodes need to be destroyed. Calling :meth:`~creamas.ds.DistributedEnvironment.destroy` will automatically call each node manager's :meth:`stop` and therefore release the script.
[ "Run", ":", "class", ":", "~creamas", ".", "mp", ".", "MultiEnvironment", "until", "its", "manager", "s", ":", "meth", ":", "~aiomas", ".", "subproc", ".", "Manager", ".", "stop", "is", "called", "." ]
python
train
41.033333
ajdavis/mongo-mockup-db
mockupdb/__init__.py
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L1086-L1109
def matches(self, *args, **kwargs): """Test if a request matches a :ref:`message spec <message spec>`. Returns ``True`` or ``False``. """ request = make_prototype_request(*args, **kwargs) if self._prototype.opcode not in (None, request.opcode): return False if self._prototype.is_command not in (None, request.is_command): return False for name in dir(self._prototype): if name.startswith('_') or name in request._non_matched_attrs: # Ignore privates, and handle documents specially. continue prototype_value = getattr(self._prototype, name, None) if inspect.ismethod(prototype_value): continue actual_value = getattr(request, name, None) if prototype_value not in (None, actual_value): return False if len(self._prototype.docs) not in (0, len(request.docs)): return False return self._prototype._matches_docs(self._prototype.docs, request.docs)
[ "def", "matches", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "request", "=", "make_prototype_request", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_prototype", ".", "opcode", "not", "in", "(", "None", ...
Test if a request matches a :ref:`message spec <message spec>`. Returns ``True`` or ``False``.
[ "Test", "if", "a", "request", "matches", "a", ":", "ref", ":", "message", "spec", "<message", "spec", ">", "." ]
python
train
44
appliedsec/pygeoip
pygeoip/__init__.py
https://github.com/appliedsec/pygeoip/blob/2a725df0b727e8b08f217ab84f7b8243c42554f5/pygeoip/__init__.py#L127-L191
def _setup_segments(self): """ Parses the database file to determine what kind of database is being used and setup segment sizes and start points that will be used by the seek*() methods later. """ self._databaseType = const.COUNTRY_EDITION self._recordLength = const.STANDARD_RECORD_LENGTH self._databaseSegments = const.COUNTRY_BEGIN filepos = self._fp.tell() self._fp.seek(-3, os.SEEK_END) for i in range(const.STRUCTURE_INFO_MAX_SIZE): chars = chr(255) * 3 delim = self._fp.read(3) if PY3 and type(delim) is bytes: delim = delim.decode(ENCODING) if PY2: chars = chars.decode(ENCODING) if type(delim) is str: delim = delim.decode(ENCODING) if delim == chars: byte = self._fp.read(1) self._databaseType = ord(byte) # Compatibility with databases from April 2003 and earlier if self._databaseType >= 106: self._databaseType -= 105 if self._databaseType == const.REGION_EDITION_REV0: self._databaseSegments = const.STATE_BEGIN_REV0 elif self._databaseType == const.REGION_EDITION_REV1: self._databaseSegments = const.STATE_BEGIN_REV1 elif self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1, const.CITY_EDITION_REV1_V6, const.ORG_EDITION, const.ISP_EDITION, const.NETSPEED_EDITION_REV1, const.NETSPEED_EDITION_REV1_V6, const.ASNUM_EDITION, const.ASNUM_EDITION_V6): self._databaseSegments = 0 buf = self._fp.read(const.SEGMENT_RECORD_LENGTH) if PY3 and type(buf) is bytes: buf = buf.decode(ENCODING) for j in range(const.SEGMENT_RECORD_LENGTH): self._databaseSegments += (ord(buf[j]) << (j * 8)) LONG_RECORDS = (const.ORG_EDITION, const.ISP_EDITION) if self._databaseType in LONG_RECORDS: self._recordLength = const.ORG_RECORD_LENGTH break else: self._fp.seek(-4, os.SEEK_CUR) self._fp.seek(filepos, os.SEEK_SET)
[ "def", "_setup_segments", "(", "self", ")", ":", "self", ".", "_databaseType", "=", "const", ".", "COUNTRY_EDITION", "self", ".", "_recordLength", "=", "const", ".", "STANDARD_RECORD_LENGTH", "self", ".", "_databaseSegments", "=", "const", ".", "COUNTRY_BEGIN", ...
Parses the database file to determine what kind of database is being used and setup segment sizes and start points that will be used by the seek*() methods later.
[ "Parses", "the", "database", "file", "to", "determine", "what", "kind", "of", "database", "is", "being", "used", "and", "setup", "segment", "sizes", "and", "start", "points", "that", "will", "be", "used", "by", "the", "seek", "*", "()", "methods", "later",...
python
valid
40.984615
datosgobar/pydatajson
pydatajson/core.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/core.py#L692-L769
def generate_harvester_config(self, catalogs=None, harvest='valid', report=None, export_path=None): """Genera un archivo de configuración del harvester a partir de un reporte, o de un conjunto de catálogos y un criterio de cosecha (`harvest`). Args: catalogs (str, dict o list): Uno (str o dict) o varios (list de strs y/o dicts) catálogos. harvest (str): Criterio para determinar qué datasets incluir en el archivo de configuración generado ('all', 'none', 'valid', 'report' o 'good'). report (list o str): Tabla de reporte generada por generate_datasets_report() como lista de diccionarios o archivo en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`, en cuyo caso `catalogs` se ignora. export_path (str): Path donde exportar el reporte generado (en formato XLSX o CSV). Si se especifica, el método no devolverá nada. Returns: list of dicts: Un diccionario con variables de configuración por cada dataset a cosechar. """ # Si se pasa un único catálogo, genero una lista que lo contenga if isinstance(catalogs, string_types + (dict,)): catalogs = [catalogs] if harvest == 'report': if not report: raise ValueError(""" Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para el argumento 'report'. Por favor, intentelo nuevamente.""") datasets_report = readers.read_table(report) elif harvest in ['valid', 'none', 'all']: # catalogs no puede faltar para estos criterios assert isinstance(catalogs, string_types + (dict, list)) datasets_report = self.generate_datasets_report(catalogs, harvest) else: raise ValueError(""" {} no es un criterio de harvest reconocido. 
Pruebe con 'all', 'none', 'valid' o 'report'.""".format(harvest)) # define los campos del reporte que mantiene para el config file config_keys = [ "catalog_federation_id", "catalog_federation_org", "dataset_identifier" ] # cambia algunos nombres de estos campos para el config file config_translator = { "catalog_federation_id": "catalog_id", "catalog_federation_org": "dataset_organization" } translated_keys = [config_translator.get(k, k) for k in config_keys] harvester_config = [ OrderedDict( # Retengo únicamente los campos que necesita el harvester [(config_translator.get(k, k), v) for (k, v) in dataset.items() if k in config_keys] ) # Para aquellost datasets marcados con 'harvest'==1 for dataset in datasets_report if bool(int(dataset["harvest"])) ] # chequea que el archivo de configuración tiene todos los campos required_keys = set(translated_keys) for row in harvester_config: row_keys = set(row.keys()) msg = "Hay una fila con claves {} y debe tener claves {}".format( row_keys, required_keys) assert row_keys == required_keys, msg if export_path: writers.write_table(harvester_config, export_path) else: return harvester_config
[ "def", "generate_harvester_config", "(", "self", ",", "catalogs", "=", "None", ",", "harvest", "=", "'valid'", ",", "report", "=", "None", ",", "export_path", "=", "None", ")", ":", "# Si se pasa un único catálogo, genero una lista que lo contenga", "if", "isinstance"...
Genera un archivo de configuración del harvester a partir de un reporte, o de un conjunto de catálogos y un criterio de cosecha (`harvest`). Args: catalogs (str, dict o list): Uno (str o dict) o varios (list de strs y/o dicts) catálogos. harvest (str): Criterio para determinar qué datasets incluir en el archivo de configuración generado ('all', 'none', 'valid', 'report' o 'good'). report (list o str): Tabla de reporte generada por generate_datasets_report() como lista de diccionarios o archivo en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`, en cuyo caso `catalogs` se ignora. export_path (str): Path donde exportar el reporte generado (en formato XLSX o CSV). Si se especifica, el método no devolverá nada. Returns: list of dicts: Un diccionario con variables de configuración por cada dataset a cosechar.
[ "Genera", "un", "archivo", "de", "configuración", "del", "harvester", "a", "partir", "de", "un", "reporte", "o", "de", "un", "conjunto", "de", "catálogos", "y", "un", "criterio", "de", "cosecha", "(", "harvest", ")", "." ]
python
train
44
KarchinLab/probabilistic2020
prob2020/python/utils.py
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/utils.py#L212-L229
def bed_generator(bed_path): """Iterates through a BED file yielding parsed BED lines. Parameters ---------- bed_path : str path to BED file Yields ------ BedLine(line) : BedLine A BedLine object which has parsed the individual line in a BED file. """ with open(bed_path) as handle: bed_reader = csv.reader(handle, delimiter='\t') for line in bed_reader: yield BedLine(line)
[ "def", "bed_generator", "(", "bed_path", ")", ":", "with", "open", "(", "bed_path", ")", "as", "handle", ":", "bed_reader", "=", "csv", ".", "reader", "(", "handle", ",", "delimiter", "=", "'\\t'", ")", "for", "line", "in", "bed_reader", ":", "yield", ...
Iterates through a BED file yielding parsed BED lines. Parameters ---------- bed_path : str path to BED file Yields ------ BedLine(line) : BedLine A BedLine object which has parsed the individual line in a BED file.
[ "Iterates", "through", "a", "BED", "file", "yielding", "parsed", "BED", "lines", "." ]
python
train
24.833333
ansible-community/ara
ara/config/webapp.py
https://github.com/ansible-community/ara/blob/15e2d0133c23b6d07438a553bb8149fadff21547/ara/config/webapp.py#L58-L64
def config(self): """ Returns a dictionary for the loaded configuration """ return { key: self.__dict__[key] for key in dir(self) if key.isupper() }
[ "def", "config", "(", "self", ")", ":", "return", "{", "key", ":", "self", ".", "__dict__", "[", "key", "]", "for", "key", "in", "dir", "(", "self", ")", "if", "key", ".", "isupper", "(", ")", "}" ]
Returns a dictionary for the loaded configuration
[ "Returns", "a", "dictionary", "for", "the", "loaded", "configuration" ]
python
train
28.857143
flo-compbio/genometools
genometools/ontology/util.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ontology/util.py#L62-L73
def get_current_ontology_date(): """Get the release date of the current Gene Ontolgo release.""" with closing(requests.get( 'http://geneontology.org/ontology/go-basic.obo', stream=True)) as r: for i, l in enumerate(r.iter_lines(decode_unicode=True)): if i == 1: assert l.split(':')[0] == 'data-version' date = l.split('/')[-1] break return date
[ "def", "get_current_ontology_date", "(", ")", ":", "with", "closing", "(", "requests", ".", "get", "(", "'http://geneontology.org/ontology/go-basic.obo'", ",", "stream", "=", "True", ")", ")", "as", "r", ":", "for", "i", ",", "l", "in", "enumerate", "(", "r"...
Get the release date of the current Gene Ontolgo release.
[ "Get", "the", "release", "date", "of", "the", "current", "Gene", "Ontolgo", "release", "." ]
python
train
36.5
talentpair/featurevectormatrix
featurevectormatrix/__init__.py
https://github.com/talentpair/featurevectormatrix/blob/1327026f7e46138947ba55433c11a85bca1adc5d/featurevectormatrix/__init__.py#L250-L259
def keys(self): """ Returns all row keys :raise NotImplementedError: if all rows aren't keyed :return: all row keys """ if len(self._row_name_list) != len(self._rows): raise NotImplementedError("You can't get row keys for a FVM that doesn't have all rows keyed") return self.row_names()
[ "def", "keys", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_row_name_list", ")", "!=", "len", "(", "self", ".", "_rows", ")", ":", "raise", "NotImplementedError", "(", "\"You can't get row keys for a FVM that doesn't have all rows keyed\"", ")", "return...
Returns all row keys :raise NotImplementedError: if all rows aren't keyed :return: all row keys
[ "Returns", "all", "row", "keys" ]
python
train
33.9
mamins1376/PyBehnevis
pybehnevis/__init__.py
https://github.com/mamins1376/PyBehnevis/blob/8642b4999a62ea8fa76879fbdc320462eb720f6d/pybehnevis/__init__.py#L36-L63
def convert(self, text): """ convert Finglish(or whatever you'd like to call) to Persian. gets and returns string. """ url = self.API_URL encoding = self.ENCODING headers = self.HEADERS data = urlencode({ 'farsi': str(text) }).encode(encoding) request = Request(url=url,data=data,headers=headers) response = urlopen(request) result = response.read() response_encoding = response.headers['Content-Type'] response_encoding = response_encoding[response_encoding.find('=')+1:] result = result.decode(response_encoding) # a simple fix result = result.replace('\ufeff','')[:-1] return result
[ "def", "convert", "(", "self", ",", "text", ")", ":", "url", "=", "self", ".", "API_URL", "encoding", "=", "self", ".", "ENCODING", "headers", "=", "self", ".", "HEADERS", "data", "=", "urlencode", "(", "{", "'farsi'", ":", "str", "(", "text", ")", ...
convert Finglish(or whatever you'd like to call) to Persian. gets and returns string.
[ "convert", "Finglish", "(", "or", "whatever", "you", "d", "like", "to", "call", ")", "to", "Persian", ".", "gets", "and", "returns", "string", "." ]
python
train
23.071429
materialsproject/pymatgen
pymatgen/io/abinit/works.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/works.py#L1724-L1778
def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None): """ Construct a `GKKPWork` from a `PhononWfkqWork` object. The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands """ # Get list of qpoints from the the phonon tasks in this work qpoints = [] qpoints_deps = [] for task in phononwfkq_work: if isinstance(task,PhononTask): # Store qpoints qpt = task.input.get("qpt", [0,0,0]) qpoints.append(qpt) # Store dependencies qpoints_deps.append(task.deps) # Create file nodes ddb_path = phononwfkq_work.outdir.has_abiext("DDB") dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB") ddb_file = FileNode(ddb_path) dvdb_file = FileNode(dvdb_path) # Get scf_task from first q-point for dep in qpoints_deps[0]: if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK': scf_task = dep.node # Create new work new = cls(manager=manager) new.remove_wfkq = remove_wfkq new.wfkq_tasks = [] new.wfk_task = [] # Add one eph task per qpoint for qpt,qpoint_deps in zip(qpoints,qpoints_deps): # Create eph task eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt) deps = {ddb_file: "DDB", dvdb_file: "DVDB" } for dep in qpoint_deps: deps[dep.node] = dep.exts[0] # If no WFQ in deps link the WFK with WFQ extension if 'WFQ' not in deps.values(): inv_deps = dict((v, k) for k, v in deps.items()) wfk_task = inv_deps['WFK'] wfk_path = wfk_task.outdir.has_abiext("WFK") # Check if netcdf filename, extension = os.path.splitext(wfk_path) infile = 'out_WFQ' + extension wfq_path = os.path.join(os.path.dirname(wfk_path), infile) if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path) deps[FileNode(wfq_path)] = 'WFQ' new.register_eph_task(eph_input, deps=deps) return new
[ "def", "from_phononwfkq_work", "(", "cls", ",", "phononwfkq_work", ",", "nscf_vars", "=", "{", "}", ",", "remove_wfkq", "=", "True", ",", "with_ddk", "=", "True", ",", "manager", "=", "None", ")", ":", "# Get list of qpoints from the the phonon tasks in this work", ...
Construct a `GKKPWork` from a `PhononWfkqWork` object. The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands
[ "Construct", "a", "GKKPWork", "from", "a", "PhononWfkqWork", "object", ".", "The", "WFQ", "are", "the", "ones", "used", "for", "PhononWfkqWork", "so", "in", "principle", "have", "only", "valence", "bands" ]
python
train
42.763636
Qiskit/qiskit-terra
qiskit/dagcircuit/dagcircuit.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/dagcircuit/dagcircuit.py#L180-L188
def add_qreg(self, qreg): """Add all wires in a quantum register.""" if not isinstance(qreg, QuantumRegister): raise DAGCircuitError("not a QuantumRegister instance.") if qreg.name in self.qregs: raise DAGCircuitError("duplicate register %s" % qreg.name) self.qregs[qreg.name] = qreg for j in range(qreg.size): self._add_wire((qreg, j))
[ "def", "add_qreg", "(", "self", ",", "qreg", ")", ":", "if", "not", "isinstance", "(", "qreg", ",", "QuantumRegister", ")", ":", "raise", "DAGCircuitError", "(", "\"not a QuantumRegister instance.\"", ")", "if", "qreg", ".", "name", "in", "self", ".", "qregs...
Add all wires in a quantum register.
[ "Add", "all", "wires", "in", "a", "quantum", "register", "." ]
python
test
44.888889
MillionIntegrals/vel
vel/rl/modules/noise/ou_noise.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/noise/ou_noise.py#L28-L41
def forward(self, actions, batch_info): """ Return model step after applying noise """ while len(self.processes) < actions.shape[0]: len_action_space = self.action_space.shape[-1] self.processes.append( OrnsteinUhlenbeckNoiseProcess( np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space) ) ) noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device) return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor)
[ "def", "forward", "(", "self", ",", "actions", ",", "batch_info", ")", ":", "while", "len", "(", "self", ".", "processes", ")", "<", "actions", ".", "shape", "[", "0", "]", ":", "len_action_space", "=", "self", ".", "action_space", ".", "shape", "[", ...
Return model step after applying noise
[ "Return", "model", "step", "after", "applying", "noise" ]
python
train
42.571429
davidmogar/cucco
cucco/cucco.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/cucco.py#L65-L82
def _parse_normalizations(normalizations): """Parse and yield normalizations. Parse normalizations parameter that yield all normalizations and arguments found on it. Args: normalizations: List of normalizations. Yields: A tuple with a parsed normalization. The first item will contain the normalization name and the second will be a dict with the arguments to be used for the normalization. """ str_type = str if sys.version_info[0] > 2 else (str, unicode) for normalization in normalizations: yield (normalization, {}) if isinstance(normalization, str_type) else normalization
[ "def", "_parse_normalizations", "(", "normalizations", ")", ":", "str_type", "=", "str", "if", "sys", ".", "version_info", "[", "0", "]", ">", "2", "else", "(", "str", ",", "unicode", ")", "for", "normalization", "in", "normalizations", ":", "yield", "(", ...
Parse and yield normalizations. Parse normalizations parameter that yield all normalizations and arguments found on it. Args: normalizations: List of normalizations. Yields: A tuple with a parsed normalization. The first item will contain the normalization name and the second will be a dict with the arguments to be used for the normalization.
[ "Parse", "and", "yield", "normalizations", "." ]
python
train
38.222222
SecurityInnovation/PGPy
pgpy/packet/types.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/packet/types.py#L66-L115
def parse(self, packet): """ There are two formats for headers old style --------- Old style headers can be 1, 2, 3, or 6 octets long and are composed of a Tag and a Length. If the header length is 1 octet (length_type == 3), then there is no Length field. new style --------- New style headers can be 2, 3, or 6 octets long and are also composed of a Tag and a Length. Packet Tag ---------- The packet tag is the first byte, comprising the following fields: +-------------+----------+---------------+---+---+---+---+----------+----------+ | byte | 1 | +-------------+----------+---------------+---+---+---+---+----------+----------+ | bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | +-------------+----------+---------------+---+---+---+---+----------+----------+ | old-style | always 1 | packet format | packet tag | length type | | description | | 0 = old-style | | 0 = 1 octet | | | | 1 = new-style | | 1 = 2 octets | | | | | | 2 = 5 octets | | | | | | 3 = no length field | +-------------+ + +---------------+---------------------+ | new-style | | | packet tag | | description | | | | +-------------+----------+---------------+-------------------------------------+ :param packet: raw packet bytes """ self._lenfmt = ((packet[0] & 0x40) >> 6) self.tag = packet[0] if self._lenfmt == 0: self.llen = (packet[0] & 0x03) del packet[0] if (self._lenfmt == 0 and self.llen > 0) or self._lenfmt == 1: self.length = packet else: # indeterminate packet length self.length = len(packet)
[ "def", "parse", "(", "self", ",", "packet", ")", ":", "self", ".", "_lenfmt", "=", "(", "(", "packet", "[", "0", "]", "&", "0x40", ")", ">>", "6", ")", "self", ".", "tag", "=", "packet", "[", "0", "]", "if", "self", ".", "_lenfmt", "==", "0",...
There are two formats for headers old style --------- Old style headers can be 1, 2, 3, or 6 octets long and are composed of a Tag and a Length. If the header length is 1 octet (length_type == 3), then there is no Length field. new style --------- New style headers can be 2, 3, or 6 octets long and are also composed of a Tag and a Length. Packet Tag ---------- The packet tag is the first byte, comprising the following fields: +-------------+----------+---------------+---+---+---+---+----------+----------+ | byte | 1 | +-------------+----------+---------------+---+---+---+---+----------+----------+ | bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | +-------------+----------+---------------+---+---+---+---+----------+----------+ | old-style | always 1 | packet format | packet tag | length type | | description | | 0 = old-style | | 0 = 1 octet | | | | 1 = new-style | | 1 = 2 octets | | | | | | 2 = 5 octets | | | | | | 3 = no length field | +-------------+ + +---------------+---------------------+ | new-style | | | packet tag | | description | | | | +-------------+----------+---------------+-------------------------------------+ :param packet: raw packet bytes
[ "There", "are", "two", "formats", "for", "headers" ]
python
train
43.7
Rapptz/discord.py
discord/message.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/message.py#L699-L728
async def add_reaction(self, emoji): """|coro| Add a reaction to the message. The emoji may be a unicode emoji or a custom guild :class:`Emoji`. You must have the :attr:`~Permissions.read_message_history` permission to use this. If nobody else has reacted to the message using this emoji, the :attr:`~Permissions.add_reactions` permission is required. Parameters ------------ emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`] The emoji to react with. Raises -------- HTTPException Adding the reaction failed. Forbidden You do not have the proper permissions to react to the message. NotFound The emoji you specified was not found. InvalidArgument The emoji parameter is invalid. """ emoji = self._emoji_reaction(emoji) await self._state.http.add_reaction(self.channel.id, self.id, emoji)
[ "async", "def", "add_reaction", "(", "self", ",", "emoji", ")", ":", "emoji", "=", "self", ".", "_emoji_reaction", "(", "emoji", ")", "await", "self", ".", "_state", ".", "http", ".", "add_reaction", "(", "self", ".", "channel", ".", "id", ",", "self",...
|coro| Add a reaction to the message. The emoji may be a unicode emoji or a custom guild :class:`Emoji`. You must have the :attr:`~Permissions.read_message_history` permission to use this. If nobody else has reacted to the message using this emoji, the :attr:`~Permissions.add_reactions` permission is required. Parameters ------------ emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`] The emoji to react with. Raises -------- HTTPException Adding the reaction failed. Forbidden You do not have the proper permissions to react to the message. NotFound The emoji you specified was not found. InvalidArgument The emoji parameter is invalid.
[ "|coro|" ]
python
train
33.433333
pymacaron/pymacaron-core
pymacaron_core/exceptions.py
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/exceptions.py#L25-L33
def add_error_handlers(app): """Add custom error handlers for PyMacaronCoreExceptions to the app""" def handle_validation_error(error): response = jsonify({'message': str(error)}) response.status_code = error.status_code return response app.errorhandler(ValidationError)(handle_validation_error)
[ "def", "add_error_handlers", "(", "app", ")", ":", "def", "handle_validation_error", "(", "error", ")", ":", "response", "=", "jsonify", "(", "{", "'message'", ":", "str", "(", "error", ")", "}", ")", "response", ".", "status_code", "=", "error", ".", "s...
Add custom error handlers for PyMacaronCoreExceptions to the app
[ "Add", "custom", "error", "handlers", "for", "PyMacaronCoreExceptions", "to", "the", "app" ]
python
train
36.111111
chrizzFTD/naming
naming/base.py
https://github.com/chrizzFTD/naming/blob/ed0efbd2a3718f977c01cc15b33aeb1aa4fb299c/naming/base.py#L6-L11
def _dct_from_mro(cls: type, attr_name: str) -> dict: """"Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest).""" d = {} for c in reversed(cls.mro()): d.update(getattr(c, attr_name, {})) return d
[ "def", "_dct_from_mro", "(", "cls", ":", "type", ",", "attr_name", ":", "str", ")", "->", "dict", ":", "d", "=", "{", "}", "for", "c", "in", "reversed", "(", "cls", ".", "mro", "(", ")", ")", ":", "d", ".", "update", "(", "getattr", "(", "c", ...
Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest).
[ "Get", "a", "merged", "dictionary", "from", "cls", "bases", "attribute", "attr_name", ".", "MRO", "defines", "importance", "(", "closest", "=", "strongest", ")", "." ]
python
train
45.166667
jart/fabulous
fabulous/term.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L758-L768
def _get_title(self): """According to http://support.microsoft.com/kb/124103 the buffer size is 1024 Does not support unicode, only ANSI""" #TODO: unicode support strbuffer = self.ctypes.create_string_buffer(1024) size = self.ctypes.c_short(1024) #unicode versions are (Get|Set)ConsolTitleW self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size) return strbuffer.value
[ "def", "_get_title", "(", "self", ")", ":", "#TODO: unicode support", "strbuffer", "=", "self", ".", "ctypes", ".", "create_string_buffer", "(", "1024", ")", "size", "=", "self", ".", "ctypes", ".", "c_short", "(", "1024", ")", "#unicode versions are (Get|Set)Co...
According to http://support.microsoft.com/kb/124103 the buffer size is 1024 Does not support unicode, only ANSI
[ "According", "to", "http", ":", "//", "support", ".", "microsoft", ".", "com", "/", "kb", "/", "124103", "the", "buffer", "size", "is", "1024", "Does", "not", "support", "unicode", "only", "ANSI" ]
python
train
40.545455
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAIndicator/indicators.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAIndicator/indicators.py#L95-L118
def QA_indicator_DMI(DataFrame, M1=14, M2=6): """ 趋向指标 DMI """ HIGH = DataFrame.high LOW = DataFrame.low CLOSE = DataFrame.close OPEN = DataFrame.open TR = SUM(MAX(MAX(HIGH-LOW, ABS(HIGH-REF(CLOSE, 1))), ABS(LOW-REF(CLOSE, 1))), M1) HD = HIGH-REF(HIGH, 1) LD = REF(LOW, 1)-LOW DMP = SUM(IFAND(HD>0,HD>LD,HD,0), M1) DMM = SUM(IFAND(LD>0,LD>HD,LD,0), M1) DI1 = DMP*100/TR DI2 = DMM*100/TR ADX = MA(ABS(DI2-DI1)/(DI1+DI2)*100, M2) ADXR = (ADX+REF(ADX, M2))/2 return pd.DataFrame({ 'DI1': DI1, 'DI2': DI2, 'ADX': ADX, 'ADXR': ADXR })
[ "def", "QA_indicator_DMI", "(", "DataFrame", ",", "M1", "=", "14", ",", "M2", "=", "6", ")", ":", "HIGH", "=", "DataFrame", ".", "high", "LOW", "=", "DataFrame", ".", "low", "CLOSE", "=", "DataFrame", ".", "close", "OPEN", "=", "DataFrame", ".", "ope...
趋向指标 DMI
[ "趋向指标", "DMI" ]
python
train
25.541667
saltstack/salt
salt/modules/marathon.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/marathon.py#L55-L70
def apps(): ''' Return a list of the currently installed app ids. CLI Example: .. code-block:: bash salt marathon-minion-id marathon.apps ''' response = salt.utils.http.query( "{0}/v2/apps".format(_base_url()), decode_type='json', decode=True, ) return {'apps': [app['id'] for app in response['dict']['apps']]}
[ "def", "apps", "(", ")", ":", "response", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "\"{0}/v2/apps\"", ".", "format", "(", "_base_url", "(", ")", ")", ",", "decode_type", "=", "'json'", ",", "decode", "=", "True", ",", ")", "return",...
Return a list of the currently installed app ids. CLI Example: .. code-block:: bash salt marathon-minion-id marathon.apps
[ "Return", "a", "list", "of", "the", "currently", "installed", "app", "ids", "." ]
python
train
22.625
da4089/simplefix
simplefix/message.py
https://github.com/da4089/simplefix/blob/10f7f165a99a03467110bee69cc7c083c3531c68/simplefix/message.py#L290-L334
def append_tz_timestamp(self, tag, timestamp=None, precision=3, header=False): """Append a field with a TZTimestamp value, derived from local time. :param tag: Integer or string FIX tag number. :param timestamp: Time value, see below. :param precision: Number of decimal places: 0, 3 (ms) or 6 (us). :param header: Append to FIX header if True; default to body. The `timestamp` value should be a local datetime, such as created by datetime.datetime.now(); a float, being the number of seconds since midnight 1 Jan 1970 UTC, such as returned by time.time(); or, None, in which case datetime.datetime.now() is used to get the current local time. Precision values other than zero (seconds), 3 (milliseconds), or 6 (microseconds) will raise an exception. Note that prior to FIX 5.0, only values of 0 or 3 comply with the standard.""" # Get float offset from Unix epoch. if timestamp is None: now = time.time() elif type(timestamp) is float: now = timestamp else: now = time.mktime(timestamp.timetuple()) + \ (timestamp.microsecond * 1e-6) # Get offset of local timezone east of UTC. utc = datetime.datetime.utcfromtimestamp(now) local = datetime.datetime.fromtimestamp(now) td = local - utc offset = int(((td.days * 86400) + td.seconds) / 60) s = local.strftime("%Y%m%d-%H:%M:%S") if precision == 3: s += ".%03u" % (local.microsecond / 1000) elif precision == 6: s += ".%06u" % local.microsecond elif precision != 0: raise ValueError("Precision (%u) should be one of " "0, 3 or 6 digits" % precision) s += self._tz_offset_string(offset) return self.append_pair(tag, s, header=header)
[ "def", "append_tz_timestamp", "(", "self", ",", "tag", ",", "timestamp", "=", "None", ",", "precision", "=", "3", ",", "header", "=", "False", ")", ":", "# Get float offset from Unix epoch.", "if", "timestamp", "is", "None", ":", "now", "=", "time", ".", "...
Append a field with a TZTimestamp value, derived from local time. :param tag: Integer or string FIX tag number. :param timestamp: Time value, see below. :param precision: Number of decimal places: 0, 3 (ms) or 6 (us). :param header: Append to FIX header if True; default to body. The `timestamp` value should be a local datetime, such as created by datetime.datetime.now(); a float, being the number of seconds since midnight 1 Jan 1970 UTC, such as returned by time.time(); or, None, in which case datetime.datetime.now() is used to get the current local time. Precision values other than zero (seconds), 3 (milliseconds), or 6 (microseconds) will raise an exception. Note that prior to FIX 5.0, only values of 0 or 3 comply with the standard.
[ "Append", "a", "field", "with", "a", "TZTimestamp", "value", "derived", "from", "local", "time", "." ]
python
train
42.533333
krischer/pyflex
doc/convert.py
https://github.com/krischer/pyflex/blob/e1a988793ba79cc3b31051ba3df5b3a23bc769c4/doc/convert.py#L12-L28
def clean_for_doc(nb): """ Cleans the notebook to be suitable for inclusion in the docs. """ new_cells = [] for cell in nb.worksheets[0].cells: # Remove the pylab inline line. if "input" in cell and cell["input"].strip() == "%pylab inline": continue # Remove output resulting from the stream/trace method chaining. if "outputs" in cell: outputs = [_i for _i in cell["outputs"] if "text" not in _i or not _i["text"].startswith("<obspy.core")] cell["outputs"] = outputs new_cells.append(cell) nb.worksheets[0].cells = new_cells return nb
[ "def", "clean_for_doc", "(", "nb", ")", ":", "new_cells", "=", "[", "]", "for", "cell", "in", "nb", ".", "worksheets", "[", "0", "]", ".", "cells", ":", "# Remove the pylab inline line.", "if", "\"input\"", "in", "cell", "and", "cell", "[", "\"input\"", ...
Cleans the notebook to be suitable for inclusion in the docs.
[ "Cleans", "the", "notebook", "to", "be", "suitable", "for", "inclusion", "in", "the", "docs", "." ]
python
train
38
abe-winter/pg13-py
pg13/diff.py
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L84-L94
def subslice(inner,outer,section): 'helper for rediff\ outer is a slice (2-tuple, not an official python slice) in global coordinates\ inner is a slice (2-tuple) on that slice\ returns the result of sub-slicing outer by inner' # todo: think about constraints here. inner and outer ordered, inner[1] less than outer[1]-outer[0] # todo: this would make more sense as a member of a Slice class if section=='head': return outer[0],outer[0]+inner[0] elif section=='tail': return outer[0]+inner[1],outer[1] elif section=='middle': return outer[0]+inner[0],outer[0]+inner[1] else: raise ValueError('section val %s not one of (head,middle,tail)'%section)
[ "def", "subslice", "(", "inner", ",", "outer", ",", "section", ")", ":", "# todo: think about constraints here. inner and outer ordered, inner[1] less than outer[1]-outer[0]\r", "# todo: this would make more sense as a member of a Slice class\r", "if", "section", "==", "'head'", ":",...
helper for rediff\ outer is a slice (2-tuple, not an official python slice) in global coordinates\ inner is a slice (2-tuple) on that slice\ returns the result of sub-slicing outer by inner
[ "helper", "for", "rediff", "\\", "outer", "is", "a", "slice", "(", "2", "-", "tuple", "not", "an", "official", "python", "slice", ")", "in", "global", "coordinates", "\\", "inner", "is", "a", "slice", "(", "2", "-", "tuple", ")", "on", "that", "slice...
python
train
60.545455
daler/gffutils
gffutils/helpers.py
https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/helpers.py#L263-L268
def _unjsonify(x, isattributes=False): """Convert JSON string to an ordered defaultdict.""" if isattributes: obj = json.loads(x) return dict_class(obj) return json.loads(x)
[ "def", "_unjsonify", "(", "x", ",", "isattributes", "=", "False", ")", ":", "if", "isattributes", ":", "obj", "=", "json", ".", "loads", "(", "x", ")", "return", "dict_class", "(", "obj", ")", "return", "json", ".", "loads", "(", "x", ")" ]
Convert JSON string to an ordered defaultdict.
[ "Convert", "JSON", "string", "to", "an", "ordered", "defaultdict", "." ]
python
train
32.5
PythonCharmers/python-future
src/future/backports/http/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/server.py#L543-L552
def date_time_string(self, timestamp=None): """Return the current date and time formatted for a message header.""" if timestamp is None: timestamp = time.time() year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( self.weekdayname[wd], day, self.monthname[month], year, hh, mm, ss) return s
[ "def", "date_time_string", "(", "self", ",", "timestamp", "=", "None", ")", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "time", ".", "time", "(", ")", "year", ",", "month", ",", "day", ",", "hh", ",", "mm", ",", "ss", ",", "wd", ...
Return the current date and time formatted for a message header.
[ "Return", "the", "current", "date", "and", "time", "formatted", "for", "a", "message", "header", "." ]
python
train
43.8
allianceauth/allianceauth
allianceauth/authentication/admin.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/authentication/admin.py#L15-L27
def make_service_hooks_update_groups_action(service): """ Make a admin action for the given service :param service: services.hooks.ServicesHook :return: fn to update services groups for the selected users """ def update_service_groups(modeladmin, request, queryset): for user in queryset: # queryset filtering doesn't work here? service.update_groups(user) update_service_groups.__name__ = str('update_{}_groups'.format(slugify(service.name))) update_service_groups.short_description = "Sync groups for selected {} accounts".format(service.title) return update_service_groups
[ "def", "make_service_hooks_update_groups_action", "(", "service", ")", ":", "def", "update_service_groups", "(", "modeladmin", ",", "request", ",", "queryset", ")", ":", "for", "user", "in", "queryset", ":", "# queryset filtering doesn't work here?", "service", ".", "...
Make a admin action for the given service :param service: services.hooks.ServicesHook :return: fn to update services groups for the selected users
[ "Make", "a", "admin", "action", "for", "the", "given", "service", ":", "param", "service", ":", "services", ".", "hooks", ".", "ServicesHook", ":", "return", ":", "fn", "to", "update", "services", "groups", "for", "the", "selected", "users" ]
python
train
47.769231
erikrose/peep
peep.py
https://github.com/erikrose/peep/blob/c16f08c7f61e2f2afecb7cd1c93752bdd96c4968/peep.py#L217-L226
def hash_of_file(path): """Return the hash of a downloaded file.""" with open(path, 'rb') as archive: sha = sha256() while True: data = archive.read(2 ** 20) if not data: break sha.update(data) return encoded_hash(sha)
[ "def", "hash_of_file", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "archive", ":", "sha", "=", "sha256", "(", ")", "while", "True", ":", "data", "=", "archive", ".", "read", "(", "2", "**", "20", ")", "if", "not", ...
Return the hash of a downloaded file.
[ "Return", "the", "hash", "of", "a", "downloaded", "file", "." ]
python
train
28.9
saltstack/salt
salt/modules/splunk_search.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk_search.py#L201-L300
def list_all(prefix=None, app=None, owner=None, description_contains=None, name_not_contains=None, profile="splunk"): ''' Get all splunk search details. Produces results that can be used to create an sls file. if app or owner are specified, results will be limited to matching saved searches. if description_contains is specified, results will be limited to those where "description_contains in description" is true if name_not_contains is specified, results will be limited to those where "name_not_contains not in name" is true. If prefix parameter is given, alarm names in the output will be prepended with the prefix; alarms that have the prefix will be skipped. This can be used to convert existing alarms to be managed by salt, as follows: CLI example: 1. Make a "backup" of all existing searches $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls 2. Get all searches with new prefixed names $ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls 3. Insert the managed searches into splunk $ salt-call state.sls managed_searches.sls 4. Manually verify that the new searches look right 5. Delete the original searches $ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls $ salt-call state.sls remove_legacy_searches.sls 6. Get all searches again, verify no changes $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls $ diff final_searches.sls managed_searches.sls ''' client = _get_splunk(profile) # splunklib doesn't provide the default settings for saved searches. # so, in order to get the defaults, we create a search with no # configuration, get that search, and then delete it. 
We use its contents # as the default settings name = "splunk_search.list_all get defaults" try: client.saved_searches.delete(name) except Exception: pass search = client.saved_searches.create(name, search="nothing") defaults = dict(search.content) client.saved_searches.delete(name) # stuff that splunk returns but that you should not attempt to set. # cf http://dev.splunk.com/view/python-sdk/SP-CAAAEK2 readonly_keys = ("triggered_alert_count", "action.email", "action.populate_lookup", "action.rss", "action.script", "action.summary_index", "qualifiedSearch", "next_scheduled_time") results = OrderedDict() # sort the splunk searches by name, so we get consistent output searches = sorted([(s.name, s) for s in client.saved_searches]) for name, search in searches: if app and search.access.app != app: continue if owner and search.access.owner != owner: continue if name_not_contains and name_not_contains in name: continue if prefix: if name.startswith(prefix): continue name = prefix + name # put name in the OrderedDict first d = [{"name": name}] # add the rest of the splunk settings, ignoring any defaults description = '' for (k, v) in sorted(search.content.items()): if k in readonly_keys: continue if k.startswith("display."): continue if not v: continue if k in defaults and defaults[k] == v: continue d.append({k: v}) if k == 'description': description = v if description_contains and description_contains not in description: continue results["manage splunk search " + name] = {"splunk_search.present": d} return salt.utils.yaml.safe_dump(results, default_flow_style=False, width=120)
[ "def", "list_all", "(", "prefix", "=", "None", ",", "app", "=", "None", ",", "owner", "=", "None", ",", "description_contains", "=", "None", ",", "name_not_contains", "=", "None", ",", "profile", "=", "\"splunk\"", ")", ":", "client", "=", "_get_splunk", ...
Get all splunk search details. Produces results that can be used to create an sls file. if app or owner are specified, results will be limited to matching saved searches. if description_contains is specified, results will be limited to those where "description_contains in description" is true if name_not_contains is specified, results will be limited to those where "name_not_contains not in name" is true. If prefix parameter is given, alarm names in the output will be prepended with the prefix; alarms that have the prefix will be skipped. This can be used to convert existing alarms to be managed by salt, as follows: CLI example: 1. Make a "backup" of all existing searches $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls 2. Get all searches with new prefixed names $ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls 3. Insert the managed searches into splunk $ salt-call state.sls managed_searches.sls 4. Manually verify that the new searches look right 5. Delete the original searches $ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls $ salt-call state.sls remove_legacy_searches.sls 6. Get all searches again, verify no changes $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls $ diff final_searches.sls managed_searches.sls
[ "Get", "all", "splunk", "search", "details", ".", "Produces", "results", "that", "can", "be", "used", "to", "create", "an", "sls", "file", "." ]
python
train
40.77
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/encoder.py#L682-L703
def StringEncoder(field_number, is_repeated, is_packed): """Returns an encoder for a string field.""" tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) local_EncodeVarint = _EncodeVarint local_len = len assert not is_packed if is_repeated: def EncodeRepeatedField(write, value): for element in value: encoded = element.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) write(encoded) return EncodeRepeatedField else: def EncodeField(write, value): encoded = value.encode('utf-8') write(tag) local_EncodeVarint(write, local_len(encoded)) return write(encoded) return EncodeField
[ "def", "StringEncoder", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag", "=", "TagBytes", "(", "field_number", ",", "wire_format", ".", "WIRETYPE_LENGTH_DELIMITED", ")", "local_EncodeVarint", "=", "_EncodeVarint", "local_len", "=", "len", ...
Returns an encoder for a string field.
[ "Returns", "an", "encoder", "for", "a", "string", "field", "." ]
python
train
31.363636
ucsb-cs/submit
submit/models.py
https://github.com/ucsb-cs/submit/blob/92810c81255a4fc6bbebac1ac8aae856fd576ffe/submit/models.py#L625-L627
def can_view(self, user): """Return whether or not `user` can view the submission.""" return user in self.group.users or self.project.can_view(user)
[ "def", "can_view", "(", "self", ",", "user", ")", ":", "return", "user", "in", "self", ".", "group", ".", "users", "or", "self", ".", "project", ".", "can_view", "(", "user", ")" ]
Return whether or not `user` can view the submission.
[ "Return", "whether", "or", "not", "user", "can", "view", "the", "submission", "." ]
python
train
54
inveniosoftware/invenio-files-rest
invenio_files_rest/models.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L761-L766
def clear_last_check(self): """Clear the checksum of the file.""" with db.session.begin_nested(): self.last_check = None self.last_check_at = datetime.utcnow() return self
[ "def", "clear_last_check", "(", "self", ")", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "self", ".", "last_check", "=", "None", "self", ".", "last_check_at", "=", "datetime", ".", "utcnow", "(", ")", "return", "self" ]
Clear the checksum of the file.
[ "Clear", "the", "checksum", "of", "the", "file", "." ]
python
train
35.666667
woolfson-group/isambard
isambard/ampal/base_ampal.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/base_ampal.py#L853-L863
def unique_id(self): """Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id) """ chain = self.ampal_parent.ampal_parent.id residue = self.ampal_parent.id return chain, residue, self.id
[ "def", "unique_id", "(", "self", ")", ":", "chain", "=", "self", ".", "ampal_parent", ".", "ampal_parent", ".", "id", "residue", "=", "self", ".", "ampal_parent", ".", "id", "return", "chain", ",", "residue", ",", "self", ".", "id" ]
Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id)
[ "Creates", "a", "unique", "ID", "for", "the", "Atom", "based", "on", "its", "parents", "." ]
python
train
30.272727
mjirik/imtools
imtools/show_segmentation.py
https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/show_segmentation.py#L386-L439
def create_pvsm_file(vtk_files, pvsm_filename, relative_paths=True): """ Create paraview status file (.pvsm) based on input vtk files. :param vtk_files: :param pvsm_filename: :param relative_paths: :return: """ from xml.etree.ElementTree import Element, SubElement, Comment import os.path as op top = Element('ParaView') comment = Comment('Generated for PyMOTW') top.append(comment) numberi = 4923 # vtk_file = "C:\Users\miros\lisa_data\83779720_2_liver.vtk" sms = SubElement(top, "ServerManagerState", version="5.4.1") file_list = SubElement(sms, "ProxyCollection", name="sources") for vtk_file_orig in vtk_files: numberi +=1 dir, vtk_file_head = op.split(vtk_file_orig) if relative_paths: vtk_file = vtk_file_head else: vtk_file = vtk_file_orig number = str(numberi) proxy1 = SubElement(sms, "Proxy", group="sources", type="LegacyVTKFileReader", id=number, servers="1") property = SubElement(proxy1, "Property", name="FileNameInfo", id=number + ".FileNameInfo", number_of_elements="1") element = SubElement(property, "Element", index="0", value=vtk_file) property2 = SubElement(proxy1, "Property", name="FileNames", id=number + ".FileNames", number_of_elements="1") pr2s1 = SubElement(property2, "Element", index="0", value=vtk_file) pr2s2 = SubElement(property2, "Domain", name="files", id=number + ".FileNames.files") # < Property # name = "Opacity" # id = "8109.Opacity" # number_of_elements = "1" > # < Element # index = "0" # value = "0.28" / > # < Domain # name = "range" # id = "8109.Opacity.range" / > # < / Property > fn1 = SubElement(file_list, "Item", id=number, name=vtk_file_head) xml_str = prettify(top) # logger.debug(xml_str) with open(op.expanduser(pvsm_filename), "w") as file: file.write(xml_str)
[ "def", "create_pvsm_file", "(", "vtk_files", ",", "pvsm_filename", ",", "relative_paths", "=", "True", ")", ":", "from", "xml", ".", "etree", ".", "ElementTree", "import", "Element", ",", "SubElement", ",", "Comment", "import", "os", ".", "path", "as", "op",...
Create paraview status file (.pvsm) based on input vtk files. :param vtk_files: :param pvsm_filename: :param relative_paths: :return:
[ "Create", "paraview", "status", "file", "(", ".", "pvsm", ")", "based", "on", "input", "vtk", "files", ".", ":", "param", "vtk_files", ":", ":", "param", "pvsm_filename", ":", ":", "param", "relative_paths", ":", ":", "return", ":" ]
python
train
36.351852
cuzzo/iw_parse
iw_parse.py
https://github.com/cuzzo/iw_parse/blob/84c287dc6cfceb04ccbc0a8995f8a87323356ee5/iw_parse.py#L45-L63
def get_signal_level(cell): """ Gets the signal level of a network / cell. @param string cell A network / cell from iwlist scan. @return string The signal level of the network. """ signal = matching_line(cell, "Signal level=") if signal is None: return "" signal = signal.split("=")[1].split("/") if len(signal) == 2: return str(int(round(float(signal[0]) / float(signal[1]) * 100))) elif len(signal) == 1: return signal[0].split(' ')[0] else: return ""
[ "def", "get_signal_level", "(", "cell", ")", ":", "signal", "=", "matching_line", "(", "cell", ",", "\"Signal level=\"", ")", "if", "signal", "is", "None", ":", "return", "\"\"", "signal", "=", "signal", ".", "split", "(", "\"=\"", ")", "[", "1", "]", ...
Gets the signal level of a network / cell. @param string cell A network / cell from iwlist scan. @return string The signal level of the network.
[ "Gets", "the", "signal", "level", "of", "a", "network", "/", "cell", ".", "@param", "string", "cell", "A", "network", "/", "cell", "from", "iwlist", "scan", "." ]
python
train
27.526316
saltstack/salt
salt/modules/saltutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltutil.py#L1425-L1444
def regen_keys(): ''' Used to regenerate the minion keys. CLI Example: .. code-block:: bash salt '*' saltutil.regen_keys ''' for fn_ in os.listdir(__opts__['pki_dir']): path = os.path.join(__opts__['pki_dir'], fn_) try: os.remove(path) except os.error: pass # TODO: move this into a channel function? Or auth? # create a channel again, this will force the key regen channel = salt.transport.client.ReqChannel.factory(__opts__) channel.close()
[ "def", "regen_keys", "(", ")", ":", "for", "fn_", "in", "os", ".", "listdir", "(", "__opts__", "[", "'pki_dir'", "]", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'pki_dir'", "]", ",", "fn_", ")", "try", ":", "os"...
Used to regenerate the minion keys. CLI Example: .. code-block:: bash salt '*' saltutil.regen_keys
[ "Used", "to", "regenerate", "the", "minion", "keys", "." ]
python
train
26.1
ggravlingen/pytradfri
pytradfri/api/aiocoap_api.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/api/aiocoap_api.py#L90-L109
async def _get_response(self, msg): """Perform the request, get the response.""" try: protocol = await self._get_protocol() pr = protocol.request(msg) r = await pr.response return pr, r except ConstructionRenderableError as e: raise ClientError("There was an error with the request.", e) except RequestTimedOut as e: await self._reset_protocol(e) raise RequestTimeout('Request timed out.', e) except (OSError, socket.gaierror, Error) as e: # aiocoap sometimes raises an OSError/socket.gaierror too. # aiocoap issue #124 await self._reset_protocol(e) raise ServerError("There was an error with the request.", e) except asyncio.CancelledError as e: await self._reset_protocol(e) raise e
[ "async", "def", "_get_response", "(", "self", ",", "msg", ")", ":", "try", ":", "protocol", "=", "await", "self", ".", "_get_protocol", "(", ")", "pr", "=", "protocol", ".", "request", "(", "msg", ")", "r", "=", "await", "pr", ".", "response", "retur...
Perform the request, get the response.
[ "Perform", "the", "request", "get", "the", "response", "." ]
python
train
43.45
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L251-L257
def applet_describe(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /applet-xxxx/describe API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Fdescribe """ return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "applet_describe", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/describe'", "%", "object_id", ",", "input_params", ",", "always_retry", ...
Invokes the /applet-xxxx/describe API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Fdescribe
[ "Invokes", "the", "/", "applet", "-", "xxxx", "/", "describe", "API", "method", "." ]
python
train
54.571429
twilio/twilio-python
twilio/base/page.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/base/page.py#L81-L90
def previous_page_url(self): """ :return str: Returns a link to the previous_page_url or None if doesn't exist. """ if 'meta' in self._payload and 'previous_page_url' in self._payload['meta']: return self._payload['meta']['previous_page_url'] elif 'previous_page_uri' in self._payload and self._payload['previous_page_uri']: return self._version.domain.absolute_url(self._payload['previous_page_uri']) return None
[ "def", "previous_page_url", "(", "self", ")", ":", "if", "'meta'", "in", "self", ".", "_payload", "and", "'previous_page_url'", "in", "self", ".", "_payload", "[", "'meta'", "]", ":", "return", "self", ".", "_payload", "[", "'meta'", "]", "[", "'previous_p...
:return str: Returns a link to the previous_page_url or None if doesn't exist.
[ ":", "return", "str", ":", "Returns", "a", "link", "to", "the", "previous_page_url", "or", "None", "if", "doesn", "t", "exist", "." ]
python
train
47.7
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_spinn3r_feed_storage.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L301-L332
def _make_content_item(node, mime_type=None, alternate_data=None): """Create a ContentItem from a node in the spinn3r data tree. The ContentItem is created with raw data set to ``node.data``, decompressed if the node's encoding is 'zlib', and UTF-8 normalized, with a MIME type from ``node.mime_type``. ``node`` the actual node from the spinn3r protobuf data ``mime_type`` string MIME type to use (defaults to ``node.mime_type``) ``alternate_data`` alternate (compressed) data to use, if ``node.data`` is missing or can't be decompressed """ raw = node.data if getattr(node, 'encoding', None) == 'zlib': try: raw = zlib.decompress(node.data) except Exception, exc: if alternate_data is not None: try: raw = zlib.decompress(alternate_data) except Exception: raise exc # the original exception else: raise if mime_type is None: mime_type = node.mime_type raw = raw.decode('utf8').encode('utf8') return streamcorpus.ContentItem(raw=raw, media_type=mime_type)
[ "def", "_make_content_item", "(", "node", ",", "mime_type", "=", "None", ",", "alternate_data", "=", "None", ")", ":", "raw", "=", "node", ".", "data", "if", "getattr", "(", "node", ",", "'encoding'", ",", "None", ")", "==", "'zlib'", ":", "try", ":", ...
Create a ContentItem from a node in the spinn3r data tree. The ContentItem is created with raw data set to ``node.data``, decompressed if the node's encoding is 'zlib', and UTF-8 normalized, with a MIME type from ``node.mime_type``. ``node`` the actual node from the spinn3r protobuf data ``mime_type`` string MIME type to use (defaults to ``node.mime_type``) ``alternate_data`` alternate (compressed) data to use, if ``node.data`` is missing or can't be decompressed
[ "Create", "a", "ContentItem", "from", "a", "node", "in", "the", "spinn3r", "data", "tree", "." ]
python
test
36.25
iotaledger/iota.lib.py
iota/adapter/sandbox.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/adapter/sandbox.py#L191-L205
def get_jobs_url(self, job_id): # type: (Text) -> Text """ Returns the URL to check job status. :param job_id: The ID of the job to check. """ return compat.urllib_parse.urlunsplit(( self.uri.scheme, self.uri.netloc, self.uri.path.rstrip('/') + '/jobs/' + job_id, self.uri.query, self.uri.fragment, ))
[ "def", "get_jobs_url", "(", "self", ",", "job_id", ")", ":", "# type: (Text) -> Text", "return", "compat", ".", "urllib_parse", ".", "urlunsplit", "(", "(", "self", ".", "uri", ".", "scheme", ",", "self", ".", "uri", ".", "netloc", ",", "self", ".", "uri...
Returns the URL to check job status. :param job_id: The ID of the job to check.
[ "Returns", "the", "URL", "to", "check", "job", "status", "." ]
python
test
27.733333
Clinical-Genomics/scout
scout/server/blueprints/cases/views.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/views.py#L511-L551
def phenotypes_actions(institute_id, case_name): """Perform actions on multiple phenotypes.""" institute_obj, case_obj = institute_and_case(store, institute_id, case_name) case_url = url_for('.case', institute_id=institute_id, case_name=case_name) action = request.form['action'] hpo_ids = request.form.getlist('hpo_id') user_obj = store.user(current_user.email) if action == 'DELETE': for hpo_id in hpo_ids: # DELETE a phenotype from the list store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id) elif action == 'PHENOMIZER': if len(hpo_ids) == 0: hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])] username = current_app.config['PHENOMIZER_USERNAME'] password = current_app.config['PHENOMIZER_PASSWORD'] diseases = controllers.hpo_diseases(username, password, hpo_ids) return render_template('cases/diseases.html', diseases=diseases, institute=institute_obj, case=case_obj) elif action == 'GENES': hgnc_symbols = set() for raw_symbols in request.form.getlist('genes'): # avoid empty lists if raw_symbols: hgnc_symbols.update(raw_symbol.split(' ', 1)[0] for raw_symbol in raw_symbols.split('|')) store.update_dynamic_gene_list(case_obj, hgnc_symbols=hgnc_symbols) elif action == 'GENERATE': if len(hpo_ids) == 0: hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])] results = store.generate_hpo_gene_list(*hpo_ids) # determine how many HPO terms each gene must match hpo_count = int(request.form.get('min_match') or 1) hgnc_ids = [result[0] for result in results if result[1] >= hpo_count] store.update_dynamic_gene_list(case_obj, hgnc_ids=hgnc_ids, phenotype_ids=hpo_ids) return redirect(case_url)
[ "def", "phenotypes_actions", "(", "institute_id", ",", "case_name", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "case_url", "=", "url_for", "(", "'.case'", ",", "institute_id", ...
Perform actions on multiple phenotypes.
[ "Perform", "actions", "on", "multiple", "phenotypes", "." ]
python
test
47.878049
sv0/django-markdown-app
django_markdown/templatetags/django_markdown.py
https://github.com/sv0/django-markdown-app/blob/973968c68d79cbe35304e9d6da876ad33f427d2d/django_markdown/templatetags/django_markdown.py#L96-L110
def markdown_media_css():
    """Build the editor's CSS requirements for the HTML template.

    :returns: Editor template context.
    """
    set_css = posixpath.join(
        settings.MARKDOWN_SET_PATH,
        settings.MARKDOWN_SET_NAME,
        'style.css',
    )
    skin_css = posixpath.join(
        'django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN, 'style.css'
    )
    return {'CSS_SET': set_css, 'CSS_SKIN': skin_css}
[ "def", "markdown_media_css", "(", ")", ":", "return", "dict", "(", "CSS_SET", "=", "posixpath", ".", "join", "(", "settings", ".", "MARKDOWN_SET_PATH", ",", "settings", ".", "MARKDOWN_SET_NAME", ",", "'style.css'", ")", ",", "CSS_SKIN", "=", "posixpath", ".", ...
Add css requirements to HTML. :returns: Editor template context.
[ "Add", "css", "requirements", "to", "HTML", "." ]
python
train
25.466667
chaoss/grimoirelab-perceval
perceval/backends/core/confluence.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/confluence.py#L221-L224
def _init_client(self, from_archive=False):
    """Create the Confluence client bound to this backend's URL.

    :param from_archive: whether the client should serve data from the
        archive instead of the live server
    """
    client = ConfluenceClient(self.url, archive=self.archive,
                              from_archive=from_archive)
    return client
[ "def", "_init_client", "(", "self", ",", "from_archive", "=", "False", ")", ":", "return", "ConfluenceClient", "(", "self", ".", "url", ",", "archive", "=", "self", ".", "archive", ",", "from_archive", "=", "from_archive", ")" ]
Init client
[ "Init", "client" ]
python
test
39.5
Spinmob/spinmob
egg/_temporary_fixes.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_temporary_fixes.py#L251-L259
def value(self):
    """
    Return the value of this SpinBox.

    The raw value is coerced to ``int`` when the widget is configured
    for integer mode, otherwise to ``float``.
    """
    cast = int if self.opts['int'] else float
    return cast(self.val)
[ "def", "value", "(", "self", ")", ":", "if", "self", ".", "opts", "[", "'int'", "]", ":", "return", "int", "(", "self", ".", "val", ")", "else", ":", "return", "float", "(", "self", ".", "val", ")" ]
Return the value of this SpinBox.
[ "Return", "the", "value", "of", "this", "SpinBox", "." ]
python
train
21.555556
JonathanRaiman/pytreebank
pytreebank/parse.py
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L112-L124
def labels(self):
    """
    Construct a dictionary of string -> labels

    Returns:
    --------
        OrderedDict<str, int> : string label pairs.
    """
    return OrderedDict(
        (line, label)
        for tree in self
        for label, line in tree.to_labeled_lines()
    )
[ "def", "labels", "(", "self", ")", ":", "labelings", "=", "OrderedDict", "(", ")", "for", "tree", "in", "self", ":", "for", "label", ",", "line", "in", "tree", ".", "to_labeled_lines", "(", ")", ":", "labelings", "[", "line", "]", "=", "label", "retu...
Construct a dictionary of string -> labels Returns: -------- OrderedDict<str, int> : string label pairs.
[ "Construct", "a", "dictionary", "of", "string", "-", ">", "labels" ]
python
train
27.076923
sffjunkie/astral
src/astral.py
https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L1390-L1412
def moon_phase(self, date=None, rtype=int):
    """Calculates the moon phase for a specific date.

    :param date: The date to calculate the phase for.
                 If omitted the current date is used.
    :type date: :class:`datetime.date`

    :returns:
        A number designating the phase

        | 0 = New moon
        | 7 = First quarter
        | 14 = Full moon
        | 21 = Last quarter
    """
    # lazily create the shared Astral calculator
    if self.astral is None:
        self.astral = Astral()

    calc_date = date if date is not None else datetime.date.today()
    return self.astral.moon_phase(calc_date, rtype)
[ "def", "moon_phase", "(", "self", ",", "date", "=", "None", ",", "rtype", "=", "int", ")", ":", "if", "self", ".", "astral", "is", "None", ":", "self", ".", "astral", "=", "Astral", "(", ")", "if", "date", "is", "None", ":", "date", "=", "datetim...
Calculates the moon phase for a specific date. :param date: The date to calculate the phase for. If omitted the current date is used. :type date: :class:`datetime.date` :returns: A number designating the phase | 0 = New moon | 7 = First quarter | 14 = Full moon | 21 = Last quarter
[ "Calculates", "the", "moon", "phase", "for", "a", "specific", "date", "." ]
python
train
27.826087
jborean93/ntlm-auth
ntlm_auth/compute_hash.py
https://github.com/jborean93/ntlm-auth/blob/2c7cd81516d9bfd42e8ff473a534d876b21ebb38/ntlm_auth/compute_hash.py#L12-L45
def _lmowfv1(password): """ [MS-NLMP] v28.0 2016-07-14 3.3.1 NTLM v1 Authentication Same function as LMOWFv1 in document to create a one way hash of the password. Only used in NTLMv1 auth without session security :param password: The password or hash of the user we are trying to authenticate with :return res: A Lan Manager hash of the password supplied """ # if the password is a hash, return the LM hash if re.match(r'^[a-fA-F\d]{32}:[a-fA-F\d]{32}$', password): lm_hash = binascii.unhexlify(password.split(':')[0]) return lm_hash # fix the password to upper case and length to 14 bytes password = password.upper() lm_pw = password.encode('utf-8') padding_size = 0 if len(lm_pw) >= 14 else (14 - len(lm_pw)) lm_pw += b"\x00" * padding_size # do hash magic_str = b"KGS!@#$%" # page 56 in [MS-NLMP v28.0] res = b"" dobj = DES(DES.key56_to_key64(lm_pw[0:7])) res += dobj.encrypt(magic_str) dobj = DES(DES.key56_to_key64(lm_pw[7:14])) res += dobj.encrypt(magic_str) return res
[ "def", "_lmowfv1", "(", "password", ")", ":", "# if the password is a hash, return the LM hash", "if", "re", ".", "match", "(", "r'^[a-fA-F\\d]{32}:[a-fA-F\\d]{32}$'", ",", "password", ")", ":", "lm_hash", "=", "binascii", ".", "unhexlify", "(", "password", ".", "sp...
[MS-NLMP] v28.0 2016-07-14 3.3.1 NTLM v1 Authentication Same function as LMOWFv1 in document to create a one way hash of the password. Only used in NTLMv1 auth without session security :param password: The password or hash of the user we are trying to authenticate with :return res: A Lan Manager hash of the password supplied
[ "[", "MS", "-", "NLMP", "]", "v28", ".", "0", "2016", "-", "07", "-", "14" ]
python
train
31.352941
openvax/pyensembl
pyensembl/ensembl_release.py
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/ensembl_release.py#L38-L45
def normalize_init_values(cls, release, species, server):
    """
    Normalizes the arguments which uniquely specify an EnsemblRelease
    genome and returns them as a ``(release, species, server)`` tuple.
    """
    return (
        check_release_number(release),
        check_species_object(species),
        server,
    )
[ "def", "normalize_init_values", "(", "cls", ",", "release", ",", "species", ",", "server", ")", ":", "release", "=", "check_release_number", "(", "release", ")", "species", "=", "check_species_object", "(", "species", ")", "return", "(", "release", ",", "speci...
Normalizes the arguments which uniquely specify an EnsemblRelease genome.
[ "Normalizes", "the", "arguments", "which", "uniquely", "specify", "an", "EnsemblRelease", "genome", "." ]
python
train
37.75
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/orm_inspect.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_inspect.py#L240-L288
def copy_sqla_object(obj: object, omit_fk: bool = True, omit_pk: bool = True,
                     omit_attrs: List[str] = None,
                     debug: bool = False) -> object:
    """
    Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT
    MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and
    copies across all attributes, omitting PKs (by default), FKs (by
    default), and relationship attributes (always omitted).

    Args:
        obj: the object to copy
        omit_fk: omit foreign keys (FKs)?
        omit_pk: omit primary keys (PKs)?
        omit_attrs: attributes (by name) not to copy
        debug: be verbose

    Returns:
        a new copy of the object
    """
    omit_attrs = omit_attrs or []  # type: List[str]
    cls = type(obj)
    mapper = class_mapper(cls)
    newobj = cls()  # not: cls.__new__(cls)
    # Relationship attributes are never copied.
    rel_keys = set([c.key for c in mapper.relationships])
    prohibited = rel_keys
    if omit_pk:
        pk_keys = set([c.key for c in mapper.primary_key])
        prohibited |= pk_keys
    if omit_fk:
        fk_keys = set([c.key for c in mapper.columns if c.foreign_keys])
        prohibited |= fk_keys
    prohibited |= set(omit_attrs)
    if debug:
        log.debug("copy_sqla_object: skipping: {}", prohibited)
    # Copy every remaining mapped attribute onto the new instance.
    for k in [p.key for p in mapper.iterate_properties
              if p.key not in prohibited]:
        try:
            value = getattr(obj, k)
            if debug:
                log.debug("copy_sqla_object: processing attribute {} = {}",
                          k, value)
            setattr(newobj, k, value)
        except AttributeError:
            # Attribute missing on the source object: skip it (only
            # reported when debug is enabled).
            if debug:
                log.debug("copy_sqla_object: failed attribute {}", k)
            pass
    return newobj
[ "def", "copy_sqla_object", "(", "obj", ":", "object", ",", "omit_fk", ":", "bool", "=", "True", ",", "omit_pk", ":", "bool", "=", "True", ",", "omit_attrs", ":", "List", "[", "str", "]", "=", "None", ",", "debug", ":", "bool", "=", "False", ")", "-...
Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and copies across all attributes, omitting PKs (by default), FKs (by default), and relationship attributes (always omitted). Args: obj: the object to copy omit_fk: omit foreign keys (FKs)? omit_pk: omit primary keys (PKs)? omit_attrs: attributes (by name) not to copy debug: be verbose Returns: a new copy of the object
[ "Given", "an", "SQLAlchemy", "object", "creates", "a", "new", "object", "(", "FOR", "WHICH", "THE", "OBJECT", "MUST", "SUPPORT", "CREATION", "USING", "__init__", "()", "WITH", "NO", "PARAMETERS", ")", "and", "copies", "across", "all", "attributes", "omitting",...
python
train
36.183673
juiceinc/recipe
recipe/core.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L276-L305
def filters(self, *filters):
    """
    Add a list of Filter ingredients to the query. These can either be
    Filter objects or strings representing filters on the service's shelf.
    ``.filters()`` are additive, calling .filters() more than once will add
    to the list of filters being used by the recipe.

    The Filter expression will be added to the query's where clause

    :param filters: Filters to add to the recipe. Filters can
                    either be keys on the ``shelf`` or
                    Filter objects
    :type filters: list
    """
    def filter_constructor(f, shelf=None):
        # wrap raw SQLAlchemy expressions in a Filter ingredient;
        # pass everything else through unchanged
        return Filter(f) if isinstance(f, BinaryExpression) else f

    for flt in filters:
        ingredient = self._shelf.find(
            flt, (Filter, Having), constructor=filter_constructor
        )
        self._cauldron.use(ingredient)

    self.dirty = True
    return self
[ "def", "filters", "(", "self", ",", "*", "filters", ")", ":", "def", "filter_constructor", "(", "f", ",", "shelf", "=", "None", ")", ":", "if", "isinstance", "(", "f", ",", "BinaryExpression", ")", ":", "return", "Filter", "(", "f", ")", "else", ":",...
Add a list of Filter ingredients to the query. These can either be Filter objects or strings representing filters on the service's shelf. ``.filters()`` are additive, calling .filters() more than once will add to the list of filters being used by the recipe. The Filter expression will be added to the query's where clause :param filters: Filters to add to the recipe. Filters can either be keys on the ``shelf`` or Filter objects :type filters: list
[ "Add", "a", "list", "of", "Filter", "ingredients", "to", "the", "query", ".", "These", "can", "either", "be", "Filter", "objects", "or", "strings", "representing", "filters", "on", "the", "service", "s", "shelf", ".", ".", "filters", "()", "are", "additive...
python
train
33.266667
keenlabs/KeenClient-Python
keen/saved_queries.py
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/saved_queries.py#L48-L58
def results(self, query_name):
    """
    Gets a single saved query with a 'result' object for a project
    from the Keen IO API given a query name.
    Read or Master key must be set.
    """
    url = "/".join([self.saved_query_url, query_name, "result"])
    return self._get_json(HTTPMethods.GET, url, self._get_read_key())
[ "def", "results", "(", "self", ",", "query_name", ")", ":", "url", "=", "\"{0}/{1}/result\"", ".", "format", "(", "self", ".", "saved_query_url", ",", "query_name", ")", "response", "=", "self", ".", "_get_json", "(", "HTTPMethods", ".", "GET", ",", "url",...
Gets a single saved query with a 'result' object for a project from the Keen IO API given a query name. Read or Master key must be set.
[ "Gets", "a", "single", "saved", "query", "with", "a", "result", "object", "for", "a", "project", "from", "the", "Keen", "IO", "API", "given", "a", "query", "name", ".", "Read", "or", "Master", "key", "must", "be", "set", "." ]
python
train
34.545455
google/grr
grr/server/grr_response_server/aff4_objects/filestore.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/filestore.py#L282-L336
def _HashFile(self, fd):
    """Look for the required hashes in the file."""
    # Reuse previously stored hashes when every hash type required by
    # HASH_TYPES is already present on the file's hash entry.
    hashes = data_store_utils.GetFileHashEntry(fd)
    if hashes:
        found_all = True
        for fingerprint_type, hash_types in iteritems(self.HASH_TYPES):
            for hash_type in hash_types:
                # pecoff hashes are stored under a "pecoff_" prefix
                if fingerprint_type == "pecoff":
                    hash_type = "pecoff_%s" % hash_type
                if not hashes.HasField(hash_type):
                    found_all = False
                    break
            if not found_all:
                break
        if found_all:
            return hashes

    # Something is missing: fingerprint the file from scratch.
    fingerprinter = fingerprint.Fingerprinter(fd)
    if "generic" in self.HASH_TYPES:
        hashers = self._GetHashers(self.HASH_TYPES["generic"])
        fingerprinter.EvalGeneric(hashers=hashers)
    if "pecoff" in self.HASH_TYPES:
        hashers = self._GetHashers(self.HASH_TYPES["pecoff"])
        if hashers:
            fingerprinter.EvalPecoff(hashers=hashers)

    if not hashes:
        hashes = fd.Schema.HASH()

    for result in fingerprinter.HashIt():
        fingerprint_type = result["name"]
        for hash_type in self.HASH_TYPES[fingerprint_type]:
            if hash_type not in result:
                continue

            if hash_type == "SignedData":
                # There can be several certs in the same file.
                for signed_data in result[hash_type]:
                    hashes.signed_data.Append(revision=signed_data[0],
                                              cert_type=signed_data[1],
                                              certificate=signed_data[2])
                continue

            # Set the hashes in the original object
            if fingerprint_type == "generic":
                hashes.Set(hash_type, result[hash_type])
            elif fingerprint_type == "pecoff":
                hashes.Set("pecoff_%s" % hash_type, result[hash_type])
            else:
                logging.error("Unknown fingerprint_type %s.", fingerprint_type)

    return hashes
[ "def", "_HashFile", "(", "self", ",", "fd", ")", ":", "hashes", "=", "data_store_utils", ".", "GetFileHashEntry", "(", "fd", ")", "if", "hashes", ":", "found_all", "=", "True", "for", "fingerprint_type", ",", "hash_types", "in", "iteritems", "(", "self", "...
Look for the required hashes in the file.
[ "Look", "for", "the", "required", "hashes", "in", "the", "file", "." ]
python
train
32.436364
seanpar203/event-bus
event_bus/bus.py
https://github.com/seanpar203/event-bus/blob/60319b9eb4e38c348e80f3ec625312eda75da765/event_bus/bus.py#L73-L92
def on(self, event: str) -> Callable:
    """
    Decorator for subscribing a function to a specific event.

    :param event: Name of the event to subscribe to.
    :type event: str

    :return: The outer function.
    :rtype: Callable
    """
    def outer(func):
        # A metadata-preserving pass-through so the decorated name still
        # looks and behaves like the original function.
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # Register the original callable (not the wrapper) for the event.
        self.add_event(func, event)
        return wrapper

    return outer
[ "def", "on", "(", "self", ",", "event", ":", "str", ")", "->", "Callable", ":", "def", "outer", "(", "func", ")", ":", "self", ".", "add_event", "(", "func", ",", "event", ")", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ...
Decorator for subscribing a function to a specific event. :param event: Name of the event to subscribe to. :type event: str :return: The outer function. :rtype: Callable
[ "Decorator", "for", "subscribing", "a", "function", "to", "a", "specific", "event", "." ]
python
train
23.75
project-rig/rig
rig/routing_table/utils.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L108-L198
def table_is_subset_of(entries_a, entries_b): """Check that every key matched by every entry in one table results in the same route when checked against the other table. For example, the table:: >>> from rig.routing_table import Routes >>> table = [ ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf), ... RoutingTableEntry({Routes.east}, 0x1, 0xf), ... RoutingTableEntry({Routes.south_west}, 0x5, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf), ... RoutingTableEntry({Routes.east}, 0x9, 0xf), ... RoutingTableEntry({Routes.south_west}, 0xe, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf), ... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb), ... ] is a functional subset of a minimised version of itself:: >>> from rig.routing_table.ordered_covering import minimise >>> other_table = minimise(table, target_length=None) >>> other_table == table False >>> table_is_subset_of(table, other_table) True But not vice-versa:: >>> table_is_subset_of(other_table, table) False Default routes are taken into account, such that the table:: >>> table = [ ... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}), ... ] is a subset of the empty table:: >>> table_is_subset_of(table, list()) True Parameters ---------- entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Ordered of lists of routing table entries to compare. Returns ------- bool True if every key matched in `entries_a` would result in an equivalent route for the packet when matched in `entries_b`. 
""" # Determine which bits we don't need to explicitly test for common_xs = get_common_xs(entries_b) # For every entry in the first table for entry in expand_entries(entries_a, ignore_xs=common_xs): # Look at every entry in the second table for other_entry in entries_b: # If the first entry matches the second if other_entry.mask & entry.key == other_entry.key: if other_entry.route == entry.route: # If the route is the same then we move on to the next # entry in the first table. break else: # Otherwise we return false as the tables are different return False else: # If we didn't break out of the loop then the entry from the first # table never matched an entry in the second table. If the entry # from the first table could not be default routed we return False # as the tables cannot be equivalent. default_routed = False if len(entry.route) == 1 and len(entry.sources) == 1: source = next(iter(entry.sources)) sink = next(iter(entry.route)) if (source is not None and sink.is_link and source is sink.opposite): default_routed = True if not default_routed: return False return True
[ "def", "table_is_subset_of", "(", "entries_a", ",", "entries_b", ")", ":", "# Determine which bits we don't need to explicitly test for", "common_xs", "=", "get_common_xs", "(", "entries_b", ")", "# For every entry in the first table", "for", "entry", "in", "expand_entries", ...
Check that every key matched by every entry in one table results in the same route when checked against the other table. For example, the table:: >>> from rig.routing_table import Routes >>> table = [ ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf), ... RoutingTableEntry({Routes.east}, 0x1, 0xf), ... RoutingTableEntry({Routes.south_west}, 0x5, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf), ... RoutingTableEntry({Routes.east}, 0x9, 0xf), ... RoutingTableEntry({Routes.south_west}, 0xe, 0xf), ... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf), ... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb), ... ] is a functional subset of a minimised version of itself:: >>> from rig.routing_table.ordered_covering import minimise >>> other_table = minimise(table, target_length=None) >>> other_table == table False >>> table_is_subset_of(table, other_table) True But not vice-versa:: >>> table_is_subset_of(other_table, table) False Default routes are taken into account, such that the table:: >>> table = [ ... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}), ... ] is a subset of the empty table:: >>> table_is_subset_of(table, list()) True Parameters ---------- entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] Ordered of lists of routing table entries to compare. Returns ------- bool True if every key matched in `entries_a` would result in an equivalent route for the packet when matched in `entries_b`.
[ "Check", "that", "every", "key", "matched", "by", "every", "entry", "in", "one", "table", "results", "in", "the", "same", "route", "when", "checked", "against", "the", "other", "table", "." ]
python
train
37.065934
codelv/enaml-native-cli
enamlnativecli/main.py
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1678-L1706
def _default_ctx(self):
    """ Return the package config or context and normalize some of the
    values.

    Reads the YAML package file when running inside an app directory
    (falling back to an empty context otherwise) and, for in-app
    contexts, fills the 'ios' and 'android' environments with default
    build paths, conda prefix, and the merged exclusion lists.
    """
    if not self.in_app_directory:
        print("Warning: {} does not exist. Using the default.".format(
            self.package))
        ctx = {}
    else:
        with open(self.package) as f:
            # RoundTripLoader preserves comments/ordering in the loaded
            # YAML (useful if the config is written back later).
            ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))

    if self.in_app_directory:
        # Update the env for each platform
        excluded = list(ctx.get('excluded', []))
        for env in [ctx['ios'], ctx['android']]:
            # Only fill defaults that the config did not set explicitly.
            if 'python_build_dir' not in env:
                env['python_build_dir'] = expanduser(abspath('build/python'))
            if 'conda_prefix' not in env:
                env['conda_prefix'] = os.environ.get(
                    'CONDA_PREFIX',
                    expanduser(abspath('venv')))

            # Join the shared and local exclusions
            env['excluded'] = list(env.get('excluded', [])) + excluded

    return ctx
[ "def", "_default_ctx", "(", "self", ")", ":", "if", "not", "self", ".", "in_app_directory", ":", "print", "(", "\"Warning: {} does not exist. Using the default.\"", ".", "format", "(", "self", ".", "package", ")", ")", "ctx", "=", "{", "}", "else", ":", "wit...
Return the package config or context and normalize some of the values
[ "Return", "the", "package", "config", "or", "context", "and", "normalize", "some", "of", "the", "values" ]
python
train
36.034483
JoelBender/bacpypes
py25/bacpypes/constructeddata.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/constructeddata.py#L1470-L1481
def cast_in(self, element):
    """Encode the element into the internal tag list."""
    if _debug: SequenceOfAny._debug("cast_in %r", element)

    # only List instances can be encoded into a SequenceOfAny
    if not isinstance(element, List):
        raise EncodingError("%r is not a list" % (element,))

    scratch = TagList()
    element.encode(scratch)
    self.tagList.extend(scratch.tagList)
[ "def", "cast_in", "(", "self", ",", "element", ")", ":", "if", "_debug", ":", "SequenceOfAny", ".", "_debug", "(", "\"cast_in %r\"", ",", "element", ")", "# make sure it is a list", "if", "not", "isinstance", "(", "element", ",", "List", ")", ":", "raise", ...
encode the element into the internal tag list.
[ "encode", "the", "element", "into", "the", "internal", "tag", "list", "." ]
python
train
30.833333
theelous3/asks
asks/request_object.py
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/request_object.py#L710-L721
async def _body_callback(self, h11_connection):
    '''
    A callback func to be supplied if the user wants to do something
    directly with the response body's stream.
    '''
    # pylint: disable=not-callable
    while True:
        event = await self._recv_event(h11_connection)
        # anything other than body data ends the stream; hand it back
        if not isinstance(event, h11.Data):
            return event
        await self.callback(event.data)
[ "async", "def", "_body_callback", "(", "self", ",", "h11_connection", ")", ":", "# pylint: disable=not-callable", "while", "True", ":", "next_event", "=", "await", "self", ".", "_recv_event", "(", "h11_connection", ")", "if", "isinstance", "(", "next_event", ",", ...
A callback func to be supplied if the user wants to do something directly with the response body's stream.
[ "A", "callback", "func", "to", "be", "supplied", "if", "the", "user", "wants", "to", "do", "something", "directly", "with", "the", "response", "body", "s", "stream", "." ]
python
train
38.333333
bram85/topydo
topydo/ui/columns/TodoWidget.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/TodoWidget.py#L164-L195
def create(p_class, p_todo, p_id_width=4):
    """
    Creates a TodoWidget instance for the given todo. Widgets are
    cached: the same object is returned for the same todo item.
    """
    def parent_progress_may_have_changed(p_todo):
        """
        Returns True when a todo's progress should be updated because it
        is dependent on the parent's progress.
        """
        return p_todo.has_tag('p') and not p_todo.has_tag('due')

    source = p_todo.source()

    if source not in p_class.cache:
        # cache miss: build a fresh widget and remember it
        widget = p_class(p_todo, p_id_width)
        p_class.cache[source] = widget
        return widget

    widget = p_class.cache[source]
    if p_todo is not widget.todo:
        # Same source text but a different Todo instance (e.g. after an
        # edit where a new Todo was created with identical text): simply
        # repoint the cached widget at the new instance.
        widget.todo = p_todo
        if parent_progress_may_have_changed(p_todo):
            widget.update_progress()
    return widget
[ "def", "create", "(", "p_class", ",", "p_todo", ",", "p_id_width", "=", "4", ")", ":", "def", "parent_progress_may_have_changed", "(", "p_todo", ")", ":", "\"\"\"\n Returns True when a todo's progress should be updated because it is\n dependent on the parent...
Creates a TodoWidget instance for the given todo. Widgets are cached, the same object is returned for the same todo item.
[ "Creates", "a", "TodoWidget", "instance", "for", "the", "given", "todo", ".", "Widgets", "are", "cached", "the", "same", "object", "is", "returned", "for", "the", "same", "todo", "item", "." ]
python
train
35.5625
senaite/senaite.core
bika/lims/browser/workflow/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/workflow/analysisrequest.py#L275-L286
def set_printed_time(self, sample):
    """Updates the printed time of the last results report from the sample.

    Only applies to samples in the ``published`` state: the most recently
    published ``ARReport`` gets its printed date stamped (only if not
    already stamped) and the sample's ``getPrinted`` index is refreshed.

    :param sample: sample whose last report should be stamped
    :returns: True if the sample was published and had at least one
        report, False otherwise
    """
    if api.get_workflow_status_of(sample) != "published":
        return False
    reports = sample.objectValues("ARReport")
    # BUGFIX: a published sample without any generated report previously
    # raised IndexError on reports[-1]; treat it as "nothing to stamp".
    if not reports:
        return False
    reports = sorted(reports, key=lambda report: report.getDatePublished())
    last_report = reports[-1]
    if not last_report.getDatePrinted():
        last_report.setDatePrinted(DateTime())
        # keep the catalog's printed flag in sync with the new date
        sample.reindexObject(idxs=["getPrinted"])
    return True
[ "def", "set_printed_time", "(", "self", ",", "sample", ")", ":", "if", "api", ".", "get_workflow_status_of", "(", "sample", ")", "!=", "\"published\"", ":", "return", "False", "reports", "=", "sample", ".", "objectValues", "(", "\"ARReport\"", ")", "reports", ...
Updates the printed time of the last results report from the sample
[ "Updates", "the", "printed", "time", "of", "the", "last", "results", "report", "from", "the", "sample" ]
python
train
44.666667
inasafe/inasafe
safe/gui/widgets/field_mapping_widget.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/field_mapping_widget.py#L88-L104
def get_field_mapping(self):
    """Obtain metadata from current state of the widget.

    :returns: Dictionary of values by type in this format:
        {'fields': {}, 'values': {}}.
    :rtype: dict
    """
    merged = {'fields': {}, 'values': {}}
    for tab in self.tabs:
        tab_parameters = tab.get_parameter_value()
        merged['fields'].update(tab_parameters['fields'])
        merged['values'].update(tab_parameters['values'])
    return merged
[ "def", "get_field_mapping", "(", "self", ")", ":", "fields", "=", "{", "}", "values", "=", "{", "}", "for", "tab", "in", "self", ".", "tabs", ":", "parameter_values", "=", "tab", ".", "get_parameter_value", "(", ")", "fields", ".", "update", "(", "para...
Obtain metadata from current state of the widget. :returns: Dictionary of values by type in this format: {'fields': {}, 'values': {}}. :rtype: dict
[ "Obtain", "metadata", "from", "current", "state", "of", "the", "widget", "." ]
python
train
31.352941
gtaylor/python-colormath
examples/conversions.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/examples/conversions.py#L17-L30
def example_lab_to_xyz():
    """
    This function shows a simple conversion of an Lab color to an XYZ color.
    """
    print("=== Simple Example: Lab->XYZ ===")

    # Instantiate an Lab color object with the given values.
    source_lab = LabColor(0.903, 16.296, -2.22)
    # Show a string representation.
    print(source_lab)

    # Convert to XYZ and display the result.
    print(convert_color(source_lab, XYZColor))

    print("=== End Example ===\n")
[ "def", "example_lab_to_xyz", "(", ")", ":", "print", "(", "\"=== Simple Example: Lab->XYZ ===\"", ")", "# Instantiate an Lab color object with the given values.", "lab", "=", "LabColor", "(", "0.903", ",", "16.296", ",", "-", "2.22", ")", "# Show a string representation.", ...
This function shows a simple conversion of an Lab color to an XYZ color.
[ "This", "function", "shows", "a", "simple", "conversion", "of", "an", "Lab", "color", "to", "an", "XYZ", "color", "." ]
python
train
29.714286
saltstack/salt
salt/modules/inspectlib/collector.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/collector.py#L126-L152
def __get_cfg_pkgs_rpm(self):
    '''
    Get packages with configuration files on RPM systems.
    '''
    out, err = self._syscall('rpm', None, None, '-qa', '--configfiles',
                             '--queryformat', '%{name}-%{version}-%{release}\\n')
    data = dict()
    pkg_name = None
    pkg_configs = []

    for line in salt.utils.stringutils.to_str(out).split(os.linesep):
        line = line.strip()
        if not line:
            continue
        if line.startswith("/"):
            # configuration file path belonging to the current package
            pkg_configs.append(line)
        else:
            # new package header: flush the previous package first
            if pkg_name and pkg_configs:
                data[pkg_name] = pkg_configs
            pkg_name = line
            pkg_configs = []

    # flush the trailing package
    if pkg_name and pkg_configs:
        data[pkg_name] = pkg_configs

    return data
[ "def", "__get_cfg_pkgs_rpm", "(", "self", ")", ":", "out", ",", "err", "=", "self", ".", "_syscall", "(", "'rpm'", ",", "None", ",", "None", ",", "'-qa'", ",", "'--configfiles'", ",", "'--queryformat'", ",", "'%{name}-%{version}-%{release}\\\\n'", ")", "data",...
Get packages with configuration files on RPM systems.
[ "Get", "packages", "with", "configuration", "files", "on", "RPM", "systems", "." ]
python
train
31.703704
tanghaibao/jcvi
jcvi/apps/gbsubmit.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/gbsubmit.py#L125-L175
def fcs(args):
    """
    %prog fcs fcsfile

    Process the results from Genbank contaminant screen. An example of the file
    looks like:

    contig name, length, span(s), apparent source
    contig0746      11760   1..141  vector
    contig0751      14226   13476..14226    vector
    contig0800      124133  30512..30559    primer/adapter
    """
    p = OptionParser(fcs.__doc__)
    p.add_option("--cutoff", default=200,
            help="Skip small components less than [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fcsfile, = args
    cutoff = opts.cutoff
    fp = open(fcsfile)
    for row in fp:
        # header / comment lines start with '#'
        if row[0] == "#":
            continue

        # rows may be tab- or whitespace-separated; the final column is
        # the contamination label and may itself contain spaces, so split
        # into at most 4 fields
        sep = "\t" if "\t" in row else None
        atoms = row.rstrip().split(sep, 3)
        contig, length = atoms[:2]
        length = int(length)
        label = atoms[-1]
        label = label.replace(" ", "_")

        if len(atoms) == 3:
            # no span column: the whole contig is flagged
            ranges = "{0}..{1}".format(1, length)
        else:
            assert len(atoms) == 4
            ranges = atoms[2]

        for ab in ranges.split(","):
            a, b = ab.split("..")
            a, b = int(a), int(b)
            assert a <= b
            # spans within `cutoff` bases of a contig end are extended all
            # the way to that end
            ahang = a - 1
            bhang = length - b
            if ahang < cutoff:
                a = 1
            if bhang < cutoff:
                b = length

            # emit BED-style coordinates: 0-based start, 1-based end
            print("\t".join(str(x) for x in (contig, a - 1, b, label)))
[ "def", "fcs", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fcs", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--cutoff\"", ",", "default", "=", "200", ",", "help", "=", "\"Skip small components less than [default: %default]\"", ")", "opts", ...
%prog fcs fcsfile Process the results from Genbank contaminant screen. An example of the file looks like: contig name, length, span(s), apparent source contig0746 11760 1..141 vector contig0751 14226 13476..14226 vector contig0800 124133 30512..30559 primer/adapter
[ "%prog", "fcs", "fcsfile" ]
python
train
28.156863
ArchiveTeam/wpull
wpull/warc/recorder.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/warc/recorder.py#L595-L623
def _record_revisit(self, payload_offset: int): '''Record the revisit if possible.''' fields = self._response_record.fields ref_record_id = self._url_table.get_revisit_id( fields['WARC-Target-URI'], fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '') ) if ref_record_id: try: self._response_record.block_file.truncate(payload_offset) except TypeError: self._response_record.block_file.seek(0) data = self._response_record.block_file.read(payload_offset) self._response_record.block_file.truncate() self._response_record.block_file.seek(0) self._response_record.block_file.write(data) self._recorder.set_length_and_maybe_checksums( self._response_record ) fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT fields['WARC-Refers-To'] = ref_record_id fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI fields['WARC-Truncated'] = 'length'
[ "def", "_record_revisit", "(", "self", ",", "payload_offset", ":", "int", ")", ":", "fields", "=", "self", ".", "_response_record", ".", "fields", "ref_record_id", "=", "self", ".", "_url_table", ".", "get_revisit_id", "(", "fields", "[", "'WARC-Target-URI'", ...
Record the revisit if possible.
[ "Record", "the", "revisit", "if", "possible", "." ]
python
train
38.137931
mikedh/trimesh
trimesh/path/packing.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/packing.py#L178-L222
def pack_paths(paths, sheet_size=None): """ Pack a list of Path2D objects into a rectangle. Parameters ------------ paths: (n,) Path2D Geometry to be packed Returns ------------ packed : trimesh.path.Path2D Object containing input geometry inserted : (m,) int Indexes of paths inserted into result """ from .util import concatenate if sheet_size is not None: sheet_size = np.sort(sheet_size)[::-1] quantity = [] for path in paths: if 'quantity' in path.metadata: quantity.append(path.metadata['quantity']) else: quantity.append(1) # pack using exterior polygon (will OBB) polygons = [i.polygons_closed[i.root[0]] for i in paths] # pack the polygons using rectangular bin packing inserted, transforms = multipack(polygons=polygons, quantity=quantity, sheet_size=sheet_size) multi = [] for i, T in zip(inserted, transforms): multi.append(paths[i].copy()) multi[-1].apply_transform(T) # append all packed paths into a single Path object packed = concatenate(multi) return packed, inserted
[ "def", "pack_paths", "(", "paths", ",", "sheet_size", "=", "None", ")", ":", "from", ".", "util", "import", "concatenate", "if", "sheet_size", "is", "not", "None", ":", "sheet_size", "=", "np", ".", "sort", "(", "sheet_size", ")", "[", ":", ":", "-", ...
Pack a list of Path2D objects into a rectangle. Parameters ------------ paths: (n,) Path2D Geometry to be packed Returns ------------ packed : trimesh.path.Path2D Object containing input geometry inserted : (m,) int Indexes of paths inserted into result
[ "Pack", "a", "list", "of", "Path2D", "objects", "into", "a", "rectangle", "." ]
python
train
26.622222
humilis/humilis-lambdautils
lambdautils/state.py
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L395-L401
def get_context(namespace, context_id): """Get stored context object.""" context_obj = get_state(context_id, namespace=namespace) if not context_obj: raise ContextError("Context '{}' not found in namespace '{}'".format( context_id, namespace)) return context_obj
[ "def", "get_context", "(", "namespace", ",", "context_id", ")", ":", "context_obj", "=", "get_state", "(", "context_id", ",", "namespace", "=", "namespace", ")", "if", "not", "context_obj", ":", "raise", "ContextError", "(", "\"Context '{}' not found in namespace '{...
Get stored context object.
[ "Get", "stored", "context", "object", "." ]
python
train
41.714286
openbermuda/ripl
ripl/md2slides.py
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/md2slides.py#L62-L109
def generate_slides(self, infile): """ Process a file of rest and yield dictionaries """ state = 0 # each slide is a dict slide = {} last_heading = 0 for item in self.generate_lines(infile): line = item['line'] heading = item['heading'] indent = item['indent'] # Any heading is the heading for a new slide if heading: if slide and last_heading <= 1: yield slide last_heading = heading rows = [] slide = {} if heading < 2: slide.update(dict( heading = dict(text=line.strip('#')), rows = rows)) continue # Any block with more than one hash is a comment if last_heading > 1: continue if indent == 0 and line: # at a potential image rows.append(self.build_row(line)) else: # Just add the line of text items = [dict(text=(' ' * indent) + line)] rows.append(dict(items=items)) if slide: yield slide
[ "def", "generate_slides", "(", "self", ",", "infile", ")", ":", "state", "=", "0", "# each slide is a dict", "slide", "=", "{", "}", "last_heading", "=", "0", "for", "item", "in", "self", ".", "generate_lines", "(", "infile", ")", ":", "line", "=", "item...
Process a file of rest and yield dictionaries
[ "Process", "a", "file", "of", "rest", "and", "yield", "dictionaries" ]
python
train
25.729167
genialis/resolwe
resolwe/elastic/builder.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/builder.py#L45-L51
def _get_cache_key(self, obj): """Derive cache key for given object.""" if obj is not None: # Make sure that key is REALLY unique. return '{}-{}'.format(id(self), obj.pk) return "{}-None".format(id(self))
[ "def", "_get_cache_key", "(", "self", ",", "obj", ")", ":", "if", "obj", "is", "not", "None", ":", "# Make sure that key is REALLY unique.", "return", "'{}-{}'", ".", "format", "(", "id", "(", "self", ")", ",", "obj", ".", "pk", ")", "return", "\"{}-None\"...
Derive cache key for given object.
[ "Derive", "cache", "key", "for", "given", "object", "." ]
python
train
35.285714
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/msvs.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/msvs.py#L1752-L1856
def projectEmitter(target, source, env): """Sets up the DSP dependencies.""" # todo: Not sure what sets source to what user has passed as target, # but this is what happens. When that is fixed, we also won't have # to make the user always append env['MSVSPROJECTSUFFIX'] to target. if source[0] == target[0]: source = [] # make sure the suffix is correct for the version of MSVS we're running. (base, suff) = SCons.Util.splitext(str(target[0])) suff = env.subst('$MSVSPROJECTSUFFIX') target[0] = base + suff if not source: source = 'prj_inputs:' source = source + env.subst('$MSVSSCONSCOM', 1) source = source + env.subst('$MSVSENCODING', 1) # Project file depends on CPPDEFINES and CPPPATH preprocdefs = xmlify(';'.join(processDefines(env.get('CPPDEFINES', [])))) includepath_Dirs = processIncludes(env.get('CPPPATH', []), env, None, None) includepath = xmlify(';'.join([str(x) for x in includepath_Dirs])) source = source + "; ppdefs:%s incpath:%s"%(preprocdefs, includepath) if 'buildtarget' in env and env['buildtarget'] != None: if SCons.Util.is_String(env['buildtarget']): source = source + ' "%s"' % env['buildtarget'] elif SCons.Util.is_List(env['buildtarget']): for bt in env['buildtarget']: if SCons.Util.is_String(bt): source = source + ' "%s"' % bt else: try: source = source + ' "%s"' % bt.get_abspath() except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None") else: try: source = source + ' "%s"' % env['buildtarget'].get_abspath() except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None") if 'outdir' in env and env['outdir'] != None: if SCons.Util.is_String(env['outdir']): source = source + ' "%s"' % env['outdir'] elif SCons.Util.is_List(env['outdir']): for s in env['outdir']: if SCons.Util.is_String(s): source = source + ' "%s"' % s else: try: source = source + ' "%s"' % s.get_abspath() except AttributeError: raise 
SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None") else: try: source = source + ' "%s"' % env['outdir'].get_abspath() except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None") if 'name' in env: if SCons.Util.is_String(env['name']): source = source + ' "%s"' % env['name'] else: raise SCons.Errors.InternalError("name must be a string") if 'variant' in env: if SCons.Util.is_String(env['variant']): source = source + ' "%s"' % env['variant'] elif SCons.Util.is_List(env['variant']): for variant in env['variant']: if SCons.Util.is_String(variant): source = source + ' "%s"' % variant else: raise SCons.Errors.InternalError("name must be a string or a list of strings") else: raise SCons.Errors.InternalError("variant must be a string or a list of strings") else: raise SCons.Errors.InternalError("variant must be specified") for s in _DSPGenerator.srcargs: if s in env: if SCons.Util.is_String(env[s]): source = source + ' "%s' % env[s] elif SCons.Util.is_List(env[s]): for t in env[s]: if SCons.Util.is_String(t): source = source + ' "%s"' % t else: raise SCons.Errors.InternalError(s + " must be a string or a list of strings") else: raise SCons.Errors.InternalError(s + " must be a string or a list of strings") source = source + ' "%s"' % str(target[0]) source = [SCons.Node.Python.Value(source)] targetlist = [target[0]] sourcelist = source if env.get('auto_build_solution', 1): env['projects'] = [env.File(t).srcnode() for t in targetlist] t, s = solutionEmitter(target, target, env) targetlist = targetlist + t # Beginning with Visual Studio 2010 for each project file (.vcxproj) we have additional file (.vcxproj.filters) version_num = 6.0 if 'MSVS_VERSION' in env: version_num, suite = msvs_parse_version(env['MSVS_VERSION']) if version_num >= 10.0: targetlist.append(targetlist[0] + '.filters') return (targetlist, sourcelist)
[ "def", "projectEmitter", "(", "target", ",", "source", ",", "env", ")", ":", "# todo: Not sure what sets source to what user has passed as target,", "# but this is what happens. When that is fixed, we also won't have", "# to make the user always append env['MSVSPROJECTSUFFIX'] to target.", ...
Sets up the DSP dependencies.
[ "Sets", "up", "the", "DSP", "dependencies", "." ]
python
train
47.466667
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/win32/kernel32.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/win32/kernel32.py#L726-L740
def wait(self, dwMilliseconds = None): """ Wait for the Win32 object to be signaled. @type dwMilliseconds: int @param dwMilliseconds: (Optional) Timeout value in milliseconds. Use C{INFINITE} or C{None} for no timeout. """ if self.value is None: raise ValueError("Handle is already closed!") if dwMilliseconds is None: dwMilliseconds = INFINITE r = WaitForSingleObject(self.value, dwMilliseconds) if r != WAIT_OBJECT_0: raise ctypes.WinError(r)
[ "def", "wait", "(", "self", ",", "dwMilliseconds", "=", "None", ")", ":", "if", "self", ".", "value", "is", "None", ":", "raise", "ValueError", "(", "\"Handle is already closed!\"", ")", "if", "dwMilliseconds", "is", "None", ":", "dwMilliseconds", "=", "INFI...
Wait for the Win32 object to be signaled. @type dwMilliseconds: int @param dwMilliseconds: (Optional) Timeout value in milliseconds. Use C{INFINITE} or C{None} for no timeout.
[ "Wait", "for", "the", "Win32", "object", "to", "be", "signaled", "." ]
python
train
36.8
gwastro/pycbc
pycbc/io/record.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/record.py#L366-L465
def add_fields(input_array, arrays, names=None, assubarray=False): """Adds the given array(s) as new field(s) to the given input array. Returns a new instance of the input_array with the new fields added. Parameters ---------- input_array : instance of a numpy.ndarray or numpy recarray The array to to add the fields to. arrays : (list of) numpy array(s) The arrays to add. If adding multiple arrays, must be a list; if adding a single array, can just be that array. names : (list of) strings Optional, the name(s) of the new fields in the output array. If adding multiple fields, must be a list of strings with the same length as the list of arrays. If None provided, names used will be the same as the name of the datatype in the given arrays. If the datatype has no name, the new field will be ``'fi'`` where i is the index of the array in arrays. assubarray : bool Add the list of arrays as a single subarray field. If True, and names provided, names should be a string or a length-1 sequence. Default is False, in which case each array will be added as a separate field. Returns ------- new_array : new instance of `input_array` A copy of the `input_array` with the desired fields added. 
""" if not isinstance(arrays, list): arrays = [arrays] # ensure that all arrays in arrays are arrays arrays = _ensure_array_list(arrays) # set the names if names is not None: if isinstance(names, string_types): names = [names] # check if any names are subarray names; if so, we have to add them # separately subarray_names = [name for name in names if len(name.split('.')) > 1] else: subarray_names = [] if any(subarray_names): subarrays = [arrays[ii] for ii,name in enumerate(names) \ if name in subarray_names] # group together by subarray groups = {} for name,arr in zip(subarray_names, subarrays): key = name.split('.')[0] subkey = '.'.join(name.split('.')[1:]) try: groups[key].append((subkey, arr)) except KeyError: groups[key] = [(subkey, arr)] # now cycle over the groups, adding all of the fields in each group # as a subarray for group_name in groups: # we'll create a dictionary out of the subarray field names -> # subarrays thisdict = dict(groups[group_name]) # check if the input array has this field; if so, remove it, then # add it back with the other new arrays if group_name in input_array.fieldnames: # get the data new_subarray = input_array[group_name] # add the new fields to the subarray new_subarray = add_fields(new_subarray, thisdict.values(), thisdict.keys()) # remove the original from the input array input_array = input_array.without_fields(group_name) else: new_subarray = thisdict.values() # add the new subarray to input_array as a subarray input_array = add_fields(input_array, new_subarray, names=group_name, assubarray=True) # set the subarray names input_array[group_name].dtype.names = thisdict.keys() # remove the subarray names from names keep_idx = [ii for ii,name in enumerate(names) \ if name not in subarray_names] names = [names[ii] for ii in keep_idx] # if there's nothing left, just return if names == []: return input_array # also remove the subarray arrays arrays = [arrays[ii] for ii in keep_idx] if assubarray: # merge all of the arrays into a 
single array if len(arrays) > 1: arrays = [merge_arrays(arrays, flatten=True)] # now merge all the fields as a single subarray merged_arr = numpy.empty(len(arrays[0]), dtype=[('f0', arrays[0].dtype.descr)]) merged_arr['f0'] = arrays[0] arrays = [merged_arr] merge_list = [input_array] + arrays if names is not None: names = list(input_array.dtype.names) + names # merge into a single array return merge_arrays(merge_list, names=names, flatten=True, outtype=type(input_array))
[ "def", "add_fields", "(", "input_array", ",", "arrays", ",", "names", "=", "None", ",", "assubarray", "=", "False", ")", ":", "if", "not", "isinstance", "(", "arrays", ",", "list", ")", ":", "arrays", "=", "[", "arrays", "]", "# ensure that all arrays in a...
Adds the given array(s) as new field(s) to the given input array. Returns a new instance of the input_array with the new fields added. Parameters ---------- input_array : instance of a numpy.ndarray or numpy recarray The array to to add the fields to. arrays : (list of) numpy array(s) The arrays to add. If adding multiple arrays, must be a list; if adding a single array, can just be that array. names : (list of) strings Optional, the name(s) of the new fields in the output array. If adding multiple fields, must be a list of strings with the same length as the list of arrays. If None provided, names used will be the same as the name of the datatype in the given arrays. If the datatype has no name, the new field will be ``'fi'`` where i is the index of the array in arrays. assubarray : bool Add the list of arrays as a single subarray field. If True, and names provided, names should be a string or a length-1 sequence. Default is False, in which case each array will be added as a separate field. Returns ------- new_array : new instance of `input_array` A copy of the `input_array` with the desired fields added.
[ "Adds", "the", "given", "array", "(", "s", ")", "as", "new", "field", "(", "s", ")", "to", "the", "given", "input", "array", ".", "Returns", "a", "new", "instance", "of", "the", "input_array", "with", "the", "new", "fields", "added", "." ]
python
train
44.23
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L1589-L1621
def gpio_set(self, pins, states): """Sets the state for one or more user-controllable GPIOs. For each of the given pins, sets the the corresponding state based on the index. Args: self (JLink): the ``JLink`` instance pins (list): list of GPIO indices states (list): list of states to set Returns: A list of updated states. Raises: JLinkException: on error. ValueError: if ``len(pins) != len(states)`` """ if len(pins) != len(states): raise ValueError('Length mismatch between pins and states.') size = len(pins) indices = (ctypes.c_uint8 * size)(*pins) states = (ctypes.c_uint8 * size)(*states) result_states = (ctypes.c_uint8 * size)() result = self._dll.JLINK_EMU_GPIO_SetState(ctypes.byref(indices), ctypes.byref(states), ctypes.byref(result_states), size) if result < 0: raise errors.JLinkException(result) return list(result_states)
[ "def", "gpio_set", "(", "self", ",", "pins", ",", "states", ")", ":", "if", "len", "(", "pins", ")", "!=", "len", "(", "states", ")", ":", "raise", "ValueError", "(", "'Length mismatch between pins and states.'", ")", "size", "=", "len", "(", "pins", ")"...
Sets the state for one or more user-controllable GPIOs. For each of the given pins, sets the the corresponding state based on the index. Args: self (JLink): the ``JLink`` instance pins (list): list of GPIO indices states (list): list of states to set Returns: A list of updated states. Raises: JLinkException: on error. ValueError: if ``len(pins) != len(states)``
[ "Sets", "the", "state", "for", "one", "or", "more", "user", "-", "controllable", "GPIOs", "." ]
python
train
35.393939
numenta/htmresearch
htmresearch/frameworks/layers/l2_l4_network_creation.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/l2_l4_network_creation.py#L296-L355
def createMultipleL4L2Columns(network, networkConfig): """ Create a network consisting of multiple columns. Each column contains one L4 and one L2, is identical in structure to the network created by createL4L2Column. In addition all the L2 columns are fully connected to each other through their lateral inputs. Region names have a column number appended as in externalInput_0, externalInput_1, etc. networkConfig must be of the following format (see createL4L2Column for further documentation): { "networkType": "MultipleL4L2Columns", "numCorticalColumns": 3, "externalInputSize": 1024, "sensorInputSize": 1024, "L4Params": { <constructor parameters for ApicalTMPairRegion }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } } """ # Create each column numCorticalColumns = networkConfig["numCorticalColumns"] for i in xrange(numCorticalColumns): networkConfigCopy = copy.deepcopy(networkConfig) layerConfig = networkConfigCopy["L2Params"] layerConfig["seed"] = layerConfig.get("seed", 42) + i layerConfig["numOtherCorticalColumns"] = numCorticalColumns - 1 suffix = "_" + str(i) network = createL4L2Column(network, networkConfigCopy, suffix) # Now connect the L2 columns laterally for i in range(networkConfig["numCorticalColumns"]): suffixSrc = "_" + str(i) for j in range(networkConfig["numCorticalColumns"]): if i != j: suffixDest = "_" + str(j) network.link( "L2Column" + suffixSrc, "L2Column" + suffixDest, "UniformLink", "", srcOutput="feedForwardOutput", destInput="lateralInput", propagationDelay=1) enableProfiling(network) return network
[ "def", "createMultipleL4L2Columns", "(", "network", ",", "networkConfig", ")", ":", "# Create each column", "numCorticalColumns", "=", "networkConfig", "[", "\"numCorticalColumns\"", "]", "for", "i", "in", "xrange", "(", "numCorticalColumns", ")", ":", "networkConfigCop...
Create a network consisting of multiple columns. Each column contains one L4 and one L2, is identical in structure to the network created by createL4L2Column. In addition all the L2 columns are fully connected to each other through their lateral inputs. Region names have a column number appended as in externalInput_0, externalInput_1, etc. networkConfig must be of the following format (see createL4L2Column for further documentation): { "networkType": "MultipleL4L2Columns", "numCorticalColumns": 3, "externalInputSize": 1024, "sensorInputSize": 1024, "L4Params": { <constructor parameters for ApicalTMPairRegion }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } }
[ "Create", "a", "network", "consisting", "of", "multiple", "columns", ".", "Each", "column", "contains", "one", "L4", "and", "one", "L2", "is", "identical", "in", "structure", "to", "the", "network", "created", "by", "createL4L2Column", ".", "In", "addition", ...
python
train
31.683333
mozilla/amo-validator
validator/contextgenerator.py
https://github.com/mozilla/amo-validator/blob/0251bfbd7d93106e01ecdb6de5fcd1dc1a180664/validator/contextgenerator.py#L108-L120
def get_line(self, position): 'Returns the line number that the given string position is found on' datalen = len(self.data) count = len(self.data[0]) line = 1 while count < position: if line >= datalen: break count += len(self.data[line]) + 1 line += 1 return line
[ "def", "get_line", "(", "self", ",", "position", ")", ":", "datalen", "=", "len", "(", "self", ".", "data", ")", "count", "=", "len", "(", "self", ".", "data", "[", "0", "]", ")", "line", "=", "1", "while", "count", "<", "position", ":", "if", ...
Returns the line number that the given string position is found on
[ "Returns", "the", "line", "number", "that", "the", "given", "string", "position", "is", "found", "on" ]
python
train
27.230769
F5Networks/f5-common-python
f5/bigip/resource.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/resource.py#L783-L819
def get_collection(self, **kwargs): """Get an iterator of Python ``Resource`` objects that represent URIs. The returned objects are Pythonic `Resource`s that map to the most recently `refreshed` state of uris-resources published by the device. In order to instantiate the correct types, the concrete subclass must populate its registry with acceptable types, based on the `kind` field returned by the REST server. .. note:: This method implies a single REST transaction with the Collection subclass URI. :raises: UnregisteredKind :returns: list of reference dicts and Python ``Resource`` objects """ list_of_contents = [] self.refresh(**kwargs) if 'items' in self.__dict__: for item in self.items: # It's possible to have non-"kind" JSON returned. We just # append the corresponding dict. PostProcessing is the caller's # responsibility. if 'kind' not in item: list_of_contents.append(item) continue kind = item['kind'] if kind in self._meta_data['attribute_registry']: # If it has a kind, it must be registered. instance = self._meta_data['attribute_registry'][kind](self) instance._local_update(item) instance._activate_URI(instance.selfLink) list_of_contents.append(instance) else: error_message = '%r is not registered!' % kind raise UnregisteredKind(error_message) return list_of_contents
[ "def", "get_collection", "(", "self", ",", "*", "*", "kwargs", ")", ":", "list_of_contents", "=", "[", "]", "self", ".", "refresh", "(", "*", "*", "kwargs", ")", "if", "'items'", "in", "self", ".", "__dict__", ":", "for", "item", "in", "self", ".", ...
Get an iterator of Python ``Resource`` objects that represent URIs. The returned objects are Pythonic `Resource`s that map to the most recently `refreshed` state of uris-resources published by the device. In order to instantiate the correct types, the concrete subclass must populate its registry with acceptable types, based on the `kind` field returned by the REST server. .. note:: This method implies a single REST transaction with the Collection subclass URI. :raises: UnregisteredKind :returns: list of reference dicts and Python ``Resource`` objects
[ "Get", "an", "iterator", "of", "Python", "Resource", "objects", "that", "represent", "URIs", "." ]
python
train
45.945946
learningequality/iceqube
src/iceqube/storage/backends/inmem.py
https://github.com/learningequality/iceqube/blob/97ac9e0f65bfedb0efa9f94638bcb57c7926dea2/src/iceqube/storage/backends/inmem.py#L93-L116
def schedule_job(self, j): """ Add the job given by j to the job queue. Note: Does not actually run the job. """ job_id = uuid.uuid4().hex j.job_id = job_id session = self.sessionmaker() orm_job = ORMJob( id=job_id, state=j.state, app=self.app, namespace=self.namespace, obj=j) session.add(orm_job) try: session.commit() except Exception as e: logging.error( "Got an error running session.commit(): {}".format(e)) return job_id
[ "def", "schedule_job", "(", "self", ",", "j", ")", ":", "job_id", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "j", ".", "job_id", "=", "job_id", "session", "=", "self", ".", "sessionmaker", "(", ")", "orm_job", "=", "ORMJob", "(", "id", "=", ...
Add the job given by j to the job queue. Note: Does not actually run the job.
[ "Add", "the", "job", "given", "by", "j", "to", "the", "job", "queue", "." ]
python
train
25.125
openego/eDisGo
edisgo/grid/network.py
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L2035-L2054
def _check_timeindex(self, timeseries): """ Raises an error if time index of storage time series does not comply with the time index of load and feed-in time series. Parameters ----------- timeseries : :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power the storage is charged (negative) and discharged (positive) with in kW in column 'p' and reactive power in kVA in column 'q'. """ try: timeseries.loc[self.edisgo.network.timeseries.timeindex] except: message = 'Time index of storage time series does not match ' \ 'with load and feed-in time series.' logging.error(message) raise KeyError(message)
[ "def", "_check_timeindex", "(", "self", ",", "timeseries", ")", ":", "try", ":", "timeseries", ".", "loc", "[", "self", ".", "edisgo", ".", "network", ".", "timeseries", ".", "timeindex", "]", "except", ":", "message", "=", "'Time index of storage time series ...
Raises an error if time index of storage time series does not comply with the time index of load and feed-in time series. Parameters ----------- timeseries : :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power the storage is charged (negative) and discharged (positive) with in kW in column 'p' and reactive power in kVA in column 'q'.
[ "Raises", "an", "error", "if", "time", "index", "of", "storage", "time", "series", "does", "not", "comply", "with", "the", "time", "index", "of", "load", "and", "feed", "-", "in", "time", "series", "." ]
python
train
39.1
aio-libs/yarl
yarl/__init__.py
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L873-L891
def with_query(self, *args, **kwargs): """Return a new URL with query part replaced. Accepts any Mapping (e.g. dict, multidict.MultiDict instances) or str, autoencode the argument if needed. A sequence of (key, value) pairs is supported as well. It also can take an arbitrary number of keyword arguments. Clear query if None is passed. """ # N.B. doesn't cleanup query/fragment new_query = self._get_str_query(*args, **kwargs) return URL( self._val._replace(path=self._val.path, query=new_query), encoded=True )
[ "def", "with_query", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# N.B. doesn't cleanup query/fragment", "new_query", "=", "self", ".", "_get_str_query", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "URL", "(", "self", ...
Return a new URL with query part replaced. Accepts any Mapping (e.g. dict, multidict.MultiDict instances) or str, autoencode the argument if needed. A sequence of (key, value) pairs is supported as well. It also can take an arbitrary number of keyword arguments. Clear query if None is passed.
[ "Return", "a", "new", "URL", "with", "query", "part", "replaced", "." ]
python
train
31.526316
emory-libraries/eulfedora
eulfedora/views.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/views.py#L175-L360
def raw_datastream_old(request, pid, dsid, type=None, repo=None, headers=None, accept_range_request=False, as_of_date=None, streaming=False): ''' .. NOTE:: This version of :meth:`raw_datastream` is deprecated, and you should update to the new :meth:`raw_datastream`. This version is still available if you are using a version of Fedora prior to 3.7 and need the additional functionality. View to display a raw datastream that belongs to a Fedora Object. Returns an :class:`~django.http.HttpResponse` with the response content populated with the content of the datastream. The following HTTP headers may be included in all the responses: - Content-Type: mimetype of the datastream in Fedora - ETag: datastream checksum, as long as the checksum type is not 'DISABLED' The following HTTP headers may be set if the appropriate content is included in the datastream metadata: - Content-MD5: MD5 checksum of the datastream in Fedora, if available - Content-Length: size of the datastream in Fedora If either the datastream or object are not found, raises an :class:`~django.http.Http404` . For any other errors (e.g., permission denied by Fedora), the exception is re-raised and should be handled elsewhere. 
:param request: HttpRequest :param pid: Fedora object PID :param dsid: datastream ID to be returned :param type: custom object type (should extend :class:`~eulcore.fedora.models.DigitalObject`) (optional) :param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use, in case your application requires custom repository initialization (optional) :param headers: dictionary of additional headers to include in the response :param accept_range_request: enable HTTP Range requests (disabled by default) :param as_of_date: access a historical version of the datastream :param streaming: if True, response will be returned as an instance of :class:`django.http.StreamingHttpResponse` instead of :class:`django.http.HttpResponse`; intended for use with large datastreams, defaults to False. ''' if repo is None: repo = Repository() if headers is None: headers = {} get_obj_opts = {} if type is not None: get_obj_opts['type'] = type obj = repo.get_object(pid, **get_obj_opts) range_request = False partial_request = False try: # NOTE: we could test that pid is actually the requested # obj.has_requisite_content_models but that would mean # an extra API call for every datastream but RELS-EXT # Leaving out for now, for efficiency ds = obj.getDatastreamObject(dsid, as_of_date=as_of_date) if ds and ds.exists: # because retrieving the content is expensive and checking # headers can be useful, explicitly support HEAD requests if request.method == 'HEAD': content = '' elif accept_range_request and request.META.get('HTTP_RANGE', None) is not None: rng = request.META['HTTP_RANGE'] logger.debug('HTTP Range request: %s', rng) range_request = True kind, numbers = rng.split('=') if kind != 'bytes': return HttpResponseRangeNotSatisfiable() try: start, end = numbers.split('-') # NOTE: could potentially be complicated stuff like # this: 0-999,1002-9999,1-9999 # for now, only support the simple case of a single range except ValueError: return HttpResponseRangeNotSatisfiable() start = 
int(start) if not end: end = ds.info.size - 1 else: end = int(end) # ignore requests where end is before start if end < start: return HttpResponseRangeNotSatisfiable() if start == end: # safari sends this (weird?); don't 500 partial_length = 0 partial_request = True content = '' # special case for bytes=0- elif start == 0 and end == (ds.info.size - 1): # set chunksize and end so range headers can be set on response # partial_length= ds.info.size partial_length = end - start content = ds.get_chunked_content() # range with *NOT* full content requested elif start != 0 or end != (ds.info.size - 1): partial_request = True partial_length = end - start # chunksize = min(end - start, 4096) # sample chunk 370726-3005759 content = get_range_content(ds, start, end) else: # get the datastream content in chunks, to handle larger datastreams content = ds.get_chunked_content() # not using serialize(pretty=True) for XML/RDF datastreams, since # we actually want the raw datastream content. http_response_class = HttpResponse if streaming: http_response_class = StreamingHttpResponse response = http_response_class(content, content_type=ds.mimetype) # NOTE: might want to use StreamingHttpResponse here, at least # over some size threshold or for range requests # if we have a checksum, use it as an ETag # (but checksum not valid when sending partial content) if ds.checksum_type != 'DISABLED' and not partial_request: response['ETag'] = ds.checksum # ds.created is the creation date of this *version* of the datastream, # so it is effectively our last-modified date response['Last-Modified'] = ds.created # Where available, set content length & MD5 checksum in response headers. 
# (but checksum not valid when sending partial content) if ds.checksum_type == 'MD5' and not partial_request: response['Content-MD5'] = ds.checksum if ds.info.size and not range_request: response['Content-Length'] = ds.info.size if ds.info.size and accept_range_request: response['Accept-Ranges'] = 'bytes' # response['Content-Range'] = '0,%d/%d' % (ds.info.size, ds.info.size) # if partial request, status should be 206 (even for whole file?) if range_request: response.status_code = 206 if partial_request: response['Content-Length'] = partial_length else: response['Content-Length'] = ds.info.size cont_range = 'bytes %d-%d/%d' % (start, end, ds.info.size) response['Content-Range'] = cont_range logger.debug('Content-Length=%s Content-Range=%s', partial_length, cont_range) # set any user-specified headers that were passed in for header, val in six.iteritems(headers): response[header] = val # Fix for old Fedora data bug where the `Content-Length` # was -1. IF it is -1 we're just going to get rid of it. # Setting the value to an arbitrary value led to issues. if int(response['Content-Length']) < 0: del response['Content-Length'] return response else: raise Http404 except RequestFailed as rf: # if object is not the speficied type or if either the object # or the requested datastream doesn't exist, 404 if rf.code == 404 or \ (type is not None and not obj.has_requisite_content_models) or \ not getattr(obj, dsid).exists or not obj.exists: raise Http404 # for anything else, re-raise & let Django's default 500 logic handle it raise
[ "def", "raw_datastream_old", "(", "request", ",", "pid", ",", "dsid", ",", "type", "=", "None", ",", "repo", "=", "None", ",", "headers", "=", "None", ",", "accept_range_request", "=", "False", ",", "as_of_date", "=", "None", ",", "streaming", "=", "Fals...
.. NOTE:: This version of :meth:`raw_datastream` is deprecated, and you should update to the new :meth:`raw_datastream`. This version is still available if you are using a version of Fedora prior to 3.7 and need the additional functionality. View to display a raw datastream that belongs to a Fedora Object. Returns an :class:`~django.http.HttpResponse` with the response content populated with the content of the datastream. The following HTTP headers may be included in all the responses: - Content-Type: mimetype of the datastream in Fedora - ETag: datastream checksum, as long as the checksum type is not 'DISABLED' The following HTTP headers may be set if the appropriate content is included in the datastream metadata: - Content-MD5: MD5 checksum of the datastream in Fedora, if available - Content-Length: size of the datastream in Fedora If either the datastream or object are not found, raises an :class:`~django.http.Http404` . For any other errors (e.g., permission denied by Fedora), the exception is re-raised and should be handled elsewhere. :param request: HttpRequest :param pid: Fedora object PID :param dsid: datastream ID to be returned :param type: custom object type (should extend :class:`~eulcore.fedora.models.DigitalObject`) (optional) :param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use, in case your application requires custom repository initialization (optional) :param headers: dictionary of additional headers to include in the response :param accept_range_request: enable HTTP Range requests (disabled by default) :param as_of_date: access a historical version of the datastream :param streaming: if True, response will be returned as an instance of :class:`django.http.StreamingHttpResponse` instead of :class:`django.http.HttpResponse`; intended for use with large datastreams, defaults to False.
[ "..", "NOTE", "::" ]
python
train
43.650538
openstax/cnx-easybake
cnxeasybake/oven.py
https://github.com/openstax/cnx-easybake/blob/f8edf018fb7499f6f18af0145c326b93a737a782/cnxeasybake/oven.py#L709-L821
def eval_string_value(self, element, value): """Evaluate parsed string. Returns a list of current and delayed values. """ strval = '' vals = [] for term in value: if type(term) is ast.WhitespaceToken: pass elif type(term) is ast.StringToken: strval += term.value elif type(term) is ast.IdentToken: log(DEBUG, u"IdentToken as string: {}".format( term.value).encode('utf-8')) strval += term.value elif type(term) is ast.LiteralToken: log(DEBUG, u"LiteralToken as string: {}".format( term.value).encode('utf-8')) strval += term.value elif type(term) is ast.FunctionBlock: if term.name == 'string': str_args = split(term.arguments, ',') str_name = self.eval_string_value(element, str_args[0])[0] val = self.lookup('strings', str_name) if val == '': if len(str_args) > 1: val = self.eval_string_value(element, str_args[1])[0] else: log(WARN, u"{} blank string" .format(str_name).encode('utf-8')) strval += val elif term.name == u'attr': att_args = split(term.arguments, ',') att_name = self.eval_string_value(element, att_args[0])[0] att_def = '' if len(att_args) > 1: att_def = self.eval_string_value(element, att_args[1])[0] if '|' in att_name: ns, att = att_name.split('|') try: ns = self.css_namespaces[ns] except KeyError: log(WARN, u"Undefined namespace prefix {}" .format(ns).encode('utf-8')) continue att_name = etree.QName(ns, att) strval += element.etree_element.get(att_name, att_def) elif term.name == u'uuid': strval += self.generate_id() elif term.name == u'content': strval += etree.tostring(element.etree_element, encoding='unicode', method='text', with_tail=False) elif term.name.startswith('target-'): if strval: vals.append(strval) strval = '' target_args = split(term.arguments, ',') vref = self.eval_string_value(element, target_args[0])[0] vname = self.eval_string_value(element, target_args[1])[0] vtype = term.name[7:]+'s' vals.append(TargetVal(self, vref[1:], vname, vtype)) elif term.name == u'first-letter': tmpstr = self.eval_string_value(element, term.arguments) if tmpstr: 
if isinstance(tmpstr[0], basestring): strval += tmpstr[0][0] else: log(WARN, u"Bad string value:" u" nested target-* not allowed. " u"{}".format( serialize(value)).encode( 'utf-8')) # FIXME can we do delayed first-letter elif term.name == 'counter': counterargs = [serialize(t).strip(" \'") for t in split(term.arguments, ',')] count = self.lookup('counters', counterargs) strval += str(count) elif term.name == u'pending': log(WARN, u"Bad string value: pending() not allowed. " u"{}".format(serialize(value)).encode( 'utf-8')) else: log(WARN, u"Bad string value: unknown function: {}. " u"{}".format(term.name, serialize(value)).encode( 'utf-8')) if strval: vals.append(strval) return vals
[ "def", "eval_string_value", "(", "self", ",", "element", ",", "value", ")", ":", "strval", "=", "''", "vals", "=", "[", "]", "for", "term", "in", "value", ":", "if", "type", "(", "term", ")", "is", "ast", ".", "WhitespaceToken", ":", "pass", "elif", ...
Evaluate parsed string. Returns a list of current and delayed values.
[ "Evaluate", "parsed", "string", "." ]
python
train
42.654867
alimanfoo/csvvalidator
csvvalidator.py
https://github.com/alimanfoo/csvvalidator/blob/50a86eefdc549c48f65a91a5c0a66099010ee65d/csvvalidator.py#L943-L955
def match_pattern(regex): """ Return a value check function which raises a ValueError if the value does not match the supplied regular expression, see also `re.match`. """ prog = re.compile(regex) def checker(v): result = prog.match(v) if result is None: raise ValueError(v) return checker
[ "def", "match_pattern", "(", "regex", ")", ":", "prog", "=", "re", ".", "compile", "(", "regex", ")", "def", "checker", "(", "v", ")", ":", "result", "=", "prog", ".", "match", "(", "v", ")", "if", "result", "is", "None", ":", "raise", "ValueError"...
Return a value check function which raises a ValueError if the value does not match the supplied regular expression, see also `re.match`.
[ "Return", "a", "value", "check", "function", "which", "raises", "a", "ValueError", "if", "the", "value", "does", "not", "match", "the", "supplied", "regular", "expression", "see", "also", "re", ".", "match", "." ]
python
valid
25.769231