repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/query.py#L377-L429
def _cursor_helper(self, document_fields, before, start):
    """Build a copy of this query carrying a ``start_at``/``end_at`` cursor.

    The cursor values are later serialized into the query protobuf; when
    the query is sent to the server, ``document_fields`` are matched in
    the order given by fields set via
    :meth:`~.firestore_v1beta1.query.Query.order_by`.

    Args:
        document_fields (Union[~.firestore_v1beta1.\
            document.DocumentSnapshot, dict, list, tuple]): a document
            snapshot or a dictionary/list/tuple of fields describing a
            position in the query result set.
        before (bool): if :data:`True`, the cursor document itself is
            excluded from the result set; if :data:`False`, it is included.
        start (Optional[bool]): :data:`True` for a ``start_at`` cursor,
            :data:`False` for an ``end_at`` cursor.

    Returns:
        ~.firestore_v1beta1.query.Query: A copy of the current query,
        modified with the newly added cursor.

    Raises:
        ValueError: if a snapshot from a different collection is given.
    """
    if isinstance(document_fields, tuple):
        document_fields = list(document_fields)
    elif isinstance(document_fields, document.DocumentSnapshot):
        if document_fields.reference._path[:-1] != self._parent._path:
            raise ValueError(
                "Cannot use snapshot from another collection as a cursor."
            )
    else:
        # Deep-copy so that later caller-side mutation cannot leak into
        # the constructed query.
        document_fields = copy.deepcopy(document_fields)

    cursor = (document_fields, before)
    kwargs = {
        "projection": self._projection,
        "field_filters": self._field_filters,
        "orders": self._orders,
        "limit": self._limit,
        "offset": self._offset,
        "start_at": cursor if start else self._start_at,
        "end_at": self._end_at if start else cursor,
    }
    return self.__class__(self._parent, **kwargs)
[ "def", "_cursor_helper", "(", "self", ",", "document_fields", ",", "before", ",", "start", ")", ":", "if", "isinstance", "(", "document_fields", ",", "tuple", ")", ":", "document_fields", "=", "list", "(", "document_fields", ")", "elif", "isinstance", "(", "...
Set values to be used for a ``start_at`` or ``end_at`` cursor. The values will later be used in a query protobuf. When the query is sent to the server, the ``document_fields`` will be used in the order given by fields set by :meth:`~.firestore_v1beta1.query.Query.order_by`. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. before (bool): Flag indicating if the document in ``document_fields`` should (:data:`False`) or shouldn't (:data:`True`) be included in the result set. start (Optional[bool]): determines if the cursor is a ``start_at`` cursor (:data:`True`) or an ``end_at`` cursor (:data:`False`). Returns: ~.firestore_v1beta1.query.Query: A query with cursor. Acts as a copy of the current query, modified with the newly added "start at" cursor.
[ "Set", "values", "to", "be", "used", "for", "a", "start_at", "or", "end_at", "cursor", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/rbac_authorization_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/rbac_authorization_v1_api.py#L143-L166
def create_cluster_role_binding(self, body, **kwargs):  # noqa: E501
    """create_cluster_role_binding  # noqa: E501

    create a ClusterRoleBinding  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_cluster_role_binding(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1ClusterRoleBinding body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1ClusterRoleBinding
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req was passed and the
    # already-unwrapped response data otherwise, so both modes reduce to
    # returning its result directly.
    result = self.create_cluster_role_binding_with_http_info(body, **kwargs)  # noqa: E501
    return result
[ "def", "create_cluster_role_binding", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", ...
create_cluster_role_binding # noqa: E501 create a ClusterRoleBinding # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_cluster_role_binding(body, async_req=True) >>> result = thread.get() :param async_req bool :param V1ClusterRoleBinding body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ClusterRoleBinding If the method is called asynchronously, returns the request thread.
[ "create_cluster_role_binding", "#", "noqa", ":", "E501" ]
python
train
hadrianl/huobi
huobitrade/service.py
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L729-L747
def get_last_depth(self, symbol, _type):
    """Build a decorator that feeds market-depth data to its target.

    :param symbol: trading pair symbol
    :param _type: one of { percent10, step0, step1, step2, step3, step4,
        step5 }
    :return: a decorator; the decorated callable, when invoked with no
        arguments, is called with the result of the HTTP depth request
    """
    params = {'symbol': symbol, 'type': _type}
    url = u.MARKET_URL + '/market/depth'

    def decorator(func):
        @wraps(func)
        def inner():
            func(http_get_request(url, params))
        return inner

    return decorator
[ "def", "get_last_depth", "(", "self", ",", "symbol", ",", "_type", ")", ":", "params", "=", "{", "'symbol'", ":", "symbol", ",", "'type'", ":", "_type", "}", "url", "=", "u", ".", "MARKET_URL", "+", "'/market/depth'", "def", "_wrapper", "(", "_func", "...
获取marketdepth :param symbol :param type: 可选值:{ percent10, step0, step1, step2, step3, step4, step5 } :return:
[ "获取marketdepth", ":", "param", "symbol", ":", "param", "type", ":", "可选值:", "{", "percent10", "step0", "step1", "step2", "step3", "step4", "step5", "}", ":", "return", ":" ]
python
train
developmentseed/landsat-util
landsat/mixins.py
https://github.com/developmentseed/landsat-util/blob/92dc81771ddaa64a8a9124a89a6516b52485374b/landsat/mixins.py#L102-L115
def _print(self, msg, color=None, arrow=False, indent=None): """ Print the msg with the color provided. """ if color: msg = colored(msg, color) if arrow: msg = colored('===> ', 'blue') + msg if indent: msg = (' ' * indent) + msg print(msg) return msg
[ "def", "_print", "(", "self", ",", "msg", ",", "color", "=", "None", ",", "arrow", "=", "False", ",", "indent", "=", "None", ")", ":", "if", "color", ":", "msg", "=", "colored", "(", "msg", ",", "color", ")", "if", "arrow", ":", "msg", "=", "co...
Print the msg with the color provided.
[ "Print", "the", "msg", "with", "the", "color", "provided", "." ]
python
train
google/apitools
apitools/gen/util.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L137-L142
def MethodName(self, name, separator='_'):
    """Generate a valid method name from name.

    Returns None when ``name`` is None; otherwise camel-cases ``name``
    on ``separator`` boundaries and sanitizes it via ``CleanName``.
    """
    if name is None:
        return None
    camel = Names.__ToCamel(name, separator=separator)
    return Names.CleanName(camel)
[ "def", "MethodName", "(", "self", ",", "name", ",", "separator", "=", "'_'", ")", ":", "if", "name", "is", "None", ":", "return", "None", "name", "=", "Names", ".", "__ToCamel", "(", "name", ",", "separator", "=", "separator", ")", "return", "Names", ...
Generate a valid method name from name.
[ "Generate", "a", "valid", "method", "name", "from", "name", "." ]
python
train
Anaconda-Platform/anaconda-client
binstar_client/inspect_package/conda.py
https://github.com/Anaconda-Platform/anaconda-client/blob/b276f0572744c73c184a8b43a897cfa7fc1dc523/binstar_client/inspect_package/conda.py#L59-L77
def get_subdir(index):
    """Return the repository sub-directory for an index dictionary.

    Resolution order:

    1. an explicit 'subdir' entry wins;
    2. a missing or None 'arch' maps to 'noarch';
    3. otherwise '<platform>-<bits>', where 'x86' becomes '32' and
       'x86_64' becomes '64' (any other arch passes through unchanged).
    """
    if 'subdir' in index:
        return index['subdir']
    arch = index.get('arch')
    if arch is None:
        return 'noarch'
    bits = {'x86': '32', 'x86_64': '64'}.get(arch, arch)
    return '%s-%s' % (index.get('platform'), bits)
[ "def", "get_subdir", "(", "index", ")", ":", "try", ":", "return", "index", "[", "'subdir'", "]", "except", "KeyError", ":", "arch", "=", "index", ".", "get", "(", "'arch'", ")", "if", "arch", "is", "None", ":", "return", "'noarch'", "intel_map", "=", ...
Return the sub-directory given the index dictionary. The return value is obtained in the following order: 1. when the 'subdir' key exists, it's value is returned 2. if the 'arch' is None, or does not exist, 'noarch' is returned 3. otherwise, the return value is constructed from the 'platform' key and the 'arch' key (where 'x86' is replaced by '32', and 'x86_64' by '64')
[ "Return", "the", "sub", "-", "directory", "given", "the", "index", "dictionary", ".", "The", "return", "value", "is", "obtained", "in", "the", "following", "order", ":" ]
python
train
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L402-L414
def create_domain(self, service_id, version_number, name, comment=None):
    """Create a domain for a particular service and version.

    :param service_id: identifier of the service
    :param version_number: version of the service to attach the domain to
    :param name: domain name to create
    :param comment: optional free-form comment
    :returns: the newly created ``FastlyDomain``
    """
    form = {
        "name": name,
        "comment": comment,
    }
    body = self._formdata(form, FastlyDomain.FIELDS)
    endpoint = "/service/%s/version/%d/domain" % (service_id, version_number)
    content = self._fetch(endpoint, method="POST", body=body)
    return FastlyDomain(self, content)
[ "def", "create_domain", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ",", "comment", "=", "None", ")", ":", "body", "=", "self", ".", "_formdata", "(", "{", "\"name\"", ":", "name", ",", "\"comment\"", ":", "comment", ",", "}", "...
Create a domain for a particular service and version.
[ "Create", "a", "domain", "for", "a", "particular", "service", "and", "version", "." ]
python
train
sci-bots/serial-device
serial_device/mqtt.py
https://github.com/sci-bots/serial-device/blob/5de1c3fc447ae829b57d80073ec6ac4fba3283c6/serial_device/mqtt.py#L108-L138
def on_message(self, client, userdata, msg):
    '''
    Callback for when a ``PUBLISH`` message is received from the broker.

    Recognized topics:

     - ``serial_device/refresh_comports``: refresh the list of serial
       ports (no payload).
     - ``serial_device/<port>/send``: payload is raw bytes to transmit.
     - ``serial_device/<port>/connect``: payload is a JSON-encoded
       connection request.
     - ``serial_device/<port>/close``: close the port.

    Other topics are matched against the ``CRE_MANAGER`` regular
    expression (which must define ``command`` and ``port`` groups);
    non-matching topics are only logged at debug level.
    '''
    if msg.topic == 'serial_device/refresh_comports':
        self.refresh_comports()
        return
    match = CRE_MANAGER.match(msg.topic)
    if match is None:
        logger.debug('Topic NOT matched: `%s`', msg.topic)
    else:
        logger.debug('Topic matched: `%s`', msg.topic)
        # Message topic matches command.  Handle request.
        command = match.group('command')
        port = match.group('port')
        # serial_device/<port>/send
        # Bytes to send
        if command == 'send':
            self._serial_send(port, msg.payload)
        elif command == 'connect':
            # serial_device/<port>/connect
            # Request connection; payload must be valid JSON or the
            # request is logged and dropped.
            try:
                request = json.loads(msg.payload)
            except ValueError as exception:
                logger.error('Error decoding "%s (%s)" request: %s',
                             command, port, exception)
                return
            self._serial_connect(port, request)
        elif command == 'close':
            self._serial_close(port)
[ "def", "on_message", "(", "self", ",", "client", ",", "userdata", ",", "msg", ")", ":", "if", "msg", ".", "topic", "==", "'serial_device/refresh_comports'", ":", "self", ".", "refresh_comports", "(", ")", "return", "match", "=", "CRE_MANAGER", ".", "match", ...
Callback for when a ``PUBLISH`` message is received from the broker.
[ "Callback", "for", "when", "a", "PUBLISH", "message", "is", "received", "from", "the", "broker", "." ]
python
train
matllubos/django-is-core
is_core/utils/__init__.py
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/utils/__init__.py#L284-L298
def get_url_from_model_core(request, obj):
    """Return the detail URL of ``obj`` from its registered model core.

    Returns None when no core is registered, the core has no UI patterns,
    there is no 'detail' pattern, or the requester lacks GET permission.
    """
    from is_core.site import get_model_core

    core = get_model_core(obj.__class__)
    if not (core and hasattr(core, 'ui_patterns')):
        return None

    detail_pattern = core.ui_patterns.get('detail')
    if detail_pattern and detail_pattern.has_permission('get', request, obj=obj):
        return detail_pattern.get_url_string(request, obj=obj)
    return None
[ "def", "get_url_from_model_core", "(", "request", ",", "obj", ")", ":", "from", "is_core", ".", "site", "import", "get_model_core", "model_core", "=", "get_model_core", "(", "obj", ".", "__class__", ")", "if", "model_core", "and", "hasattr", "(", "model_core", ...
Returns object URL from model core.
[ "Returns", "object", "URL", "from", "model", "core", "." ]
python
train
MolSSI-BSE/basis_set_exchange
basis_set_exchange/api.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/api.py#L539-L546
def has_basis_notes(family, data_dir=None):
    '''Check if notes exist for a given basis set family.

    Returns True if the notes file exists, False otherwise.
    '''
    return os.path.isfile(_basis_notes_path(family, data_dir))
[ "def", "has_basis_notes", "(", "family", ",", "data_dir", "=", "None", ")", ":", "file_path", "=", "_basis_notes_path", "(", "family", ",", "data_dir", ")", "return", "os", ".", "path", ".", "isfile", "(", "file_path", ")" ]
Check if notes exist for a given basis set Returns True if they exist, false otherwise
[ "Check", "if", "notes", "exist", "for", "a", "given", "basis", "set" ]
python
train
AoiKuiyuyou/AoikLiveReload
src/aoiklivereload/aoiklivereload.py
https://github.com/AoiKuiyuyou/AoikLiveReload/blob/0d5adb12118a33749e6690a8165fdb769cff7d5c/src/aoiklivereload/aoiklivereload.py#L263-L315
def _find_short_paths(self, paths): """ Find short paths of given paths. E.g. if both `/home` and `/home/aoik` exist, only keep `/home`. :param paths: Paths. :return: Set of short paths. """ # Split each path to parts. # E.g. '/home/aoik' to ['', 'home', 'aoik'] path_parts_s = [path.split(os.path.sep) for path in paths] # Root node root_node = {} # Sort these path parts by length, with the longest being the first. # # Longer paths appear first so that their extra parts are discarded # when a shorter path is found at 5TQ8L. # # Then for each path's parts. for parts in sorted(path_parts_s, key=len, reverse=True): # Start from the root node node = root_node # For each part of the path for part in parts: # Create node of the path node = node.setdefault(part, {}) # 5TQ8L # Clear the last path part's node's child nodes. # # This aims to keep only the shortest path that needs be watched. # node.clear() # Short paths short_path_s = set() # Collect leaf paths self._collect_leaf_paths( node=root_node, path_parts=(), leaf_paths=short_path_s, ) # Return short paths return short_path_s
[ "def", "_find_short_paths", "(", "self", ",", "paths", ")", ":", "# Split each path to parts.", "# E.g. '/home/aoik' to ['', 'home', 'aoik']", "path_parts_s", "=", "[", "path", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "for", "path", "in", "paths", ...
Find short paths of given paths. E.g. if both `/home` and `/home/aoik` exist, only keep `/home`. :param paths: Paths. :return: Set of short paths.
[ "Find", "short", "paths", "of", "given", "paths", "." ]
python
train
CodeReclaimers/neat-python
neat/genome.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/genome.py#L479-L488
def connect_fs_neat_nohidden(self, config):
    """
    Randomly connect one input to all output nodes
    (FS-NEAT without connections to hidden, if any).
    Originally connect_fs_neat.
    """
    chosen_input = choice(config.input_keys)
    for out_key in config.output_keys:
        conn = self.create_connection(config, chosen_input, out_key)
        self.connections[conn.key] = conn
[ "def", "connect_fs_neat_nohidden", "(", "self", ",", "config", ")", ":", "input_id", "=", "choice", "(", "config", ".", "input_keys", ")", "for", "output_id", "in", "config", ".", "output_keys", ":", "connection", "=", "self", ".", "create_connection", "(", ...
Randomly connect one input to all output nodes (FS-NEAT without connections to hidden, if any). Originally connect_fs_neat.
[ "Randomly", "connect", "one", "input", "to", "all", "output", "nodes", "(", "FS", "-", "NEAT", "without", "connections", "to", "hidden", "if", "any", ")", ".", "Originally", "connect_fs_neat", "." ]
python
train
pulseenergy/vacation
vacation/transactions.py
https://github.com/pulseenergy/vacation/blob/23c6122590852a5e55d84d366143469af6602839/vacation/transactions.py#L20-L41
def execute(tokens):
    """ Perform the actions described by the input tokens. """
    if not validate_rc():
        print('Your .vacationrc file has errors!')
        echo_vacation_rc()
        return

    # Dispatch tables: actions that take no argument vs. one argument.
    no_arg_actions = {
        'show': show,
        'log': log_vacation_days,
        'echo': echo_vacation_rc,
    }
    one_arg_actions = {
        'take': take,
        'cancel': cancel,
        'setrate': setrate,
        'setdays': setdays,
    }
    for action, value in tokens:
        if action in no_arg_actions:
            no_arg_actions[action]()
        elif action in one_arg_actions:
            one_arg_actions[action](value)
[ "def", "execute", "(", "tokens", ")", ":", "if", "not", "validate_rc", "(", ")", ":", "print", "(", "'Your .vacationrc file has errors!'", ")", "echo_vacation_rc", "(", ")", "return", "for", "action", ",", "value", "in", "tokens", ":", "if", "action", "==", ...
Perform the actions described by the input tokens.
[ "Perform", "the", "actions", "described", "by", "the", "input", "tokens", "." ]
python
train
baguette-io/baguette-messaging
farine/mixins.py
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/mixins.py#L50-L64
def database(self): """ Before the callback is called, initialize the database if needed. :rtype: None """ #1. Initialize self.callback.im_self.db = sql.setup(self.settings) if self.callback.im_self.db: module = '.'.join(self.callback.im_self.__module__.split('.')[:-1]) sql.init(module, self.callback.im_self.db) self.callback.im_self.db.connect() yield #2. Cleanup if self.callback.im_self.db: self.callback.im_self.db.close()
[ "def", "database", "(", "self", ")", ":", "#1. Initialize", "self", ".", "callback", ".", "im_self", ".", "db", "=", "sql", ".", "setup", "(", "self", ".", "settings", ")", "if", "self", ".", "callback", ".", "im_self", ".", "db", ":", "module", "=",...
Before the callback is called, initialize the database if needed. :rtype: None
[ "Before", "the", "callback", "is", "called", "initialize", "the", "database", "if", "needed", ".", ":", "rtype", ":", "None" ]
python
train
inveniosoftware/invenio-pages
invenio_pages/admin.py
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/admin.py#L64-L69
def template_exists(form, field):
    """Form validation: check that selected template exists.

    :param form: the form being validated (unused).
    :param field: field whose ``data`` holds the template name.
    :raises ValidationError: if the template cannot be loaded.
    """
    template_name = field.data
    try:
        current_app.jinja_env.get_template(template_name)
    except TemplateNotFound:
        raise ValidationError(_("Template selected does not exist"))
[ "def", "template_exists", "(", "form", ",", "field", ")", ":", "try", ":", "current_app", ".", "jinja_env", ".", "get_template", "(", "field", ".", "data", ")", "except", "TemplateNotFound", ":", "raise", "ValidationError", "(", "_", "(", "\"Template selected ...
Form validation: check that selected template exists.
[ "Form", "validation", ":", "check", "that", "selected", "template", "exists", "." ]
python
train
vsergeev/python-periphery
periphery/serial.py
https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L281-L298
def input_waiting(self):
    """Query the number of bytes waiting to be read from the serial port.

    Returns:
        int: number of bytes waiting to be read.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    result = array.array('I', [0])
    try:
        # TIOCINQ fills the buffer with the count of bytes pending in the
        # kernel input queue for this fd (mutate flag True writes back).
        fcntl.ioctl(self._fd, termios.TIOCINQ, result, True)
    except OSError as e:
        raise SerialError(e.errno, "Querying input waiting: " + e.strerror)
    return result[0]
[ "def", "input_waiting", "(", "self", ")", ":", "# Get input waiting", "buf", "=", "array", ".", "array", "(", "'I'", ",", "[", "0", "]", ")", "try", ":", "fcntl", ".", "ioctl", "(", "self", ".", "_fd", ",", "termios", ".", "TIOCINQ", ",", "buf", ",...
Query the number of bytes waiting to be read from the serial port. Returns: int: number of bytes waiting to be read. Raises: SerialError: if an I/O or OS error occurs.
[ "Query", "the", "number", "of", "bytes", "waiting", "to", "be", "read", "from", "the", "serial", "port", "." ]
python
train
linkhub-sdk/popbill.py
popbill/base.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/base.py#L140-L151
def getAccessURL(self, CorpNum, UserID):
    """ Return the Popbill login URL.
        args
            CorpNum : member company registration number
            UserID : member Popbill user ID
        return
            URL containing a 30-second security token
        raise
            PopbillException
    """
    response = self._httpget('/?TG=LOGIN', CorpNum, UserID)
    return response.url
[ "def", "getAccessURL", "(", "self", ",", "CorpNum", ",", "UserID", ")", ":", "result", "=", "self", ".", "_httpget", "(", "'/?TG=LOGIN'", ",", "CorpNum", ",", "UserID", ")", "return", "result", ".", "url" ]
팝빌 로그인 URL args CorpNum : 회원 사업자번호 UserID : 회원 팝빌아이디 return 30초 보안 토큰을 포함한 url raise PopbillException
[ "팝빌", "로그인", "URL", "args", "CorpNum", ":", "회원", "사업자번호", "UserID", ":", "회원", "팝빌아이디", "return", "30초", "보안", "토큰을", "포함한", "url", "raise", "PopbillException" ]
python
train
osrg/ryu
ryu/services/protocols/bgp/utils/rtfilter.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/utils/rtfilter.py#L120-L138
def update_local_rt_nlris(self):
    """Does book-keeping of local RT NLRIs based on all configured VRFs.

    Syncs all import RTs and RT NLRIs. The method should be called when
    any VRFs are added/removed/changed.
    """
    # Gather the import RTs of every currently configured VRF.
    latest_import_rts = set()
    for vrf_conf in self._vrfs_conf.vrf_confs:
        latest_import_rts.update(vrf_conf.import_rts)

    withdrawn_rts = self._all_vrfs_import_rts_set - latest_import_rts
    added_rts = latest_import_rts - self._all_vrfs_import_rts_set
    self._all_vrfs_import_rts_set = latest_import_rts

    # Advertise NLRIs for newly imported RTs; withdraw the removed ones.
    for rt in added_rts:
        self.add_rt_nlri(rt)
    for rt in withdrawn_rts:
        self.add_rt_nlri(rt, is_withdraw=True)
[ "def", "update_local_rt_nlris", "(", "self", ")", ":", "current_conf_import_rts", "=", "set", "(", ")", "for", "vrf", "in", "self", ".", "_vrfs_conf", ".", "vrf_confs", ":", "current_conf_import_rts", ".", "update", "(", "vrf", ".", "import_rts", ")", "removed...
Does book-keeping of local RT NLRIs based on all configured VRFs. Syncs all import RTs and RT NLRIs. The method should be called when any VRFs are added/removed/changed.
[ "Does", "book", "-", "keeping", "of", "local", "RT", "NLRIs", "based", "on", "all", "configured", "VRFs", "." ]
python
train
gumblex/zhconv
zhconv/zhconv.py
https://github.com/gumblex/zhconv/blob/925c0f9494f3439bc05526e7e89bb5f0ab3d185e/zhconv/zhconv.py#L449-L475
def main():
    """
    Simple stdin/stdout interface.

    Usage: ``prog [-w] {zh-cn|zh-tw|zh-hk|zh-sg|zh-hans|zh-hant|zh}``.
    Reads text line by line from stdin and prints the converted text.
    With ``-w`` the MediaWiki-syntax-aware converter is used.
    Exits with status 1 (after printing usage) on bad arguments.
    """
    # Select locale and conversion function from the command line.
    if len(sys.argv) == 2 and sys.argv[1] in Locales:
        locale = sys.argv[1]
        convertfunc = convert
    elif len(sys.argv) == 3 and sys.argv[1] == '-w' and sys.argv[2] in Locales:
        locale = sys.argv[2]
        convertfunc = convert_for_mw
    else:
        thisfile = __file__ if __name__ == '__main__' else 'python -mzhconv'
        print("usage: %s [-w] {zh-cn|zh-tw|zh-hk|zh-sg|zh-hans|zh-hant|zh} < input > output" % thisfile)
        sys.exit(1)

    loaddict()
    ln = sys.stdin.readline()
    while ln:
        l = ln.rstrip('\r\n')
        # Python 2 reads bytes from stdin; decode before converting.
        if sys.version_info[0] < 3:
            l = unicode(l, 'utf-8')
        res = convertfunc(l, locale)
        # Python 2 must re-encode before printing.
        if sys.version_info[0] < 3:
            print(res.encode('utf-8'))
        else:
            print(res)
        ln = sys.stdin.readline()
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", "==", "2", "and", "sys", ".", "argv", "[", "1", "]", "in", "Locales", ":", "locale", "=", "sys", ".", "argv", "[", "1", "]", "convertfunc", "=", "convert", "elif", "len", ...
Simple stdin/stdout interface.
[ "Simple", "stdin", "/", "stdout", "interface", "." ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L977-L979
def p_expr_function(p):
    # NOTE: the string below is not documentation -- PLY reads this
    # docstring as the grammar production for the rule; do not edit it.
    'expr : FUNCTION is_reference LPAREN parameter_list RPAREN lexical_vars LBRACE inner_statement_list RBRACE'
    # Build a Closure AST node: p[4]=parameter list, p[6]=lexical (use)
    # vars, p[8]=body statements, p[2]=by-reference flag.
    p[0] = ast.Closure(p[4], p[6], p[8], p[2], lineno=p.lineno(1))
[ "def", "p_expr_function", "(", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Closure", "(", "p", "[", "4", "]", ",", "p", "[", "6", "]", ",", "p", "[", "8", "]", ",", "p", "[", "2", "]", ",", "lineno", "=", "p", ".", "lineno", "(...
expr : FUNCTION is_reference LPAREN parameter_list RPAREN lexical_vars LBRACE inner_statement_list RBRACE
[ "expr", ":", "FUNCTION", "is_reference", "LPAREN", "parameter_list", "RPAREN", "lexical_vars", "LBRACE", "inner_statement_list", "RBRACE" ]
python
train
albahnsen/CostSensitiveClassification
costcla/probcal/probcal.py
https://github.com/albahnsen/CostSensitiveClassification/blob/75778ae32c70671c0cdde6c4651277b6a8b58871/costcla/probcal/probcal.py#L62-L135
def fit(self, y, p):
    """ Fit the calibration map

    Parameters
    ----------
    y_true : array-like of shape = [n_samples]
        True class to be used for calibrating the probabilities

    y_prob : array-like of shape = [n_samples, 2]
        Predicted probabilities to be used for calibrating the
        probabilities

    Returns
    -------
    self : object
        Returns self.
    """
    # TODO: Check input
    # Accept either a 1-D probability vector or the two-column
    # predict_proba output (keep the positive-class column).
    if p.size != p.shape[0]:
        p = p[:, 1]

    fpr, tpr, thresholds = roc_curve(y, p)
    #works with sklearn 0.11
    # Ensure the ROC curve starts at the origin.
    if fpr.min() > 0 or tpr.min() > 0:
        fpr = np.hstack((0, fpr))
        tpr = np.hstack((0, tpr))
        thresholds = np.hstack((1.01, thresholds))

    def prob_freq(y, predict_proba):
        #calculate distribution and return in inverse order
        # Columns: [probability bin, count of negatives, count of
        # positives, positive rate], ordered by descending probability.
        proba_bins = np.unique(predict_proba)
        freq_all = np.bincount(proba_bins.searchsorted(predict_proba))
        freq_0_tempa = np.unique(predict_proba[np.nonzero(y == 0)[0]])
        freq_0_tempb = np.bincount(
            freq_0_tempa.searchsorted(predict_proba[np.nonzero(y == 0)[0]]))
        freq = np.zeros((proba_bins.shape[0], 3))
        freq[:, 0] = proba_bins
        for i in range(freq_0_tempa.shape[0]):
            freq[np.nonzero(proba_bins == freq_0_tempa[i])[0], 1] = freq_0_tempb[i]
        freq[:, 2] = freq_all - freq[:, 1]
        freq = freq[proba_bins.argsort()[::-1], :]
        pr = freq[:, 2] / freq[:, 1:].sum(axis=1)
        pr = pr.reshape(freq.shape[0], 1)
        #fix when no negatives in range
        pr[pr == 1.0] = 0
        freq = np.hstack((freq, pr))
        return freq

    f = prob_freq(y, p)

    # Build the ROC convex hull; append (1, 0) to close the plane.
    temp_hull = []
    for i in range(fpr.shape[0]):
        temp_hull.append((fpr[i], tpr[i]))
    #close the plane
    temp_hull.append((1, 0))
    rocch_ = _convexhull(temp_hull)
    rocch = np.array([(a, b) for (a, b) in rocch_[:-1]])

    # Mark which ROC points lie on the convex hull.
    # FIX: use the builtin `bool` -- the `np.bool` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    rocch_find = np.zeros(fpr.shape[0], dtype=bool)
    for i in range(rocch.shape[0]):
        rocch_find[np.intersect1d(np.nonzero(rocch[i, 0] == fpr)[0],
                                  np.nonzero(rocch[i, 1] == tpr)[0])] = True
    rocch_thresholds = thresholds[rocch_find]

    #calibrated probabilities using ROCCH
    # Each row: [upper threshold, lower threshold, n_negatives,
    # n_positives, calibrated probability] per hull segment.
    f_cal = np.zeros((rocch_thresholds.shape[0] - 1, 5))
    for i in range(rocch_thresholds.shape[0] - 1):
        f_cal[i, 0] = rocch_thresholds[i]
        f_cal[i, 1] = rocch_thresholds[i + 1]
        join_elements = np.logical_and(f_cal[i, 1] <= f[:, 0],
                                       f_cal[i, 0] > f[:, 0])
        f_cal[i, 2] = f[join_elements, 1].sum()
        f_cal[i, 3] = f[join_elements, 2].sum()
    f_cal[:, 4] = f_cal[:, 3] / f_cal[:, [2, 3]].sum(axis=1)
    #fix to add 0
    f_cal[-1, 1] = 0
    calibrated_map = f_cal[:, [0, 1, 4]]
    self.calibration_map = calibrated_map
[ "def", "fit", "(", "self", ",", "y", ",", "p", ")", ":", "# TODO: Check input", "if", "p", ".", "size", "!=", "p", ".", "shape", "[", "0", "]", ":", "p", "=", "p", "[", ":", ",", "1", "]", "fpr", ",", "tpr", ",", "thresholds", "=", "roc_curve...
Fit the calibration map Parameters ---------- y_true : array-like of shape = [n_samples] True class to be used for calibrating the probabilities y_prob : array-like of shape = [n_samples, 2] Predicted probabilities to be used for calibrating the probabilities Returns ------- self : object Returns self.
[ "Fit", "the", "calibration", "map" ]
python
train
razorpay/razorpay-python
razorpay/resources/addon.py
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/addon.py#L22-L29
def delete(self, addon_id, data=None, **kwargs):
    """ Delete addon for given id

    Args:
        addon_id : Id for which addon object has to be deleted
        data : optional request payload (defaults to an empty dict)

    Returns:
        The response of the parent resource's delete call.
    """
    # Avoid a mutable default argument (`data={}`): a shared default dict
    # would persist across calls if any callee ever mutated it.
    if data is None:
        data = {}
    return super(Addon, self).delete(addon_id, data, **kwargs)
[ "def", "delete", "(", "self", ",", "addon_id", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "Addon", ",", "self", ")", ".", "delete", "(", "addon_id", ",", "data", ",", "*", "*", "kwargs", ")" ]
Delete addon for given id Args: addon_id : Id for which addon object has to be deleted
[ "Delete", "addon", "for", "given", "id" ]
python
train
blue-yonder/turbodbc
python/turbodbc/cursor.py
https://github.com/blue-yonder/turbodbc/blob/5556625e69244d941a708c69eb2c1e7b37c190b1/python/turbodbc/cursor.py#L265-L278
def fetchallnumpy(self):
    """
    Fetches all rows in the active result set generated with ``execute()``
    or ``executemany()``.

    :return: An ``OrderedDict`` of *columns*, where the keys of the
     dictionary are the column names. The columns are of NumPy's
     ``MaskedArray`` type, where the optimal data type for each result
     set column is chosen automatically.
    """
    from numpy.ma import concatenate
    batches = list(self._numpy_batch_generator())
    names = [meta[0] for meta in self.description]
    # Transpose the list of batches (each a sequence of column chunks)
    # into per-column chunk lists, then stitch each column together.
    columns = [concatenate(chunks) for chunks in zip(*batches)]
    return OrderedDict(zip(names, columns))
[ "def", "fetchallnumpy", "(", "self", ")", ":", "from", "numpy", ".", "ma", "import", "concatenate", "batches", "=", "list", "(", "self", ".", "_numpy_batch_generator", "(", ")", ")", "column_names", "=", "[", "description", "[", "0", "]", "for", "descripti...
Fetches all rows in the active result set generated with ``execute()`` or ``executemany()``. :return: An ``OrderedDict`` of *columns*, where the keys of the dictionary are the column names. The columns are of NumPy's ``MaskedArray`` type, where the optimal data type for each result set column is chosen automatically.
[ "Fetches", "all", "rows", "in", "the", "active", "result", "set", "generated", "with", "execute", "()", "or", "executemany", "()", "." ]
python
train
tensorflow/cleverhans
cleverhans/attack_bundling.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attack_bundling.py#L563-L568
def request_examples(self, attack_config, criteria, run_counts, batch_size): """ Returns a numpy array of integer example indices to run in the next batch. """ raise NotImplementedError(str(type(self)) + "needs to implement request_examples")
[ "def", "request_examples", "(", "self", ",", "attack_config", ",", "criteria", ",", "run_counts", ",", "batch_size", ")", ":", "raise", "NotImplementedError", "(", "str", "(", "type", "(", "self", ")", ")", "+", "\"needs to implement request_examples\"", ")" ]
Returns a numpy array of integer example indices to run in the next batch.
[ "Returns", "a", "numpy", "array", "of", "integer", "example", "indices", "to", "run", "in", "the", "next", "batch", "." ]
python
train
chemlab/chemlab
chemlab/graphics/qt/qttrajectory.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/graphics/qt/qttrajectory.py#L316-L320
def set_text(self, text): '''Update the time indicator in the interface. ''' self.traj_controls.timelabel.setText(self.traj_controls._label_tmp.format(text))
[ "def", "set_text", "(", "self", ",", "text", ")", ":", "self", ".", "traj_controls", ".", "timelabel", ".", "setText", "(", "self", ".", "traj_controls", ".", "_label_tmp", ".", "format", "(", "text", ")", ")" ]
Update the time indicator in the interface.
[ "Update", "the", "time", "indicator", "in", "the", "interface", "." ]
python
train
ioos/compliance-checker
compliance_checker/ioos.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/ioos.py#L133-L146
def check_variable_attributes(self, ds): """ Check IOOS concepts that come from NC variable attributes. :param netCDF4.Dataset ds: An open netCDF dataset """ return [ self._has_var_attr(ds, 'platform', 'long_name', 'Station Long Name'), self._has_var_attr(ds, 'platform', 'short_name', 'Station Short Name'), self._has_var_attr(ds, 'platform', 'source', 'Platform Type'), self._has_var_attr(ds, 'platform', 'ioos_name', 'Station ID'), self._has_var_attr(ds, 'platform', 'wmo_id', 'Station WMO ID'), self._has_var_attr(ds, 'platform', 'comment', 'Station Description'), ]
[ "def", "check_variable_attributes", "(", "self", ",", "ds", ")", ":", "return", "[", "self", ".", "_has_var_attr", "(", "ds", ",", "'platform'", ",", "'long_name'", ",", "'Station Long Name'", ")", ",", "self", ".", "_has_var_attr", "(", "ds", ",", "'platfor...
Check IOOS concepts that come from NC variable attributes. :param netCDF4.Dataset ds: An open netCDF dataset
[ "Check", "IOOS", "concepts", "that", "come", "from", "NC", "variable", "attributes", "." ]
python
train
Azure/blobxfer
blobxfer/models/upload.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/upload.py#L734-L771
def _compute_total_chunks(self, chunk_size): # type: (Descriptor, int) -> int """Compute total number of chunks for entity :param Descriptor self: this :param int chunk_size: chunk size :rtype: int :return: num chunks """ try: chunks = int(math.ceil(self._ase.size / chunk_size)) except ZeroDivisionError: chunks = 1 # for stdin, override and use 1 chunk to start, this will change # dynamically as data as read if self.local_path.use_stdin: chunks = 1 if (self._ase.mode != blobxfer.models.azure.StorageModes.Page and chunks > 50000): max_vector = False if self._ase.mode == blobxfer.models.azure.StorageModes.Block: if self._chunk_size == _MAX_BLOCK_BLOB_CHUNKSIZE_BYTES: max_vector = True elif self._chunk_size == _MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES: max_vector = True if max_vector: raise RuntimeError( ('number of chunks {} exceeds maximum permissible ' 'limit and chunk size is set at the maximum value ' 'for {}. Please try using stripe mode ' 'vectorization to overcome this limitation').format( chunks, self.local_path.absolute_path)) else: raise RuntimeError( ('number of chunks {} exceeds maximum permissible ' 'limit for {}, please adjust chunk size higher or ' 'set to -1 for automatic chunk size selection').format( chunks, self.local_path.absolute_path)) return chunks
[ "def", "_compute_total_chunks", "(", "self", ",", "chunk_size", ")", ":", "# type: (Descriptor, int) -> int", "try", ":", "chunks", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "_ase", ".", "size", "/", "chunk_size", ")", ")", "except", "ZeroDivis...
Compute total number of chunks for entity :param Descriptor self: this :param int chunk_size: chunk size :rtype: int :return: num chunks
[ "Compute", "total", "number", "of", "chunks", "for", "entity", ":", "param", "Descriptor", "self", ":", "this", ":", "param", "int", "chunk_size", ":", "chunk", "size", ":", "rtype", ":", "int", ":", "return", ":", "num", "chunks" ]
python
train
thombashi/thutils
thutils/gfile.py
https://github.com/thombashi/thutils/blob/9eba767cfc26b38cd66b83b99aee0c31b8b90dec/thutils/gfile.py#L149-L163
def chmod(cls, path, permission_text): """ :param str permission_text: "ls -l" style permission string. e.g. -rw-r--r-- """ try: check_file_existence(path) except FileNotFoundError: _, e, _ = sys.exc_info() # for python 2.5 compatibility logger.debug(e) return False logger.debug("chmod %s %s" % (path, permission_text)) os.chmod(path, parseLsPermissionText(permission_text))
[ "def", "chmod", "(", "cls", ",", "path", ",", "permission_text", ")", ":", "try", ":", "check_file_existence", "(", "path", ")", "except", "FileNotFoundError", ":", "_", ",", "e", ",", "_", "=", "sys", ".", "exc_info", "(", ")", "# for python 2.5 compatibi...
:param str permission_text: "ls -l" style permission string. e.g. -rw-r--r--
[ ":", "param", "str", "permission_text", ":", "ls", "-", "l", "style", "permission", "string", ".", "e", ".", "g", ".", "-", "rw", "-", "r", "--", "r", "--" ]
python
train
Chilipp/psyplot
psyplot/data.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/data.py#L4399-L4420
def remove(self, arr): """Removes an array from the list Parameters ---------- arr: str or :class:`InteractiveBase` The array name or the data object in this list to remove Raises ------ ValueError If no array with the specified array name is in the list""" name = arr if isinstance(arr, six.string_types) else arr.psy.arr_name if arr not in self: raise ValueError( "Array {0} not in the list".format(name)) for i, arr in enumerate(self): if arr.psy.arr_name == name: del self[i] return raise ValueError( "No array found with name {0}".format(name))
[ "def", "remove", "(", "self", ",", "arr", ")", ":", "name", "=", "arr", "if", "isinstance", "(", "arr", ",", "six", ".", "string_types", ")", "else", "arr", ".", "psy", ".", "arr_name", "if", "arr", "not", "in", "self", ":", "raise", "ValueError", ...
Removes an array from the list Parameters ---------- arr: str or :class:`InteractiveBase` The array name or the data object in this list to remove Raises ------ ValueError If no array with the specified array name is in the list
[ "Removes", "an", "array", "from", "the", "list" ]
python
train
KnowledgeLinks/rdfframework
rdfframework/connections/connmanager.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/connections/connmanager.py#L151-L164
def load(self, conn_list, **kwargs): """ Takes a list of connections and sets them in the manager args: conn_list: list of connection defitions """ for conn in conn_list: conn['delay_check'] = kwargs.get('delay_check', False) self.set_conn(**conn) if kwargs.get('delay_check'): test = self.wait_for_conns(**kwargs) if not test: log.critical("\n\nEXITING:Unable to establish connections \n" "%s", test)
[ "def", "load", "(", "self", ",", "conn_list", ",", "*", "*", "kwargs", ")", ":", "for", "conn", "in", "conn_list", ":", "conn", "[", "'delay_check'", "]", "=", "kwargs", ".", "get", "(", "'delay_check'", ",", "False", ")", "self", ".", "set_conn", "(...
Takes a list of connections and sets them in the manager args: conn_list: list of connection defitions
[ "Takes", "a", "list", "of", "connections", "and", "sets", "them", "in", "the", "manager" ]
python
train
wummel/linkchecker
linkcheck/plugins/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/plugins/__init__.py#L63-L70
def get_plugin_modules(folders, package='plugins', parentpackage='linkcheck.dummy'): """Get plugin modules for given folders.""" for folder in folders: for module in loader.get_folder_modules(folder, parentpackage): yield module for module in loader.get_package_modules(package): yield module
[ "def", "get_plugin_modules", "(", "folders", ",", "package", "=", "'plugins'", ",", "parentpackage", "=", "'linkcheck.dummy'", ")", ":", "for", "folder", "in", "folders", ":", "for", "module", "in", "loader", ".", "get_folder_modules", "(", "folder", ",", "par...
Get plugin modules for given folders.
[ "Get", "plugin", "modules", "for", "given", "folders", "." ]
python
train
Carbonara-Project/Guanciale
guanciale/idblib.py
https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L895-L906
def prettyval(self, val): """ returns the value in a readable format. """ if len(val) == self.wordsize and val[-1:] in (b'\x00', b'\xff'): return "%x" % struct.unpack("<" + self.fmt, val) if len(val) == self.wordsize and re.search(b'[\x00-\x08\x0b\x0c\x0e-\x1f]', val, re.DOTALL): return "%x" % struct.unpack("<" + self.fmt, val) if len(val) < 2 or not re.match(b'^[\x09\x0a\x0d\x20-\xff]+.$', val, re.DOTALL): return hexdump(val) val = val.replace(b"\n", b"\\n") return "'%s'" % val.decode('utf-8', 'ignore')
[ "def", "prettyval", "(", "self", ",", "val", ")", ":", "if", "len", "(", "val", ")", "==", "self", ".", "wordsize", "and", "val", "[", "-", "1", ":", "]", "in", "(", "b'\\x00'", ",", "b'\\xff'", ")", ":", "return", "\"%x\"", "%", "struct", ".", ...
returns the value in a readable format.
[ "returns", "the", "value", "in", "a", "readable", "format", "." ]
python
train
batiste/django-page-cms
pages/admin/views.py
https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/admin/views.py#L21-L32
def change_status(request, page_id): """ Switch the status of a page. """ perm = request.user.has_perm('pages.change_page') if perm and request.method == 'POST': page = Page.objects.get(pk=page_id) page.status = int(request.POST['status']) page.invalidate() page.save() return HttpResponse(str(page.status)) raise Http404
[ "def", "change_status", "(", "request", ",", "page_id", ")", ":", "perm", "=", "request", ".", "user", ".", "has_perm", "(", "'pages.change_page'", ")", "if", "perm", "and", "request", ".", "method", "==", "'POST'", ":", "page", "=", "Page", ".", "object...
Switch the status of a page.
[ "Switch", "the", "status", "of", "a", "page", "." ]
python
train
tensorpack/tensorpack
tensorpack/callbacks/monitor.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/callbacks/monitor.py#L302-L314
def load_existing_json(): """ Look for an existing json under :meth:`logger.get_logger_dir()` named "stats.json", and return the loaded list of statistics if found. Returns None otherwise. """ dir = logger.get_logger_dir() fname = os.path.join(dir, JSONWriter.FILENAME) if tf.gfile.Exists(fname): with open(fname) as f: stats = json.load(f) assert isinstance(stats, list), type(stats) return stats return None
[ "def", "load_existing_json", "(", ")", ":", "dir", "=", "logger", ".", "get_logger_dir", "(", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "JSONWriter", ".", "FILENAME", ")", "if", "tf", ".", "gfile", ".", "Exists", "(", "fname...
Look for an existing json under :meth:`logger.get_logger_dir()` named "stats.json", and return the loaded list of statistics if found. Returns None otherwise.
[ "Look", "for", "an", "existing", "json", "under", ":", "meth", ":", "logger", ".", "get_logger_dir", "()", "named", "stats", ".", "json", "and", "return", "the", "loaded", "list", "of", "statistics", "if", "found", ".", "Returns", "None", "otherwise", "." ...
python
train
tensorflow/probability
tensorflow_probability/python/sts/fitting.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/fitting.py#L267-L282
def _minimize_in_graph(build_loss_fn, num_steps=200, optimizer=None): """Run an optimizer within the graph to minimize a loss function.""" optimizer = tf.compat.v1.train.AdamOptimizer( 0.1) if optimizer is None else optimizer def train_loop_body(step): train_op = optimizer.minimize( build_loss_fn if tf.executing_eagerly() else build_loss_fn()) return tf.tuple(tensors=[tf.add(step, 1)], control_inputs=[train_op]) minimize_op = tf.compat.v1.while_loop( cond=lambda step: step < num_steps, body=train_loop_body, loop_vars=[tf.constant(0)], return_same_structure=True)[0] # Always return a single op. return minimize_op
[ "def", "_minimize_in_graph", "(", "build_loss_fn", ",", "num_steps", "=", "200", ",", "optimizer", "=", "None", ")", ":", "optimizer", "=", "tf", ".", "compat", ".", "v1", ".", "train", ".", "AdamOptimizer", "(", "0.1", ")", "if", "optimizer", "is", "Non...
Run an optimizer within the graph to minimize a loss function.
[ "Run", "an", "optimizer", "within", "the", "graph", "to", "minimize", "a", "loss", "function", "." ]
python
test
coddingtonbear/django-location
location/migrations/0005_move_user_information.py
https://github.com/coddingtonbear/django-location/blob/5dd65b4e94cb65362d6e903080592b58ce37ff05/location/migrations/0005_move_user_information.py#L19-L24
def backwards(self, orm): "Write your backwards methods here." for instance in orm.LocationSource.objects.all(): for point in instance.points.all(): point.user = instance.user point.save()
[ "def", "backwards", "(", "self", ",", "orm", ")", ":", "for", "instance", "in", "orm", ".", "LocationSource", ".", "objects", ".", "all", "(", ")", ":", "for", "point", "in", "instance", ".", "points", ".", "all", "(", ")", ":", "point", ".", "user...
Write your backwards methods here.
[ "Write", "your", "backwards", "methods", "here", "." ]
python
train
hovren/crisp
crisp/znccpyr.py
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L73-L103
def upsample(time_series, scaling_factor): """Upsample using linear interpolation The function uses replication of the value at edges Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal """ Ns0 = np.size(time_series) Ns = np.int(np.floor(np.size(time_series)*scaling_factor)) ts_out = np.zeros((Ns,1), dtype='float64') for k in range(0,Ns): cpos = int(np.min([Ns0-1,np.max([0.,(k+0.5)/scaling_factor-0.5])])) cfrac = cpos-np.floor(cpos) cind = int(np.floor(cpos)) #print "cpos=%f cfrac=%f cind=%d", (cpos,cfrac,cind) if cfrac>0: ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac else: ts_out[k]=time_series[cind] return ts_out
[ "def", "upsample", "(", "time_series", ",", "scaling_factor", ")", ":", "Ns0", "=", "np", ".", "size", "(", "time_series", ")", "Ns", "=", "np", ".", "int", "(", "np", ".", "floor", "(", "np", ".", "size", "(", "time_series", ")", "*", "scaling_facto...
Upsample using linear interpolation The function uses replication of the value at edges Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal
[ "Upsample", "using", "linear", "interpolation" ]
python
train
spencerahill/aospy
aospy/utils/times.py
https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/utils/times.py#L19-L58
def apply_time_offset(time, years=0, months=0, days=0, hours=0): """Apply a specified offset to the given time array. This is useful for GFDL model output of instantaneous values. For example, 3 hourly data postprocessed to netCDF files spanning 1 year each will actually have time values that are offset by 3 hours, such that the first value is for 1 Jan 03:00 and the last value is 1 Jan 00:00 of the subsequent year. This causes problems in xarray, e.g. when trying to group by month. It is resolved by manually subtracting off those three hours, such that the dates span from 1 Jan 00:00 to 31 Dec 21:00 as desired. Parameters ---------- time : xarray.DataArray representing a timeseries years, months, days, hours : int, optional The number of years, months, days, and hours, respectively, to offset the time array by. Positive values move the times later. Returns ------- pandas.DatetimeIndex Examples -------- Case of a length-1 input time array: >>> times = xr.DataArray(datetime.datetime(1899, 12, 31, 21)) >>> apply_time_offset(times) Timestamp('1900-01-01 00:00:00') Case of input time array with length greater than one: >>> times = xr.DataArray([datetime.datetime(1899, 12, 31, 21), ... datetime.datetime(1899, 1, 31, 21)]) >>> apply_time_offset(times) # doctest: +NORMALIZE_WHITESPACE DatetimeIndex(['1900-01-01', '1899-02-01'], dtype='datetime64[ns]', freq=None) """ return (pd.to_datetime(time.values) + pd.DateOffset(years=years, months=months, days=days, hours=hours))
[ "def", "apply_time_offset", "(", "time", ",", "years", "=", "0", ",", "months", "=", "0", ",", "days", "=", "0", ",", "hours", "=", "0", ")", ":", "return", "(", "pd", ".", "to_datetime", "(", "time", ".", "values", ")", "+", "pd", ".", "DateOffs...
Apply a specified offset to the given time array. This is useful for GFDL model output of instantaneous values. For example, 3 hourly data postprocessed to netCDF files spanning 1 year each will actually have time values that are offset by 3 hours, such that the first value is for 1 Jan 03:00 and the last value is 1 Jan 00:00 of the subsequent year. This causes problems in xarray, e.g. when trying to group by month. It is resolved by manually subtracting off those three hours, such that the dates span from 1 Jan 00:00 to 31 Dec 21:00 as desired. Parameters ---------- time : xarray.DataArray representing a timeseries years, months, days, hours : int, optional The number of years, months, days, and hours, respectively, to offset the time array by. Positive values move the times later. Returns ------- pandas.DatetimeIndex Examples -------- Case of a length-1 input time array: >>> times = xr.DataArray(datetime.datetime(1899, 12, 31, 21)) >>> apply_time_offset(times) Timestamp('1900-01-01 00:00:00') Case of input time array with length greater than one: >>> times = xr.DataArray([datetime.datetime(1899, 12, 31, 21), ... datetime.datetime(1899, 1, 31, 21)]) >>> apply_time_offset(times) # doctest: +NORMALIZE_WHITESPACE DatetimeIndex(['1900-01-01', '1899-02-01'], dtype='datetime64[ns]', freq=None)
[ "Apply", "a", "specified", "offset", "to", "the", "given", "time", "array", "." ]
python
train
tanghaibao/goatools
goatools/grouper/read_goids.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/read_goids.py#L109-L134
def _read_txt(self, fin_txt, get_goids_only, exclude_ungrouped): """Read GO file. Store results in: section2goids sections_seen. Return goids_fin.""" goids_sec = [] with open(fin_txt) as istrm: # Lines starting with a GO ID will have that GO ID read and stored. # * Lines that do not start with a GO ID will be ignored. # * Text after the 10 characters in a GO ID will be ignored. section_name = None for line in istrm: if line[:3] == "GO:": goids_sec.append(line[:10]) elif not get_goids_only and ":" in line: mtch = self.srch_section.match(line) if mtch: secstr = mtch.group(1) if section_name is not None and goids_sec: self.section2goids[section_name] = goids_sec if not exclude_ungrouped or secstr != HdrgosSections.secdflt: section_name = secstr self.sections_seen.append(section_name) else: section_name = None goids_sec = [] if section_name is not None and goids_sec: self.section2goids[section_name] = goids_sec return goids_sec
[ "def", "_read_txt", "(", "self", ",", "fin_txt", ",", "get_goids_only", ",", "exclude_ungrouped", ")", ":", "goids_sec", "=", "[", "]", "with", "open", "(", "fin_txt", ")", "as", "istrm", ":", "# Lines starting with a GO ID will have that GO ID read and stored.", "#...
Read GO file. Store results in: section2goids sections_seen. Return goids_fin.
[ "Read", "GO", "file", ".", "Store", "results", "in", ":", "section2goids", "sections_seen", ".", "Return", "goids_fin", "." ]
python
train
streamlink/streamlink
src/streamlink/utils/__init__.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/utils/__init__.py#L158-L175
def search_dict(data, key): """ Search for a key in a nested dict, or list of nested dicts, and return the values. :param data: dict/list to search :param key: key to find :return: matches for key """ if isinstance(data, dict): for dkey, value in data.items(): if dkey == key: yield value for result in search_dict(value, key): yield result elif isinstance(data, list): for value in data: for result in search_dict(value, key): yield result
[ "def", "search_dict", "(", "data", ",", "key", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "dkey", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "dkey", "==", "key", ":", "yield", "value", "for", "res...
Search for a key in a nested dict, or list of nested dicts, and return the values. :param data: dict/list to search :param key: key to find :return: matches for key
[ "Search", "for", "a", "key", "in", "a", "nested", "dict", "or", "list", "of", "nested", "dicts", "and", "return", "the", "values", "." ]
python
test
openstack/proliantutils
proliantutils/ilo/ris.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L136-L140
def _get_type(self, obj): """Return the type of an object.""" typever = obj['Type'] typesplit = typever.split('.') return typesplit[0] + '.' + typesplit[1]
[ "def", "_get_type", "(", "self", ",", "obj", ")", ":", "typever", "=", "obj", "[", "'Type'", "]", "typesplit", "=", "typever", ".", "split", "(", "'.'", ")", "return", "typesplit", "[", "0", "]", "+", "'.'", "+", "typesplit", "[", "1", "]" ]
Return the type of an object.
[ "Return", "the", "type", "of", "an", "object", "." ]
python
train
JarryShaw/PyPCAPKit
src/protocols/pcap/header.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/pcap/header.py#L110-L170
def read_header(self): """Read global header of PCAP file. Structure of global header (C): typedef struct pcap_hdr_s { guint32 magic_number; /* magic number */ guint16 version_major; /* major version number */ guint16 version_minor; /* minor version number */ gint32 thiszone; /* GMT to local correction */ guint32 sigfigs; /* accuracy of timestamps */ guint32 snaplen; /* max length of captured packets, in octets */ guint32 network; /* data link type */ } pcap_hdr_t; """ _magn = self._read_fileng(4) if _magn == b'\xd4\xc3\xb2\xa1': lilendian = True self._nsec = False self._byte = 'little' elif _magn == b'\xa1\xb2\xc3\xd4': lilendian = False self._nsec = False self._byte = 'big' elif _magn == b'\x4d\x3c\xb2\xa1': lilendian = True self._nsec = True self._byte = 'little' elif _magn == b'\xa1\xb2\x3c\x4d': lilendian = False self._nsec = True self._byte = 'big' else: raise FileError(5, 'Unknown file format', self._file.name) _vmaj = self._read_unpack(2, lilendian=lilendian) _vmin = self._read_unpack(2, lilendian=lilendian) _zone = self._read_unpack(4, lilendian=lilendian, signed=True) _acts = self._read_unpack(4, lilendian=lilendian) _slen = self._read_unpack(4, lilendian=lilendian) _type = self._read_protos(4) _byte = self._read_packet(24) self._file = io.BytesIO(_byte) header = dict( magic_number=dict( data=_magn, byteorder=self._byte, nanosecond=self._nsec, ), version_major=_vmaj, version_minor=_vmin, thiszone=_zone, sigfigs=_acts, snaplen=_slen, network=_type, packet=_byte, ) return header
[ "def", "read_header", "(", "self", ")", ":", "_magn", "=", "self", ".", "_read_fileng", "(", "4", ")", "if", "_magn", "==", "b'\\xd4\\xc3\\xb2\\xa1'", ":", "lilendian", "=", "True", "self", ".", "_nsec", "=", "False", "self", ".", "_byte", "=", "'little'...
Read global header of PCAP file. Structure of global header (C): typedef struct pcap_hdr_s { guint32 magic_number; /* magic number */ guint16 version_major; /* major version number */ guint16 version_minor; /* minor version number */ gint32 thiszone; /* GMT to local correction */ guint32 sigfigs; /* accuracy of timestamps */ guint32 snaplen; /* max length of captured packets, in octets */ guint32 network; /* data link type */ } pcap_hdr_t;
[ "Read", "global", "header", "of", "PCAP", "file", "." ]
python
train
williballenthin/python-evtx
scripts/evtx_filter_records.py
https://github.com/williballenthin/python-evtx/blob/4e9e29544adde64c79ff9b743269ecb18c677eb4/scripts/evtx_filter_records.py#L18-L32
def xml_records(filename): """ If the second return value is not None, then it is an Exception encountered during parsing. The first return value will be the XML string. @type filename str @rtype: generator of (etree.Element or str), (None or Exception) """ with Evtx(filename) as evtx: for xml, record in evtx_file_xml_view(evtx.get_file_header()): try: yield to_lxml(xml), None except etree.XMLSyntaxError as e: yield xml, e
[ "def", "xml_records", "(", "filename", ")", ":", "with", "Evtx", "(", "filename", ")", "as", "evtx", ":", "for", "xml", ",", "record", "in", "evtx_file_xml_view", "(", "evtx", ".", "get_file_header", "(", ")", ")", ":", "try", ":", "yield", "to_lxml", ...
If the second return value is not None, then it is an Exception encountered during parsing. The first return value will be the XML string. @type filename str @rtype: generator of (etree.Element or str), (None or Exception)
[ "If", "the", "second", "return", "value", "is", "not", "None", "then", "it", "is", "an", "Exception", "encountered", "during", "parsing", ".", "The", "first", "return", "value", "will", "be", "the", "XML", "string", "." ]
python
train
awslabs/sockeye
sockeye/vocab.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/vocab.py#L301-L308
def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]: """ Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id. :param vocab: Input vocabulary. :return: List of tokens. """ return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])]
[ "def", "get_ordered_tokens_from_vocab", "(", "vocab", ":", "Vocab", ")", "->", "List", "[", "str", "]", ":", "return", "[", "token", "for", "token", ",", "token_id", "in", "sorted", "(", "vocab", ".", "items", "(", ")", ",", "key", "=", "lambda", "i", ...
Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id. :param vocab: Input vocabulary. :return: List of tokens.
[ "Returns", "the", "list", "of", "tokens", "in", "a", "vocabulary", "ordered", "by", "increasing", "vocabulary", "id", "." ]
python
train
openvax/pyensembl
pyensembl/database.py
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/database.py#L204-L254
def create( self, overwrite=False): """ Create the local database (including indexing) if it's not already set up. If `overwrite` is True, always re-create the database from scratch. Returns a connection to the database. """ logger.info("Creating database: %s", self.local_db_path) df = self._load_gtf_as_dataframe( usecols=self.restrict_gtf_columns, features=self.restrict_gtf_features) all_index_groups = self._all_possible_indices(df.columns) if self.restrict_gtf_features: feature_names = self.restrict_gtf_features else: # split single DataFrame into dictionary mapping each unique # feature name onto that subset of the data feature_names = df['feature'].unique() dataframes = {} # every table gets the same set of indices indices_dict = {} # if a feature has an ID then make it that table's primary key primary_keys = {} for feature in feature_names: df_subset = df[df.feature == feature] if len(df_subset) == 0: continue dataframes[feature] = df_subset primary_key = self._get_primary_key(feature, df_subset) if primary_key: primary_keys[feature] = primary_key indices_dict[feature] = self._feature_indices( all_index_groups, primary_key, df_subset) self._connection = datacache.db_from_dataframes_with_absolute_path( db_path=self.local_db_path, table_names_to_dataframes=dataframes, table_names_to_primary_keys=primary_keys, table_names_to_indices=indices_dict, overwrite=overwrite, version=DATABASE_SCHEMA_VERSION) return self._connection
[ "def", "create", "(", "self", ",", "overwrite", "=", "False", ")", ":", "logger", ".", "info", "(", "\"Creating database: %s\"", ",", "self", ".", "local_db_path", ")", "df", "=", "self", ".", "_load_gtf_as_dataframe", "(", "usecols", "=", "self", ".", "re...
Create the local database (including indexing) if it's not already set up. If `overwrite` is True, always re-create the database from scratch. Returns a connection to the database.
[ "Create", "the", "local", "database", "(", "including", "indexing", ")", "if", "it", "s", "not", "already", "set", "up", ".", "If", "overwrite", "is", "True", "always", "re", "-", "create", "the", "database", "from", "scratch", "." ]
python
train
clalancette/pycdlib
pycdlib/eltorito.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/eltorito.py#L594-L614
def record(self): # type: () -> bytes ''' Get a string representing this El Torito section header. Parameters: None. Returns: A string representing this El Torito section header. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('El Torito Section Header not yet initialized') outlist = [struct.pack(self.FMT, self.header_indicator, self.platform_id, self.num_section_entries, self.id_string)] for entry in self.section_entries: outlist.append(entry.record()) return b''.join(outlist)
[ "def", "record", "(", "self", ")", ":", "# type: () -> bytes", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'El Torito Section Header not yet initialized'", ")", "outlist", "=", "[", "struct", ".", "p...
Get a string representing this El Torito section header. Parameters: None. Returns: A string representing this El Torito section header.
[ "Get", "a", "string", "representing", "this", "El", "Torito", "section", "header", "." ]
python
train
GiulioRossetti/dynetx
dynetx/classes/dyngraph.py
https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L238-L291
def interactions_iter(self, nbunch=None, t=None): """Return an iterator over the interaction present in a given snapshot. Edges are returned as tuples in the order (node, neighbor). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. t : snapshot id (default=None) If None the the method returns an iterator over the edges of the flattened graph. Returns ------- edge_iter : iterator An iterator of (u,v) tuples of interaction. See Also -------- interaction : return a list of interaction Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-interaction. Examples -------- >>> G = dn.DynGraph() >>> G.add_path([0,1,2], 0) >>> G.add_interaction(2,3,1) >>> [e for e in G.interactions_iter(t=0)] [(0, 1), (1, 2)] >>> list(G.interactions_iter()) [(0, 1), (1, 2), (2, 3)] """ seen = {} # helper dict to keep track of multiply stored interactions if nbunch is None: nodes_nbrs = self._adj.items() else: nodes_nbrs = ((n, self._adj[n]) for n in self.nbunch_iter(nbunch)) for n, nbrs in nodes_nbrs: for nbr in nbrs: if t is not None: if nbr not in seen and self.__presence_test(n, nbr, t): yield (n, nbr, {"t": [t]}) else: if nbr not in seen: yield (n, nbr, self._adj[n][nbr]) seen[n] = 1 del seen
[ "def", "interactions_iter", "(", "self", ",", "nbunch", "=", "None", ",", "t", "=", "None", ")", ":", "seen", "=", "{", "}", "# helper dict to keep track of multiply stored interactions", "if", "nbunch", "is", "None", ":", "nodes_nbrs", "=", "self", ".", "_adj...
Return an iterator over the interaction present in a given snapshot. Edges are returned as tuples in the order (node, neighbor). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. t : snapshot id (default=None) If None the the method returns an iterator over the edges of the flattened graph. Returns ------- edge_iter : iterator An iterator of (u,v) tuples of interaction. See Also -------- interaction : return a list of interaction Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-interaction. Examples -------- >>> G = dn.DynGraph() >>> G.add_path([0,1,2], 0) >>> G.add_interaction(2,3,1) >>> [e for e in G.interactions_iter(t=0)] [(0, 1), (1, 2)] >>> list(G.interactions_iter()) [(0, 1), (1, 2), (2, 3)]
[ "Return", "an", "iterator", "over", "the", "interaction", "present", "in", "a", "given", "snapshot", "." ]
python
train
bitprophet/spec
spec/cli.py
https://github.com/bitprophet/spec/blob/d9646c5daf8e479937f970d21ebe185ad936a35a/spec/cli.py#L56-L65
def registerGoodClass(self, class_): """ Internal bookkeeping to handle nested classes """ # Class itself added to "good" list self._valid_classes.append(class_) # Recurse into any inner classes for name, cls in class_members(class_): if self.isValidClass(cls): self.registerGoodClass(cls)
[ "def", "registerGoodClass", "(", "self", ",", "class_", ")", ":", "# Class itself added to \"good\" list", "self", ".", "_valid_classes", ".", "append", "(", "class_", ")", "# Recurse into any inner classes", "for", "name", ",", "cls", "in", "class_members", "(", "c...
Internal bookkeeping to handle nested classes
[ "Internal", "bookkeeping", "to", "handle", "nested", "classes" ]
python
valid
markovmodel/PyEMMA
pyemma/coordinates/data/_base/_in_memory_mixin.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/_base/_in_memory_mixin.py#L37-L47
def _map_to_memory(self, stride=1): r"""Maps results to memory. Will be stored in attribute :attr:`_Y`.""" self._mapping_to_mem_active = True try: self._Y = self.get_output(stride=stride) from pyemma.coordinates.data import DataInMemory self._Y_source = DataInMemory(self._Y) finally: self._mapping_to_mem_active = False self._in_memory = True
[ "def", "_map_to_memory", "(", "self", ",", "stride", "=", "1", ")", ":", "self", ".", "_mapping_to_mem_active", "=", "True", "try", ":", "self", ".", "_Y", "=", "self", ".", "get_output", "(", "stride", "=", "stride", ")", "from", "pyemma", ".", "coord...
r"""Maps results to memory. Will be stored in attribute :attr:`_Y`.
[ "r", "Maps", "results", "to", "memory", ".", "Will", "be", "stored", "in", "attribute", ":", "attr", ":", "_Y", "." ]
python
train
lrq3000/pyFileFixity
pyFileFixity/lib/pathlib2.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L1314-L1323
def write_text(self, data, encoding=None, errors=None): """ Open the file in text mode, write to it, and close the file. """ if not isinstance(data, six.text_type): raise TypeError( 'data must be %s, not %s' % (six.text_type.__class__.__name__, data.__class__.__name__)) with self.open(mode='w', encoding=encoding, errors=errors) as f: return f.write(data)
[ "def", "write_text", "(", "self", ",", "data", ",", "encoding", "=", "None", ",", "errors", "=", "None", ")", ":", "if", "not", "isinstance", "(", "data", ",", "six", ".", "text_type", ")", ":", "raise", "TypeError", "(", "'data must be %s, not %s'", "%"...
Open the file in text mode, write to it, and close the file.
[ "Open", "the", "file", "in", "text", "mode", "write", "to", "it", "and", "close", "the", "file", "." ]
python
train
yahoo/TensorFlowOnSpark
examples/wide_deep/census_dataset.py
https://github.com/yahoo/TensorFlowOnSpark/blob/5e4b6c185ab722fd0104ede0377e1149ea8d6f7c/examples/wide_deep/census_dataset.py#L59-L73
def _download_and_clean_file(filename, url): """Downloads data from url, and makes changes to match the CSV format.""" temp_file, _ = urllib.request.urlretrieve(url) with tf.gfile.Open(temp_file, 'r') as temp_eval_file: with tf.gfile.Open(filename, 'w') as eval_file: for line in temp_eval_file: line = line.strip() line = line.replace(', ', ',') if not line or ',' not in line: continue if line[-1] == '.': line = line[:-1] line += '\n' eval_file.write(line) tf.gfile.Remove(temp_file)
[ "def", "_download_and_clean_file", "(", "filename", ",", "url", ")", ":", "temp_file", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "url", ")", "with", "tf", ".", "gfile", ".", "Open", "(", "temp_file", ",", "'r'", ")", "as", "temp...
Downloads data from url, and makes changes to match the CSV format.
[ "Downloads", "data", "from", "url", "and", "makes", "changes", "to", "match", "the", "CSV", "format", "." ]
python
train
django-danceschool/django-danceschool
danceschool/core/views.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/views.py#L150-L159
def get(self, request, *args, **kwargs): ''' Invoices can be viewed only if the validation string is provided, unless the user is logged in and has view_all_invoice permissions ''' user_has_validation_string = self.get_object().validationString user_has_permissions = request.user.has_perm('core.view_all_invoices') if request.GET.get('v', None) == user_has_validation_string or user_has_permissions: return super(ViewInvoiceView, self).get(request, *args, **kwargs) return self.handle_no_permission()
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "user_has_validation_string", "=", "self", ".", "get_object", "(", ")", ".", "validationString", "user_has_permissions", "=", "request", ".", "user", ".", "has_...
Invoices can be viewed only if the validation string is provided, unless the user is logged in and has view_all_invoice permissions
[ "Invoices", "can", "be", "viewed", "only", "if", "the", "validation", "string", "is", "provided", "unless", "the", "user", "is", "logged", "in", "and", "has", "view_all_invoice", "permissions" ]
python
train
sibirrer/lenstronomy
lenstronomy/ImSim/image_model.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/image_model.py#L178-L185
def data_response(self): """ returns the 1d array of the data element that is fitted for (including masking) :return: 1d numpy array """ d = self.ImageNumerics.image2array(self.Data.data * self.ImageNumerics.mask) return d
[ "def", "data_response", "(", "self", ")", ":", "d", "=", "self", ".", "ImageNumerics", ".", "image2array", "(", "self", ".", "Data", ".", "data", "*", "self", ".", "ImageNumerics", ".", "mask", ")", "return", "d" ]
returns the 1d array of the data element that is fitted for (including masking) :return: 1d numpy array
[ "returns", "the", "1d", "array", "of", "the", "data", "element", "that", "is", "fitted", "for", "(", "including", "masking", ")" ]
python
train
xray7224/PyPump
pypump/models/feed.py
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/feed.py#L80-L89
def get_obj_id(self, item): """ Get the id of a PumpObject. :param item: id string or PumpObject """ if item is not None: if isinstance(item, six.string_types): return item elif hasattr(item, 'id'): return item.id
[ "def", "get_obj_id", "(", "self", ",", "item", ")", ":", "if", "item", "is", "not", "None", ":", "if", "isinstance", "(", "item", ",", "six", ".", "string_types", ")", ":", "return", "item", "elif", "hasattr", "(", "item", ",", "'id'", ")", ":", "r...
Get the id of a PumpObject. :param item: id string or PumpObject
[ "Get", "the", "id", "of", "a", "PumpObject", "." ]
python
train
aws/aws-xray-sdk-python
aws_xray_sdk/core/models/entity.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/entity.py#L210-L231
def add_exception(self, exception, stack, remote=False): """ Add an exception to trace entities. :param Exception exception: the catched exception. :param list stack: the output from python built-in `traceback.extract_stack()`. :param bool remote: If False it means it's a client error instead of a downstream service. """ self._check_ended() self.add_fault_flag() if hasattr(exception, '_recorded'): setattr(self, 'cause', getattr(exception, '_cause_id')) return exceptions = [] exceptions.append(Throwable(exception, stack, remote)) self.cause['exceptions'] = exceptions self.cause['working_directory'] = os.getcwd()
[ "def", "add_exception", "(", "self", ",", "exception", ",", "stack", ",", "remote", "=", "False", ")", ":", "self", ".", "_check_ended", "(", ")", "self", ".", "add_fault_flag", "(", ")", "if", "hasattr", "(", "exception", ",", "'_recorded'", ")", ":", ...
Add an exception to trace entities. :param Exception exception: the catched exception. :param list stack: the output from python built-in `traceback.extract_stack()`. :param bool remote: If False it means it's a client error instead of a downstream service.
[ "Add", "an", "exception", "to", "trace", "entities", "." ]
python
train
lowandrew/OLCTools
spadespipeline/primer_finder_bbduk.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/primer_finder_bbduk.py#L166-L199
def primers(self): """ Read in the primer file, and create a properly formatted output file that takes any degenerate bases into account """ with open(self.formattedprimers, 'w') as formatted: for record in SeqIO.parse(self.primerfile, 'fasta'): # from https://stackoverflow.com/a/27552377 - find any degenerate bases in the primer sequence, and # create all possibilities as a list degenerates = Seq.IUPAC.IUPACData.ambiguous_dna_values try: primerlist = list(map("".join, product(*map(degenerates.get, str(record.seq))))) except TypeError: print("Invalid Primer Sequence: {seq}".format(seq=str(record.seq))) sys.exit() # As the record.id is being updated in the loop below, set the name of the primer here so that will # be able to be recalled when setting the new record.ids primername = record.id # Iterate through all the possible primers created from any degenerate bases for index, primer in enumerate(primerlist): # Update the primer name with the position in the list to keep the name unique record.id = primername + '_{index}'.format(index=index) # Clear the description, as, otherwise, it will be added, and there will be duplicate information record.description = '' # Create a seqrecord from the primer sequence record.seq = Seq.Seq(primer) # Write the properly-formatted records to file SeqIO.write(record, formatted, 'fasta') # Populate a dictionary to store the length of the primers - will be used in determining whether # BLAST hits are full-length self.faidict[record.id] = len(str(record.seq)) # Ensure that the kmer length used in the initial baiting is no larger than the shorted primer if len(str(record.seq)) < self.klength: self.klength = len(str(record.seq))
[ "def", "primers", "(", "self", ")", ":", "with", "open", "(", "self", ".", "formattedprimers", ",", "'w'", ")", "as", "formatted", ":", "for", "record", "in", "SeqIO", ".", "parse", "(", "self", ".", "primerfile", ",", "'fasta'", ")", ":", "# from http...
Read in the primer file, and create a properly formatted output file that takes any degenerate bases into account
[ "Read", "in", "the", "primer", "file", "and", "create", "a", "properly", "formatted", "output", "file", "that", "takes", "any", "degenerate", "bases", "into", "account" ]
python
train
abilian/abilian-core
abilian/i18n.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/i18n.py#L125-L147
def country_choices(first=None, default_country_first=True): """Return a list of (code, countries), alphabetically sorted on localized country name. :param first: Country code to be placed at the top :param default_country_first: :type default_country_first: bool """ locale = _get_locale() territories = [ (code, name) for code, name in locale.territories.items() if len(code) == 2 ] # skip 3-digit regions if first is None and default_country_first: first = default_country() def sortkey(item): if first is not None and item[0] == first: return "0" return to_lower_ascii(item[1]) territories.sort(key=sortkey) return territories
[ "def", "country_choices", "(", "first", "=", "None", ",", "default_country_first", "=", "True", ")", ":", "locale", "=", "_get_locale", "(", ")", "territories", "=", "[", "(", "code", ",", "name", ")", "for", "code", ",", "name", "in", "locale", ".", "...
Return a list of (code, countries), alphabetically sorted on localized country name. :param first: Country code to be placed at the top :param default_country_first: :type default_country_first: bool
[ "Return", "a", "list", "of", "(", "code", "countries", ")", "alphabetically", "sorted", "on", "localized", "country", "name", "." ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L2281-L2303
def dodirot(D, I, Dbar, Ibar): """ Rotate a direction (declination, inclination) by the difference between dec=0 and inc = 90 and the provided desired mean direction Parameters ---------- D : declination to be rotated I : inclination to be rotated Dbar : declination of desired mean Ibar : inclination of desired mean Returns ---------- drot, irot : rotated declination and inclination """ d, irot = dogeo(D, I, Dbar, 90. - Ibar) drot = d - 180. if drot < 360.: drot = drot + 360. if drot > 360.: drot = drot - 360. return drot, irot
[ "def", "dodirot", "(", "D", ",", "I", ",", "Dbar", ",", "Ibar", ")", ":", "d", ",", "irot", "=", "dogeo", "(", "D", ",", "I", ",", "Dbar", ",", "90.", "-", "Ibar", ")", "drot", "=", "d", "-", "180.", "if", "drot", "<", "360.", ":", "drot", ...
Rotate a direction (declination, inclination) by the difference between dec=0 and inc = 90 and the provided desired mean direction Parameters ---------- D : declination to be rotated I : inclination to be rotated Dbar : declination of desired mean Ibar : inclination of desired mean Returns ---------- drot, irot : rotated declination and inclination
[ "Rotate", "a", "direction", "(", "declination", "inclination", ")", "by", "the", "difference", "between", "dec", "=", "0", "and", "inc", "=", "90", "and", "the", "provided", "desired", "mean", "direction" ]
python
train
limpyd/redis-limpyd-jobs
limpyd_jobs/workers.py
https://github.com/limpyd/redis-limpyd-jobs/blob/264c71029bad4377d6132bf8bb9c55c44f3b03a2/limpyd_jobs/workers.py#L516-L529
def job_requeue_message(self, job, queue): """ Return the message to log when a job is requeued """ priority, delayed_until = job.hmget('priority', 'delayed_until') msg = '[%s|%s|%s] requeued with priority %s' args = [queue._cached_name, job.pk.get(), job._cached_identifier, priority] if delayed_until: msg += ', delayed until %s' args.append(delayed_until) return msg % tuple(args)
[ "def", "job_requeue_message", "(", "self", ",", "job", ",", "queue", ")", ":", "priority", ",", "delayed_until", "=", "job", ".", "hmget", "(", "'priority'", ",", "'delayed_until'", ")", "msg", "=", "'[%s|%s|%s] requeued with priority %s'", "args", "=", "[", "...
Return the message to log when a job is requeued
[ "Return", "the", "message", "to", "log", "when", "a", "job", "is", "requeued" ]
python
train
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/benchmark.py#L260-L265
def plot_diagram(config, results, images_dir, out_filename): """Plot one diagram""" img_files = plot_temp_diagrams(config, results, images_dir) join_images(img_files, out_filename) for img_file in img_files: os.remove(img_file)
[ "def", "plot_diagram", "(", "config", ",", "results", ",", "images_dir", ",", "out_filename", ")", ":", "img_files", "=", "plot_temp_diagrams", "(", "config", ",", "results", ",", "images_dir", ")", "join_images", "(", "img_files", ",", "out_filename", ")", "f...
Plot one diagram
[ "Plot", "one", "diagram" ]
python
train
log2timeline/dfvfs
dfvfs/helpers/file_system_searcher.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/file_system_searcher.py#L149-L160
def _CheckIsDevice(self, file_entry): """Checks the is_device find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not. """ if definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types: return False return file_entry.IsDevice()
[ "def", "_CheckIsDevice", "(", "self", ",", "file_entry", ")", ":", "if", "definitions", ".", "FILE_ENTRY_TYPE_DEVICE", "not", "in", "self", ".", "_file_entry_types", ":", "return", "False", "return", "file_entry", ".", "IsDevice", "(", ")" ]
Checks the is_device find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not.
[ "Checks", "the", "is_device", "find", "specification", "." ]
python
train
kwikteam/phy
phy/gui/actions.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/actions.py#L200-L246
def add(self, callback=None, name=None, shortcut=None, alias=None, docstring=None, menu=None, verbose=True): """Add an action with a keyboard shortcut.""" if callback is None: # Allow to use either add(func) or @add or @add(...). return partial(self.add, name=name, shortcut=shortcut, alias=alias, menu=menu) assert callback # Get the name from the callback function if needed. name = name or callback.__name__ alias = alias or _alias(name) name = name.replace('&', '') shortcut = shortcut or self._default_shortcuts.get(name, None) # Skip existing action. if name in self._actions_dict: return # Set the status tip from the function's docstring. docstring = docstring or callback.__doc__ or name docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip()) # Create and register the action. action = _create_qaction(self.gui, name, callback, shortcut, docstring=docstring, alias=alias, ) action_obj = Bunch(qaction=action, name=name, alias=alias, shortcut=shortcut, callback=callback, menu=menu) if verbose and not name.startswith('_'): logger.log(5, "Add action `%s` (%s).", name, _get_shortcut_string(action.shortcut())) self.gui.addAction(action) # Add the action to the menu. menu = menu or self.menu # Do not show private actions in the menu. if menu and not name.startswith('_'): self.gui.get_menu(menu).addAction(action) self._actions_dict[name] = action_obj # Register the alias -> name mapping. self._aliases[alias] = name # Set the callback method. if callback: setattr(self, name, callback)
[ "def", "add", "(", "self", ",", "callback", "=", "None", ",", "name", "=", "None", ",", "shortcut", "=", "None", ",", "alias", "=", "None", ",", "docstring", "=", "None", ",", "menu", "=", "None", ",", "verbose", "=", "True", ")", ":", "if", "cal...
Add an action with a keyboard shortcut.
[ "Add", "an", "action", "with", "a", "keyboard", "shortcut", "." ]
python
train
Alignak-monitoring/alignak
alignak/objects/schedulingitem.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L1974-L1995
def prepare_notification_for_sending(self, notif, contact, macromodulations, timeperiods, host_ref): """Used by scheduler when a notification is ok to be sent (to reactionner). Here we update the command with status of now, and we add the contact to set of contact we notified. And we raise the log entry :param notif: notification to send :type notif: alignak.objects.notification.Notification :param macromodulations: Macro modulations objects, used in the notification command :type macromodulations: alignak.objects.macromodulation.Macromodulations :param timeperiods: Timeperiods objects, used to get modulation period :type timeperiods: alignak.objects.timeperiod.Timeperiods :param host_ref: reference host (used for a service) :type host_ref: alignak.object.host.Host :return: None """ if notif.status == ACT_STATUS_POLLED: self.update_notification_command(notif, contact, macromodulations, timeperiods, host_ref) self.notified_contacts.add(contact.get_name()) self.notified_contacts_ids.add(contact.uuid) self.raise_notification_log_entry(notif, contact, host_ref)
[ "def", "prepare_notification_for_sending", "(", "self", ",", "notif", ",", "contact", ",", "macromodulations", ",", "timeperiods", ",", "host_ref", ")", ":", "if", "notif", ".", "status", "==", "ACT_STATUS_POLLED", ":", "self", ".", "update_notification_command", ...
Used by scheduler when a notification is ok to be sent (to reactionner). Here we update the command with status of now, and we add the contact to set of contact we notified. And we raise the log entry :param notif: notification to send :type notif: alignak.objects.notification.Notification :param macromodulations: Macro modulations objects, used in the notification command :type macromodulations: alignak.objects.macromodulation.Macromodulations :param timeperiods: Timeperiods objects, used to get modulation period :type timeperiods: alignak.objects.timeperiod.Timeperiods :param host_ref: reference host (used for a service) :type host_ref: alignak.object.host.Host :return: None
[ "Used", "by", "scheduler", "when", "a", "notification", "is", "ok", "to", "be", "sent", "(", "to", "reactionner", ")", ".", "Here", "we", "update", "the", "command", "with", "status", "of", "now", "and", "we", "add", "the", "contact", "to", "set", "of"...
python
train
materialsproject/pymatgen-db
matgendb/builders/incr.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/builders/incr.py#L249-L265
def update(self): """Update the position of the mark in the collection. :return: this object, for chaining :rtype: Mark """ rec = self._c.find_one({}, {self._fld: 1}, sort=[(self._fld, -1)], limit=1) if rec is None: self._pos = self._empty_pos() elif not self._fld in rec: _log.error("Tracking field not found. field={} collection={}" .format(self._fld, self._c.name)) _log.warn("Continuing without tracking") self._pos = self._empty_pos() else: self._pos = {self._fld: rec[self._fld]} return self
[ "def", "update", "(", "self", ")", ":", "rec", "=", "self", ".", "_c", ".", "find_one", "(", "{", "}", ",", "{", "self", ".", "_fld", ":", "1", "}", ",", "sort", "=", "[", "(", "self", ".", "_fld", ",", "-", "1", ")", "]", ",", "limit", "...
Update the position of the mark in the collection. :return: this object, for chaining :rtype: Mark
[ "Update", "the", "position", "of", "the", "mark", "in", "the", "collection", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/settings.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/settings.py#L138-L166
def get(self, key, local_default = None, required = False): """Get a parameter value. If parameter is not set, return `local_default` if it is not `None` or the PyXMPP global default otherwise. :Raise `KeyError`: if parameter has no value and no global default :Return: parameter value """ # pylint: disable-msg=W0221 if key in self._settings: return self._settings[key] if local_default is not None: return local_default if key in self._defs: setting_def = self._defs[key] if setting_def.default is not None: return setting_def.default factory = setting_def.factory if factory is None: return None value = factory(self) if setting_def.cache is True: setting_def.default = value return value if required: raise KeyError(key) return local_default
[ "def", "get", "(", "self", ",", "key", ",", "local_default", "=", "None", ",", "required", "=", "False", ")", ":", "# pylint: disable-msg=W0221", "if", "key", "in", "self", ".", "_settings", ":", "return", "self", ".", "_settings", "[", "key", "]", "if",...
Get a parameter value. If parameter is not set, return `local_default` if it is not `None` or the PyXMPP global default otherwise. :Raise `KeyError`: if parameter has no value and no global default :Return: parameter value
[ "Get", "a", "parameter", "value", "." ]
python
valid
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py#L162-L194
def order_lots(id_or_ins, amount, price=None, style=None): """ 指定手数发送买/卖单。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market order)。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` :param int amount: 下单量, 正数代表买入,负数代表卖出。将会根据一手xx股来向下调整到一手的倍数,比如中国A股就是调整成100股的倍数。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #买入20手的平安银行股票,并且发送市价单: order_lots('000001.XSHE', 20) #买入10手平安银行股票,并且发送限价单,价格为¥10: order_lots('000001.XSHE', 10, style=LimitOrder(10)) """ order_book_id = assure_stock_order_book_id(id_or_ins) round_lot = int(Environment.get_instance().get_instrument(order_book_id).round_lot) style = cal_style(price, style) return order_shares(id_or_ins, amount * round_lot, style=style)
[ "def", "order_lots", "(", "id_or_ins", ",", "amount", ",", "price", "=", "None", ",", "style", "=", "None", ")", ":", "order_book_id", "=", "assure_stock_order_book_id", "(", "id_or_ins", ")", "round_lot", "=", "int", "(", "Environment", ".", "get_instance", ...
指定手数发送买/卖单。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market order)。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` :param int amount: 下单量, 正数代表买入,负数代表卖出。将会根据一手xx股来向下调整到一手的倍数,比如中国A股就是调整成100股的倍数。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #买入20手的平安银行股票,并且发送市价单: order_lots('000001.XSHE', 20) #买入10手平安银行股票,并且发送限价单,价格为¥10: order_lots('000001.XSHE', 10, style=LimitOrder(10))
[ "指定手数发送买", "/", "卖单。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market", "order)。" ]
python
train
EventTeam/beliefs
src/beliefs/cells/bools.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/bools.py#L51-L54
def entails(self, other): """ Inverse is_entailed_by """ other = BoolCell.coerce(other) return other.is_entailed_by(self)
[ "def", "entails", "(", "self", ",", "other", ")", ":", "other", "=", "BoolCell", ".", "coerce", "(", "other", ")", "return", "other", ".", "is_entailed_by", "(", "self", ")" ]
Inverse is_entailed_by
[ "Inverse", "is_entailed_by" ]
python
train
estnltk/estnltk
estnltk/syntax/syntax_preprocessing.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/syntax/syntax_preprocessing.py#L280-L297
def _convert_punctuation( line ): ''' Converts given analysis line if it describes punctuation; Uses the set of predefined punctuation conversion rules from _punctConversions; _punctConversions should be a list of lists, where each outer list stands for a single conversion rule and inner list contains a pair of elements: first is the regexp pattern and the second is the replacement, used in re.sub( pattern, replacement, line ) Returns the converted line (same as input, if no conversion was performed); ''' for [pattern, replacement] in _punctConversions: lastline = line line = re.sub(pattern, replacement, line) if lastline != line: break return line
[ "def", "_convert_punctuation", "(", "line", ")", ":", "for", "[", "pattern", ",", "replacement", "]", "in", "_punctConversions", ":", "lastline", "=", "line", "line", "=", "re", ".", "sub", "(", "pattern", ",", "replacement", ",", "line", ")", "if", "las...
Converts given analysis line if it describes punctuation; Uses the set of predefined punctuation conversion rules from _punctConversions; _punctConversions should be a list of lists, where each outer list stands for a single conversion rule and inner list contains a pair of elements: first is the regexp pattern and the second is the replacement, used in re.sub( pattern, replacement, line ) Returns the converted line (same as input, if no conversion was performed);
[ "Converts", "given", "analysis", "line", "if", "it", "describes", "punctuation", ";", "Uses", "the", "set", "of", "predefined", "punctuation", "conversion", "rules", "from", "_punctConversions", ";", "_punctConversions", "should", "be", "a", "list", "of", "lists",...
python
train
jahuth/litus
__init__.py
https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/__init__.py#L543-L559
def recgen_enumerate(gen,n=tuple(), fix_type_errors=True): """ Iterates through generators recursively and flattens them. (see `recgen`) This function adds a tuple with enumerators on each generator visited. """ if not hasattr(gen,'__iter__'): yield (n,gen) else: try: for i_,i in enumerate(gen): for element in recgen_enumerate(i,n+(i_,)): yield element except TypeError: if not fix_type_errors: raise yield (n,gen)
[ "def", "recgen_enumerate", "(", "gen", ",", "n", "=", "tuple", "(", ")", ",", "fix_type_errors", "=", "True", ")", ":", "if", "not", "hasattr", "(", "gen", ",", "'__iter__'", ")", ":", "yield", "(", "n", ",", "gen", ")", "else", ":", "try", ":", ...
Iterates through generators recursively and flattens them. (see `recgen`) This function adds a tuple with enumerators on each generator visited.
[ "Iterates", "through", "generators", "recursively", "and", "flattens", "them", ".", "(", "see", "recgen", ")" ]
python
train
SmokinCaterpillar/pypet
pypet/utils/mpwrappers.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/utils/mpwrappers.py#L373-L392
def _req_rep_retry(self, request): """Returns response and number of retries""" retries_left = self.RETRIES while retries_left: self._logger.log(1, 'Sending REQ `%s`', request) self._send_request(request) socks = dict(self._poll.poll(self.TIMEOUT)) if socks.get(self._socket) == zmq.POLLIN: response = self._receive_response() self._logger.log(1, 'Received REP `%s`', response) return response, self.RETRIES - retries_left else: self._logger.debug('No response from server (%d retries left)' % retries_left) self._close_socket(confused=True) retries_left -= 1 if retries_left == 0: raise RuntimeError('Server seems to be offline!') time.sleep(self.SLEEP) self._start_socket()
[ "def", "_req_rep_retry", "(", "self", ",", "request", ")", ":", "retries_left", "=", "self", ".", "RETRIES", "while", "retries_left", ":", "self", ".", "_logger", ".", "log", "(", "1", ",", "'Sending REQ `%s`'", ",", "request", ")", "self", ".", "_send_req...
Returns response and number of retries
[ "Returns", "response", "and", "number", "of", "retries" ]
python
test
biolink/ontobio
ontobio/io/gafparser.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/io/gafparser.py#L73-L362
def parse_line(self, line): """ Parses a single line of a GAF Return a tuple `(processed_line, associations)`. Typically there will be a single association, but in some cases there may be none (invalid line) or multiple (disjunctive clause in annotation extensions) Note: most applications will only need to call this directly if they require fine-grained control of parsing. For most purposes, :method:`parse_file` can be used over the whole file Arguments --------- line : str A single tab-seperated line from a GAF file """ # Returns assocparser.ParseResult parsed = super().validate_line(line) if parsed: return parsed if self.is_header(line): return assocparser.ParseResult(line, [{ "header": True, "line": line.strip() }], False) vals = [el.strip() for el in line.split("\t")] # GAF v1 is defined as 15 cols, GAF v2 as 17. # We treat everything as GAF2 by adding two blank columns. # TODO: check header metadata to see if columns corresponds to declared dataformat version if 17 > len(vals) >= 15: vals += [""] * (17 - len(vals)) if len(vals) > 17: # If we see more than 17 columns, we will just cut off the columns after column 17 self.report.warning(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "", msg="There were more than 17 columns in this line. 
Proceeding by cutting off extra columns after column 17.", rule=1) vals = vals[:17] if len(vals) != 17: self.report.error(line, assocparser.Report.WRONG_NUMBER_OF_COLUMNS, "", msg="There were {columns} columns found in this line, and there should be 15 (for GAF v1) or 17 (for GAF v2)".format(columns=len(vals)), rule=1) return assocparser.ParseResult(line, [], True) [db, db_object_id, db_object_symbol, qualifier, goid, reference, evidence, withfrom, aspect, db_object_name, db_object_synonym, db_object_type, taxon, date, assigned_by, annotation_xp, gene_product_isoform] = vals split_line = assocparser.SplitLine(line=line, values=vals, taxon=taxon) ## check for missing columns if db == "": self.report.error(line, Report.INVALID_IDSPACE, "EMPTY", "col1 is empty", taxon=taxon, rule=1) return assocparser.ParseResult(line, [], True) if db_object_id == "": self.report.error(line, Report.INVALID_ID, "EMPTY", "col2 is empty", taxon=taxon, rule=1) return assocparser.ParseResult(line, [], True) if taxon == "": self.report.error(line, Report.INVALID_TAXON, "EMPTY", "taxon column is empty", taxon=taxon, rule=1) return assocparser.ParseResult(line, [], True) if reference == "": self.report.error(line, Report.INVALID_ID, "EMPTY", "reference column 6 is empty", taxon=taxon, rule=1) return assocparser.ParseResult(line, [], True) if self.config.group_idspace is not None and assigned_by not in self.config.group_idspace: self.report.warning(line, Report.INVALID_ID, assigned_by, "GORULE:0000027: assigned_by is not present in groups reference", taxon=taxon, rule=27) if self.config.entity_idspaces is not None and db not in self.config.entity_idspaces: # Are we a synonym? upgrade = self.config.entity_idspaces.reverse(db) if upgrade is not None: # If we found a synonym self.report.warning(line, Report.INVALID_ID_DBXREF, db, "GORULE:0000027: {} is a synonym for the correct ID {}, and has been updated".format(db, upgrade), taxon=taxon, rule=27) db = upgrade ## -- ## db + db_object_id. 
CARD=1 ## -- id = self._pair_to_id(db, db_object_id) if not self._validate_id(id, split_line, allowed_ids=self.config.entity_idspaces): return assocparser.ParseResult(line, [], True) # Using a given gpi file to validate the gene object if self.gpi is not None: entity = self.gpi.get(id, None) if entity is not None: db_object_symbol = entity["symbol"] db_object_name = entity["name"] db_object_synonym = entity["synonyms"] db_object_type = entity["type"] if not self._validate_id(goid, split_line, context=ANNOTATION): print("skipping because {} not validated!".format(goid)) return assocparser.ParseResult(line, [], True) valid_goid = self._validate_ontology_class_id(goid, split_line) if valid_goid == None: return assocparser.ParseResult(line, [], True) goid = valid_goid date = self._normalize_gaf_date(date, split_line) if date == None: return assocparser.ParseResult(line, [], True) vals[13] = date ecomap = self.config.ecomap if ecomap is not None: if ecomap.coderef_to_ecoclass(evidence, reference) is None: self.report.error(line, assocparser.Report.UNKNOWN_EVIDENCE_CLASS, evidence, msg="Expecting a known ECO GAF code, e.g ISS", rule=1) return assocparser.ParseResult(line, [], True) # Throw out the line if it uses GO_REF:0000033, see https://github.com/geneontology/go-site/issues/563#event-1519351033 if "GO_REF:0000033" in reference.split("|"): self.report.error(line, assocparser.Report.INVALID_ID, reference, msg="Disallowing GO_REF:0000033 in reference field as of 03/13/2018", rule=30) return assocparser.ParseResult(line, [], True) references = self.validate_pipe_separated_ids(reference, split_line) if references == None: # Reporting occurs in above function call return assocparser.ParseResult(line, [], True) # With/From withfroms = self.validate_pipe_separated_ids(withfrom, split_line, empty_allowed=True, extra_delims=",") if withfroms == None: # Reporting occurs in above function call return assocparser.ParseResult(line, [], True) # validation 
self._validate_symbol(db_object_symbol, split_line) # Example use case: mapping from UniProtKB to MOD ID if self.config.entity_map is not None: id = self.map_id(id, self.config.entity_map) toks = id.split(":") db = toks[0] db_object_id = toks[1:] vals[1] = db_object_id if goid.startswith("GO:") and aspect.upper() not in ["C", "F", "P"]: self.report.error(line, assocparser.Report.INVALID_ASPECT, aspect, rule=28) return assocparser.ParseResult(line, [], True) go_rule_results = qc.test_go_rules(vals, self.config) for rule_id, result in go_rule_results.items(): if result.result_type == qc.ResultType.WARNING: self.report.warning(line, assocparser.Report.VIOLATES_GO_RULE, goid, msg="{id}: {message}".format(id=rule_id, message=result.message), rule=int(rule_id.split(":")[1])) # Skip the annotation return assocparser.ParseResult(line, [], True) if result.result_type == qc.ResultType.ERROR: self.report.error(line, assocparser.Report.VIOLATES_GO_RULE, goid, msg="{id}: {message}".format(id=rule_id, message=result.message), rule=int(rule_id.split(":")[1])) # Skip the annotation return assocparser.ParseResult(line, [], True) ## -- ## end of line re-processing ## -- # regenerate line post-mapping line = "\t".join(vals) ## -- ## taxon CARD={1,2} ## -- ## if a second value is specified, this is the interacting taxon ## We do not use the second value taxons = taxon.split("|") normalized_taxon = self._taxon_id(taxons[0], split_line) if normalized_taxon == None: self.report.error(line, assocparser.Report.INVALID_TAXON, taxon, msg="Taxon ID is invalid") return assocparser.ParseResult(line, [], True) self._validate_taxon(normalized_taxon, split_line) interacting_taxon = None if len(taxons) == 2: interacting_taxon = self._taxon_id(taxons[1], split_line) if interacting_taxon == None: self.report.error(line, assocparser.Report.INVALID_TAXON, taxon, msg="Taxon ID is invalid") return assocparser.ParseResult(line, [], True) ## -- ## db_object_synonym CARD=0..* ## -- synonyms = 
db_object_synonym.split("|") if db_object_synonym == "": synonyms = [] ## -- ## parse annotation extension ## See appendix in http://doi.org/10.1186/1471-2105-15-155 ## -- object_or_exprs = self._parse_full_extension_expression(annotation_xp, line=split_line) ## -- ## qualifier ## -- negated, relation, other_qualifiers = self._parse_qualifier(qualifier, aspect) ## -- ## goid ## -- # TODO We shouldn't overload buildin keywords/functions object = {'id': goid, 'taxon': normalized_taxon} # construct subject dict subject = { 'id': id, 'label': db_object_symbol, 'type': db_object_type, 'fullname': db_object_name, 'synonyms': synonyms, 'taxon': { 'id': normalized_taxon } } ## -- ## gene_product_isoform ## -- ## This is mapped to a more generic concept of subject_extensions subject_extns = [] if gene_product_isoform is not None and gene_product_isoform != '': subject_extns.append({'property': 'isoform', 'filler': gene_product_isoform}) object_extensions = {} if object_or_exprs is not None and len(object_or_exprs) > 0: object_extensions['union_of'] = object_or_exprs ## -- ## evidence ## reference ## withfrom ## -- evidence_obj = { 'type': evidence, 'has_supporting_reference': references, 'with_support_from': withfroms } ## Construct main return dict assoc = { 'source_line': line, 'subject': subject, 'object': object, 'negated': negated, 'qualifiers': other_qualifiers, 'aspect': aspect, 'relation': { 'id': relation }, 'interacting_taxon': interacting_taxon, 'evidence': evidence_obj, 'provided_by': assigned_by, 'date': date, 'subject_extensions': subject_extns, 'object_extensions': object_extensions } return assocparser.ParseResult(line, [assoc], False, evidence.upper())
[ "def", "parse_line", "(", "self", ",", "line", ")", ":", "# Returns assocparser.ParseResult", "parsed", "=", "super", "(", ")", ".", "validate_line", "(", "line", ")", "if", "parsed", ":", "return", "parsed", "if", "self", ".", "is_header", "(", "line", ")...
Parses a single line of a GAF Return a tuple `(processed_line, associations)`. Typically there will be a single association, but in some cases there may be none (invalid line) or multiple (disjunctive clause in annotation extensions) Note: most applications will only need to call this directly if they require fine-grained control of parsing. For most purposes, :method:`parse_file` can be used over the whole file Arguments --------- line : str A single tab-seperated line from a GAF file
[ "Parses", "a", "single", "line", "of", "a", "GAF" ]
python
train
cdgriffith/Reusables
reusables/file_operations.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L796-L814
def safe_filename(filename, replacement="_"): """ Replace unsafe filename characters with underscores. Note that this does not test for "legal" names accepted, but a more restricted set of: Letters, numbers, spaces, hyphens, underscores and periods. :param filename: name of a file as a string :param replacement: character to use as a replacement of bad characters :return: safe filename string """ if not isinstance(filename, str): raise TypeError("filename must be a string") if regex.path.linux.filename.search(filename): return filename safe_name = "" for char in filename: safe_name += char if regex.path.linux.filename.search(char) \ else replacement return safe_name
[ "def", "safe_filename", "(", "filename", ",", "replacement", "=", "\"_\"", ")", ":", "if", "not", "isinstance", "(", "filename", ",", "str", ")", ":", "raise", "TypeError", "(", "\"filename must be a string\"", ")", "if", "regex", ".", "path", ".", "linux", ...
Replace unsafe filename characters with underscores. Note that this does not test for "legal" names accepted, but a more restricted set of: Letters, numbers, spaces, hyphens, underscores and periods. :param filename: name of a file as a string :param replacement: character to use as a replacement of bad characters :return: safe filename string
[ "Replace", "unsafe", "filename", "characters", "with", "underscores", ".", "Note", "that", "this", "does", "not", "test", "for", "legal", "names", "accepted", "but", "a", "more", "restricted", "set", "of", ":", "Letters", "numbers", "spaces", "hyphens", "under...
python
train
openstack/proliantutils
proliantutils/redfish/resources/system/storage/array_controller.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/array_controller.py#L74-L80
def logical_drives_maximum_size_mib(self): """Gets the biggest logical drive :returns the size in MiB. """ return utils.max_safe([member.logical_drives.maximum_size_mib for member in self.get_members()])
[ "def", "logical_drives_maximum_size_mib", "(", "self", ")", ":", "return", "utils", ".", "max_safe", "(", "[", "member", ".", "logical_drives", ".", "maximum_size_mib", "for", "member", "in", "self", ".", "get_members", "(", ")", "]", ")" ]
Gets the biggest logical drive :returns the size in MiB.
[ "Gets", "the", "biggest", "logical", "drive" ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/tasks.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/tasks.py#L3209-L3217
def gsr_path(self): """Absolute path of the GSR file. Empty string if file is not present.""" # Lazy property to avoid multiple calls to has_abiext. try: return self._gsr_path except AttributeError: path = self.outdir.has_abiext("GSR") if path: self._gsr_path = path return path
[ "def", "gsr_path", "(", "self", ")", ":", "# Lazy property to avoid multiple calls to has_abiext.", "try", ":", "return", "self", ".", "_gsr_path", "except", "AttributeError", ":", "path", "=", "self", ".", "outdir", ".", "has_abiext", "(", "\"GSR\"", ")", "if", ...
Absolute path of the GSR file. Empty string if file is not present.
[ "Absolute", "path", "of", "the", "GSR", "file", ".", "Empty", "string", "if", "file", "is", "not", "present", "." ]
python
train
Julius2342/pyvlx
old_api/pyvlx/interface.py
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/interface.py#L98-L105
def evaluate_response(json_response): """Evaluate rest response.""" if 'errors' in json_response and json_response['errors']: Interface.evaluate_errors(json_response) elif 'result' not in json_response: raise PyVLXException('no element result found in response: {0}'.format(json.dumps(json_response))) elif not json_response['result']: raise PyVLXException('Request failed {0}'.format(json.dumps(json_response)))
[ "def", "evaluate_response", "(", "json_response", ")", ":", "if", "'errors'", "in", "json_response", "and", "json_response", "[", "'errors'", "]", ":", "Interface", ".", "evaluate_errors", "(", "json_response", ")", "elif", "'result'", "not", "in", "json_response"...
Evaluate rest response.
[ "Evaluate", "rest", "response", "." ]
python
train
evolbioinfo/pastml
pastml/acr.py
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/acr.py#L137-L197
def reconstruct_ancestral_states(tree, character, states, prediction_method=MPPA, model=F81, params=None, avg_br_len=None, num_nodes=None, num_tips=None, force_joint=True): """ Reconstructs ancestral states for the given character on the given tree. :param character: character whose ancestral states are to be reconstructed. :type character: str :param tree: tree whose ancestral state are to be reconstructed, annotated with the feature specified as `character` containing node states when known. :type tree: ete3.Tree :param states: possible character states. :type states: numpy.array :param avg_br_len: (optional) average non-zero branch length for this tree. If not specified, will be calculated. :type avg_br_len: float :param model: (optional, default is F81) state evolution model to be used by PASTML. :type model: str :param prediction_method: (optional, default is MPPA) ancestral state prediction method to be used by PASTML. :type prediction_method: str :param num_nodes: (optional) total number of nodes in the given tree (including tips). If not specified, will be calculated. :type num_nodes: int :param num_tips: (optional) total number of tips in the given tree. If not specified, will be calculated. :type num_tips: int :param params: an optional way to fix some parameters, must be in a form {param: value}, where param can be a state (then the value should specify its frequency between 0 and 1), or "scaling factor" (then the value should be the scaling factor for three branches, e.g. set to 1 to keep the original branches). Could also be in a form path_to_param_file. Only makes sense for ML methods. :type params: dict or str :return: ACR result dictionary whose values depend on the prediction method. :rtype: dict """ logging.getLogger('pastml').debug('ACR settings for {}:\n\tMethod:\t{}{}.' 
.format(character, prediction_method, '\n\tModel:\t{}'.format(model) if model and is_ml(prediction_method) else '')) if COPY == prediction_method: return {CHARACTER: character, STATES: states, METHOD: prediction_method} if not num_nodes: num_nodes = sum(1 for _ in tree.traverse()) if not num_tips: num_tips = len(tree) if is_ml(prediction_method): if avg_br_len is None: avg_br_len = np.mean(n.dist for n in tree.traverse() if n.dist) freqs, sf, kappa = None, None, None if params is not None: freqs, sf, kappa = _parse_pastml_parameters(params, states) return ml_acr(tree=tree, character=character, prediction_method=prediction_method, model=model, states=states, avg_br_len=avg_br_len, num_nodes=num_nodes, num_tips=num_tips, freqs=freqs, sf=sf, kappa=kappa, force_joint=force_joint) if is_parsimonious(prediction_method): return parsimonious_acr(tree, character, prediction_method, states, num_nodes, num_tips) raise ValueError('Method {} is unknown, should be one of ML ({}), one of MP ({}) or {}' .format(prediction_method, ', '.join(ML_METHODS), ', '.join(MP_METHODS), COPY))
[ "def", "reconstruct_ancestral_states", "(", "tree", ",", "character", ",", "states", ",", "prediction_method", "=", "MPPA", ",", "model", "=", "F81", ",", "params", "=", "None", ",", "avg_br_len", "=", "None", ",", "num_nodes", "=", "None", ",", "num_tips", ...
Reconstructs ancestral states for the given character on the given tree. :param character: character whose ancestral states are to be reconstructed. :type character: str :param tree: tree whose ancestral state are to be reconstructed, annotated with the feature specified as `character` containing node states when known. :type tree: ete3.Tree :param states: possible character states. :type states: numpy.array :param avg_br_len: (optional) average non-zero branch length for this tree. If not specified, will be calculated. :type avg_br_len: float :param model: (optional, default is F81) state evolution model to be used by PASTML. :type model: str :param prediction_method: (optional, default is MPPA) ancestral state prediction method to be used by PASTML. :type prediction_method: str :param num_nodes: (optional) total number of nodes in the given tree (including tips). If not specified, will be calculated. :type num_nodes: int :param num_tips: (optional) total number of tips in the given tree. If not specified, will be calculated. :type num_tips: int :param params: an optional way to fix some parameters, must be in a form {param: value}, where param can be a state (then the value should specify its frequency between 0 and 1), or "scaling factor" (then the value should be the scaling factor for three branches, e.g. set to 1 to keep the original branches). Could also be in a form path_to_param_file. Only makes sense for ML methods. :type params: dict or str :return: ACR result dictionary whose values depend on the prediction method. :rtype: dict
[ "Reconstructs", "ancestral", "states", "for", "the", "given", "character", "on", "the", "given", "tree", "." ]
python
train
hydpy-dev/hydpy
hydpy/models/lland/lland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_model.py#L1414-L1520
def calc_qdgz1_qdgz2_v1(self): """Seperate total direct flow into a small and a fast component. Required control parameters: |A1| |A2| Required flux sequence: |QDGZ| Calculated state sequences: |QDGZ1| |QDGZ2| Basic equation: :math:`QDGZ2 = \\frac{(QDGZ-A2)^2}{QDGZ+A1-A2}` :math:`QDGZ1 = QDGZ - QDGZ1` Examples: The formula for calculating the amount of the fast component of direct flow is borrowed from the famous curve number approach. Parameter |A2| would be the initial loss and parameter |A1| the maximum storage, but one should not take this analogy too serious. Instead, with the value of parameter |A1| set to zero, parameter |A2| just defines the maximum amount of "slow" direct runoff per time step: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> a1(0.0) Let us set the value of |A2| to 4 mm/d, which is 2 mm/12h with respect to the selected simulation step size: >>> a2(4.0) >>> a2 a2(4.0) >>> a2.value 2.0 Define a test function and let it calculate |QDGZ1| and |QDGZ1| for values of |QDGZ| ranging from -10 to 100 mm/12h: >>> from hydpy import UnitTest >>> test = UnitTest(model, ... model.calc_qdgz1_qdgz2_v1, ... last_example=6, ... parseqs=(fluxes.qdgz, ... states.qdgz1, ... states.qdgz2)) >>> test.nexts.qdgz = -10.0, 0.0, 1.0, 2.0, 3.0, 100.0 >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 2.0 | 0.0 | | 5 | 3.0 | 2.0 | 1.0 | | 6 | 100.0 | 2.0 | 98.0 | Setting |A2| to zero and |A1| to 4 mm/d (or 2 mm/12h) results in a smoother transition: >>> a2(0.0) >>> a1(4.0) >>> test() | ex. 
| qdgz | qdgz1 | qdgz2 | -------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 0.666667 | 0.333333 | | 4 | 2.0 | 1.0 | 1.0 | | 5 | 3.0 | 1.2 | 1.8 | | 6 | 100.0 | 1.960784 | 98.039216 | Alternatively, one can mix these two configurations by setting the values of both parameters to 2 mm/h: >>> a2(2.0) >>> a1(2.0) >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 1.5 | 0.5 | | 5 | 3.0 | 1.666667 | 1.333333 | | 6 | 100.0 | 1.99 | 98.01 | Note the similarity of the results for very high values of total direct flow |QDGZ| in all three examples, which converge to the sum of the values of parameter |A1| and |A2|, representing the maximum value of `slow` direct flow generation per simulation step """ con = self.parameters.control.fastaccess flu = self.sequences.fluxes.fastaccess sta = self.sequences.states.fastaccess if flu.qdgz > con.a2: sta.qdgz2 = (flu.qdgz-con.a2)**2/(flu.qdgz+con.a1-con.a2) sta.qdgz1 = flu.qdgz-sta.qdgz2 else: sta.qdgz2 = 0. sta.qdgz1 = flu.qdgz
[ "def", "calc_qdgz1_qdgz2_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "sta", "=", "self", ".", "sequences", ".", "states", "....
Seperate total direct flow into a small and a fast component. Required control parameters: |A1| |A2| Required flux sequence: |QDGZ| Calculated state sequences: |QDGZ1| |QDGZ2| Basic equation: :math:`QDGZ2 = \\frac{(QDGZ-A2)^2}{QDGZ+A1-A2}` :math:`QDGZ1 = QDGZ - QDGZ1` Examples: The formula for calculating the amount of the fast component of direct flow is borrowed from the famous curve number approach. Parameter |A2| would be the initial loss and parameter |A1| the maximum storage, but one should not take this analogy too serious. Instead, with the value of parameter |A1| set to zero, parameter |A2| just defines the maximum amount of "slow" direct runoff per time step: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> a1(0.0) Let us set the value of |A2| to 4 mm/d, which is 2 mm/12h with respect to the selected simulation step size: >>> a2(4.0) >>> a2 a2(4.0) >>> a2.value 2.0 Define a test function and let it calculate |QDGZ1| and |QDGZ1| for values of |QDGZ| ranging from -10 to 100 mm/12h: >>> from hydpy import UnitTest >>> test = UnitTest(model, ... model.calc_qdgz1_qdgz2_v1, ... last_example=6, ... parseqs=(fluxes.qdgz, ... states.qdgz1, ... states.qdgz2)) >>> test.nexts.qdgz = -10.0, 0.0, 1.0, 2.0, 3.0, 100.0 >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 2.0 | 0.0 | | 5 | 3.0 | 2.0 | 1.0 | | 6 | 100.0 | 2.0 | 98.0 | Setting |A2| to zero and |A1| to 4 mm/d (or 2 mm/12h) results in a smoother transition: >>> a2(0.0) >>> a1(4.0) >>> test() | ex. 
| qdgz | qdgz1 | qdgz2 | -------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 0.666667 | 0.333333 | | 4 | 2.0 | 1.0 | 1.0 | | 5 | 3.0 | 1.2 | 1.8 | | 6 | 100.0 | 1.960784 | 98.039216 | Alternatively, one can mix these two configurations by setting the values of both parameters to 2 mm/h: >>> a2(2.0) >>> a1(2.0) >>> test() | ex. | qdgz | qdgz1 | qdgz2 | ------------------------------------- | 1 | -10.0 | -10.0 | 0.0 | | 2 | 0.0 | 0.0 | 0.0 | | 3 | 1.0 | 1.0 | 0.0 | | 4 | 2.0 | 1.5 | 0.5 | | 5 | 3.0 | 1.666667 | 1.333333 | | 6 | 100.0 | 1.99 | 98.01 | Note the similarity of the results for very high values of total direct flow |QDGZ| in all three examples, which converge to the sum of the values of parameter |A1| and |A2|, representing the maximum value of `slow` direct flow generation per simulation step
[ "Seperate", "total", "direct", "flow", "into", "a", "small", "and", "a", "fast", "component", "." ]
python
train
vintasoftware/django-role-permissions
rolepermissions/checkers.py
https://github.com/vintasoftware/django-role-permissions/blob/28924361e689e994e0c3575e18104a1a5abd8de6/rolepermissions/checkers.py#L38-L49
def has_object_permission(checker_name, user, obj): """Check if a user has permission to perform an action on an object.""" if user and user.is_superuser: return True checker = PermissionsManager.retrieve_checker(checker_name) user_roles = get_user_roles(user) if not user_roles: user_roles = [None] return any([checker(user_role, user, obj) for user_role in user_roles])
[ "def", "has_object_permission", "(", "checker_name", ",", "user", ",", "obj", ")", ":", "if", "user", "and", "user", ".", "is_superuser", ":", "return", "True", "checker", "=", "PermissionsManager", ".", "retrieve_checker", "(", "checker_name", ")", "user_roles"...
Check if a user has permission to perform an action on an object.
[ "Check", "if", "a", "user", "has", "permission", "to", "perform", "an", "action", "on", "an", "object", "." ]
python
train
PrefPy/prefpy
prefpy/mechanism.py
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1096-L1155
def coombstoc_winners(self, profile): """ Returns an integer list that represents all possible winners of a profile under Coombs rule. :ivar Profile profile: A Profile object that represents an election profile. """ ordering = profile.getOrderVectors() m = profile.numCands prefcounts = profile.getPreferenceCounts() rankmaps = profile.getRankMaps() if min(ordering[0]) == 0: startstate = set(range(m)) else: startstate = set(range(1, m + 1)) known_winners = set() # half = math.floor(n / 2.0) # ----------Some statistics-------------- hashtable2 = set() # push the node of start state into the priority queue root = Node(value=startstate) stackNode = [] stackNode.append(root) while stackNode: # ------------pop the current node---------------- node = stackNode.pop() # ------------------------------------------------- state = node.value.copy() # use heuristic to delete all the candidates which satisfy the following condition # goal state 1: if the state set contains only 1 candidate, then stop if len(state) == 1 and list(state)[0] not in known_winners: known_winners.add(list(state)[0]) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners: continue # ----------Compute plurality score for the current remaining candidates------------- reverse_veto_score = self.get_reverse_veto_scores2(prefcounts, rankmaps, state) # print("reverse_veto_score = ",reverse_veto_score) # if current state satisfies one of the 3 goal state, continue to the next loop # After using heuristics, generate children and push them into priority queue # frontier = [val for val in known_winners if val in state] + list(set(state) - set(known_winners)) maxscore = max(reverse_veto_score.values()) for to_be_deleted in state: if reverse_veto_score[to_be_deleted] == maxscore: child_state = state.copy() child_state.remove(to_be_deleted) tpc = tuple(sorted(child_state)) if tpc in hashtable2: continue else: hashtable2.add(tpc) child_node = 
Node(value=child_state) stackNode.append(child_node) return sorted(known_winners)
[ "def", "coombstoc_winners", "(", "self", ",", "profile", ")", ":", "ordering", "=", "profile", ".", "getOrderVectors", "(", ")", "m", "=", "profile", ".", "numCands", "prefcounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "rankmaps", "=", "profi...
Returns an integer list that represents all possible winners of a profile under Coombs rule. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "an", "integer", "list", "that", "represents", "all", "possible", "winners", "of", "a", "profile", "under", "Coombs", "rule", "." ]
python
train
django-blog-zinnia/cmsplugin-zinnia
cmsplugin_zinnia/menu.py
https://github.com/django-blog-zinnia/cmsplugin-zinnia/blob/7613c0d9ae29affe9ab97527e4b6d5bef124afdc/cmsplugin_zinnia/menu.py#L26-L67
def get_nodes(self, request): """ Return menu's node for entries """ nodes = [] archives = [] attributes = {'hidden': HIDE_ENTRY_MENU} for entry in Entry.published.all(): year = entry.creation_date.strftime('%Y') month = entry.creation_date.strftime('%m') month_text = format(entry.creation_date, 'b').capitalize() day = entry.creation_date.strftime('%d') key_archive_year = 'year-%s' % year key_archive_month = 'month-%s-%s' % (year, month) key_archive_day = 'day-%s-%s-%s' % (year, month, day) if key_archive_year not in archives: nodes.append(NavigationNode( year, reverse('zinnia:entry_archive_year', args=[year]), key_archive_year, attr=attributes)) archives.append(key_archive_year) if key_archive_month not in archives: nodes.append(NavigationNode( month_text, reverse('zinnia:entry_archive_month', args=[year, month]), key_archive_month, key_archive_year, attr=attributes)) archives.append(key_archive_month) if key_archive_day not in archives: nodes.append(NavigationNode( day, reverse('zinnia:entry_archive_day', args=[year, month, day]), key_archive_day, key_archive_month, attr=attributes)) archives.append(key_archive_day) nodes.append(NavigationNode(entry.title, entry.get_absolute_url(), entry.pk, key_archive_day)) return nodes
[ "def", "get_nodes", "(", "self", ",", "request", ")", ":", "nodes", "=", "[", "]", "archives", "=", "[", "]", "attributes", "=", "{", "'hidden'", ":", "HIDE_ENTRY_MENU", "}", "for", "entry", "in", "Entry", ".", "published", ".", "all", "(", ")", ":",...
Return menu's node for entries
[ "Return", "menu", "s", "node", "for", "entries" ]
python
train
saltstack/salt
salt/grains/core.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/core.py#L2403-L2410
def saltpath(): ''' Return the path of the salt module ''' # Provides: # saltpath salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir)) return {'saltpath': os.path.dirname(salt_path)}
[ "def", "saltpath", "(", ")", ":", "# Provides:", "# saltpath", "salt_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "__file__", ",", "os", ".", "path", ".", "pardir", ")", ")", "return", "{", "'saltpath'", ...
Return the path of the salt module
[ "Return", "the", "path", "of", "the", "salt", "module" ]
python
train
Kozea/cairocffi
cairocffi/pixbuf.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/pixbuf.py#L140-L187
def pixbuf_to_cairo_slices(pixbuf): """Convert from PixBuf to ImageSurface, using slice-based byte swapping. This method is 2~5x slower than GDK but does not support an alpha channel. (cairo uses pre-multiplied alpha, but not Pixbuf.) """ assert pixbuf.get_colorspace() == gdk_pixbuf.GDK_COLORSPACE_RGB assert pixbuf.get_n_channels() == 3 assert pixbuf.get_bits_per_sample() == 8 width = pixbuf.get_width() height = pixbuf.get_height() rowstride = pixbuf.get_rowstride() pixels = ffi.buffer(pixbuf.get_pixels(), pixbuf.get_byte_length()) # TODO: remove this when cffi buffers support slicing with a stride. pixels = pixels[:] # Convert GdkPixbuf’s big-endian RGBA to cairo’s native-endian ARGB cairo_stride = ImageSurface.format_stride_for_width( constants.FORMAT_RGB24, width) data = bytearray(cairo_stride * height) big_endian = sys.byteorder == 'big' pixbuf_row_length = width * 3 # stride == row_length + padding cairo_row_length = width * 4 # stride == row_length + padding alpha = b'\xff' * width # opaque for y in range(height): offset = rowstride * y end = offset + pixbuf_row_length red = pixels[offset:end:3] green = pixels[offset + 1:end:3] blue = pixels[offset + 2:end:3] offset = cairo_stride * y end = offset + cairo_row_length if big_endian: # pragma: no cover data[offset:end:4] = alpha data[offset + 1:end:4] = red data[offset + 2:end:4] = green data[offset + 3:end:4] = blue else: data[offset + 3:end:4] = alpha data[offset + 2:end:4] = red data[offset + 1:end:4] = green data[offset:end:4] = blue data = array('B', data) return ImageSurface(constants.FORMAT_RGB24, width, height, data, cairo_stride)
[ "def", "pixbuf_to_cairo_slices", "(", "pixbuf", ")", ":", "assert", "pixbuf", ".", "get_colorspace", "(", ")", "==", "gdk_pixbuf", ".", "GDK_COLORSPACE_RGB", "assert", "pixbuf", ".", "get_n_channels", "(", ")", "==", "3", "assert", "pixbuf", ".", "get_bits_per_s...
Convert from PixBuf to ImageSurface, using slice-based byte swapping. This method is 2~5x slower than GDK but does not support an alpha channel. (cairo uses pre-multiplied alpha, but not Pixbuf.)
[ "Convert", "from", "PixBuf", "to", "ImageSurface", "using", "slice", "-", "based", "byte", "swapping", "." ]
python
train
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L572-L582
def remove_invalid(self): """ Remove entities which declare themselves invalid Alters ---------- self.entities: shortened """ valid = np.array([i.is_valid for i in self.entities], dtype=np.bool) self.entities = self.entities[valid]
[ "def", "remove_invalid", "(", "self", ")", ":", "valid", "=", "np", ".", "array", "(", "[", "i", ".", "is_valid", "for", "i", "in", "self", ".", "entities", "]", ",", "dtype", "=", "np", ".", "bool", ")", "self", ".", "entities", "=", "self", "."...
Remove entities which declare themselves invalid Alters ---------- self.entities: shortened
[ "Remove", "entities", "which", "declare", "themselves", "invalid" ]
python
train
PyPSA/PyPSA
pypsa/components.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/components.py#L378-L406
def set_snapshots(self,snapshots): """ Set the snapshots and reindex all time-dependent data. This will reindex all pandas.Panels of time-dependent data; NaNs are filled with the default value for that quantity. Parameters ---------- snapshots : list or pandas.Index All time steps. Returns ------- None """ self.snapshots = pd.Index(snapshots) self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots,fill_value=1.) if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0': snapshots = pd.Index(snapshots.values) for component in self.all_components: pnl = self.pnl(component) attrs = self.components[component]["attrs"] for k,default in attrs.default[attrs.varying].iteritems(): pnl[k] = pnl[k].reindex(self.snapshots).fillna(default)
[ "def", "set_snapshots", "(", "self", ",", "snapshots", ")", ":", "self", ".", "snapshots", "=", "pd", ".", "Index", "(", "snapshots", ")", "self", ".", "snapshot_weightings", "=", "self", ".", "snapshot_weightings", ".", "reindex", "(", "self", ".", "snaps...
Set the snapshots and reindex all time-dependent data. This will reindex all pandas.Panels of time-dependent data; NaNs are filled with the default value for that quantity. Parameters ---------- snapshots : list or pandas.Index All time steps. Returns ------- None
[ "Set", "the", "snapshots", "and", "reindex", "all", "time", "-", "dependent", "data", "." ]
python
train
apache/incubator-mxnet
python/mxnet/autograd.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L70-L79
def is_recording(): """Get status on recording/not recording. Returns ------- Current state of recording. """ curr = ctypes.c_bool() check_call(_LIB.MXAutogradIsRecording(ctypes.byref(curr))) return curr.value
[ "def", "is_recording", "(", ")", ":", "curr", "=", "ctypes", ".", "c_bool", "(", ")", "check_call", "(", "_LIB", ".", "MXAutogradIsRecording", "(", "ctypes", ".", "byref", "(", "curr", ")", ")", ")", "return", "curr", ".", "value" ]
Get status on recording/not recording. Returns ------- Current state of recording.
[ "Get", "status", "on", "recording", "/", "not", "recording", "." ]
python
train
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L5668-L5696
def remove_datastore(datastore, service_instance=None): ''' Removes a datastore. If multiple datastores an error is raised. datastore Datastore name service_instance Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Default is None. .. code-block:: bash salt '*' vsphere.remove_datastore ds_name ''' log.trace('Removing datastore \'%s\'', datastore) target = _get_proxy_target(service_instance) datastores = salt.utils.vmware.get_datastores( service_instance, reference=target, datastore_names=[datastore]) if not datastores: raise VMwareObjectRetrievalError( 'Datastore \'{0}\' was not found'.format(datastore)) if len(datastores) > 1: raise VMwareObjectRetrievalError( 'Multiple datastores \'{0}\' were found'.format(datastore)) salt.utils.vmware.remove_datastore(service_instance, datastores[0]) return True
[ "def", "remove_datastore", "(", "datastore", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Removing datastore \\'%s\\''", ",", "datastore", ")", "target", "=", "_get_proxy_target", "(", "service_instance", ")", "datastores", "=", "sal...
Removes a datastore. If multiple datastores an error is raised. datastore Datastore name service_instance Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Default is None. .. code-block:: bash salt '*' vsphere.remove_datastore ds_name
[ "Removes", "a", "datastore", ".", "If", "multiple", "datastores", "an", "error", "is", "raised", "." ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L2534-L2551
def save_repo_cache(i): """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ r=save_json_to_file({'json_file':work['dir_cache_repo_uoa'], 'dict':cache_repo_uoa}) if r['return']>0: return r r=save_json_to_file({'json_file':work['dir_cache_repo_info'], 'dict':cache_repo_info}) if r['return']>0: return r return {'return':0}
[ "def", "save_repo_cache", "(", "i", ")", ":", "r", "=", "save_json_to_file", "(", "{", "'json_file'", ":", "work", "[", "'dir_cache_repo_uoa'", "]", ",", "'dict'", ":", "cache_repo_uoa", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return",...
Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
[ "Input", ":", "{}" ]
python
train
roboogle/gtkmvc3
gtkmvco/gtkmvc3/adapters/containers.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/adapters/containers.py#L242-L246
def _on_prop_changed(self, instance, meth_name, res, args, kwargs): """Called by the observation code, we are interested in __setitem__""" if not self._itsme and meth_name == "__setitem__": self.update_widget(args[0]) return
[ "def", "_on_prop_changed", "(", "self", ",", "instance", ",", "meth_name", ",", "res", ",", "args", ",", "kwargs", ")", ":", "if", "not", "self", ".", "_itsme", "and", "meth_name", "==", "\"__setitem__\"", ":", "self", ".", "update_widget", "(", "args", ...
Called by the observation code, we are interested in __setitem__
[ "Called", "by", "the", "observation", "code", "we", "are", "interested", "in", "__setitem__" ]
python
train
datastax/python-driver
cassandra/cluster.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L1228-L1251
def set_core_connections_per_host(self, host_distance, core_connections): """ Sets the minimum number of connections per Session that will be opened for each host with :class:`~.HostDistance` equal to `host_distance`. The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for :attr:`~HostDistance.REMOTE`. Protocol version 1 and 2 are limited in the number of concurrent requests they can send per connection. The driver implements connection pooling to support higher levels of concurrency. If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this is not supported (there is always one connection per host, unless the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) and using this will result in an :exc:`~.UnsupporteOperation`. """ if self.protocol_version >= 3: raise UnsupportedOperation( "Cluster.set_core_connections_per_host() only has an effect " "when using protocol_version 1 or 2.") old = self._core_connections_per_host[host_distance] self._core_connections_per_host[host_distance] = core_connections if old < core_connections: self._ensure_core_connections()
[ "def", "set_core_connections_per_host", "(", "self", ",", "host_distance", ",", "core_connections", ")", ":", "if", "self", ".", "protocol_version", ">=", "3", ":", "raise", "UnsupportedOperation", "(", "\"Cluster.set_core_connections_per_host() only has an effect \"", "\"w...
Sets the minimum number of connections per Session that will be opened for each host with :class:`~.HostDistance` equal to `host_distance`. The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for :attr:`~HostDistance.REMOTE`. Protocol version 1 and 2 are limited in the number of concurrent requests they can send per connection. The driver implements connection pooling to support higher levels of concurrency. If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this is not supported (there is always one connection per host, unless the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`) and using this will result in an :exc:`~.UnsupporteOperation`.
[ "Sets", "the", "minimum", "number", "of", "connections", "per", "Session", "that", "will", "be", "opened", "for", "each", "host", "with", ":", "class", ":", "~", ".", "HostDistance", "equal", "to", "host_distance", ".", "The", "default", "is", "2", "for", ...
python
train
inspirehep/refextract
refextract/references/engine.py
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L1364-L1375
def remove_leading_garbage_lines_from_reference_section(ref_sectn): """Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses. """ p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE) while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])): ref_sectn.pop(0) return ref_sectn
[ "def", "remove_leading_garbage_lines_from_reference_section", "(", "ref_sectn", ")", ":", "p_email", "=", "re", ".", "compile", "(", "ur'^\\s*e\\-?mail'", ",", "re", ".", "UNICODE", ")", "while", "ref_sectn", "and", "(", "ref_sectn", "[", "0", "]", ".", "isspace...
Sometimes, the first lines of the extracted references are completely blank or email addresses. These must be removed as they are not references. @param ref_sectn: (list) of strings - the reference section lines @return: (list) of strings - the reference section without leading blank lines or email addresses.
[ "Sometimes", "the", "first", "lines", "of", "the", "extracted", "references", "are", "completely", "blank", "or", "email", "addresses", ".", "These", "must", "be", "removed", "as", "they", "are", "not", "references", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/pages/api/tasks.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/api/tasks.py#L30-L103
def API_GET(self, courseid, taskid=None): # pylint: disable=arguments-differ """ List tasks available to the connected client. Returns a dict in the form :: { "taskid1": { "name": "Name of the course", #the name of the course "authors": [], "deadline": "", "status": "success" # can be "succeeded", "failed" or "notattempted" "grade": 0.0, "grade_weight": 0.0, "context": "" # context of the task, in RST "problems": # dict of the subproblems { # see the format of task.yaml for the content of the dict. Contains everything but # responses of multiple-choice and match problems. } } #... } If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id, this dict will contain one entry or the page will return 404 Not Found. """ try: course = self.course_factory.get_course(courseid) except: raise APINotFound("Course not found") if not self.user_manager.course_is_open_to_user(course, lti=False): raise APIForbidden("You are not registered to this course") if taskid is None: tasks = course.get_tasks() else: try: tasks = {taskid: course.get_task(taskid)} except: raise APINotFound("Task not found") output = [] for taskid, task in tasks.items(): task_cache = self.user_manager.get_task_cache(self.user_manager.session_username(), task.get_course_id(), task.get_id()) data = { "id": taskid, "name": task.get_name(self.user_manager.session_language()), "authors": task.get_authors(self.user_manager.session_language()), "deadline": task.get_deadline(), "status": "notviewed" if task_cache is None else "notattempted" if task_cache["tried"] == 0 else "succeeded" if task_cache["succeeded"] else "failed", "grade": task_cache.get("grade", 0.0) if task_cache is not None else 0.0, "grade_weight": task.get_grading_weight(), "context": task.get_context(self.user_manager.session_language()).original_content(), "problems": [] } for problem in task.get_problems(): pcontent = problem.get_original_content() pcontent["id"] = problem.get_id() if pcontent["type"] == "match": del 
pcontent["answer"] if pcontent["type"] == "multiple_choice": pcontent["choices"] = {key: val["text"] for key, val in enumerate(pcontent["choices"])} pcontent = self._check_for_parsable_text(pcontent) data["problems"].append(pcontent) output.append(data) return 200, output
[ "def", "API_GET", "(", "self", ",", "courseid", ",", "taskid", "=", "None", ")", ":", "# pylint: disable=arguments-differ", "try", ":", "course", "=", "self", ".", "course_factory", ".", "get_course", "(", "courseid", ")", "except", ":", "raise", "APINotFound"...
List tasks available to the connected client. Returns a dict in the form :: { "taskid1": { "name": "Name of the course", #the name of the course "authors": [], "deadline": "", "status": "success" # can be "succeeded", "failed" or "notattempted" "grade": 0.0, "grade_weight": 0.0, "context": "" # context of the task, in RST "problems": # dict of the subproblems { # see the format of task.yaml for the content of the dict. Contains everything but # responses of multiple-choice and match problems. } } #... } If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id, this dict will contain one entry or the page will return 404 Not Found.
[ "List", "tasks", "available", "to", "the", "connected", "client", ".", "Returns", "a", "dict", "in", "the", "form" ]
python
train
totalgood/twip
twip/wip/build_pycon_slides.py
https://github.com/totalgood/twip/blob/5c0411d2acfbe5b421841072814c9152591c03f7/twip/wip/build_pycon_slides.py#L25-L59
def parse_args(args): """ Parse command line parameters :param args: command line parameters as list of strings :return: command line parameters as :obj:`argparse.Namespace` """ parser = argparse.ArgumentParser( description="Build html reveal.js slides from markdown in docs/ dir") parser.add_argument( '-v', '--verbose', help='Whether to show progress messages on stdout, including HTML', action='store_true') parser.add_argument( '--version', help='print twip package version and exit.', action='version', version='twip {ver}'.format(ver=__version__)) parser.add_argument( '-b', '--blog_path', help='Path to source markdown files. Must contain an `images` subdir', default=BLOG_PATH) parser.add_argument( '-s', '--slide_path', help='Path to dir for output slides (HTML). An images subdir will be added. A slides subdir should already exist.', default=DOCS_PATH) parser.add_argument( '-p', '--presentation', help='Source markdown base file name (without .md extension). The HTML slides will share the same basename.', default='2015-10-27-Hacking-Oregon-Hidden-Political-Connections') return parser.parse_args(args)
[ "def", "parse_args", "(", "args", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Build html reveal.js slides from markdown in docs/ dir\"", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "help", "="...
Parse command line parameters :param args: command line parameters as list of strings :return: command line parameters as :obj:`argparse.Namespace`
[ "Parse", "command", "line", "parameters" ]
python
train
sorgerlab/indra
indra/sources/trips/analyze_ekbs.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/trips/analyze_ekbs.py#L137-L148
def check_event_coverage(patterns, event_list): """Calculate the ratio of patterns that were extracted.""" proportions = [] for pattern_list in patterns: proportion = 0 for pattern in pattern_list: for node in pattern.nodes(): if node in event_list: proportion += 1.0 / len(pattern_list) break proportions.append(proportion) return proportions
[ "def", "check_event_coverage", "(", "patterns", ",", "event_list", ")", ":", "proportions", "=", "[", "]", "for", "pattern_list", "in", "patterns", ":", "proportion", "=", "0", "for", "pattern", "in", "pattern_list", ":", "for", "node", "in", "pattern", ".",...
Calculate the ratio of patterns that were extracted.
[ "Calculate", "the", "ratio", "of", "patterns", "that", "were", "extracted", "." ]
python
train
angr/angr
angr/analyses/ddg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/ddg.py#L1316-L1331
def _stmt_graph_add_edge(self, src, dst, **edge_labels): """ Add an edge in the statement dependence graph from a program location `src` to another program location `dst`. :param CodeLocation src: Source node. :param CodeLocation dst: Destination node. :param edge_labels: All labels associated with the edge. :returns: None """ # Is that edge already in the graph ? # If at least one is new, then we are not redoing the same path again if src in self._stmt_graph and dst in self._stmt_graph[src]: return self._stmt_graph.add_edge(src, dst, **edge_labels)
[ "def", "_stmt_graph_add_edge", "(", "self", ",", "src", ",", "dst", ",", "*", "*", "edge_labels", ")", ":", "# Is that edge already in the graph ?", "# If at least one is new, then we are not redoing the same path again", "if", "src", "in", "self", ".", "_stmt_graph", "an...
Add an edge in the statement dependence graph from a program location `src` to another program location `dst`. :param CodeLocation src: Source node. :param CodeLocation dst: Destination node. :param edge_labels: All labels associated with the edge. :returns: None
[ "Add", "an", "edge", "in", "the", "statement", "dependence", "graph", "from", "a", "program", "location", "src", "to", "another", "program", "location", "dst", "." ]
python
train
python-visualization/folium
folium/map.py
https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/map.py#L148-L162
def render(self, **kwargs): """Renders the HTML representation of the element.""" for item in self._parent._children.values(): if not isinstance(item, Layer) or not item.control: continue key = item.layer_name if not item.overlay: self.base_layers[key] = item.get_name() if len(self.base_layers) > 1: self.layers_untoggle[key] = item.get_name() else: self.overlays[key] = item.get_name() if not item.show: self.layers_untoggle[key] = item.get_name() super(LayerControl, self).render()
[ "def", "render", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "item", "in", "self", ".", "_parent", ".", "_children", ".", "values", "(", ")", ":", "if", "not", "isinstance", "(", "item", ",", "Layer", ")", "or", "not", "item", ".", "co...
Renders the HTML representation of the element.
[ "Renders", "the", "HTML", "representation", "of", "the", "element", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/resnet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L690-L697
def resnet_imagenet_34_td_unit_05_05(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "unit" hp.targeting_rate = 0.5 hp.keep_prob = 0.5 return hp
[ "def", "resnet_imagenet_34_td_unit_05_05", "(", ")", ":", "hp", "=", "resnet_imagenet_34", "(", ")", "hp", ".", "use_td", "=", "\"unit\"", "hp", ".", "targeting_rate", "=", "0.5", "hp", ".", "keep_prob", "=", "0.5", "return", "hp" ]
Set of hyperparameters.
[ "Set", "of", "hyperparameters", "." ]
python
train
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1357-L1364
def settext(self, text, cls='current'): """Set the text for this element. Arguments: text (str): The text cls (str): The class of the text, defaults to ``current`` (leave this unless you know what you are doing). There may be only one text content element of each class associated with the element. """ self.replace(TextContent, value=text, cls=cls)
[ "def", "settext", "(", "self", ",", "text", ",", "cls", "=", "'current'", ")", ":", "self", ".", "replace", "(", "TextContent", ",", "value", "=", "text", ",", "cls", "=", "cls", ")" ]
Set the text for this element. Arguments: text (str): The text cls (str): The class of the text, defaults to ``current`` (leave this unless you know what you are doing). There may be only one text content element of each class associated with the element.
[ "Set", "the", "text", "for", "this", "element", "." ]
python
train
kolypto/py-good
good/schema/compiler.py
https://github.com/kolypto/py-good/blob/192ef19e79f6fd95c1cbd7c378a3074c7ad7a6d4/good/schema/compiler.py#L359-L397
def _compile_callable(self, schema): """ Compile callable: wrap exceptions with correct paths """ # Prepare self self.compiled_type = const.COMPILED_TYPE.CALLABLE self.name = get_callable_name(schema) # Error utils enrich_exception = lambda e, value: e.enrich( expected=self.name, provided=get_literal_name(value), path=self.path, validator=schema) # Validator def validate_with_callable(v): try: # Try this callable return schema(v) except Invalid as e: # Enrich & re-raise enrich_exception(e, v) raise except const.transformed_exceptions as e: message = _(u'{message}').format( Exception=type(e).__name__, message=six.text_type(e)) e = Invalid(message) raise enrich_exception(e, v) # Matcher if self.matcher: def match_with_callable(v): try: return True, validate_with_callable(v) except Invalid: return False, v return match_with_callable return validate_with_callable
[ "def", "_compile_callable", "(", "self", ",", "schema", ")", ":", "# Prepare self", "self", ".", "compiled_type", "=", "const", ".", "COMPILED_TYPE", ".", "CALLABLE", "self", ".", "name", "=", "get_callable_name", "(", "schema", ")", "# Error utils", "enrich_exc...
Compile callable: wrap exceptions with correct paths
[ "Compile", "callable", ":", "wrap", "exceptions", "with", "correct", "paths" ]
python
train
saltzm/yadi
yadi/datalog2sql/ast2sql/sql_generator.py
https://github.com/saltzm/yadi/blob/755790167c350e650c1e8b15c6f9209a97be9e42/yadi/datalog2sql/ast2sql/sql_generator.py#L234-L264
def get_explicit_constraints(self,constraints,var_dict): ''' Returns SQL representation of an explicit constraint. An explicit constraint is one of the form Element COMP Element type, explicitly listed in the conjunctive query. E.G. R(X,Y), Y>2 specifies that R.2 > 2 ''' constraints_strings = [] for constraint in constraints: ls = constraint.get_left_side() rs = constraint.get_right_side() if not (ls.is_variable() and constraint.is_equality_constraint()): ''' Every Var = Constant constraint where var is only in head. We don't want this type of constraints. We assume that if there is a Var = Constraint, var occurs in the head. Otherwise it would be unified in the preprocessor. ''' if ls.is_variable(): left_side = self.mapVariableToRelationDotField(ls, var_dict) elif ls.is_constant(): left_side = str(ls) if rs.is_variable(): right_side = self.mapVariableToRelationDotField(rs, var_dict) elif rs.is_constant(): right_side = str(rs) constraints_strings.append( str(left_side) + ' ' + str(constraint.get_operator()) + ' ' + str(right_side) ) return constraints_strings
[ "def", "get_explicit_constraints", "(", "self", ",", "constraints", ",", "var_dict", ")", ":", "constraints_strings", "=", "[", "]", "for", "constraint", "in", "constraints", ":", "ls", "=", "constraint", ".", "get_left_side", "(", ")", "rs", "=", "constraint"...
Returns SQL representation of an explicit constraint. An explicit constraint is one of the form Element COMP Element type, explicitly listed in the conjunctive query. E.G. R(X,Y), Y>2 specifies that R.2 > 2
[ "Returns", "SQL", "representation", "of", "an", "explicit", "constraint", ".", "An", "explicit", "constraint", "is", "one", "of", "the", "form", "Element", "COMP", "Element", "type", "explicitly", "listed", "in", "the", "conjunctive", "query", ".", "E", ".", ...
python
train
5monkeys/django-bananas
bananas/admin/api/views.py
https://github.com/5monkeys/django-bananas/blob/cfd318c737f6c4580036c13d2acf32bca96654bf/bananas/admin/api/views.py#L103-L117
def create(self, request): """ Change password for logged in django staff user """ # TODO: Decorate api with sensitive post parameters as Django admin do? password_form = PasswordChangeForm(request.user, data=request.data) if not password_form.is_valid(): raise serializers.ValidationError(password_form.errors) password_form.save() update_session_auth_hash(request, password_form.user) return Response(status=status.HTTP_204_NO_CONTENT)
[ "def", "create", "(", "self", ",", "request", ")", ":", "# TODO: Decorate api with sensitive post parameters as Django admin do?", "password_form", "=", "PasswordChangeForm", "(", "request", ".", "user", ",", "data", "=", "request", ".", "data", ")", "if", "not", "p...
Change password for logged in django staff user
[ "Change", "password", "for", "logged", "in", "django", "staff", "user" ]
python
test